Rotating kv cache positions (#2901)
* Retrieve the current positions for rotating KV caches. * Add the function to the kv cache too. * More testing.
This commit is contained in:
parent
76e565c4ab
commit
7f0f83a7c1
|
@ -294,6 +294,27 @@ impl RotatingCache {
|
|||
Tensor::from_slice(&mask, (size1, size2), device)
|
||||
}
|
||||
|
||||
/// Returns the positions corresponding to all the elements that will be retured
|
||||
/// *after* adding `seq_len` to the cache.
|
||||
pub fn positions(&self, seq_len: usize) -> Vec<usize> {
|
||||
if seq_len <= self.max_seq_len {
|
||||
let upd_offset = (self.offset + seq_len) % self.max_seq_len;
|
||||
let cache_out_len = (self.current_seq_len + seq_len).min(self.max_seq_len);
|
||||
(0..cache_out_len)
|
||||
.map(|i| {
|
||||
let pos_cache = self.current_seq_len + seq_len + i - upd_offset;
|
||||
if i < upd_offset {
|
||||
pos_cache
|
||||
} else {
|
||||
pos_cache - self.max_seq_len
|
||||
}
|
||||
})
|
||||
.collect()
|
||||
} else {
|
||||
(self.current_seq_len..(self.current_seq_len + seq_len)).collect()
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the attn_mask to be applied *after* adding `seq_len` to the cache.
|
||||
pub fn attn_mask(&self, seq_len: usize, device: &Device) -> Result<Option<Tensor>> {
|
||||
let mask = if seq_len == 1 {
|
||||
|
@ -362,10 +383,17 @@ impl RotatingKvCache {
|
|||
self.k.current_seq_len()
|
||||
}
|
||||
|
||||
/// Returns the attn_mask to be applied *after* adding `seq_len` to the cache.
///
/// Delegates to the key cache's mask computation.
pub fn attn_mask(&self, seq_len: usize, device: &Device) -> Result<Option<Tensor>> {
    self.k.attn_mask(seq_len, device)
}
|
||||
|
||||
/// Returns the positions corresponding to all the elements that will be returned
/// *after* adding `seq_len` to the cache.
///
/// Delegates to the key cache; presumably k and v are appended in lockstep, so
/// the key cache's positions apply to both — TODO confirm against `append`.
pub fn positions(&self, seq_len: usize) -> Vec<usize> {
    self.k.positions(seq_len)
}
|
||||
|
||||
pub fn reset(&mut self) {
|
||||
self.k.reset();
|
||||
self.v.reset();
|
||||
|
|
|
@ -39,9 +39,16 @@ fn rotating_kv_cache() -> Result<()> {
|
|||
assert_eq!(cache.current_seq_len(), 0);
|
||||
let data = cache.current_data()?;
|
||||
assert!(data.is_none());
|
||||
assert_eq!(cache.positions(1), &[0]);
|
||||
assert_eq!(cache.positions(2), &[0, 1]);
|
||||
let t = Tensor::new(&[1., 2., 3.], &Device::Cpu)?;
|
||||
let data = cache.append(&t)?;
|
||||
assert_eq!(data.to_vec1::<f64>()?, [1., 2., 3.]);
|
||||
assert_eq!(cache.positions(0), &[0, 1, 2]);
|
||||
assert_eq!(cache.positions(1), &[0, 1, 2, 3]);
|
||||
assert_eq!(cache.positions(2), &[0, 1, 2, 3, 4]);
|
||||
assert_eq!(cache.positions(3), &[0, 1, 2, 3, 4, 5]);
|
||||
assert_eq!(cache.positions(4), &[6, 1, 2, 3, 4, 5]);
|
||||
let t = Tensor::new(&[4.], &Device::Cpu)?;
|
||||
let data = cache.append(&t)?;
|
||||
assert_eq!(data.to_vec1::<f64>()?, [1., 2., 3., 4.]);
|
||||
|
@ -79,11 +86,17 @@ fn rotating_kv_cache() -> Result<()> {
|
|||
mask.to_vec2::<u8>()?,
|
||||
&[[0, 0, 1, 1, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0]],
|
||||
);
|
||||
assert_eq!(cache.positions(0), &[12, 7, 8, 9, 10, 11]);
|
||||
assert_eq!(cache.positions(2), &[12, 13, 14, 9, 10, 11]);
|
||||
assert_eq!(cache.positions(3), &[12, 13, 14, 15, 10, 11]);
|
||||
assert_eq!(cache.positions(8), &[13, 14, 15, 16, 17, 18, 19, 20]);
|
||||
let t = Tensor::new(&[0., 1., 2., 3., 4., 5., 6., 7., 8.], &Device::Cpu)?;
|
||||
let data = cache.append(&t)?;
|
||||
assert_eq!(data.to_vec1::<f64>()?, [0., 1., 2., 3., 4., 5., 6., 7., 8.]);
|
||||
assert_eq!(cache.current_seq_len(), 22);
|
||||
assert_eq!(cache.offset(), 0);
|
||||
assert_eq!(cache.positions(0), &[16, 17, 18, 19, 20, 21]);
|
||||
assert_eq!(cache.positions(1), &[22, 17, 18, 19, 20, 21]);
|
||||
|
||||
let mask = cache.attn_mask(1, &Device::Cpu)?;
|
||||
assert!(mask.is_none());
|
||||
|
|
Loading…
Reference in New Issue