Commit: Add encoded offsets for each chunksizer

Showing 4 changed files with 156 additions and 14 deletions.

@@ -1,31 +1,59 @@
use tiktoken_rs::CoreBPE;

-use crate::ChunkSizer;
+use crate::{ChunkSizer, EncodedOffsets};

impl ChunkSizer for CoreBPE {
    /// Return offsets for each unit of text used to calculate chunk size.
    /// Should return an exclusive byte range for each element counted.
    fn encoded_offsets(&self, chunk: &str) -> EncodedOffsets {
        encoded_offsets(self, chunk)
    }

    /// Returns the number of tokens in a given text after tokenization.
    ///
    /// # Panics
    ///
    /// Will panic if you don't have a byte-level tokenizer and the splitter
    /// encounters text it can't tokenize.
    fn chunk_size(&self, text: &str) -> usize {
        chunk_size(self, text)
    }
}

impl ChunkSizer for &CoreBPE {
    /// Return offsets for each unit of text used to calculate chunk size.
    /// Should return an exclusive byte range for each element counted.
    fn encoded_offsets(&self, chunk: &str) -> EncodedOffsets {
        encoded_offsets(self, chunk)
    }

    /// Returns the number of tokens in a given text after tokenization.
    ///
    /// # Panics
    ///
    /// Will panic if you don't have a byte-level tokenizer and the splitter
    /// encounters text it can't tokenize.
    fn chunk_size(&self, text: &str) -> usize {
        chunk_size(self, text)
    }
}

fn encoded_offsets(bpe: &CoreBPE, chunk: &str) -> EncodedOffsets {
    let tokens = bpe.encode_ordinary(chunk);
    let decoded = bpe
        ._decode_native_and_split(tokens)
        .scan(0usize, |offset, bytes| {
            let end = *offset + bytes.len();
            let item = *offset..end;
            *offset = end;
            Some(item)
        });
    EncodedOffsets::new(decoded.collect())
}

fn chunk_size(bpe: &CoreBPE, text: &str) -> usize {
    bpe.encode_ordinary(text).len()
}

#[cfg(test)]
mod tests {
    use super::*;

    use tiktoken_rs::cl100k_base;

    #[test]
    fn returns_offsets() {
        let offsets = cl100k_base().unwrap().encoded_offsets("An apple a");
        assert_eq!(offsets, EncodedOffsets::new(vec![0..2, 2..8, 8..10]));
    }
}
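
The offset computation above is just a running sum over the byte length of each decoded token. Below is a minimal standalone sketch of that `scan` pattern; the `byte_ranges` helper and the hard-coded byte pieces are illustrative only (the pieces mirror how `cl100k_base` splits "An apple a", per the `returns_offsets` test), not part of this commit.

```rust
use std::ops::Range;

// Hypothetical helper mirroring the `scan` inside `encoded_offsets`:
// each piece's byte length advances a running offset, yielding an
// exclusive byte range per piece.
fn byte_ranges<'a>(pieces: impl IntoIterator<Item = &'a [u8]>) -> Vec<Range<usize>> {
    pieces
        .into_iter()
        .scan(0usize, |offset, bytes| {
            let end = *offset + bytes.len();
            let item = *offset..end;
            *offset = end;
            Some(item)
        })
        .collect()
}

fn main() {
    // cl100k_base tokenizes "An apple a" into "An", " apple", " a".
    let pieces: Vec<&[u8]> = vec![b"An", b" apple", b" a"];
    assert_eq!(byte_ranges(pieces), vec![0..2, 2..8, 8..10]);
}
```

Routing both trait impls through shared free functions keeps the `CoreBPE` and `&CoreBPE` implementations from drifting apart, which is why both `encoded_offsets` and `chunk_size` delegate to the same helpers here.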