Merge pull request #17 from aurelio-labs/simonas/regex-chunker
feat: regex chunker
Showing 16 changed files with 441 additions and 292 deletions.
@@ -1,6 +1,6 @@
[tool.poetry]
name = "semantic-chunkers"
-version = "0.0.8"
+version = "0.0.9"
description = "Super advanced chunking methods for AI"
authors = ["Aurelio AI <[email protected]>"]
readme = "README.md"
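The only packaging change is the version bump from 0.0.8 to 0.0.9. Assuming the release is published to PyPI under the same package name (not confirmed by this diff), upgrading would look roughly like:

pip install --upgrade "semantic-chunkers==0.0.9"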
@@ -1,11 +1,13 @@
from semantic_chunkers.chunkers.base import BaseChunker
from semantic_chunkers.chunkers.consecutive import ConsecutiveChunker
from semantic_chunkers.chunkers.cumulative import CumulativeChunker
from semantic_chunkers.chunkers.regex import RegexChunker
from semantic_chunkers.chunkers.statistical import StatisticalChunker

__all__ = [
    "BaseChunker",
    "ConsecutiveChunker",
    "CumulativeChunker",
    "StatisticalChunker",
    "RegexChunker",
]
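Since RegexChunker is now re-exported here, it can be imported straight from the chunkers subpackage. A minimal sketch (assuming this hunk is semantic_chunkers/chunkers/__init__.py, as the import paths above imply):

from semantic_chunkers.chunkers import RegexChunker

chunker = RegexChunker()  # defaults below: RegexSplitter(), max_chunk_tokens=300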
@@ -0,0 +1,58 @@
import asyncio
from typing import List, Union

import regex

from semantic_chunkers.chunkers.base import BaseChunker
from semantic_chunkers.schema import Chunk
from semantic_chunkers.splitters import RegexSplitter
from semantic_chunkers.utils import text


class RegexChunker(BaseChunker):
    def __init__(
        self,
        splitter: RegexSplitter = RegexSplitter(),
        max_chunk_tokens: int = 300,
        delimiters: List[Union[str, regex.Pattern]] = [],
    ):
        super().__init__(name="regex_chunker", encoder=None, splitter=splitter)
        self.splitter: RegexSplitter = splitter
        self.max_chunk_tokens = max_chunk_tokens
        self.delimiters = delimiters

    def __call__(self, docs: list[str]) -> List[List[Chunk]]:
        chunks = []
        current_chunk = Chunk(
            splits=[],
            metadata={},
        )
        current_chunk.token_count = 0

        for doc in docs:
            sentences = self.splitter(doc, delimiters=self.delimiters)
            for sentence in sentences:
                sentence_token_count = text.tiktoken_length(sentence)
                if (
                    current_chunk.token_count + sentence_token_count
                    > self.max_chunk_tokens
                ):
                    if current_chunk.splits:
                        chunks.append(current_chunk)
                    current_chunk = Chunk(splits=[])
                    current_chunk.token_count = 0

                current_chunk.splits.append(sentence)
                if current_chunk.token_count is None:
                    current_chunk.token_count = 0
                current_chunk.token_count += sentence_token_count

        # Last chunk
        if current_chunk.splits:
            chunks.append(current_chunk)

        return [chunks]

    async def acall(self, docs: list[str]) -> List[List[Chunk]]:
        chunks = await asyncio.to_thread(self.__call__, docs)
        return chunks
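A rough usage sketch, not part of the diff; it assumes the Chunk.content attribute that the tests below use to read a chunk's text:

from semantic_chunkers.chunkers import RegexChunker

chunker = RegexChunker(max_chunk_tokens=50)
docs = ["First sentence. Second sentence. Third sentence."]
chunked = chunker(docs)      # List[List[Chunk]]; the outer list has a single entry
for chunk in chunked[0]:
    print(chunk.content)     # each chunk stays at or under max_chunk_tokens

Note that __call__ accumulates sentences from all documents into one list of chunks wrapped in a single outer list, and acall simply runs the same synchronous logic in a worker thread via asyncio.to_thread.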
@@ -0,0 +1,79 @@
from typing import List, Union

import regex

from semantic_chunkers.splitters.base import BaseSplitter


class RegexSplitter(BaseSplitter):
    """
    Enhanced regex pattern to split a given text into sentences more accurately.
    """

    regex_pattern = r"""
        # Negative lookbehind for word boundary, word char, dot, word char
        (?<!\b\w\.\w.)
        # Negative lookbehind for single uppercase initials like "A."
        (?<!\b[A-Z][a-z]\.)
        # Negative lookbehind for abbreviations like "U.S."
        (?<!\b[A-Z]\.)
        # Negative lookbehind for abbreviations with uppercase letters and dots
        (?<!\b\p{Lu}\.\p{Lu}.)
        # Negative lookbehind for numbers, to avoid splitting decimals
        (?<!\b\p{N}\.)
        # Positive lookbehind for punctuation followed by whitespace
        (?<=\.|\?|!|:|\.\.\.)\s+
        # Positive lookahead for uppercase letter or opening quote at word boundary
        (?="?(?=[A-Z])|"\b)
        # OR
        |
        # Splits after punctuation that follows closing punctuation, followed by
        # whitespace
        (?<=[\"\'\]\)\}][\.!?])\s+(?=[\"\'\(A-Z])
        # OR
        |
        # Splits after punctuation if not preceded by a period
        (?<=[^\.][\.!?])\s+(?=[A-Z])
        # OR
        |
        # Handles splitting after ellipses
        (?<=\.\.\.)\s+(?=[A-Z])
        # OR
        |
        # Matches and removes control characters and format characters
        [\p{Cc}\p{Cf}]+
        # OR
        |
        # Splits after punctuation marks followed by another punctuation mark
        (?<=[\.!?])(?=[\.!?])
        # OR
        |
        # Splits after exclamation or question marks followed by whitespace or end of string
        (?<=[!?])(?=\s|$)
        """

    def __call__(
        self, doc: str, delimiters: List[Union[str, regex.Pattern]] = []
    ) -> List[str]:
        if not delimiters:
            compiled_pattern = regex.compile(self.regex_pattern)
            delimiters.append(compiled_pattern)
        sentences = [doc]
        for delimiter in delimiters:
            sentences_for_next_delimiter = []
            for sentence in sentences:
                if isinstance(delimiter, regex.Pattern):
                    sub_sentences = regex.split(
                        self.regex_pattern, doc, flags=regex.VERBOSE
                    )
                    split_char = ""  # No single character to append for regex pattern
                else:
                    sub_sentences = sentence.split(delimiter)
                    split_char = delimiter
                for i, sub_sentence in enumerate(sub_sentences):
                    if i < len(sub_sentences) - 1:
                        sub_sentence += split_char  # Append delimiter to sub_sentence
                    if sub_sentence.strip():
                        sentences_for_next_delimiter.append(sub_sentence.strip())
            sentences = sentences_for_next_delimiter
        return sentences
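An illustrative sketch of calling the splitter directly (not part of the diff; the exact sentence boundaries depend on the verbose pattern above):

from semantic_chunkers.splitters import RegexSplitter

splitter = RegexSplitter()
# With no delimiters, the built-in sentence-boundary pattern is compiled and used.
sentences = splitter("This is a test. This is only a test! Is it?")
# Plain-string delimiters are also accepted and applied in order.
paragraphs = splitter("alpha\n\nbeta\n\ngamma", delimiters=["\n\n"])

One implementation detail worth noting: because delimiters defaults to a shared mutable list, the compiled default pattern is appended to that same list on the first no-argument call and reused on subsequent calls.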
This file was deleted.
File renamed without changes.
@@ -0,0 +1,48 @@
import asyncio
import unittest

from semantic_chunkers.chunkers.regex import RegexChunker
from semantic_chunkers.schema import Chunk
from semantic_chunkers.utils import text


class TestRegexChunker(unittest.TestCase):
    def setUp(self):
        self.chunker = RegexChunker(max_chunk_tokens=10)

    def test_call(self):
        docs = ["This is a test. This is only a test."]
        chunks_list = self.chunker(docs)
        chunks = chunks_list[0]

        self.assertIsInstance(chunks, list)
        self.assertTrue(all(isinstance(chunk, Chunk) for chunk in chunks))
        self.assertGreater(len(chunks), 0)
        self.assertTrue(
            all(
                text.tiktoken_length(chunk.content) <= self.chunker.max_chunk_tokens
                for chunk in chunks
            )
        )

    def test_acall(self):
        docs = ["This is a test. This is only a test."]

        async def run_test():
            chunks_list = await self.chunker.acall(docs)
            chunks = chunks_list[0]
            self.assertIsInstance(chunks, list)
            self.assertTrue(all(isinstance(chunk, Chunk) for chunk in chunks))
            self.assertGreater(len(chunks), 0)
            self.assertTrue(
                all(
                    text.tiktoken_length(chunk.content) <= self.chunker.max_chunk_tokens
                    for chunk in chunks
                )
            )

        asyncio.run(run_test())


if __name__ == "__main__":
    unittest.main()
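To run this test module on its own, the standard library runner is enough; the module path is assumed here since the rendered diff does not show the file name:

python -m unittest tests.unit.test_regex_chunker -v  # module path assumed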