added break for recursive summary when number of partial summary doesn't change
JohannesWesch authored and FelixFehseTNG committed Feb 6, 2024
1 parent 128e8cd commit e15956a
Showing 4 changed files with 29 additions and 8 deletions.
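In plain terms, the commit guards RecursiveSummarize against a loop that never terminates: the task re-summarizes its own output until a single partial summary remains, but when a pass yields exactly as many partial summaries as the pass before, the text has stopped shrinking and no further pass can make progress. A minimal sketch of the guard, with a hypothetical summarize_in_chunks callable standing in for the long-context summarize task (illustrative names, not the library's API):

# Minimal sketch of the new termination guard. `summarize_in_chunks`
# is a hypothetical stand-in that returns one partial summary string
# per chunk of its input.
from typing import Callable

def recursive_summarize(
    text: str, summarize_in_chunks: Callable[[str], list[str]]
) -> str:
    num_partial_summaries = 0
    while True:
        partials = summarize_in_chunks(text)
        # New guard: the same number of partial summaries as the last
        # pass means the text is no longer shrinking, so stop here
        # instead of spinning forever.
        if num_partial_summaries == len(partials):
            break
        num_partial_summaries = len(partials)
        text = "\n".join(partials)
        if len(partials) == 1:
            break
    return text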
src/intelligence_layer/core/chunk.py
4 changes: 1 addition & 3 deletions

@@ -96,9 +96,7 @@ def __init__(
                 overlap_length_tokens, max_tokens_per_chunk
             )
         )
-        self.chunk_task = ChunkTask(
-            client, model, overlap_length_tokens // 2
-        )
+        self.chunk_task = ChunkTask(client, model, overlap_length_tokens // 2)
         self.tokenizer = client.tokenizer(model)
         self.max_tokens_per_chunk = max_tokens_per_chunk
         self.overlap_length_tokens = overlap_length_tokens
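The chunk.py change is formatting only, but it highlights that the chunker is constructed with half the requested overlap. A plausible reading, offered as an assumption rather than the library's actual strategy: chunks behave like sliding windows whose stride is the chunk size minus the overlap, so consecutive chunks share overlap_length_tokens tokens and the OVERLAP // 2 tail checked in the tests reappears in the next window. A toy sketch under that assumption:

# Toy sliding-window chunker (an assumption about the intent, not the
# library's implementation): consecutive windows share `overlap` tokens.

def overlapped_chunks(
    tokens: list[str], max_tokens: int, overlap: int
) -> list[list[str]]:
    assert 0 <= overlap < max_tokens
    stride = max_tokens - overlap
    return [
        tokens[start : start + max_tokens]
        for start in range(0, max(1, len(tokens) - overlap), stride)
    ]

# Example: 4-token windows over 8 tokens with overlap 2 share two tokens
# at every boundary.
assert overlapped_chunks([f"t{i}" for i in range(8)], 4, 2) == [
    ["t0", "t1", "t2", "t3"],
    ["t2", "t3", "t4", "t5"],
    ["t4", "t5", "t6", "t7"],
]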
src/intelligence_layer/use_cases/summarize/recursive_summarize.py
8 changes: 5 additions & 3 deletions

@@ -27,20 +27,22 @@ def __init__(
     def do_run(
         self, input: LongContextSummarizeInput, task_span: TaskSpan
     ) -> SummarizeOutput:
+        num_partial_summaries = 0
         text = input.text
-        loop_count = 0
         while True:
             summarize_output = self.long_context_summarize_task.run(
                 LongContextSummarizeInput(text=text, language=input.language), task_span
             )
+            if num_partial_summaries == len(summarize_output.partial_summaries):
+                break
+            num_partial_summaries = len(summarize_output.partial_summaries)
+
             num_generated_tokens = 0
             text = ""
             for partial_summary in summarize_output.partial_summaries:
                 num_generated_tokens += partial_summary.generated_tokens
                 text += partial_summary.summary + "\n"
 
-            loop_count += 1
-
             if len(summarize_output.partial_summaries) == 1:
                 break
 
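Why the count can stall at all: each pass compresses every chunk to at most max_generated_tokens tokens, and the concatenated summaries are re-chunked into pieces of at most max_tokens_per_chunk. With the parameters the new test uses further down (summaries of up to 75 tokens, chunks of 145 tokens), two partial summaries concatenate to roughly 150 tokens, which re-chunks into two chunks again. A small simulation under a deliberately simplified token-count model (fixed summary length and ceiling-division re-chunking are assumptions):

# Simplified model of how the number of partial summaries evolves per
# pass: every summary is assumed to be exactly `summary_tokens` long
# and re-chunking is pure ceiling division.

def partials_after_pass(
    num_partials: int, summary_tokens: int = 75, chunk_tokens: int = 145
) -> int:
    return -(-(num_partials * summary_tokens) // chunk_tokens)  # ceil division

counts = [10]
while counts[-1] != 1:
    nxt = partials_after_pass(counts[-1])
    if nxt == counts[-1]:  # the new guard: no progress, stop
        break
    counts.append(nxt)

print(counts)  # [10, 6, 4, 3, 2] -- the next pass would give 2 again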
tests/core/test_chunk.py
5 changes: 3 additions & 2 deletions

@@ -37,14 +37,15 @@ def test_overlapped_chunking(
         print(first)
 
         assert (
-            len(first) <= MAX_TOKENS + 2
+            len(first)
+            <= MAX_TOKENS + 2
             # `+2` because re-tokenizing the chunk can add a few extra tokens at
             # the beginning or end of each chunk. This is a hack.
         )
         next = output_tokenized[chunk_index + 1].tokens
 
         found = False
-        for offset in range(len(next)-OVERLAP//2):
+        for offset in range(len(next) - OVERLAP // 2):
             if first[-OVERLAP // 2 :] != next[offset : offset + OVERLAP // 2]:
                 continue
             found = True
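The reformatted loop spells the check out: take the trailing OVERLAP // 2 tokens of one chunk and slide a window across the next chunk until that run of tokens shows up. A self-contained illustration with toy data (hypothetical token lists, mirroring the test's scan):

# Toy version of the overlap scan from test_overlapped_chunking.
OVERLAP = 4

first = ["a", "b", "c", "d", "e", "f"]
next_chunk = ["e", "f", "g", "h", "i"]

tail = first[-OVERLAP // 2 :]  # the last two tokens: ["e", "f"]
found = False
for offset in range(len(next_chunk) - OVERLAP // 2):
    if tail != next_chunk[offset : offset + OVERLAP // 2]:
        continue
    found = True
    break

assert found  # the shared tokens reappear at the start of the next chunk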
tests/use_cases/summarize/test_recursive_summarize.py
20 changes: 20 additions & 0 deletions

@@ -4,12 +4,18 @@
 from aleph_alpha_client import Client, CompletionRequest, CompletionResponse
 from pytest import fixture
 
+from intelligence_layer.connectors.limited_concurrency_client import (
+    AlephAlphaClientProtocol,
+)
 from intelligence_layer.core import NoOpTracer
 from intelligence_layer.use_cases import (
     LongContextHighCompressionSummarize,
     LongContextSummarizeInput,
     RecursiveSummarize,
 )
+from intelligence_layer.use_cases.summarize.steerable_long_context_summarize import (
+    SteerableLongContextSummarize,
+)
 
 
 class RecursiveCountingClient(Client):
@@ -52,6 +58,20 @@ def test_recursive_summarize_stops_when_hitting_max_tokens(
     assert "new orleans" in output.summary.lower()
 
 
+def test_recursive_summarize_stops_when_num_partial_summaries_stays_same(
+    client: AlephAlphaClientProtocol,
+) -> None:
+    max_tokens = None
+    slcs = SteerableLongContextSummarize(
+        client, model="luminous-base", max_generated_tokens=75, max_tokens_per_chunk=145
+    )
+    input = LongContextSummarizeInput(text=short_text, max_tokens=max_tokens)
+    task = RecursiveSummarize(slcs)
+    output = task.run(input, NoOpTracer())
+
+    assert output.generated_tokens > 145
+
+
 def test_recursive_summarize_stops_after_one_chunk(
     recursive_counting_client: RecursiveCountingClient,
 ) -> None:
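A note on the parameters in the new test: max_generated_tokens=75 and max_tokens_per_chunk=145 appear chosen so that two 75-token partial summaries (150 tokens in total) can never fit into a single 145-token chunk. The partial-summary count therefore stalls at two, only the newly added break can end the loop, and the final pass still emits more than one chunk's worth of summary, which is what the assertion output.generated_tokens > 145 checks. Against the parent commit, the same input would presumably have looped indefinitely.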