Skip to content

Commit

Permalink
IL-239 add overlap_length_tokens to SteerableLongContextSummarize
Browse files Browse the repository at this point in the history
  • Loading branch information
FelixFehseTNG committed Feb 5, 2024
1 parent e9f8cb6 commit fdffb7a
Showing 1 changed file with 14 additions and 3 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@

from intelligence_layer.connectors import AlephAlphaClientProtocol
from intelligence_layer.core import ChunkInput, ChunkTask, Task, TaskSpan
from intelligence_layer.core.chunk import ChunkOutput, ChunkOverlapTask
from intelligence_layer.core.detect_language import Language
from intelligence_layer.use_cases.summarize.steerable_single_chunk_summarize import (
SteerableSingleChunkSummarize,
def __init__(
    self,
    client: AlephAlphaClientProtocol,
    max_generated_tokens: int,
    max_tokens_per_chunk: int,
    overlap_length_tokens: int = 0,
    model: str = "luminous-base-control",
    instruction_configs: Mapping[Language, str] = INSTRUCTION_CONFIGS,
) -> None:
    """Set up the summarizer's single-chunk summarize task and chunking task.

    Args:
        client: Client implementing the Aleph Alpha protocol, passed through
            to the summarize and chunk tasks.
        max_generated_tokens: Token budget for each generated chunk summary.
        max_tokens_per_chunk: Maximum size of each chunk produced by the
            chunking task.
        overlap_length_tokens: Number of tokens adjacent chunks share. A value
            of 0 (the default) disables overlapping and preserves the previous
            non-overlapping chunking behavior.
        model: Name of the model used for tokenization and summarization.
        instruction_configs: Per-language instruction prompts for the
            single-chunk summarize step.
    """
    super().__init__()
    self._summarize = SteerableSingleChunkSummarize(
        client, model, max_generated_tokens, instruction_configs
    )
    # Both chunkers satisfy Task[ChunkInput, ChunkOutput]; declare the
    # attribute type once so either branch assigns cleanly.
    self._chunk_task: Task[ChunkInput, ChunkOutput]
    if overlap_length_tokens == 0:
        # No overlap requested: keep the plain chunker, which has no
        # notion of overlapping chunks.
        self._chunk_task = ChunkTask(
            client, model=model, max_tokens_per_chunk=max_tokens_per_chunk
        )
    else:
        self._chunk_task = ChunkOverlapTask(
            client,
            model=model,
            max_tokens_per_chunk=max_tokens_per_chunk,
            overlap_length_tokens=overlap_length_tokens,
        )

def do_run(
self, input: LongContextSummarizeInput, task_span: TaskSpan
Expand Down

0 comments on commit fdffb7a

Please sign in to comment.