
Commit

Fix/app runner typo (langgenius#2661)
kingo233 authored Mar 4, 2024
1 parent ee8ae23 commit 51618fa
Showing 4 changed files with 4 additions and 4 deletions.
2 changes: 1 addition & 1 deletion api/core/app_runner/app_runner.py
@@ -84,7 +84,7 @@ def get_pre_calculate_rest_tokens(self, app_record: App,

return rest_tokens

- def recale_llm_max_tokens(self, model_config: ModelConfigEntity,
+ def recalc_llm_max_tokens(self, model_config: ModelConfigEntity,
prompt_messages: list[PromptMessage]):
# recalc max_tokens if sum(prompt_token + max_tokens) over model token limit
model_type_instance = model_config.provider_model_bundle.model_type_instance
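
For context, the renamed method's comment says it recalculates max_tokens when the prompt tokens plus the requested max_tokens would exceed the model's token limit. Below is a minimal sketch of that idea using hypothetical names and an arbitrary 16-token floor; it is not the repository's implementation, which lives in app_runner.py and may differ.

```python
# Hypothetical sketch (not the actual Dify code): clamp max_tokens so that
# prompt_tokens + max_tokens stays within the model's context window.

def recalc_max_tokens_sketch(prompt_tokens: int,
                             requested_max_tokens: int,
                             model_context_size: int) -> int:
    """Return a max_tokens value that fits the model's token limit.

    If the prompt plus the requested completion budget would overflow the
    context window, shrink max_tokens to whatever room is left (at least 16,
    an arbitrary floor chosen for this sketch).
    """
    if prompt_tokens + requested_max_tokens <= model_context_size:
        return requested_max_tokens
    remaining = model_context_size - prompt_tokens
    return max(remaining, 16)


# Example: a 4096-token model with a 4000-token prompt leaves room for at most
# 96 completion tokens, so a request for 512 is reduced to 96.
assert recalc_max_tokens_sketch(4000, 512, 4096) == 96
```
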
2 changes: 1 addition & 1 deletion api/core/app_runner/basic_app_runner.py
@@ -181,7 +181,7 @@ def run(self, application_generate_entity: ApplicationGenerateEntity,
return

# Re-calculate the max tokens if sum(prompt_token + max_tokens) over model token limit
- self.recale_llm_max_tokens(
+ self.recalc_llm_max_tokens(
model_config=app_orchestration_config.model_config,
prompt_messages=prompt_messages
)
2 changes: 1 addition & 1 deletion api/core/features/assistant_cot_runner.py
@@ -131,7 +131,7 @@ def increase_usage(final_llm_usage_dict: dict[str, LLMUsage], usage: LLMUsage):
)

# recale llm max tokens
- self.recale_llm_max_tokens(self.model_config, prompt_messages)
+ self.recalc_llm_max_tokens(self.model_config, prompt_messages)
# invoke model
chunks: Generator[LLMResultChunk, None, None] = model_instance.invoke_llm(
prompt_messages=prompt_messages,
2 changes: 1 addition & 1 deletion api/core/features/assistant_fc_runner.py
@@ -106,7 +106,7 @@ def increase_usage(final_llm_usage_dict: dict[str, LLMUsage], usage: LLMUsage):
)

# recale llm max tokens
- self.recale_llm_max_tokens(self.model_config, prompt_messages)
+ self.recalc_llm_max_tokens(self.model_config, prompt_messages)
# invoke model
chunks: Union[Generator[LLMResultChunk, None, None], LLMResult] = model_instance.invoke_llm(
prompt_messages=prompt_messages,
