From 2e88f87713dcc67bc5315091fb1f6e4fc7f78c9f Mon Sep 17 00:00:00 2001
From: Nam Vu
Date: Mon, 23 Sep 2024 20:32:58 +0700
Subject: [PATCH] fix: typos and improve naming conventions (#8687)

---
 api/commands.py                                    |  2 +-
 .../api_resource/chat/async_completions.py         |  2 +-
 .../api_resource/chat/completions.py               |  2 +-
 .../zhipuai/zhipuai_sdk/core/_base_models.py       |  3 +--
 .../zhipuai/zhipuai_sdk/core/_http_client.py       | 12 +++++-----
 .../zhipuai_sdk/core/_legacy_response.py           |  2 +-
 .../zhipuai/zhipuai_sdk/core/_response.py          |  4 ++--
 .../types/knowledge/document/__init__.py           |  4 ++--
 .../types/knowledge/document/document.py           |  6 ++---
 .../nodes/end/end_stream_processor.py              | 22 +++++++++----------
 web/app/components/base/chat/chat/hooks.ts         |  4 ++--
 .../base/image-uploader/image-preview.tsx          |  4 ++--
 .../develop/template/template_workflow.en.mdx      |  2 +-
 .../workflow/hooks/use-workflow-run.ts             |  6 ++---
 14 files changed, 37 insertions(+), 38 deletions(-)

diff --git a/api/commands.py b/api/commands.py
index b8fc81af673af..7ef4aed7f7766 100644
--- a/api/commands.py
+++ b/api/commands.py
@@ -652,7 +652,7 @@ def fix_app_site_missing():
                         app_was_created.send(app, account=account)
                 except Exception as e:
                     failed_app_ids.append(app_id)
-                    click.echo(click.style("FFailed to fix missing site for app {}".format(app_id), fg="red"))
+                    click.echo(click.style("Failed to fix missing site for app {}".format(app_id), fg="red"))
                     logging.exception(f"Fix app related site missing issue failed, error: {e}")
                     continue
 
diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/async_completions.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/async_completions.py
index d8ecc310644d1..05510a3ec421d 100644
--- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/async_completions.py
+++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/async_completions.py
@@ -57,7 +57,7 @@ def create(
             if temperature <= 0:
                 do_sample = False
                 temperature = 0.01
-                # logger.warning("temperature:取值范围是:(0.0, 1.0) 开区间,do_sample重写为:false(参数top_p temperture不生效)")  # noqa: E501
+                # logger.warning("temperature:取值范围是:(0.0, 1.0) 开区间,do_sample重写为:false(参数top_p temperature不生效)")  # noqa: E501
             if temperature >= 1:
                 temperature = 0.99
                 # logger.warning("temperature:取值范围是:(0.0, 1.0) 开区间")
diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/completions.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/completions.py
index 1c23473a03ae3..8e5bb454e6ce7 100644
--- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/completions.py
+++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/completions.py
@@ -60,7 +60,7 @@ def create(
             if temperature <= 0:
                 do_sample = False
                 temperature = 0.01
-                # logger.warning("temperature:取值范围是:(0.0, 1.0) 开区间,do_sample重写为:false(参数top_p temperture不生效)")  # noqa: E501
+                # logger.warning("temperature:取值范围是:(0.0, 1.0) 开区间,do_sample重写为:false(参数top_p temperature不生效)")  # noqa: E501
             if temperature >= 1:
                 temperature = 0.99
                 # logger.warning("temperature:取值范围是:(0.0, 1.0) 开区间")
diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_models.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_models.py
index 5e9a7e0a987e2..6d8ba700b7b1d 100644
--- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_models.py
+++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_models.py
@@ -630,8 +630,7 @@ def validate_type(*, type_: type[_T], value: object) -> _T:
     return cast(_T, _validate_non_model_type(type_=type_, value=value))
 
 
-# our use of subclasssing here causes weirdness for type checkers,
-# so we just pretend that we don't subclass
+# Subclassing here confuses type checkers, so we treat this class as non-inheriting.
 if TYPE_CHECKING:
     GenericModel = BaseModel
 else:
diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_http_client.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_http_client.py
index d0f933d814138..ffdafb85d581f 100644
--- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_http_client.py
+++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_http_client.py
@@ -169,7 +169,7 @@ def _set_private_attributes(
     # Pydantic uses a custom `__iter__` method to support casting BaseModels
     # to dictionaries. e.g. dict(model).
     # As we want to support `for item in page`, this is inherently incompatible
-    # with the default pydantic behaviour. It is not possible to support both
+    # with the default pydantic behavior. It is not possible to support both
     # use cases at once. Fortunately, this is not a big deal as all other pydantic
     # methods should continue to work as expected as there is an alternative method
     # to cast a model to a dictionary, model.dict(), which is used internally
@@ -356,16 +356,16 @@ def _build_request(self, options: FinalRequestOptions) -> httpx.Request:
             **kwargs,
         )
 
-    def _object_to_formfata(self, key: str, value: Data | Mapping[object, object]) -> list[tuple[str, str]]:
+    def _object_to_formdata(self, key: str, value: Data | Mapping[object, object]) -> list[tuple[str, str]]:
         items = []
 
         if isinstance(value, Mapping):
             for k, v in value.items():
-                items.extend(self._object_to_formfata(f"{key}[{k}]", v))
+                items.extend(self._object_to_formdata(f"{key}[{k}]", v))
             return items
         if isinstance(value, list | tuple):
             for v in value:
-                items.extend(self._object_to_formfata(key + "[]", v))
+                items.extend(self._object_to_formdata(key + "[]", v))
             return items
 
         def _primitive_value_to_str(val) -> str:
@@ -385,7 +385,7 @@ def _primitive_value_to_str(val) -> str:
         return [(key, str_data)]
 
     def _make_multipartform(self, data: Mapping[object, object]) -> dict[str, object]:
-        items = flatten(list(starmap(self._object_to_formfata, data.items())))
+        items = flatten(list(starmap(self._object_to_formdata, data.items())))
 
         serialized: dict[str, object] = {}
         for key, value in items:
@@ -620,7 +620,7 @@ def _process_response(
         stream: bool,
         stream_cls: type[StreamResponse] | None,
     ) -> ResponseT:
-        # _legacy_response with raw_response_header to paser method
+        # _legacy_response with raw_response_header to parser method
         if response.request.headers.get(RAW_RESPONSE_HEADER) == "true":
             return cast(
                 ResponseT,
diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_legacy_response.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_legacy_response.py
index 47183b9eee9c0..51bf21bcdc17a 100644
--- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_legacy_response.py
+++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_legacy_response.py
@@ -87,7 +87,7 @@ def parse(self, *, to: type[_T] | None = None) -> R | _T:
 
         For lower-level control, see `.read()`, `.json()`, `.iter_bytes()`.
 
-        You can customise the type that the response is parsed into through
+        You can customize the type that the response is parsed into through
         the `to` argument, e.g.
 
         ```py
diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_response.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_response.py
index 45443da662d57..92e601805569f 100644
--- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_response.py
+++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_response.py
@@ -252,7 +252,7 @@ def parse(self, *, to: type[_T] | None = None) -> R | _T:
 
         For lower-level control, see `.read()`, `.json()`, `.iter_bytes()`.
 
-        You can customise the type that the response is parsed into through
+        You can customize the type that the response is parsed into through
         the `to` argument, e.g.
 
         ```py
@@ -363,7 +363,7 @@ class StreamAlreadyConsumed(ZhipuAIError):  # noqa: N818
     # ^ error
     ```
 
-    If you want this behaviour you'll need to either manually accumulate the response
+    If you want this behavior you'll need to either manually accumulate the response
     content or call `await response.read()` before iterating over the stream.
     """
diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/__init__.py
index 32e23e6dab307..59cb41d7124a7 100644
--- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/__init__.py
+++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/__init__.py
@@ -1,8 +1,8 @@
-from .document import DocumentData, DocumentFailedInfo, DocumentObject, DocumentSuccessinfo
+from .document import DocumentData, DocumentFailedInfo, DocumentObject, DocumentSuccessInfo
 
 __all__ = [
     "DocumentData",
     "DocumentObject",
-    "DocumentSuccessinfo",
+    "DocumentSuccessInfo",
     "DocumentFailedInfo",
 ]
diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document.py
index b9a1646391ece..980bc6f4a7c40 100644
--- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document.py
+++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document.py
@@ -2,10 +2,10 @@
 
 from ....core import BaseModel
 
-__all__ = ["DocumentData", "DocumentObject", "DocumentSuccessinfo", "DocumentFailedInfo"]
+__all__ = ["DocumentData", "DocumentObject", "DocumentSuccessInfo", "DocumentFailedInfo"]
 
 
-class DocumentSuccessinfo(BaseModel):
+class DocumentSuccessInfo(BaseModel):
     documentId: Optional[str] = None
     """文件id"""
     filename: Optional[str] = None
@@ -24,7 +24,7 @@ class DocumentFailedInfo(BaseModel):
 class DocumentObject(BaseModel):
     """文档信息"""
 
-    successInfos: Optional[list[DocumentSuccessinfo]] = None
+    successInfos: Optional[list[DocumentSuccessInfo]] = None
     """上传成功的文件信息"""
     failedInfos: Optional[list[DocumentFailedInfo]] = None
     """上传失败的文件信息"""
diff --git a/api/core/workflow/nodes/end/end_stream_processor.py b/api/core/workflow/nodes/end/end_stream_processor.py
index 0366d7965d7c1..1aecf863ac5fb 100644
--- a/api/core/workflow/nodes/end/end_stream_processor.py
+++ b/api/core/workflow/nodes/end/end_stream_processor.py
@@ -22,8 +22,8 @@ def __init__(self, graph: Graph, variable_pool: VariablePool) -> None:
         for end_node_id, _ in self.end_stream_param.end_stream_variable_selector_mapping.items():
             self.route_position[end_node_id] = 0
         self.current_stream_chunk_generating_node_ids: dict[str, list[str]] = {}
-        self.has_outputed = False
-        self.outputed_node_ids = set()
+        self.has_output = False
+        self.output_node_ids = set()
 
     def process(self, generator: Generator[GraphEngineEvent, None, None]) -> Generator[GraphEngineEvent, None, None]:
         for event in generator:
@@ -34,11 +34,11 @@ def process(self, generator: Generator[GraphEngineEvent, None, None]) -> Generat
                 yield event
             elif isinstance(event, NodeRunStreamChunkEvent):
                 if event.in_iteration_id:
-                    if self.has_outputed and event.node_id not in self.outputed_node_ids:
+                    if self.has_output and event.node_id not in self.output_node_ids:
                         event.chunk_content = "\n" + event.chunk_content
 
-                    self.outputed_node_ids.add(event.node_id)
-                    self.has_outputed = True
+                    self.output_node_ids.add(event.node_id)
+                    self.has_output = True
                     yield event
                     continue
 
@@ -53,11 +53,11 @@ def process(self, generator: Generator[GraphEngineEvent, None, None]) -> Generat
                     )
 
                     if stream_out_end_node_ids:
-                        if self.has_outputed and event.node_id not in self.outputed_node_ids:
+                        if self.has_output and event.node_id not in self.output_node_ids:
                             event.chunk_content = "\n" + event.chunk_content
 
-                        self.outputed_node_ids.add(event.node_id)
-                        self.has_outputed = True
+                        self.output_node_ids.add(event.node_id)
+                        self.has_output = True
                         yield event
             elif isinstance(event, NodeRunSucceededEvent):
                 yield event
@@ -124,11 +124,11 @@ def _generate_stream_outputs_when_node_finished(
 
                 if text:
                     current_node_id = value_selector[0]
-                    if self.has_outputed and current_node_id not in self.outputed_node_ids:
+                    if self.has_output and current_node_id not in self.output_node_ids:
                         text = "\n" + text
 
-                    self.outputed_node_ids.add(current_node_id)
-                    self.has_outputed = True
+                    self.output_node_ids.add(current_node_id)
+                    self.has_output = True
                     yield NodeRunStreamChunkEvent(
                         id=event.id,
                         node_id=event.node_id,
diff --git a/web/app/components/base/chat/chat/hooks.ts b/web/app/components/base/chat/chat/hooks.ts
index dfb5a1b6855d5..64c238f9d1e56 100644
--- a/web/app/components/base/chat/chat/hooks.ts
+++ b/web/app/components/base/chat/chat/hooks.ts
@@ -334,9 +334,9 @@ export const useChat = (
       const newChatList = produce(chatListRef.current, (draft) => {
         const index = draft.findIndex(item => item.id === responseItem.id)
         if (index !== -1) {
-          const requestion = draft[index - 1]
+          const question = draft[index - 1]
           draft[index - 1] = {
-            ...requestion,
+            ...question,
           }
           draft[index] = {
             ...draft[index],
diff --git a/web/app/components/base/image-uploader/image-preview.tsx b/web/app/components/base/image-uploader/image-preview.tsx
index e5bd4c1bbc0dc..096facabfd2a8 100644
--- a/web/app/components/base/image-uploader/image-preview.tsx
+++ b/web/app/components/base/image-uploader/image-preview.tsx
@@ -88,7 +88,7 @@ const ImagePreview: FC = ({
     })
   }
 
-  const imageTobase64ToBlob = (base64: string, type = 'image/png'): Blob => {
+  const imageBase64ToBlob = (base64: string, type = 'image/png'): Blob => {
     const byteCharacters = atob(base64)
     const byteArrays = []
 
@@ -109,7 +109,7 @@ const ImagePreview: FC = ({
   const shareImage = async () => {
     try {
       const base64Data = url.split(',')[1]
-      const blob = imageTobase64ToBlob(base64Data, 'image/png')
+      const blob = imageBase64ToBlob(base64Data, 'image/png')
 
       await navigator.clipboard.write([
         new ClipboardItem({
diff --git a/web/app/components/develop/template/template_workflow.en.mdx b/web/app/components/develop/template/template_workflow.en.mdx
index 2bd0fe9daf436..5c712c2c2979e 100644
--- a/web/app/components/develop/template/template_workflow.en.mdx
+++ b/web/app/components/develop/template/template_workflow.en.mdx
@@ -424,7 +424,7 @@ Workflow applications offers non-session support and is ideal for translation, a
   />
 
-  Returns worklfow logs, with the first page returning the latest `{limit}` messages, i.e., in reverse order.
+  Returns workflow logs, with the first page returning the latest `{limit}` messages, i.e., in reverse order.
 
   ### Query
diff --git a/web/app/components/workflow/hooks/use-workflow-run.ts b/web/app/components/workflow/hooks/use-workflow-run.ts
index e1da503f3825f..68c3ff0a4b458 100644
--- a/web/app/components/workflow/hooks/use-workflow-run.ts
+++ b/web/app/components/workflow/hooks/use-workflow-run.ts
@@ -185,7 +185,7 @@ export const useWorkflowRun = () => {
       draft.forEach((edge) => {
        edge.data = {
          ...edge.data,
-          _runned: false,
+          _run: false,
        }
      })
    })
@@ -292,7 +292,7 @@ export const useWorkflowRun = () => {
      const newEdges = produce(edges, (draft) => {
        draft.forEach((edge) => {
          if (edge.target === data.node_id && incomeNodesId.includes(edge.source))
-           edge.data = { ...edge.data, _runned: true } as any
+           edge.data = { ...edge.data, _run: true } as any
        })
      })
      setEdges(newEdges)
@@ -416,7 +416,7 @@ export const useWorkflowRun = () => {
        const edge = draft.find(edge => edge.target === data.node_id && edge.source === prevNodeId)
 
        if (edge)
-         edge.data = { ...edge.data, _runned: true } as any
+         edge.data = { ...edge.data, _run: true } as any
      })
 
      setEdges(newEdges)