fix: typos and improve naming conventions: (langgenius#8687)
ZuzooVn authored and JunXu01 committed Nov 9, 2024
1 parent aa3d032 commit 2e88f87
Showing 14 changed files with 37 additions and 38 deletions.
2 changes: 1 addition & 1 deletion api/commands.py
@@ -652,7 +652,7 @@ def fix_app_site_missing():
app_was_created.send(app, account=account)
except Exception as e:
failed_app_ids.append(app_id)
click.echo(click.style("FFailed to fix missing site for app {}".format(app_id), fg="red"))
click.echo(click.style("Failed to fix missing site for app {}".format(app_id), fg="red"))
logging.exception(f"Fix app related site missing issue failed, error: {e}")
continue

@@ -57,7 +57,7 @@ def create(
if temperature <= 0:
do_sample = False
temperature = 0.01
# logger.warning("temperature:取值范围是:(0.0, 1.0) 开区间,do_sample重写为:false(参数top_p temperture不生效)") # noqa: E501
# logger.warning("temperature:取值范围是:(0.0, 1.0) 开区间,do_sample重写为:false(参数top_p temperature不生效)") # noqa: E501
if temperature >= 1:
temperature = 0.99
# logger.warning("temperature:取值范围是:(0.0, 1.0) 开区间")
@@ -60,7 +60,7 @@ def create(
if temperature <= 0:
do_sample = False
temperature = 0.01
# logger.warning("temperature:取值范围是:(0.0, 1.0) 开区间,do_sample重写为:false(参数top_p temperture不生效)") # noqa: E501
# logger.warning("temperature:取值范围是:(0.0, 1.0) 开区间,do_sample重写为:false(参数top_p temperature不生效)") # noqa: E501
if temperature >= 1:
temperature = 0.99
# logger.warning("temperature:取值范围是:(0.0, 1.0) 开区间")
@@ -630,8 +630,7 @@ def validate_type(*, type_: type[_T], value: object) -> _T:
return cast(_T, _validate_non_model_type(type_=type_, value=value))


# our use of subclasssing here causes weirdness for type checkers,
# so we just pretend that we don't subclass
# Subclassing here confuses type checkers, so we treat this class as non-inheriting.
if TYPE_CHECKING:
GenericModel = BaseModel
else:
@@ -169,7 +169,7 @@ def _set_private_attributes(
# Pydantic uses a custom `__iter__` method to support casting BaseModels
# to dictionaries. e.g. dict(model).
# As we want to support `for item in page`, this is inherently incompatible
# with the default pydantic behaviour. It is not possible to support both
# with the default pydantic behavior. It is not possible to support both
# use cases at once. Fortunately, this is not a big deal as all other pydantic
# methods should continue to work as expected as there is an alternative method
# to cast a model to a dictionary, model.dict(), which is used internally
@@ -356,16 +356,16 @@ def _build_request(self, options: FinalRequestOptions) -> httpx.Request:
**kwargs,
)

def _object_to_formfata(self, key: str, value: Data | Mapping[object, object]) -> list[tuple[str, str]]:
def _object_to_formdata(self, key: str, value: Data | Mapping[object, object]) -> list[tuple[str, str]]:
items = []

if isinstance(value, Mapping):
for k, v in value.items():
items.extend(self._object_to_formfata(f"{key}[{k}]", v))
items.extend(self._object_to_formdata(f"{key}[{k}]", v))
return items
if isinstance(value, list | tuple):
for v in value:
items.extend(self._object_to_formfata(key + "[]", v))
items.extend(self._object_to_formdata(key + "[]", v))
return items

def _primitive_value_to_str(val) -> str:
@@ -385,7 +385,7 @@ def _primitive_value_to_str(val) -> str:
return [(key, str_data)]

def _make_multipartform(self, data: Mapping[object, object]) -> dict[str, object]:
items = flatten(list(starmap(self._object_to_formfata, data.items())))
items = flatten(list(starmap(self._object_to_formdata, data.items())))

serialized: dict[str, object] = {}
for key, value in items:
@@ -620,7 +620,7 @@ def _process_response(
stream: bool,
stream_cls: type[StreamResponse] | None,
) -> ResponseT:
# _legacy_response with raw_response_header to paser method
# _legacy_response with raw_response_header to parser method
if response.request.headers.get(RAW_RESPONSE_HEADER) == "true":
return cast(
ResponseT,
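The rename from `_object_to_formfata` to `_object_to_formdata` above touches a helper that recursively flattens nested mappings and lists into bracketed form-data keys (`key[sub]`, `key[]`), which `_make_multipartform` then assembles into a multipart body. Below is a self-contained sketch of that flattening idea, not the SDK's actual implementation; the name and the exact string conversion are assumptions:

```python
from collections.abc import Mapping


def object_to_formdata(key: str, value: object) -> list[tuple[str, str]]:
    """Flatten nested mappings/lists into bracketed form-data pairs, e.g. user[tags][]."""
    if isinstance(value, Mapping):
        items: list[tuple[str, str]] = []
        for k, v in value.items():
            items.extend(object_to_formdata(f"{key}[{k}]", v))
        return items
    if isinstance(value, (list, tuple)):
        items = []
        for v in value:
            items.extend(object_to_formdata(key + "[]", v))
        return items
    if isinstance(value, bool):
        return [(key, "true" if value else "false")]
    return [(key, str(value))]


print(object_to_formdata("user", {"name": "Ada", "tags": ["a", "b"]}))
# [('user[name]', 'Ada'), ('user[tags][]', 'a'), ('user[tags][]', 'b')]
```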
@@ -87,7 +87,7 @@ def parse(self, *, to: type[_T] | None = None) -> R | _T:
For lower-level control, see `.read()`, `.json()`, `.iter_bytes()`.
You can customise the type that the response is parsed into through
You can customize the type that the response is parsed into through
the `to` argument, e.g.
```py
@@ -252,7 +252,7 @@ def parse(self, *, to: type[_T] | None = None) -> R | _T:
For lower-level control, see `.read()`, `.json()`, `.iter_bytes()`.
You can customise the type that the response is parsed into through
You can customize the type that the response is parsed into through
the `to` argument, e.g.
```py
@@ -363,7 +363,7 @@ class StreamAlreadyConsumed(ZhipuAIError): # noqa: N818
# ^ error
```
If you want this behaviour you'll need to either manually accumulate the response
If you want this behavior you'll need to either manually accumulate the response
content or call `await response.read()` before iterating over the stream.
"""

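The `StreamAlreadyConsumed` docstring above documents a single-pass contract: iterating a streamed response body a second time raises the error, and callers should either accumulate the content themselves or call `await response.read()` first. A minimal, self-contained sketch of that contract (toy classes, not the SDK's actual response types):

```python
from collections.abc import Iterator


class StreamAlreadyConsumedError(RuntimeError):
    """Raised when a single-pass stream is iterated a second time."""


class OnePassStream:
    """Toy stand-in for a streamed HTTP body that can only be iterated once."""

    def __init__(self, chunks: list[str]) -> None:
        self._chunks = chunks
        self._consumed = False

    def __iter__(self) -> Iterator[str]:
        if self._consumed:
            raise StreamAlreadyConsumedError("stream was already consumed")
        self._consumed = True
        yield from self._chunks

    def read(self) -> str:
        """Accumulate everything up front, mirroring the `response.read()` advice above."""
        return "".join(self)


stream = OnePassStream(["he", "llo"])
print(stream.read())        # 'hello'
try:
    list(stream)            # second pass raises
except StreamAlreadyConsumedError as exc:
    print(exc)
```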
@@ -1,8 +1,8 @@
from .document import DocumentData, DocumentFailedInfo, DocumentObject, DocumentSuccessinfo
from .document import DocumentData, DocumentFailedInfo, DocumentObject, DocumentSuccessInfo

__all__ = [
"DocumentData",
"DocumentObject",
"DocumentSuccessinfo",
"DocumentSuccessInfo",
"DocumentFailedInfo",
]
@@ -2,10 +2,10 @@

from ....core import BaseModel

__all__ = ["DocumentData", "DocumentObject", "DocumentSuccessinfo", "DocumentFailedInfo"]
__all__ = ["DocumentData", "DocumentObject", "DocumentSuccessInfo", "DocumentFailedInfo"]


class DocumentSuccessinfo(BaseModel):
class DocumentSuccessInfo(BaseModel):
documentId: Optional[str] = None
"""文件id"""
filename: Optional[str] = None
@@ -24,7 +24,7 @@ class DocumentFailedInfo(BaseModel):
class DocumentObject(BaseModel):
"""文档信息"""

successInfos: Optional[list[DocumentSuccessinfo]] = None
successInfos: Optional[list[DocumentSuccessInfo]] = None
"""上传成功的文件信息"""
failedInfos: Optional[list[DocumentFailedInfo]] = None
"""上传失败的文件信息"""
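The hunks above rename `DocumentSuccessinfo` to `DocumentSuccessInfo` (consistent PascalCase); the Chinese docstrings label the fields as the file id, the file name, and the lists of successfully and unsuccessfully uploaded files. A rough re-creation with plain Pydantic, assuming the SDK's `BaseModel` behaves like `pydantic.BaseModel`; `DocumentFailedInfo`'s fields are not visible in this hunk, so none are modeled here:

```python
from typing import Optional

from pydantic import BaseModel


class DocumentSuccessInfo(BaseModel):
    """One successfully uploaded file (per the diff's docstrings: file id, file name)."""

    documentId: Optional[str] = None
    filename: Optional[str] = None


class DocumentFailedInfo(BaseModel):
    """Failure details; fields not shown in this hunk, so left empty in this sketch."""


class DocumentObject(BaseModel):
    """Document info: files uploaded successfully and files that failed to upload."""

    successInfos: Optional[list[DocumentSuccessInfo]] = None
    failedInfos: Optional[list[DocumentFailedInfo]] = None


doc = DocumentObject(successInfos=[DocumentSuccessInfo(documentId="doc-1", filename="a.pdf")])
print(doc.successInfos[0].filename)  # a.pdf
```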
22 changes: 11 additions & 11 deletions api/core/workflow/nodes/end/end_stream_processor.py
@@ -22,8 +22,8 @@ def __init__(self, graph: Graph, variable_pool: VariablePool) -> None:
for end_node_id, _ in self.end_stream_param.end_stream_variable_selector_mapping.items():
self.route_position[end_node_id] = 0
self.current_stream_chunk_generating_node_ids: dict[str, list[str]] = {}
self.has_outputed = False
self.outputed_node_ids = set()
self.has_output = False
self.output_node_ids = set()

def process(self, generator: Generator[GraphEngineEvent, None, None]) -> Generator[GraphEngineEvent, None, None]:
for event in generator:
@@ -34,11 +34,11 @@ def process(self, generator: Generator[GraphEngineEvent, None, None]) -> Generator[GraphEngineEvent, None, None]:
yield event
elif isinstance(event, NodeRunStreamChunkEvent):
if event.in_iteration_id:
if self.has_outputed and event.node_id not in self.outputed_node_ids:
if self.has_output and event.node_id not in self.output_node_ids:
event.chunk_content = "\n" + event.chunk_content

self.outputed_node_ids.add(event.node_id)
self.has_outputed = True
self.output_node_ids.add(event.node_id)
self.has_output = True
yield event
continue

@@ -53,11 +53,11 @@ def process(self, generator: Generator[GraphEngineEvent, None, None]) -> Generator[GraphEngineEvent, None, None]:
)

if stream_out_end_node_ids:
if self.has_outputed and event.node_id not in self.outputed_node_ids:
if self.has_output and event.node_id not in self.output_node_ids:
event.chunk_content = "\n" + event.chunk_content

self.outputed_node_ids.add(event.node_id)
self.has_outputed = True
self.output_node_ids.add(event.node_id)
self.has_output = True
yield event
elif isinstance(event, NodeRunSucceededEvent):
yield event
@@ -124,11 +124,11 @@ def _generate_stream_outputs_when_node_finished(

if text:
current_node_id = value_selector[0]
if self.has_outputed and current_node_id not in self.outputed_node_ids:
if self.has_output and current_node_id not in self.output_node_ids:
text = "\n" + text

self.outputed_node_ids.add(current_node_id)
self.has_outputed = True
self.output_node_ids.add(current_node_id)
self.has_output = True
yield NodeRunStreamChunkEvent(
id=event.id,
node_id=event.node_id,
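The renamed `has_output` / `output_node_ids` attributes drive one behavior visible in the hunks above: when a chunk arrives from a node that has not streamed anything yet, but some output has already been emitted, a newline is prepended so the outputs of different nodes don't run together. A standalone sketch of that pattern (simplified; not the actual `EndStreamProcessor`):

```python
class ChunkSeparator:
    """Prepend a newline when output switches to a node that hasn't streamed yet."""

    def __init__(self) -> None:
        self.has_output = False
        self.output_node_ids: set[str] = set()

    def process_chunk(self, node_id: str, chunk: str) -> str:
        if self.has_output and node_id not in self.output_node_ids:
            chunk = "\n" + chunk
        self.output_node_ids.add(node_id)
        self.has_output = True
        return chunk


sep = ChunkSeparator()
print(repr(sep.process_chunk("llm_1", "Hello")))   # 'Hello'
print(repr(sep.process_chunk("llm_1", " world")))  # ' world'  (same node, no separator)
print(repr(sep.process_chunk("llm_2", "Bye")))     # '\nBye'   (new node, newline prepended)
```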
4 changes: 2 additions & 2 deletions web/app/components/base/chat/chat/hooks.ts
@@ -334,9 +334,9 @@ export const useChat = (
const newChatList = produce(chatListRef.current, (draft) => {
const index = draft.findIndex(item => item.id === responseItem.id)
if (index !== -1) {
const requestion = draft[index - 1]
const question = draft[index - 1]
draft[index - 1] = {
...requestion,
...question,
}
draft[index] = {
...draft[index],
4 changes: 2 additions & 2 deletions web/app/components/base/image-uploader/image-preview.tsx
@@ -88,7 +88,7 @@ const ImagePreview: FC<ImagePreviewProps> = ({
})
}

const imageTobase64ToBlob = (base64: string, type = 'image/png'): Blob => {
const imageBase64ToBlob = (base64: string, type = 'image/png'): Blob => {
const byteCharacters = atob(base64)
const byteArrays = []

@@ -109,7 +109,7 @@ const ImagePreview: FC<ImagePreviewProps> = ({
const shareImage = async () => {
try {
const base64Data = url.split(',')[1]
const blob = imageTobase64ToBlob(base64Data, 'image/png')
const blob = imageBase64ToBlob(base64Data, 'image/png')

await navigator.clipboard.write([
new ClipboardItem({
@@ -424,7 +424,7 @@ Workflow applications offers non-session support and is ideal for translation, a
/>
<Row>
<Col>
Returns worklfow logs, with the first page returning the latest `{limit}` messages, i.e., in reverse order.
Returns workflow logs, with the first page returning the latest `{limit}` messages, i.e., in reverse order.

### Query

6 changes: 3 additions & 3 deletions web/app/components/workflow/hooks/use-workflow-run.ts
@@ -185,7 +185,7 @@ export const useWorkflowRun = () => {
draft.forEach((edge) => {
edge.data = {
...edge.data,
_runned: false,
_run: false,
}
})
})
@@ -292,7 +292,7 @@ export const useWorkflowRun = () => {
const newEdges = produce(edges, (draft) => {
draft.forEach((edge) => {
if (edge.target === data.node_id && incomeNodesId.includes(edge.source))
edge.data = { ...edge.data, _runned: true } as any
edge.data = { ...edge.data, _run: true } as any
})
})
setEdges(newEdges)
@@ -416,7 +416,7 @@ export const useWorkflowRun = () => {
const edge = draft.find(edge => edge.target === data.node_id && edge.source === prevNodeId)

if (edge)
edge.data = { ...edge.data, _runned: true } as any
edge.data = { ...edge.data, _run: true } as any
})
setEdges(newEdges)

