
Commit

use text instead of content in Cohere and Anthropic
anakin87 committed Dec 9, 2024
1 parent 1959ab1 commit da7278b
Showing 5 changed files with 20 additions and 20 deletions.
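The change itself: Haystack's ChatMessage dataclass exposes a text property for the textual payload of a message, and this commit moves the Cohere and Anthropic integrations from the older content attribute to that accessor. A minimal standalone sketch of the difference, assuming a Haystack 2.x environment (haystack-ai) where ChatMessage.text is available:

from haystack.dataclasses import ChatMessage

# Factory methods are the usual way to construct chat messages.
message = ChatMessage.from_user("What is the capital of France?")

# Preferred accessor: text returns the textual content of the message.
assert message.text == "What is the capital of France?"

# The hunks below replace reads of message.content with message.text;
# the behavior of the integrations is otherwise unchanged.
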
14 changes: 7 additions & 7 deletions integrations/anthropic/tests/test_chat_generator.py
@@ -188,9 +188,9 @@ def test_default_inference_params(self, chat_messages):

first_reply = replies[0]
assert isinstance(first_reply, ChatMessage), "First reply is not a ChatMessage instance"
- assert first_reply.content, "First reply has no content"
+ assert first_reply.text, "First reply has no text"
assert ChatMessage.is_from(first_reply, ChatRole.ASSISTANT), "First reply is not from the assistant"
- assert "paris" in first_reply.content.lower(), "First reply does not contain 'paris'"
+ assert "paris" in first_reply.text.lower(), "First reply does not contain 'paris'"
assert first_reply.meta, "First reply has no metadata"

@pytest.mark.skipif(
@@ -221,9 +221,9 @@ def streaming_callback(chunk: StreamingChunk):

first_reply = replies[0]
assert isinstance(first_reply, ChatMessage), "First reply is not a ChatMessage instance"
- assert first_reply.content, "First reply has no content"
+ assert first_reply.text, "First reply has no text"
assert ChatMessage.is_from(first_reply, ChatRole.ASSISTANT), "First reply is not from the assistant"
- assert "paris" in first_reply.content.lower(), "First reply does not contain 'paris'"
+ assert "paris" in first_reply.text.lower(), "First reply does not contain 'paris'"
assert first_reply.meta, "First reply has no metadata"

@pytest.mark.skipif(
@@ -255,11 +255,11 @@ def test_tools_use(self):

first_reply = replies[0]
assert isinstance(first_reply, ChatMessage), "First reply is not a ChatMessage instance"
- assert first_reply.content, "First reply has no content"
+ assert first_reply.text, "First reply has no text"
assert ChatMessage.is_from(first_reply, ChatRole.ASSISTANT), "First reply is not from the assistant"
- assert "get_stock_price" in first_reply.content.lower(), "First reply does not contain get_stock_price"
+ assert "get_stock_price" in first_reply.text.lower(), "First reply does not contain get_stock_price"
assert first_reply.meta, "First reply has no metadata"
- fc_response = json.loads(first_reply.content)
+ fc_response = json.loads(first_reply.text)
assert "name" in fc_response, "First reply does not contain name of the tool"
assert "input" in fc_response, "First reply does not contain input of the tool"

4 changes: 2 additions & 2 deletions integrations/anthropic/tests/test_vertex_chat_generator.py
@@ -188,9 +188,9 @@ def test_default_inference_params(self, chat_messages):

first_reply = replies[0]
assert isinstance(first_reply, ChatMessage), "First reply is not a ChatMessage instance"
- assert first_reply.content, "First reply has no content"
+ assert first_reply.text, "First reply has no text"
assert ChatMessage.is_from(first_reply, ChatRole.ASSISTANT), "First reply is not from the assistant"
- assert "paris" in first_reply.content.lower(), "First reply does not contain 'paris'"
+ assert "paris" in first_reply.text.lower(), "First reply does not contain 'paris'"
assert first_reply.meta, "First reply has no metadata"

# Anthropic messages API is similar for AnthropicVertex and Anthropic endpoint,
@@ -136,7 +136,7 @@ def from_dict(cls, data: Dict[str, Any]) -> "CohereChatGenerator":

def _message_to_dict(self, message: ChatMessage) -> Dict[str, str]:
role = "User" if message.role == ChatRole.USER else "Chatbot"
chat_message = {"user_name": role, "text": message.content}
chat_message = {"user_name": role, "text": message.text}
return chat_message

@component.output_types(replies=List[ChatMessage])
@@ -157,7 +157,7 @@ def run(self, messages: List[ChatMessage], generation_kwargs: Optional[Dict[str,
chat_history = [self._message_to_dict(m) for m in messages[:-1]]
if self.streaming_callback:
response = self.client.chat_stream(
- message=messages[-1].content,
+ message=messages[-1].text,
model=self.model,
chat_history=chat_history,
**generation_kwargs,
@@ -190,7 +190,7 @@ def run(self, messages: List[ChatMessage], generation_kwargs: Optional[Dict[str,
)
else:
response = self.client.chat(
- message=messages[-1].content,
+ message=messages[-1].text,
model=self.model,
chat_history=chat_history,
**generation_kwargs,
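For context, the CohereChatGenerator hunks above convert the earlier ChatMessage objects into Cohere's chat_history entries and pass the text of the latest message to client.chat or client.chat_stream. A standalone sketch of that mapping with the new accessor; the helper below mirrors the private _message_to_dict method from the diff and is not a public API:

from typing import Dict, List
from haystack.dataclasses import ChatMessage, ChatRole

def message_to_dict(message: ChatMessage) -> Dict[str, str]:
    # Each chat_history entry carries a speaker label plus the plain text
    # of that turn, now read from ChatMessage.text.
    role = "User" if message.role == ChatRole.USER else "Chatbot"
    return {"user_name": role, "text": message.text}

messages: List[ChatMessage] = [
    ChatMessage.from_user("What is the capital of France?"),
    ChatMessage.from_assistant("Paris."),
    ChatMessage.from_user("And the capital of Germany?"),
]

# Everything except the last message becomes chat_history; the last
# message is what the generator sends as the message argument.
chat_history = [message_to_dict(m) for m in messages[:-1]]
assert chat_history[-1] == {"user_name": "Chatbot", "text": "Paris."}
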
@@ -67,4 +67,4 @@ def run(self, prompt: str):
chat_message = ChatMessage.from_user(prompt)
# Note we have to call super() like this because of the way components are dynamically built with the decorator
results = super(CohereGenerator, self).run([chat_message]) # noqa
return {"replies": [results["replies"][0].content], "meta": [results["replies"][0].meta]}
return {"replies": [results["replies"][0].text], "meta": [results["replies"][0].meta]}
14 changes: 7 additions & 7 deletions integrations/cohere/tests/test_cohere_chat_generator.py
@@ -169,7 +169,7 @@ def test_live_run(self):
results = component.run(chat_messages)
assert len(results["replies"]) == 1
message: ChatMessage = results["replies"][0]
assert "Paris" in message.content
assert "Paris" in message.text
assert "usage" in message.meta
assert "prompt_tokens" in message.meta["usage"]
assert "completion_tokens" in message.meta["usage"]
@@ -205,7 +205,7 @@ def __call__(self, chunk: StreamingChunk) -> None:

assert len(results["replies"]) == 1
message: ChatMessage = results["replies"][0]
assert "Paris" in message.content
assert "Paris" in message.text

assert message.meta["finish_reason"] == "COMPLETE"

@@ -227,7 +227,7 @@ def test_live_run_with_connector(self):
results = component.run(chat_messages, generation_kwargs={"connectors": [{"id": "web-search"}]})
assert len(results["replies"]) == 1
message: ChatMessage = results["replies"][0]
assert "Paris" in message.content
assert "Paris" in message.text
assert message.meta["documents"] is not None
assert "citations" in message.meta # Citations might be None

@@ -253,7 +253,7 @@ def __call__(self, chunk: StreamingChunk) -> None:

assert len(results["replies"]) == 1
message: ChatMessage = results["replies"][0]
assert "Paris" in message.content
assert "Paris" in message.text

assert message.meta["finish_reason"] == "COMPLETE"

@@ -291,10 +291,10 @@ def test_tools_use(self):

first_reply = replies[0]
assert isinstance(first_reply, ChatMessage), "First reply is not a ChatMessage instance"
- assert first_reply.content, "First reply has no content"
+ assert first_reply.text, "First reply has no text"
assert ChatMessage.is_from(first_reply, ChatRole.ASSISTANT), "First reply is not from the assistant"
- assert "get_stock_price" in first_reply.content.lower(), "First reply does not contain get_stock_price"
+ assert "get_stock_price" in first_reply.text.lower(), "First reply does not contain get_stock_price"
assert first_reply.meta, "First reply has no metadata"
- fc_response = json.loads(first_reply.content)
+ fc_response = json.loads(first_reply.text)
assert "name" in fc_response, "First reply does not contain name of the tool"
assert "parameters" in fc_response, "First reply does not contain parameters of the tool"
