diff --git a/libs/aws/langchain_aws/chat_models/bedrock_converse.py b/libs/aws/langchain_aws/chat_models/bedrock_converse.py
index ff8a3e26..c478b4e6 100644
--- a/libs/aws/langchain_aws/chat_models/bedrock_converse.py
+++ b/libs/aws/langchain_aws/chat_models/bedrock_converse.py
@@ -649,7 +649,7 @@ def _messages_to_bedrock(
             )
             bedrock_messages.append(curr)
         else:
-            raise ValueError()
+            raise ValueError(f"Unsupported message type {type(msg)}")
     return bedrock_messages, bedrock_system
diff --git a/libs/aws/langchain_aws/embeddings/bedrock.py b/libs/aws/langchain_aws/embeddings/bedrock.py
index fe5c9675..f0c009a6 100644
--- a/libs/aws/langchain_aws/embeddings/bedrock.py
+++ b/libs/aws/langchain_aws/embeddings/bedrock.py
@@ -1,5 +1,6 @@
 import asyncio
 import json
+import logging
 import os
 from typing import Any, Dict, List, Optional
@@ -153,7 +154,8 @@ def _embedding_func(self, text: str) -> List[float]:
             return response_body.get("embedding")
         except Exception as e:
-            raise ValueError(f"Error raised by inference endpoint: {e}")
+            logging.error(f"Error raised by inference endpoint: {e}")
+            raise e

     def _normalize_vector(self, embeddings: List[float]) -> List[float]:
         """Normalize the embedding to a unit vector."""
diff --git a/libs/aws/langchain_aws/llms/bedrock.py b/libs/aws/langchain_aws/llms/bedrock.py
index 172688ec..b35126cc 100644
--- a/libs/aws/langchain_aws/llms/bedrock.py
+++ b/libs/aws/langchain_aws/llms/bedrock.py
@@ -1,5 +1,6 @@
 import asyncio
 import json
+import logging
 import os
 import warnings
 from abc import ABC
@@ -734,7 +735,10 @@ def _prepare_input_and_invoke(
             ) = LLMInputOutputAdapter.prepare_output(provider, response).values()
         except Exception as e:
-            raise ValueError(f"Error raised by bedrock service: {e}")
+            logging.error(f"Error raised by bedrock service: {e}")
+            if run_manager is not None:
+                run_manager.on_llm_error(e)
+            raise e

         if stop is not None:
             text = enforce_stop_tokens(text, stop)
@@ -854,7 +858,10 @@ def _prepare_input_and_invoke_stream(
             response = self.client.invoke_model_with_response_stream(**request_options)
         except Exception as e:
-            raise ValueError(f"Error raised by bedrock service: {e}")
+            logging.error(f"Error raised by bedrock service: {e}")
+            if run_manager is not None:
+                run_manager.on_llm_error(e)
+            raise e

         for chunk in LLMInputOutputAdapter.prepare_output_stream(
             provider,
diff --git a/libs/aws/langchain_aws/llms/sagemaker_endpoint.py b/libs/aws/langchain_aws/llms/sagemaker_endpoint.py
index c8706669..202aee71 100644
--- a/libs/aws/langchain_aws/llms/sagemaker_endpoint.py
+++ b/libs/aws/langchain_aws/llms/sagemaker_endpoint.py
@@ -1,6 +1,7 @@
 """Sagemaker InvokeEndpoint API."""

 import io
+import logging
 import re
 from abc import abstractmethod
 from typing import Any, Dict, Generic, Iterator, List, Mapping, Optional, TypeVar, Union
@@ -338,7 +339,10 @@ def _stream(
                     run_manager.on_llm_new_token(chunk.text)
         except Exception as e:
-            raise ValueError(f"Error raised by streaming inference endpoint: {e}")
+            logging.error(f"Error raised by streaming inference endpoint: {e}")
+            if run_manager is not None:
+                run_manager.on_llm_error(e)
+            raise e

     def _call(
         self,
@@ -384,7 +388,10 @@ def _call(
                 **_endpoint_kwargs,
             )
         except Exception as e:
-            raise ValueError(f"Error raised by inference endpoint: {e}")
+            logging.error(f"Error raised by inference endpoint: {e}")
+            if run_manager is not None:
+                run_manager.on_llm_error(e)
+            raise e

         text = self.content_handler.transform_output(response["Body"])
         if stop is not None:
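
Usage note (not part of the patch): the sketch below shows how the new error path can be observed from the caller side. It is a minimal sketch assuming the langchain_core callback API (BaseCallbackHandler.on_llm_error) and the BedrockLLM entry point; the handler class name and model id are illustrative only.

    # Minimal sketch, not part of the patch. Assumes langchain_core's
    # BaseCallbackHandler API; ErrorLoggingHandler and the model id are
    # illustrative.
    from langchain_core.callbacks import BaseCallbackHandler

    from langchain_aws import BedrockLLM


    class ErrorLoggingHandler(BaseCallbackHandler):
        """Records LLM errors surfaced through on_llm_error."""

        def on_llm_error(self, error: BaseException, **kwargs) -> None:
            # With this patch the original exception reaches the callback
            # instead of being swallowed and re-raised as a bare ValueError.
            print(f"LLM call failed: {error!r}")


    llm = BedrockLLM(
        model_id="anthropic.claude-v2",
        callbacks=[ErrorLoggingHandler()],
    )
    try:
        llm.invoke("Hello")
    except Exception as e:
        # The original exception type is preserved by "raise e".
        print(type(e).__name__)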