fix: raise botocore exceptions (#193)
Motivation:
The old implementation swallows botocore exceptions and translates them
into a `ValueError`, which complicates handling different errors: some
botocore exceptions can simply be retried in user code, whereas for
others the user wants to trigger an alert and re-raise, or to raise a
specific exception. Translating every botocore exception into a
`ValueError` makes this impossible.
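With the original botocore exceptions propagated, callers can branch on the error type. A minimal sketch of the kind of caller-side handling this enables; the retry policy, the `ThrottlingException` code, and the `llm`/`prompt` names are illustrative assumptions, not part of this commit:

```python
import time

from botocore.exceptions import ClientError

def invoke_with_retry(llm, prompt, max_attempts=3):
    # Hypothetical caller: `llm` is a langchain-aws model, `prompt` a string.
    for attempt in range(max_attempts):
        try:
            return llm.invoke(prompt)
        except ClientError as e:
            code = e.response["Error"]["Code"]
            if code == "ThrottlingException" and attempt < max_attempts - 1:
                time.sleep(2**attempt)  # transient throttling: back off, retry
                continue
            raise  # anything else: propagate the original botocore exception
```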
pavelm10 authored Sep 19, 2024
1 parent abd3b20 commit 653a8b3
Showing 4 changed files with 22 additions and 6 deletions.
2 changes: 1 addition & 1 deletion libs/aws/langchain_aws/chat_models/bedrock_converse.py
@@ -705,7 +705,7 @@ def _messages_to_bedrock(
             )
             bedrock_messages.append(curr)
         else:
-            raise ValueError()
+            raise ValueError(f"Unsupported message type {type(msg)}")
     return bedrock_messages, bedrock_system


4 changes: 3 additions & 1 deletion libs/aws/langchain_aws/embeddings/bedrock.py
@@ -1,5 +1,6 @@
 import asyncio
 import json
+import logging
 import os
 from typing import Any, Dict, List, Optional
@@ -154,7 +155,8 @@ def _embedding_func(self, text: str) -> List[float]:
             return response_body.get("embedding")

         except Exception as e:
-            raise ValueError(f"Error raised by inference endpoint: {e}")
+            logging.error(f"Error raised by inference endpoint: {e}")
+            raise e

     def _normalize_vector(self, embeddings: List[float]) -> List[float]:
         """Normalize the embedding to a unit vector."""
11 changes: 9 additions & 2 deletions libs/aws/langchain_aws/llms/bedrock.py
@@ -1,5 +1,6 @@
 import asyncio
 import json
+import logging
 import os
 import warnings
 from abc import ABC
@@ -788,7 +789,10 @@ def _prepare_input_and_invoke(
             ) = LLMInputOutputAdapter.prepare_output(provider, response).values()

         except Exception as e:
-            raise ValueError(f"Error raised by bedrock service: {e}")
+            logging.error(f"Error raised by bedrock service: {e}")
+            if run_manager is not None:
+                run_manager.on_llm_error(e)
+            raise e

         if stop is not None:
             text = enforce_stop_tokens(text, stop)
@@ -908,7 +912,10 @@ def _prepare_input_and_invoke_stream(
             response = self.client.invoke_model_with_response_stream(**request_options)

         except Exception as e:
-            raise ValueError(f"Error raised by bedrock service: {e}")
+            logging.error(f"Error raised by bedrock service: {e}")
+            if run_manager is not None:
+                run_manager.on_llm_error(e)
+            raise e

         for chunk in LLMInputOutputAdapter.prepare_output_stream(
             provider,
11 changes: 9 additions & 2 deletions libs/aws/langchain_aws/llms/sagemaker_endpoint.py
@@ -1,6 +1,7 @@
"""Sagemaker InvokeEndpoint API."""

import io
import logging
import re
from abc import abstractmethod
from typing import Any, Dict, Generic, Iterator, List, Mapping, Optional, TypeVar, Union
@@ -336,7 +337,10 @@ def _stream(
                     run_manager.on_llm_new_token(chunk.text)

         except Exception as e:
-            raise ValueError(f"Error raised by streaming inference endpoint: {e}")
+            logging.error(f"Error raised by streaming inference endpoint: {e}")
+            if run_manager is not None:
+                run_manager.on_llm_error(e)
+            raise e

     def _call(
         self,
@@ -382,7 +386,10 @@ def _call(
                 **_endpoint_kwargs,
             )
         except Exception as e:
-            raise ValueError(f"Error raised by inference endpoint: {e}")
+            logging.error(f"Error raised by inference endpoint: {e}")
+            if run_manager is not None:
+                run_manager.on_llm_error(e)
+            raise e

         text = self.content_handler.transform_output(response["Body"])
         if stop is not None:
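All four changed call sites follow the same pattern: log the error, notify the callback manager if one is attached, and re-raise the original exception instead of wrapping it in `ValueError`. A standalone sketch of that pattern, where `run_manager` stands in for LangChain's callback manager (illustrative, not code from this repo):

```python
import logging

def _invoke_logging_errors(call, run_manager=None):
    """Run `call`, logging and propagating any exception unchanged."""
    try:
        return call()
    except Exception as e:
        logging.error(f"Error raised by inference endpoint: {e}")
        if run_manager is not None:
            run_manager.on_llm_error(e)  # surface the failure to callbacks
        raise e  # propagate the original exception, not a ValueError
```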
