BUG-285: Improve functional tests and set default temperature on LLMs #337

Merged · 1 commit · Dec 12, 2024
6 changes: 5 additions & 1 deletion aixplain/factories/model_factory/utils.py
@@ -31,10 +31,13 @@ def create_model_from_response(response: Dict) -> Model:
             parameters[param["name"]] = [w["value"] for w in param["values"]]

     function = Function(response["function"]["id"])
-    inputs = []
+    inputs, temperature = [], None
     ModelClass = Model
     if function == Function.TEXT_GENERATION:
         ModelClass = LLM
+        f = [p for p in response.get("params", []) if p["name"] == "temperature"]
+        if len(f) > 0 and len(f[0].get("defaultValues", [])) > 0:
+            temperature = float(f[0]["defaultValues"][0]["value"])
     elif function == Function.UTILITIES:
         ModelClass = UtilityModel
         inputs = [
@@ -67,6 +70,7 @@ def create_model_from_response(response: Dict) -> Model:
         is_subscribed=True if "subscription" in response else False,
         version=response["version"]["id"],
         inputs=inputs,
+        temperature=temperature,
     )


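For reference, the new branch reads the model's default temperature out of the backend's model listing. Below is a minimal standalone sketch of that lookup; the helper name and the payload values are illustrative, and only the params/defaultValues shape comes from the diff above.

from typing import Dict, Optional

def extract_default_temperature(response: Dict) -> Optional[float]:
    """Return the model's advertised default temperature, or None if absent."""
    matches = [p for p in response.get("params", []) if p["name"] == "temperature"]
    if matches and matches[0].get("defaultValues"):
        return float(matches[0]["defaultValues"][0]["value"])
    return None

# Illustrative payload, shaped like the fields the diff reads:
response = {"params": [{"name": "temperature", "defaultValues": [{"value": "0.7"}]}]}
assert extract_default_temperature(response) == 0.7
assert extract_default_temperature({"params": []}) is None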
14 changes: 8 additions & 6 deletions aixplain/modules/model/llm_model.py
@@ -61,6 +61,7 @@ def __init__(
         function: Optional[Function] = None,
         is_subscribed: bool = False,
         cost: Optional[Dict] = None,
+        temperature: float = 0.001,
         **additional_info,
     ) -> None:
         """LLM Init
@@ -92,14 +93,15 @@
         )
         self.url = config.MODELS_RUN_URL
         self.backend_url = config.BACKEND_URL
+        self.temperature = temperature

     def run(
         self,
         data: Text,
         context: Optional[Text] = None,
         prompt: Optional[Text] = None,
         history: Optional[List[Dict]] = None,
-        temperature: float = 0.001,
+        temperature: Optional[float] = None,
         max_tokens: int = 128,
         top_p: float = 1.0,
         name: Text = "model_process",
@@ -114,7 +116,7 @@
             context (Optional[Text], optional): System message. Defaults to None.
             prompt (Optional[Text], optional): Prompt Message which comes on the left side of the last utterance. Defaults to None.
             history (Optional[List[Dict]], optional): Conversation history in OpenAI format ([{ "role": "assistant", "content": "Hello, world!"}]). Defaults to None.
-            temperature (float, optional): LLM temperature. Defaults to 0.001.
+            temperature (Optional[float], optional): LLM temperature. Defaults to None.
             max_tokens (int, optional): Maximum Generation Tokens. Defaults to 128.
             top_p (float, optional): Top P. Defaults to 1.0.
             name (Text, optional): ID given to a call. Defaults to "model_process".
@@ -135,7 +137,7 @@
         parameters.setdefault("context", context)
         parameters.setdefault("prompt", prompt)
         parameters.setdefault("history", history)
-        parameters.setdefault("temperature", temperature)
+        parameters.setdefault("temperature", temperature if temperature is not None else self.temperature)
         parameters.setdefault("max_tokens", max_tokens)
         parameters.setdefault("top_p", top_p)
@@ -173,7 +175,7 @@ def run_async(
         context: Optional[Text] = None,
         prompt: Optional[Text] = None,
         history: Optional[List[Dict]] = None,
-        temperature: float = 0.001,
+        temperature: Optional[float] = None,
         max_tokens: int = 128,
         top_p: float = 1.0,
         name: Text = "model_process",
@@ -186,7 +188,7 @@
             context (Optional[Text], optional): System message. Defaults to None.
             prompt (Optional[Text], optional): Prompt Message which comes on the left side of the last utterance. Defaults to None.
             history (Optional[List[Dict]], optional): Conversation history in OpenAI format ([{ "role": "assistant", "content": "Hello, world!"}]). Defaults to None.
-            temperature (float, optional): LLM temperature. Defaults to 0.001.
+            temperature (Optional[float], optional): LLM temperature. Defaults to None.
             max_tokens (int, optional): Maximum Generation Tokens. Defaults to 128.
             top_p (float, optional): Top P. Defaults to 1.0.
             name (Text, optional): ID given to a call. Defaults to "model_process".
@@ -206,7 +208,7 @@
         parameters.setdefault("context", context)
         parameters.setdefault("prompt", prompt)
         parameters.setdefault("history", history)
-        parameters.setdefault("temperature", temperature)
+        parameters.setdefault("temperature", temperature if temperature is not None else self.temperature)
         parameters.setdefault("max_tokens", max_tokens)
         parameters.setdefault("top_p", top_p)
         payload = build_payload(data=data, parameters=parameters)
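The net effect is a two-level precedence: a temperature passed to run()/run_async() wins, otherwise the default captured at construction (ultimately the 0.001 constructor fallback) applies. A minimal sketch of that resolution, using a stand-in class rather than the real LLM (which also requires id, name, and other constructor arguments):

class _LLMStub:
    """Stand-in for LLM, reduced to the temperature-resolution logic."""

    def __init__(self, temperature: float = 0.001):
        self.temperature = temperature  # default captured from the backend response

    def resolve(self, temperature=None):
        # Same expression the diff adds to run()/run_async():
        return temperature if temperature is not None else self.temperature

llm = _LLMStub(temperature=0.7)  # e.g. default advertised by the backend
assert llm.resolve() == 0.7      # no call-site value: model default wins
assert llm.resolve(0.2) == 0.2   # explicit call-site value takes precedence
assert llm.resolve(0.0) == 0.0   # 0.0 is respected: the check is `is not None`, not truthiness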
4 changes: 2 additions & 2 deletions tests/functional/general_assets/asset_functional_test.py
@@ -19,7 +19,7 @@ def inputs():


 def __get_asset_factory(asset_name):
-    if asset_name == "model":
+    if "model" in asset_name:
         AssetFactory = ModelFactory
     elif asset_name == "dataset":
         AssetFactory = DatasetFactory
@@ -40,7 +40,7 @@ def test_list(asset_name):
     assert asset_list["page_total"] == len(asset_list["results"])


-@pytest.mark.parametrize("asset_name", ["model", "pipeline", "metric"])
+@pytest.mark.parametrize("asset_name", ["model", "model2", "model3", "pipeline", "metric"])
 def test_run(inputs, asset_name):
     asset_details = inputs[asset_name]
     AssetFactory = __get_asset_factory(asset_name)
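The switch from equality to a substring check is what lets the new model2/model3 cases resolve to ModelFactory; under the old asset_name == "model" comparison they would have fallen through to the wrong branch. A quick sanity check of the routing, using the parametrization keys above:

cases = ["model", "model2", "model3", "pipeline", "metric"]
assert [name for name in cases if "model" in name] == ["model", "model2", "model3"]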
4 changes: 4 additions & 0 deletions tests/functional/general_assets/data/asset_run_test_data.json
@@ -7,6 +7,10 @@
         "id" : "60ddefab8d38c51c5885ee38",
         "data": "https://aixplain-platform-assets.s3.amazonaws.com/samples/en/myname.mp3"
     },
+    "model3" : {
+        "id" : "6736411cf127849667606689",
+        "data": "How to cook a shrimp risotto?"
+    },
     "pipeline": {
         "name": "SingleNodePipeline",
         "data": "This is a test sentence."
17 changes: 17 additions & 0 deletions tests/functional/pipelines/run_test.py
@@ -251,3 +251,20 @@ def test_run_script(version: str):
     assert response["status"] == "SUCCESS"
     data = response["data"][0]["segments"][0]["response"]
     assert data.startswith("SCRIPT MODIFIED:")
+
+
+@pytest.mark.parametrize("version", ["2.0", "3.0"])
+def test_run_text_reconstruction(version: str):
+    pipeline = PipelineFactory.list(query="Text Reconstruction - DO NOT DELETE")["results"][0]
+    response = pipeline.run("Segment A\nSegment B\nSegment C", **{"version": version})
+
+    assert response["status"] == "SUCCESS"
+    labels = [d["label"] for d in response["data"]]
+    assert "Audio (Direct)" in labels
+    assert "Audio (Text Reconstruction)" in labels
+    assert "Audio (Audio Reconstruction)" in labels
+    assert "Text Reconstruction" in labels
+
+    for d in response["data"]:
+        assert len(d["segments"]) > 0
+        assert d["segments"][0]["success"] is True