diff --git a/tests/test_client_calls.py b/tests/test_client_calls.py
index 26f3d3b23..babb2c471 100644
--- a/tests/test_client_calls.py
+++ b/tests/test_client_calls.py
@@ -4844,7 +4844,8 @@ def test_max_new_tokens(max_new_tokens, temperature):
 @wrap_test_forked
 @pytest.mark.parametrize("base_model", vision_models)
 @pytest.mark.parametrize("langchain_mode", ['LLM', 'MyData'])
-def test_client1_image_qa(langchain_mode, base_model):
+@pytest.mark.parametrize("langchain_action", [LangChainAction.QUERY.value, LangChainAction.SUMMARIZE_MAP.value])
+def test_client1_image_qa(langchain_action, langchain_mode, base_model):
     inference_server = os.getenv('TEST_SERVER', 'https://gpt.h2o.ai')
     if inference_server == 'https://gpt.h2o.ai':
         auth_kwargs = dict(auth=('guest', 'guest'))
@@ -4872,6 +4873,7 @@ def test_client1_image_qa(langchain_mode, base_model):
                   visible_models=base_model,
                   stream_output=False,
                   langchain_mode=langchain_mode,
+                  langchain_action=langchain_action,
                   h2ogpt_key=h2ogpt_key)
     try:
         res = client.predict(str(dict(kwargs)), api_name='/submit_nochat_api')
@@ -4897,7 +4899,8 @@ def test_client1_image_qa(langchain_mode, base_model):
 @wrap_test_forked
 @pytest.mark.parametrize("base_model", vision_models)
 @pytest.mark.parametrize("langchain_mode", ['LLM', 'MyData'])
-def test_client1_images_qa(langchain_mode, base_model):
+@pytest.mark.parametrize("langchain_action", [LangChainAction.QUERY.value, LangChainAction.SUMMARIZE_MAP.value])
+def test_client1_images_qa(langchain_action, langchain_mode, base_model):
     image_dir = 'pdf_images'
     makedirs(image_dir)
     os.system('pdftoppm tests/2403.09629.pdf %s/outputname -jpeg' % image_dir)
@@ -4930,6 +4933,7 @@ def test_client1_images_qa(langchain_mode, base_model):
                   visible_models=base_model,
                   stream_output=False,
                   langchain_mode=langchain_mode,
+                  langchain_action=langchain_action,
                   h2ogpt_key=h2ogpt_key)
     res_dict = client.predict(str(dict(kwargs)), api_name='/submit_nochat_api')
     response = ast.literal_eval(res_dict)['response']