diff --git a/bigframes/ml/llm.py b/bigframes/ml/llm.py
index 3920da6c71..b757a57502 100644
--- a/bigframes/ml/llm.py
+++ b/bigframes/ml/llm.py
@@ -58,13 +58,17 @@
 _GEMINI_1P5_PRO_PREVIEW_ENDPOINT = "gemini-1.5-pro-preview-0514"
 _GEMINI_1P5_PRO_FLASH_PREVIEW_ENDPOINT = "gemini-1.5-flash-preview-0514"
 _GEMINI_1P5_PRO_001_ENDPOINT = "gemini-1.5-pro-001"
+_GEMINI_1P5_PRO_002_ENDPOINT = "gemini-1.5-pro-002"
 _GEMINI_1P5_FLASH_001_ENDPOINT = "gemini-1.5-flash-001"
+_GEMINI_1P5_FLASH_002_ENDPOINT = "gemini-1.5-flash-002"
 _GEMINI_ENDPOINTS = (
     _GEMINI_PRO_ENDPOINT,
     _GEMINI_1P5_PRO_PREVIEW_ENDPOINT,
     _GEMINI_1P5_PRO_FLASH_PREVIEW_ENDPOINT,
     _GEMINI_1P5_PRO_001_ENDPOINT,
+    _GEMINI_1P5_PRO_002_ENDPOINT,
     _GEMINI_1P5_FLASH_001_ENDPOINT,
+    _GEMINI_1P5_FLASH_002_ENDPOINT,
 )
 
 _CLAUDE_3_SONNET_ENDPOINT = "claude-3-sonnet"
@@ -749,7 +753,7 @@ class GeminiTextGenerator(base.BaseEstimator):
 
     Args:
         model_name (str, Default to "gemini-pro"):
-            The model for natural language tasks. Accepted values are "gemini-pro", "gemini-1.5-pro-preview-0514", "gemini-1.5-flash-preview-0514", "gemini-1.5-pro-001" and "gemini-1.5-flash-001". Default to "gemini-pro".
+            The model for natural language tasks. Accepted values are "gemini-pro", "gemini-1.5-pro-preview-0514", "gemini-1.5-flash-preview-0514", "gemini-1.5-pro-001", "gemini-1.5-pro-002", "gemini-1.5-flash-001" and "gemini-1.5-flash-002". Default to "gemini-pro".
 
         .. note::
             "gemini-1.5-pro-preview-0514" and "gemini-1.5-flash-preview-0514" is subject to the "Pre-GA Offerings Terms" in the General Service Terms section of the
@@ -775,7 +779,9 @@ def __init__(
             "gemini-1.5-pro-preview-0514",
             "gemini-1.5-flash-preview-0514",
             "gemini-1.5-pro-001",
+            "gemini-1.5-pro-002",
             "gemini-1.5-flash-001",
+            "gemini-1.5-flash-002",
         ] = "gemini-pro",
         session: Optional[bigframes.Session] = None,
         connection_name: Optional[str] = None,
diff --git a/bigframes/ml/loader.py b/bigframes/ml/loader.py
index de9681660e..0ebf65b893 100644
--- a/bigframes/ml/loader.py
+++ b/bigframes/ml/loader.py
@@ -64,7 +64,9 @@
         llm._GEMINI_1P5_PRO_PREVIEW_ENDPOINT: llm.GeminiTextGenerator,
         llm._GEMINI_1P5_PRO_FLASH_PREVIEW_ENDPOINT: llm.GeminiTextGenerator,
         llm._GEMINI_1P5_PRO_001_ENDPOINT: llm.GeminiTextGenerator,
+        llm._GEMINI_1P5_PRO_002_ENDPOINT: llm.GeminiTextGenerator,
         llm._GEMINI_1P5_FLASH_001_ENDPOINT: llm.GeminiTextGenerator,
+        llm._GEMINI_1P5_FLASH_002_ENDPOINT: llm.GeminiTextGenerator,
         llm._CLAUDE_3_HAIKU_ENDPOINT: llm.Claude3TextGenerator,
         llm._CLAUDE_3_SONNET_ENDPOINT: llm.Claude3TextGenerator,
         llm._CLAUDE_3_5_SONNET_ENDPOINT: llm.Claude3TextGenerator,
diff --git a/tests/system/small/ml/test_llm.py b/tests/system/small/ml/test_llm.py
index 78fed6b82f..40862b3086 100644
--- a/tests/system/small/ml/test_llm.py
+++ b/tests/system/small/ml/test_llm.py
@@ -264,7 +264,9 @@ def test_text_embedding_generator_multi_cols_predict_success(
         "gemini-1.5-pro-preview-0514",
         "gemini-1.5-flash-preview-0514",
         "gemini-1.5-pro-001",
+        "gemini-1.5-pro-002",
         "gemini-1.5-flash-001",
+        "gemini-1.5-flash-002",
     ),
 )
 def test_create_load_gemini_text_generator_model(
@@ -292,7 +294,9 @@ def test_create_load_gemini_text_generator_model(
         "gemini-1.5-pro-preview-0514",
         "gemini-1.5-flash-preview-0514",
         "gemini-1.5-pro-001",
+        "gemini-1.5-pro-002",
         "gemini-1.5-flash-001",
+        "gemini-1.5-flash-002",
     ),
 )
 @pytest.mark.flaky(retries=2)
@@ -315,7 +319,9 @@ def test_gemini_text_generator_predict_default_params_success(
         "gemini-1.5-pro-preview-0514",
         "gemini-1.5-flash-preview-0514",
         "gemini-1.5-pro-001",
+        "gemini-1.5-pro-002",
         "gemini-1.5-flash-001",
+        "gemini-1.5-flash-002",
     ),
 )
 @pytest.mark.flaky(retries=2)
@@ -340,7 +346,9 @@ def test_gemini_text_generator_predict_with_params_success(
         "gemini-1.5-pro-preview-0514",
         "gemini-1.5-flash-preview-0514",
         "gemini-1.5-pro-001",
+        "gemini-1.5-pro-002",
         "gemini-1.5-flash-001",
+        "gemini-1.5-flash-002",
     ),
 )
 @pytest.mark.flaky(retries=2)