From 10b43e4fe4c28ee6eb03daf8c57100fd1b44a053 Mon Sep 17 00:00:00 2001 From: Emanuel Burgess Date: Sat, 3 Aug 2024 13:22:20 -0400 Subject: [PATCH 1/7] gradio additions --- ai-ml/gemini-chatbot-app/gradio/.gcloudignore | 1 + ai-ml/gemini-chatbot-app/gradio/.gitignore | 6 +++ ai-ml/gemini-chatbot-app/gradio/Procfile | 1 + ai-ml/gemini-chatbot-app/gradio/README.md | 53 +++++++++++++++++++ ai-ml/gemini-chatbot-app/gradio/deploy.sh | 12 +++++ ai-ml/gemini-chatbot-app/gradio/gradio_app.py | 41 ++++++++++++++ ai-ml/gemini-chatbot-app/gradio/llm.py | 46 ++++++++++++++++ .../gradio/requirements.txt | 3 ++ 8 files changed, 163 insertions(+) create mode 100644 ai-ml/gemini-chatbot-app/gradio/.gcloudignore create mode 100644 ai-ml/gemini-chatbot-app/gradio/.gitignore create mode 100644 ai-ml/gemini-chatbot-app/gradio/Procfile create mode 100644 ai-ml/gemini-chatbot-app/gradio/README.md create mode 100644 ai-ml/gemini-chatbot-app/gradio/deploy.sh create mode 100644 ai-ml/gemini-chatbot-app/gradio/gradio_app.py create mode 100644 ai-ml/gemini-chatbot-app/gradio/llm.py create mode 100644 ai-ml/gemini-chatbot-app/gradio/requirements.txt diff --git a/ai-ml/gemini-chatbot-app/gradio/.gcloudignore b/ai-ml/gemini-chatbot-app/gradio/.gcloudignore new file mode 100644 index 0000000..c18dd8d --- /dev/null +++ b/ai-ml/gemini-chatbot-app/gradio/.gcloudignore @@ -0,0 +1 @@ +__pycache__/ diff --git a/ai-ml/gemini-chatbot-app/gradio/.gitignore b/ai-ml/gemini-chatbot-app/gradio/.gitignore new file mode 100644 index 0000000..2efa028 --- /dev/null +++ b/ai-ml/gemini-chatbot-app/gradio/.gitignore @@ -0,0 +1,6 @@ +# virtual env +.venv + +# Cache +__pycache__/ +.ipynb_checkpoints/ \ No newline at end of file diff --git a/ai-ml/gemini-chatbot-app/gradio/Procfile b/ai-ml/gemini-chatbot-app/gradio/Procfile new file mode 100644 index 0000000..8a76310 --- /dev/null +++ b/ai-ml/gemini-chatbot-app/gradio/Procfile @@ -0,0 +1 @@ +web: python3 gradio_app.py --server.port=8080 --server.address=0.0.0.0 
--server.enableCORS=false --browser.gatherUsageStats=false \ No newline at end of file diff --git a/ai-ml/gemini-chatbot-app/gradio/README.md b/ai-ml/gemini-chatbot-app/gradio/README.md new file mode 100644 index 0000000..1860dbc --- /dev/null +++ b/ai-ml/gemini-chatbot-app/gradio/README.md @@ -0,0 +1,53 @@ +# Chatbot with Gemini Flash 1.5 + +Goal: To create a simple chat bot using following resoruces + +* Gemini Flash 1.5: A powerful large language model from Google AI and also a Lightweight, fast and cost-efficient AI model. +* Gradio: An open-source Python framework for data scientists and AI/ML engineers for building interactive data apps. +* Cloud Run: A fully managed platform that enables you to run your code directly on top of Google's scalable infrastructure. + + +## Prerequisites + +* Google Chrome (browser) +* Google Cloud Project + +For development environments or IDEs, the recommended option is to use Google Cloud Shell, a simple and convenient tool. Alternatively, you can set up local access to your project using the gcloud CLI and the latest version of Python. + +If you are using + +## How to run? + +To run the app, you can the following code in code in this repo. +``` +python3 -m venv .venv +source .venv/bin/activate +python3 -m pip install -r requirements.txt + +python3 run app.py +``` + +## Folder structure + +Here is the folder structure of the repository. +``` + +├── Procfile +├── requirements.txt +├── gradio_app.py +├── LICENSE +└── README.md +``` + + +* `deploy.sh` for deploying your code to Cloud Run +* `Procfile` has configuration for deploying the Gradio application in Cloud Run. +* `requirements.txt` has required packages for this applicaiton. +* `gradio_app.py` has the Gradio application code. 
+* `llm.py` contains the application code found in https://github.com/GoogleCloudPlatform/python-docs-samples/tree/main/generative_ai/inference + + +### Chatbot Permission + +* To access Gemini Flash 1.5 AI Model from your local environment, you may need to setup [Application Default Credentials](https://cloud.google.com/docs/authentication/provide-credentials-adc). +* To access Gemini Flash 1.5 AI Model from your Cloud Run instance, you may need to update Cloud Run Serivce Account IAM permissions. \ No newline at end of file diff --git a/ai-ml/gemini-chatbot-app/gradio/deploy.sh b/ai-ml/gemini-chatbot-app/gradio/deploy.sh new file mode 100644 index 0000000..253f701 --- /dev/null +++ b/ai-ml/gemini-chatbot-app/gradio/deploy.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +# Purpose: To deploy the App to Cloud Run. + +# Google Cloud Project +PROJECT=emanuel-ai + +# Google Cloud Region +LOCATION=us-east1 + +# Depolying app from source code +gcloud run deploy gradio-app --source=. --port=8080 --region=$LOCATION --project=$PROJECT --allow-unauthenticated \ No newline at end of file diff --git a/ai-ml/gemini-chatbot-app/gradio/gradio_app.py b/ai-ml/gemini-chatbot-app/gradio/gradio_app.py new file mode 100644 index 0000000..ec99bdd --- /dev/null +++ b/ai-ml/gemini-chatbot-app/gradio/gradio_app.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python +# +# Copyright 2024 Google, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import openai +import gradio as gr +import llm + +client = llm.get_chat_client(llm.PROJECT_ID, llm.LOCATION) + +def predict(message, history): + history_openai_format = [] + for human, assistant in history: + history_openai_format.append({"role": "user", "content": human }) + history_openai_format.append({"role": "assistant", "content":assistant}) + history_openai_format.append({"role": "user", "content": message}) + + response = client.chat.completions.create(model=llm.MODEL_NAME, + messages= history_openai_format, + temperature = 1.0, + stream=True) + + partial_message = "" + for chunk in response: + if chunk.choices[0].delta.content is not None: + partial_message = partial_message + chunk.choices[0].delta.content + yield partial_message + +gr.ChatInterface(predict).launch(share=True) \ No newline at end of file diff --git a/ai-ml/gemini-chatbot-app/gradio/llm.py b/ai-ml/gemini-chatbot-app/gradio/llm.py new file mode 100644 index 0000000..031d2d2 --- /dev/null +++ b/ai-ml/gemini-chatbot-app/gradio/llm.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python +# +# Copyright 2024 Google, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import openai +import vertexai + +from google.auth import default, transport + + +# TODO(Developer): Update project name +MODEL_NAME = "google/gemini-1.5-flash-001" +PROJECT_ID = "emanuel-ai" +LOCATION = "us-central1" + + +def get_chat_client(project_id, location): + # Initialize vertexai + vertexai.init(project=project_id, location=location) + + # Programmatically get an access token + credentials, _ = default(scopes=["https://www.googleapis.com/auth/cloud-platform"]) + auth_request = transport.requests.Request() + credentials.refresh(auth_request) + + # OpenAI client for Gemini-Flash-1.5 + client = openai.OpenAI( + base_url=f"https://{location}-aiplatform.googleapis.com/v1beta1/projects/{project_id}/locations/{location}/endpoints/openapi", + api_key=credentials.token, + ) + return client + +def main(): + return get_chat_client(PROJECT_ID, LOCATION) \ No newline at end of file diff --git a/ai-ml/gemini-chatbot-app/gradio/requirements.txt b/ai-ml/gemini-chatbot-app/gradio/requirements.txt new file mode 100644 index 0000000..7a9e165 --- /dev/null +++ b/ai-ml/gemini-chatbot-app/gradio/requirements.txt @@ -0,0 +1,3 @@ +openai +google-cloud-aiplatform +gradio \ No newline at end of file From ee9b0ede2fe6237cbea6f98a60b6d28884fd213b Mon Sep 17 00:00:00 2001 From: EmanuelB25 Date: Sat, 3 Aug 2024 18:40:48 +0000 Subject: [PATCH 2/7] updating dockerfile and app --- ai-ml/gemini-chatbot-app/gradio/Dockerfile | 18 ++++++++++++++++++ ai-ml/gemini-chatbot-app/gradio/Procfile | 1 - ai-ml/gemini-chatbot-app/gradio/gradio_app.py | 7 ++++++- 3 files changed, 24 insertions(+), 2 deletions(-) create mode 100644 ai-ml/gemini-chatbot-app/gradio/Dockerfile delete mode 100644 ai-ml/gemini-chatbot-app/gradio/Procfile diff --git a/ai-ml/gemini-chatbot-app/gradio/Dockerfile b/ai-ml/gemini-chatbot-app/gradio/Dockerfile new file mode 100644 index 0000000..f99be40 --- /dev/null +++ b/ai-ml/gemini-chatbot-app/gradio/Dockerfile @@ -0,0 +1,18 @@ +FROM python:3.11-slim + +WORKDIR /usr/src/app 
+ +ARG GRADIO_SERVER_PORT=8080 + +COPY requirements.txt ./ + +COPY gradio_app.py ./ + +RUN pip3 install -r requirements.txt + +# Dirty fix for https://github.com/gradio-app/gradio/pull/7707 regression +RUN sed -i 's/except (ConnectionError, httpx.ConnectError)/except (ConnectionError, httpx.ConnectError, httpx.TimeoutException)/g' /usr/local/lib/python3.11/site-packages/gradio/networking.py + +COPY . . + +CMD ["python3", "./gradio_app.py"] diff --git a/ai-ml/gemini-chatbot-app/gradio/Procfile b/ai-ml/gemini-chatbot-app/gradio/Procfile deleted file mode 100644 index 8a76310..0000000 --- a/ai-ml/gemini-chatbot-app/gradio/Procfile +++ /dev/null @@ -1 +0,0 @@ -web: python3 gradio_app.py --server.port=8080 --server.address=0.0.0.0 --server.enableCORS=false --browser.gatherUsageStats=false \ No newline at end of file diff --git a/ai-ml/gemini-chatbot-app/gradio/gradio_app.py b/ai-ml/gemini-chatbot-app/gradio/gradio_app.py index ec99bdd..0e63e74 100644 --- a/ai-ml/gemini-chatbot-app/gradio/gradio_app.py +++ b/ai-ml/gemini-chatbot-app/gradio/gradio_app.py @@ -17,7 +17,9 @@ import openai import gradio as gr import llm +import os +K_SERVICE = os.getenv("K_SERVICE", "local") client = llm.get_chat_client(llm.PROJECT_ID, llm.LOCATION) def predict(message, history): @@ -38,4 +40,7 @@ def predict(message, history): partial_message = partial_message + chunk.choices[0].delta.content yield partial_message -gr.ChatInterface(predict).launch(share=True) \ No newline at end of file +gr.ChatInterface(predict).launch( + share=(False if K_SERVICE == "local" else True), + server_name="0.0.0.0", + server_port=8080) From 8f16188778ec1f011ddd4432925d09bf572d07df Mon Sep 17 00:00:00 2001 From: EmanuelB25 Date: Sat, 3 Aug 2024 19:08:13 +0000 Subject: [PATCH 3/7] update llm --- ai-ml/gemini-chatbot-app/gradio/llm.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ai-ml/gemini-chatbot-app/gradio/llm.py b/ai-ml/gemini-chatbot-app/gradio/llm.py index 031d2d2..7180e2f 100644 --- 
a/ai-ml/gemini-chatbot-app/gradio/llm.py +++ b/ai-ml/gemini-chatbot-app/gradio/llm.py @@ -22,7 +22,7 @@ # TODO(Developer): Update project name MODEL_NAME = "google/gemini-1.5-flash-001" -PROJECT_ID = "emanuel-ai" +PROJECT_ID = "your-project-id" LOCATION = "us-central1" @@ -43,4 +43,4 @@ def get_chat_client(project_id, location): return client def main(): - return get_chat_client(PROJECT_ID, LOCATION) \ No newline at end of file + return get_chat_client(PROJECT_ID, LOCATION) From 1fc6b0502d7275480eb1e2e1f3a0f92277fadb89 Mon Sep 17 00:00:00 2001 From: EmanuelB25 <140216796+EmanuelB25@users.noreply.github.com> Date: Mon, 5 Aug 2024 08:22:59 -0400 Subject: [PATCH 4/7] Update README.md readme update --- ai-ml/gemini-chatbot-app/gradio/README.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/ai-ml/gemini-chatbot-app/gradio/README.md b/ai-ml/gemini-chatbot-app/gradio/README.md index 1860dbc..bda7323 100644 --- a/ai-ml/gemini-chatbot-app/gradio/README.md +++ b/ai-ml/gemini-chatbot-app/gradio/README.md @@ -24,7 +24,7 @@ python3 -m venv .venv source .venv/bin/activate python3 -m pip install -r requirements.txt -python3 run app.py +python3 run gradio_app.py ``` ## Folder structure @@ -32,22 +32,24 @@ python3 run app.py Here is the folder structure of the repository. ``` -├── Procfile +├── Dockerfile ├── requirements.txt ├── gradio_app.py +├── deploy.sh ├── LICENSE └── README.md ``` * `deploy.sh` for deploying your code to Cloud Run -* `Procfile` has configuration for deploying the Gradio application in Cloud Run. +* `Dockerfile` Docker container configuration for deploying the Gradio application in Cloud Run. * `requirements.txt` has required packages for this applicaiton. * `gradio_app.py` has the Gradio application code. 
+* `deploy.sh` deployment script * `llm.py` contains the application code found in https://github.com/GoogleCloudPlatform/python-docs-samples/tree/main/generative_ai/inference ### Chatbot Permission * To access Gemini Flash 1.5 AI Model from your local environment, you may need to setup [Application Default Credentials](https://cloud.google.com/docs/authentication/provide-credentials-adc). -* To access Gemini Flash 1.5 AI Model from your Cloud Run instance, you may need to update Cloud Run Serivce Account IAM permissions. \ No newline at end of file +* To access Gemini Flash 1.5 AI Model from your Cloud Run instance, you may need to update Cloud Run Serivce Account IAM permissions. From 219b5805452d227db5ecf54aae51504bc87d853f Mon Sep 17 00:00:00 2001 From: Emanuel Burgess Date: Mon, 5 Aug 2024 22:38:43 -0400 Subject: [PATCH 5/7] changing folder structure and hardcoded values --- ai-ml/gemini-chatbot-app-gradio/Dockerfile | 18 ++++++ ai-ml/gemini-chatbot-app-gradio/README.md | 55 +++++++++++++++++++ ai-ml/gemini-chatbot-app-gradio/deploy.sh | 12 ++++ ai-ml/gemini-chatbot-app-gradio/gradio_app.py | 46 ++++++++++++++++ ai-ml/gemini-chatbot-app-gradio/llm.py | 46 ++++++++++++++++ .../requirements.txt | 3 + 6 files changed, 180 insertions(+) create mode 100644 ai-ml/gemini-chatbot-app-gradio/Dockerfile create mode 100644 ai-ml/gemini-chatbot-app-gradio/README.md create mode 100644 ai-ml/gemini-chatbot-app-gradio/deploy.sh create mode 100644 ai-ml/gemini-chatbot-app-gradio/gradio_app.py create mode 100644 ai-ml/gemini-chatbot-app-gradio/llm.py create mode 100644 ai-ml/gemini-chatbot-app-gradio/requirements.txt diff --git a/ai-ml/gemini-chatbot-app-gradio/Dockerfile b/ai-ml/gemini-chatbot-app-gradio/Dockerfile new file mode 100644 index 0000000..1fe69c1 --- /dev/null +++ b/ai-ml/gemini-chatbot-app-gradio/Dockerfile @@ -0,0 +1,18 @@ +FROM python:3.11-slim + +WORKDIR /usr/src/app + +ARG GRADIO_SERVER_PORT=8080 + +COPY requirements.txt ./ + +COPY gradio_app.py ./ + +RUN 
pip3 install -r requirements.txt
+
+# Dirty fix, won't be necessary when https://github.com/gradio-app/gradio/pull/8962 gets into Gradio release
+RUN sed -i 's/except (ConnectionError, httpx.ConnectError)/except (ConnectionError, httpx.ConnectError, httpx.TimeoutException)/g' /usr/local/lib/python3.11/site-packages/gradio/networking.py
+
+COPY . .
+
+CMD ["python3", "./gradio_app.py"]
diff --git a/ai-ml/gemini-chatbot-app-gradio/README.md b/ai-ml/gemini-chatbot-app-gradio/README.md
new file mode 100644
index 0000000..bda7323
--- /dev/null
+++ b/ai-ml/gemini-chatbot-app-gradio/README.md
@@ -0,0 +1,55 @@
+# Chatbot with Gemini Flash 1.5
+
+Goal: To create a simple chat bot using the following resources
+
+* Gemini Flash 1.5: A powerful large language model from Google AI and also a Lightweight, fast and cost-efficient AI model.
+* Gradio: An open-source Python framework for data scientists and AI/ML engineers for building interactive data apps.
+* Cloud Run: A fully managed platform that enables you to run your code directly on top of Google's scalable infrastructure.
+
+
+## Prerequisites
+
+* Google Chrome (browser)
+* Google Cloud Project
+
+For development environments or IDEs, the recommended option is to use Google Cloud Shell, a simple and convenient tool. Alternatively, you can set up local access to your project using the gcloud CLI and the latest version of Python.
+
+If you are working locally, set up [Application Default Credentials](https://cloud.google.com/docs/authentication/provide-credentials-adc) before running the app.
+
+## How to run?
+
+To run the app, run the following commands in this repo.
+```
+python3 -m venv .venv
+source .venv/bin/activate
+python3 -m pip install -r requirements.txt
+
+python3 gradio_app.py
+```
+
+## Folder structure
+
+Here is the folder structure of the repository.
+```
+
+├── Dockerfile
+├── requirements.txt
+├── gradio_app.py
+├── deploy.sh
+├── LICENSE
+└── README.md
+```
+
+
+* `deploy.sh` for deploying your code to Cloud Run
+* `Dockerfile` Docker container configuration for deploying the Gradio application in Cloud Run.
+* `requirements.txt` has required packages for this application.
+* `gradio_app.py` has the Gradio application code.
+* `LICENSE` the Apache 2.0 license for this sample.
+* `llm.py` contains the application code found in https://github.com/GoogleCloudPlatform/python-docs-samples/tree/main/generative_ai/inference
+
+
+### Chatbot Permission
+
+* To access Gemini Flash 1.5 AI Model from your local environment, you may need to setup [Application Default Credentials](https://cloud.google.com/docs/authentication/provide-credentials-adc).
+* To access Gemini Flash 1.5 AI Model from your Cloud Run instance, you may need to update Cloud Run Service Account IAM permissions.
diff --git a/ai-ml/gemini-chatbot-app-gradio/deploy.sh b/ai-ml/gemini-chatbot-app-gradio/deploy.sh
new file mode 100644
index 0000000..de0a1fb
--- /dev/null
+++ b/ai-ml/gemini-chatbot-app-gradio/deploy.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+# Purpose: To deploy the App to Cloud Run.
+
+# @TODO Replace with your Google Cloud Project ID
+PROJECT=
+
+# Google Cloud Region
+LOCATION=us-east1
+
+# Deploying app from source code
+gcloud run deploy gradio-app --source=. --port=8080 --region=$LOCATION --project=$PROJECT --allow-unauthenticated
\ No newline at end of file
diff --git a/ai-ml/gemini-chatbot-app-gradio/gradio_app.py b/ai-ml/gemini-chatbot-app-gradio/gradio_app.py
new file mode 100644
index 0000000..0e63e74
--- /dev/null
+++ b/ai-ml/gemini-chatbot-app-gradio/gradio_app.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+#
+# Copyright 2024 Google, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. + +import openai +import gradio as gr +import llm +import os + +K_SERVICE = os.getenv("K_SERVICE", "local") +client = llm.get_chat_client(llm.PROJECT_ID, llm.LOCATION) + +def predict(message, history): + history_openai_format = [] + for human, assistant in history: + history_openai_format.append({"role": "user", "content": human }) + history_openai_format.append({"role": "assistant", "content":assistant}) + history_openai_format.append({"role": "user", "content": message}) + + response = client.chat.completions.create(model=llm.MODEL_NAME, + messages= history_openai_format, + temperature = 1.0, + stream=True) + + partial_message = "" + for chunk in response: + if chunk.choices[0].delta.content is not None: + partial_message = partial_message + chunk.choices[0].delta.content + yield partial_message + +gr.ChatInterface(predict).launch( + share=(False if K_SERVICE == "local" else True), + server_name="0.0.0.0", + server_port=8080) diff --git a/ai-ml/gemini-chatbot-app-gradio/llm.py b/ai-ml/gemini-chatbot-app-gradio/llm.py new file mode 100644 index 0000000..7180e2f --- /dev/null +++ b/ai-ml/gemini-chatbot-app-gradio/llm.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python +# +# Copyright 2024 Google, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import openai +import vertexai + +from google.auth import default, transport + + +# TODO(Developer): Update project name +MODEL_NAME = "google/gemini-1.5-flash-001" +PROJECT_ID = "your-project-id" +LOCATION = "us-central1" + + +def get_chat_client(project_id, location): + # Initialize vertexai + vertexai.init(project=project_id, location=location) + + # Programmatically get an access token + credentials, _ = default(scopes=["https://www.googleapis.com/auth/cloud-platform"]) + auth_request = transport.requests.Request() + credentials.refresh(auth_request) + + # OpenAI client for Gemini-Flash-1.5 + client = openai.OpenAI( + base_url=f"https://{location}-aiplatform.googleapis.com/v1beta1/projects/{project_id}/locations/{location}/endpoints/openapi", + api_key=credentials.token, + ) + return client + +def main(): + return get_chat_client(PROJECT_ID, LOCATION) diff --git a/ai-ml/gemini-chatbot-app-gradio/requirements.txt b/ai-ml/gemini-chatbot-app-gradio/requirements.txt new file mode 100644 index 0000000..7a9e165 --- /dev/null +++ b/ai-ml/gemini-chatbot-app-gradio/requirements.txt @@ -0,0 +1,3 @@ +openai +google-cloud-aiplatform +gradio \ No newline at end of file From f6a2c2c8b4732030c7fc470cf242a98f5b759d72 Mon Sep 17 00:00:00 2001 From: Emanuel Burgess Date: Thu, 8 Aug 2024 07:32:25 -0400 Subject: [PATCH 6/7] removing dup folder & add apache license --- ai-ml/gemini-chatbot-app-gradio/LICENSE | 201 ++++++++++++++++++++++++ 1 file changed, 201 insertions(+) create mode 100644 ai-ml/gemini-chatbot-app-gradio/LICENSE diff --git a/ai-ml/gemini-chatbot-app-gradio/LICENSE b/ai-ml/gemini-chatbot-app-gradio/LICENSE new file mode 100644 index 0000000..56ee3c8 --- /dev/null +++ b/ai-ml/gemini-chatbot-app-gradio/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file From 6c423978ec26452df9297becc0a4f9be67cbe647 Mon Sep 17 00:00:00 2001 From: EmanuelB25 <140216796+EmanuelB25@users.noreply.github.com> Date: Thu, 8 Aug 2024 07:38:45 -0400 Subject: [PATCH 7/7] Delete ai-ml/gemini-chatbot-app/gradio directory removing directory --- ai-ml/gemini-chatbot-app/gradio/.gcloudignore | 1 - ai-ml/gemini-chatbot-app/gradio/.gitignore | 6 -- ai-ml/gemini-chatbot-app/gradio/Dockerfile | 18 ------ ai-ml/gemini-chatbot-app/gradio/README.md | 55 ------------------- ai-ml/gemini-chatbot-app/gradio/deploy.sh | 12 ---- ai-ml/gemini-chatbot-app/gradio/gradio_app.py | 46 ---------------- ai-ml/gemini-chatbot-app/gradio/llm.py | 46 ---------------- .../gradio/requirements.txt | 3 - 8 files changed, 187 deletions(-) delete mode 100644 ai-ml/gemini-chatbot-app/gradio/.gcloudignore delete mode 100644 ai-ml/gemini-chatbot-app/gradio/.gitignore delete mode 100644 ai-ml/gemini-chatbot-app/gradio/Dockerfile delete mode 100644 ai-ml/gemini-chatbot-app/gradio/README.md delete mode 100644 ai-ml/gemini-chatbot-app/gradio/deploy.sh delete mode 100644 ai-ml/gemini-chatbot-app/gradio/gradio_app.py delete mode 100644 ai-ml/gemini-chatbot-app/gradio/llm.py delete mode 100644 ai-ml/gemini-chatbot-app/gradio/requirements.txt diff --git a/ai-ml/gemini-chatbot-app/gradio/.gcloudignore b/ai-ml/gemini-chatbot-app/gradio/.gcloudignore deleted file mode 100644 index c18dd8d..0000000 --- a/ai-ml/gemini-chatbot-app/gradio/.gcloudignore +++ /dev/null @@ -1 +0,0 @@ -__pycache__/ diff --git a/ai-ml/gemini-chatbot-app/gradio/.gitignore b/ai-ml/gemini-chatbot-app/gradio/.gitignore deleted file mode 100644 index 2efa028..0000000 --- a/ai-ml/gemini-chatbot-app/gradio/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -# virtual env -.venv - -# Cache -__pycache__/ -.ipynb_checkpoints/ \ No newline at end of file diff --git a/ai-ml/gemini-chatbot-app/gradio/Dockerfile b/ai-ml/gemini-chatbot-app/gradio/Dockerfile deleted file mode 100644 index f99be40..0000000 --- 
a/ai-ml/gemini-chatbot-app/gradio/Dockerfile +++ /dev/null @@ -1,18 +0,0 @@ -FROM python:3.11-slim - -WORKDIR /usr/src/app - -ARG GRADIO_SERVER_PORT=8080 - -COPY requirements.txt ./ - -COPY gradio_app.py ./ - -RUN pip3 install -r requirements.txt - -# Dirty fix for https://github.com/gradio-app/gradio/pull/7707 regression -RUN sed -i 's/except (ConnectionError, httpx.ConnectError)/except (ConnectionError, httpx.ConnectError, httpx.TimeoutException)/g' /usr/local/lib/python3.11/site-packages/gradio/networking.py - -COPY . . - -CMD ["python3", "./gradio_app.py"] diff --git a/ai-ml/gemini-chatbot-app/gradio/README.md b/ai-ml/gemini-chatbot-app/gradio/README.md deleted file mode 100644 index bda7323..0000000 --- a/ai-ml/gemini-chatbot-app/gradio/README.md +++ /dev/null @@ -1,55 +0,0 @@ -# Chatbot with Gemini Flash 1.5 - -Goal: To create a simple chat bot using following resoruces - -* Gemini Flash 1.5: A powerful large language model from Google AI and also a Lightweight, fast and cost-efficient AI model. -* Gradio: An open-source Python framework for data scientists and AI/ML engineers for building interactive data apps. -* Cloud Run: A fully managed platform that enables you to run your code directly on top of Google's scalable infrastructure. - - -## Prerequisites - -* Google Chrome (browser) -* Google Cloud Project - -For development environments or IDEs, the recommended option is to use Google Cloud Shell, a simple and convenient tool. Alternatively, you can set up local access to your project using the gcloud CLI and the latest version of Python. - -If you are using - -## How to run? - -To run the app, you can the following code in code in this repo. -``` -python3 -m venv .venv -source .venv/bin/activate -python3 -m pip install -r requirements.txt - -python3 run gradio_app.py -``` - -## Folder structure - -Here is the folder structure of the repository. 
-``` - -├── Dockerfile -├── requirements.txt -├── gradio_app.py -├── deploy.sh -├── LICENSE -└── README.md -``` - - -* `deploy.sh` for deploying your code to Cloud Run -* `Dockerfile` Docker container configuration for deploying the Gradio application in Cloud Run. -* `requirements.txt` has required packages for this applicaiton. -* `gradio_app.py` has the Gradio application code. -* `deploy.sh` deployment script -* `llm.py` contains the application code found in https://github.com/GoogleCloudPlatform/python-docs-samples/tree/main/generative_ai/inference - - -### Chatbot Permission - -* To access Gemini Flash 1.5 AI Model from your local environment, you may need to setup [Application Default Credentials](https://cloud.google.com/docs/authentication/provide-credentials-adc). -* To access Gemini Flash 1.5 AI Model from your Cloud Run instance, you may need to update Cloud Run Serivce Account IAM permissions. diff --git a/ai-ml/gemini-chatbot-app/gradio/deploy.sh b/ai-ml/gemini-chatbot-app/gradio/deploy.sh deleted file mode 100644 index 253f701..0000000 --- a/ai-ml/gemini-chatbot-app/gradio/deploy.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -# Purpose: To deploy the App to Cloud Run. - -# Google Cloud Project -PROJECT=emanuel-ai - -# Google Cloud Region -LOCATION=us-east1 - -# Depolying app from source code -gcloud run deploy gradio-app --source=. --port=8080 --region=$LOCATION --project=$PROJECT --allow-unauthenticated \ No newline at end of file diff --git a/ai-ml/gemini-chatbot-app/gradio/gradio_app.py b/ai-ml/gemini-chatbot-app/gradio/gradio_app.py deleted file mode 100644 index 0e63e74..0000000 --- a/ai-ml/gemini-chatbot-app/gradio/gradio_app.py +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2024 Google, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import openai -import gradio as gr -import llm -import os - -K_SERVICE = os.getenv("K_SERVICE", "local") -client = llm.get_chat_client(llm.PROJECT_ID, llm.LOCATION) - -def predict(message, history): - history_openai_format = [] - for human, assistant in history: - history_openai_format.append({"role": "user", "content": human }) - history_openai_format.append({"role": "assistant", "content":assistant}) - history_openai_format.append({"role": "user", "content": message}) - - response = client.chat.completions.create(model=llm.MODEL_NAME, - messages= history_openai_format, - temperature = 1.0, - stream=True) - - partial_message = "" - for chunk in response: - if chunk.choices[0].delta.content is not None: - partial_message = partial_message + chunk.choices[0].delta.content - yield partial_message - -gr.ChatInterface(predict).launch( - share=(False if K_SERVICE == "local" else True), - server_name="0.0.0.0", - server_port=8080) diff --git a/ai-ml/gemini-chatbot-app/gradio/llm.py b/ai-ml/gemini-chatbot-app/gradio/llm.py deleted file mode 100644 index 7180e2f..0000000 --- a/ai-ml/gemini-chatbot-app/gradio/llm.py +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2024 Google, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import openai -import vertexai - -from google.auth import default, transport - - -# TODO(Developer): Update project name -MODEL_NAME = "google/gemini-1.5-flash-001" -PROJECT_ID = "your-project-id" -LOCATION = "us-central1" - - -def get_chat_client(project_id, location): - # Initialize vertexai - vertexai.init(project=project_id, location=location) - - # Programmatically get an access token - credentials, _ = default(scopes=["https://www.googleapis.com/auth/cloud-platform"]) - auth_request = transport.requests.Request() - credentials.refresh(auth_request) - - # OpenAI client for Gemini-Flash-1.5 - client = openai.OpenAI( - base_url=f"https://{location}-aiplatform.googleapis.com/v1beta1/projects/{project_id}/locations/{location}/endpoints/openapi", - api_key=credentials.token, - ) - return client - -def main(): - return get_chat_client(PROJECT_ID, LOCATION) diff --git a/ai-ml/gemini-chatbot-app/gradio/requirements.txt b/ai-ml/gemini-chatbot-app/gradio/requirements.txt deleted file mode 100644 index 7a9e165..0000000 --- a/ai-ml/gemini-chatbot-app/gradio/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -openai -google-cloud-aiplatform -gradio \ No newline at end of file