Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Added unit test and removed parameters #22

Merged
merged 1 commit into from
Apr 5, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 1 addition & 10 deletions src/translator.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,7 @@
# from google.cloud import aiplatform
import google.generativeai as genai

# os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = "/Users/larissatyagi/Desktop/translator-service/translator-service-418821-9352d29e6139.json"
# aiplatform.init(project='translator-service-418821', location='us-central1')

os.environ['GOOGLE_API_KEY'] = 'AIzaSyA5nw5uJld70nkV-0D2C1gmhqo5ql9OdRw' # Replace with your actual API key
genai.configure(api_key=os.environ['GOOGLE_API_KEY'])

Expand All @@ -17,10 +16,6 @@ def get_translation(post: str) -> str:
+"please translate this to English. "
+ f"Prompt: {post}")
model = genai.GenerativeModel(model_name="gemini-pro")
parameters = {
"temperature": 0.7, # Temperature controls the degree of randomness in token selection.
"max_output_tokens": 256, # Token limit determines the maximum amount of text output.
}

response = model.generate_content(context)
return response.text
Expand All @@ -30,10 +25,6 @@ def get_translation(post: str) -> str:

def get_language(post: str) -> str:
model = genai.GenerativeModel(model_name="gemini-pro")
parameters = {
"temperature": 0.7, # Temperature controls the degree of randomness in token selection.
"max_output_tokens": 256, # Token limit determines the maximum amount of text output.
}

context = f"Context = 'I want to know what language the prompt is', prompt = '{post}'"
response = model.generate_content(context+post)
Expand Down
25 changes: 15 additions & 10 deletions test/unit/test_translator.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# from src.translator import translate_content #,query_llm_robust
from src.translator import translate_content #,query_llm_robust
# from mock import patch
# from vertexai.language_models import ChatModel, InputOutputTextPair
# from ../src/translator import query_llm_robust
Expand All @@ -16,15 +16,20 @@
# assert response == "I don't understand your request"
# assert response is not None and response != ""

### UNIT TESTING ###
def test_chinese():
    """A Chinese post must be flagged as non-English and translated to
    English text that mentions it is a Chinese message."""
    is_english, translated_content = translate_content("这是一条中文消息")
    # The input is Chinese, so the language flag must be False.
    assert is_english == False
    # Bug fix: the original second operand tested membership in
    # `translate_content` (the imported FUNCTION object), which raises
    # TypeError. Test the returned STRING instead; lower() makes the
    # check case-insensitive, covering both "chinese" and "Chinese".
    assert "chinese" in translated_content.lower()

# def test_chinese():
# is_english, translated_content = translate_content("这是一条中文消息")
# assert is_english == False
# assert translated_content == "This is a Chinese message"

def test_llm_normal_response():
    """An already-English post must be flagged as English and returned
    essentially unchanged."""
    is_english, translated_content = translate_content("to be or not to be")
    # English input: the language flag must be True.
    assert is_english == True
    # Bug fix: the original compared the string against `translate_content`
    # (the FUNCTION object), so the assertion could never hold. Compare
    # against the returned string; a case-insensitive containment check
    # tolerates minor LLM formatting (capitalization, trailing period).
    assert "to be or not to be" in translated_content.lower()


# def test_llm_normal_response():
# pass

# def test_llm_gibberish_response():
# pass
def test_llm_gibberish_response():
    """Gibberish input: the service should either mark the post as
    non-English or echo the original text back untranslated."""
    flagged_english, result_text = translate_content("jfdjshghui bjfhsdufh")
    echoed_unchanged = result_text == "jfdjshghui bjfhsdufh"
    assert (flagged_english == False) or echoed_unchanged

Loading