Skip to content

Commit

Permalink
Foundations for firebase_vertexai version, BYO Firebase Project #53
Browse files Browse the repository at this point in the history
Currently battling with [firebase_functions/internal] Response is not valid JSON object.
errorCode: firebase_functions
errorMessage: com.google.firebase.functions.FirebaseFunctionsException: Response is not valid JSON object.
  • Loading branch information
MrCsabaToth committed Sep 21, 2024
1 parent bda16aa commit 03e0b40
Show file tree
Hide file tree
Showing 43 changed files with 613 additions and 112 deletions.
5 changes: 5 additions & 0 deletions .firebaserc
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
{
"projects": {
"default": "open-mmpa"
}
}
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -117,3 +117,4 @@ untranslated_messages.txt
firebase.json
lib/firebase_options.dart
android/app/google-services.json
functions/venv/
4 changes: 4 additions & 0 deletions android/app/build.gradle
Original file line number Diff line number Diff line change
@@ -1,5 +1,8 @@
plugins {
id "com.android.application"
// START: FlutterFire Configuration
id "com.google.gms.google-services"
// END: FlutterFire Configuration
id "kotlin-android"
id "dev.flutter.flutter-gradle-plugin"
}
Expand Down Expand Up @@ -113,4 +116,5 @@ flutter {

dependencies {
implementation "org.jetbrains.kotlin:kotlin-stdlib-jdk7:1.7.10"
// implementation "com.google.gms.google-services:4.4.2"
}
3 changes: 3 additions & 0 deletions android/settings.gradle
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,9 @@ pluginManagement {
plugins {
id "dev.flutter.flutter-plugin-loader" version "1.0.0"
id "com.android.application" version '8.5.2' apply false
// START: FlutterFire Configuration
id "com.google.gms.google-services" version "4.4.2" apply false
// END: FlutterFire Configuration
id "org.jetbrains.kotlin.android" version "1.9.20" apply false
}

Expand Down
4 changes: 4 additions & 0 deletions firestore.indexes.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
{
"indexes": [],
"fieldOverrides": []
}
7 changes: 7 additions & 0 deletions firestore.rules
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
// Version 2 of the rules language; without this line the deprecated
// version 1 semantics apply.
rules_version = '2';
service cloud.firestore {
  match /databases/{database}/documents {
    // Allow any signed-in user (including anonymous auth) to read and
    // write all documents. `request.auth != null` is the documented way
    // to test for an authenticated request.
    match /{document=**} {
      allow read, write: if request.auth != null;
    }
  }
}
1 change: 1 addition & 0 deletions functions/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
*.local
Empty file added functions/fn_impl/__init__.py
Empty file.
113 changes: 113 additions & 0 deletions functions/fn_impl/chirp.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,113 @@
from firebase_functions import https_fn
from firebase_admin import initialize_app, storage
import firebase_admin
import google.cloud.logging
import logging

from flask import jsonify
from google.api_core.client_options import ClientOptions
from google.cloud.speech_v2 import SpeechClient
from google.cloud.speech_v2.types import cloud_speech

def transcribe_chirp_auto_detect_language(
    project_id: str,
    region: str,
    audio_bytes: bytes,
) -> list[str]:
    """Transcribe an audio file and auto-detect spoken language using Chirp.

    Please see https://cloud.google.com/speech-to-text/v2/docs/encoding for
    more information on which audio encodings are supported.

    Args:
        project_id: Google Cloud project that hosts the Speech recognizer.
        region: Region of the regional Speech endpoint, e.g. 'us-central1'.
        audio_bytes: Raw audio content (PCM16 with WAV header per caller).

    Returns:
        A flat list alternating transcript text and the detected language
        code of that transcript: [text1, lang1, text2, lang2, ...].
    """
    # Chirp is served from regional endpoints, so the client must target
    # the region-specific API endpoint rather than the global default.
    client = SpeechClient(
        client_options=ClientOptions(
            api_endpoint=f"{region}-speech.googleapis.com",
        )
    )

    config = cloud_speech.RecognitionConfig(
        auto_decoding_config=cloud_speech.AutoDetectDecodingConfig(),
        language_codes=["auto"],  # "auto" makes Chirp detect the language.
        model="chirp",
    )

    # The "_" recognizer is the implicit default recognizer of the project.
    request = cloud_speech.RecognizeRequest(
        recognizer=f"projects/{project_id}/locations/{region}/recognizers/_",
        config=config,
        content=audio_bytes,
    )

    # Transcribes the audio into text (synchronous recognition).
    response = client.recognize(request=request)

    transcripts: list[str] = []
    for result in response.results:
        # Keep the best alternative, followed by its detected language code.
        transcripts.append(result.alternatives[0].transcript.strip())
        transcripts.append(result.language_code)

    return transcripts

@https_fn.on_request()
def chirp(req: https_fn.Request) -> https_fn.Response:
    """Chirp audio to text.

    Args:
        req (https_fn.Request): Carries 'recording_file_name' in the JSON
            body or query args: the name of a PCM16 WAV blob in the
            project's default Cloud Storage bucket.
            <https://flask.palletsprojects.com/en/1.1.x/api/#incoming-request-data>
    Returns:
        JSON object with a 'transcripts' key: an alternating list of
        transcript and the language of the preceding transcript.
        <https://flask.palletsprojects.com/en/1.1.x/api/#flask.make_response>.
    """
    # Set CORS headers for the preflight request
    if req.method == 'OPTIONS':
        # Allows GET/POST requests from any origin with the Content-Type
        # header and caches the preflight response for 3600s.
        headers = {
            'Access-Control-Allow-Origin': '*',
            'Access-Control-Allow-Methods': 'GET, POST',
            'Access-Control-Allow-Headers': 'Content-Type',
            'Access-Control-Max-Age': '3600'
        }

        return ('', 204, headers)

    # Set CORS headers for the main request
    headers = {
        'Content-Type': 'application/json',
        'Access-Control-Allow-Origin': '*',
        'Access-Control-Allow-Headers': 'Content-Type',
    }
    # END CORS

    # Lazily initialize the Admin SDK so storage.bucket() can resolve.
    if not firebase_admin._apps:
        initialize_app()

    request_json = req.get_json(silent=True)
    request_args = req.args

    project_id = 'open-mmpa'
    region = 'us-central1'

    # JSON body takes precedence over query-string arguments.
    if request_json and 'recording_file_name' in request_json:
        recording_file_name = request_json['recording_file_name']
    elif request_args and 'recording_file_name' in request_args:
        recording_file_name = request_args['recording_file_name']
    else:
        recording_file_name = None

    if not recording_file_name:
        # Must be a JSON *object* response: the Firebase Functions client
        # rejects bare lists with "Response is not valid JSON object".
        return (jsonify(dict(transcripts=[])), 400, headers)

    transcripts = []
    try:
        bucket = storage.bucket(f'{project_id}.appspot.com')
        blob = bucket.blob(recording_file_name)
        wav_bytes = blob.download_as_bytes()
        transcripts = transcribe_chirp_auto_detect_language(project_id, region, wav_bytes)
    except Exception as e:
        client = google.cloud.logging.Client()
        client.setup_logging()
        logging.exception(e)
        # Keep the failure response a JSON object as well (see above).
        return (jsonify(dict(transcripts=[])), 500, headers)

    return (jsonify(dict(transcripts=transcripts)), 200, headers)
Empty file added functions/fn_impl/embedding.py
Empty file.
Empty file added functions/fn_impl/reranking.py
Empty file.
91 changes: 91 additions & 0 deletions functions/fn_impl/tts.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,91 @@
from firebase_functions import https_fn
from firebase_admin import initialize_app, storage
import datetime
import firebase_admin
import os

from flask import jsonify
from google.cloud import texttospeech

@https_fn.on_request()
def tts(req: https_fn.Request) -> https_fn.Response:
    """Synthesizes speech from the input string of text.

    The synthesized OGG Opus audio is uploaded to the project's default
    Cloud Storage bucket.

    Args:
        req (https_fn.Request): Carries 'text' and an optional
            'language_code' in the JSON body or query args.
    Returns:
        JSON object with 'synth_file_name': the bucket file name of the
        synthesized audio.
    """
    # Set CORS headers for the preflight request
    if req.method == 'OPTIONS':
        # Allows GET/POST requests from any origin with the Content-Type
        # header and caches the preflight response for 3600s.
        headers = {
            'Access-Control-Allow-Origin': '*',
            'Access-Control-Allow-Methods': 'GET, POST',
            'Access-Control-Allow-Headers': 'Content-Type',
            'Access-Control-Max-Age': '3600'
        }

        return ('', 204, headers)

    # Set CORS headers for the main request
    headers = {
        'Content-Type': 'application/json',
        'Access-Control-Allow-Origin': '*',
        'Access-Control-Allow-Headers': 'Content-Type',
    }
    # END CORS

    # Lazily initialize the Admin SDK so storage.bucket() can resolve.
    if not firebase_admin._apps:
        initialize_app()

    request_json = req.get_json(silent=True)
    request_args = req.args

    def get_param(name: str, default: str) -> str:
        """Fetch a request parameter, preferring the JSON body over args."""
        if request_json and name in request_json:
            return request_json[name]
        if request_args and name in request_args:
            return request_args[name]
        return default

    language_code = get_param('language_code',
                              os.environ.get('LANGUAGE_CODE', 'en-US'))
    text = get_param('text', '')

    # Instantiates a client
    client = texttospeech.TextToSpeechClient()

    # Set the text input to be synthesized
    synthesis_input = texttospeech.SynthesisInput(text=text)

    # Build the voice request, select the language code ("en-US") and the
    # ssml voice gender ("neutral")
    voice = texttospeech.VoiceSelectionParams(
        language_code=language_code, ssml_gender=texttospeech.SsmlVoiceGender.NEUTRAL
    )

    # Select the type of audio file you want returned
    audio_config = texttospeech.AudioConfig(
        audio_encoding=texttospeech.AudioEncoding.OGG_OPUS
    )

    # Perform the text-to-speech request on the text input with the selected
    # voice parameters and audio file type
    response = client.synthesize_speech(
        input=synthesis_input, voice=voice, audio_config=audio_config
    )

    # Timestamped file name avoids collisions between synthesis requests.
    now = datetime.datetime.now()
    file_name = now.strftime('tts_%m%d%Y_%H%M%S.ogg')
    project_id = 'open-mmpa'
    bucket = storage.bucket(f'{project_id}.appspot.com')
    synth_blob = bucket.blob(file_name)
    synth_blob.upload_from_string(response.audio_content, content_type='audio/ogg')
    # Strip the URL path and query string down to the bare blob name.
    synth_file_name = synth_blob.public_url.split('/')[-1].split('?')[0]
    synth_result = dict(synth_file_name=synth_file_name)

    return (jsonify(synth_result), 200, headers)
5 changes: 5 additions & 0 deletions functions/main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
# https://firebase.google.com/docs/functions/organize-functions?gen=2nd#python
from fn_impl.chirp import *
from fn_impl.embedding import *
from fn_impl.reranking import *
from fn_impl.tts import *
5 changes: 5 additions & 0 deletions functions/requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
firebase_functions~=0.1.0
google-api-core==2.19.0
google-cloud-logging==3.10.0
google-cloud-speech==2.26.0
google-cloud-texttospeech==2.16.3
15 changes: 6 additions & 9 deletions lib/ai/service/ai_service.dart
Original file line number Diff line number Diff line change
@@ -1,15 +1,16 @@
import 'dart:developer';

import 'package:firebase_vertexai/firebase_vertexai.dart';
import 'package:flutter/foundation.dart';
import 'package:get_it/get_it.dart';
import 'package:google_generative_ai/google_generative_ai.dart';
import 'package:inspector_gadget/ai/prompts/closing_parts.dart';
import 'package:inspector_gadget/ai/prompts/history_rag_stuffing.dart';
import 'package:inspector_gadget/ai/prompts/personalization_rag_stuffing.dart';
import 'package:inspector_gadget/ai/prompts/request_instruction.dart';
import 'package:inspector_gadget/ai/prompts/resolver_few_shot.dart';
import 'package:inspector_gadget/ai/prompts/system_instruction.dart';
import 'package:inspector_gadget/ai/prompts/translate_instruction.dart';
import 'package:inspector_gadget/ai/service/firebase_mixin.dart';
import 'package:inspector_gadget/ai/service/generated_content_response.dart';
import 'package:inspector_gadget/ai/tools/tools_mixin.dart';
import 'package:inspector_gadget/camera/service/m_file.dart';
Expand All @@ -23,7 +24,7 @@ import 'package:inspector_gadget/preferences/service/preferences.dart';
import 'package:strings/strings.dart';
import 'package:translator/translator.dart';

class AiService with ToolsMixin {
class AiService with FirebaseMixin, ToolsMixin {
AiService() {
watermark = DateTime.now();
}
Expand All @@ -37,10 +38,8 @@ class AiService with ToolsMixin {
}) {
final preferences = GetIt.I.get<PreferencesService>();
final modelType = preferences.fastLlmMode ? 'flash-latest' : 'pro';
return GenerativeModel(
return FirebaseVertexAI.instance.generativeModel(
model: 'gemini-1.5-$modelType',
apiKey: preferences.geminiApiKey,
// requestOptions: const RequestOptions(apiVersion: 'v2beta'),
safetySettings: [
SafetySetting(
HarmCategory.harassment,
Expand Down Expand Up @@ -282,10 +281,8 @@ class AiService with ToolsMixin {
}

Future<List<double>> obtainEmbedding(String prompt) async {
final preferences = GetIt.I.get<PreferencesService>();
final model = GenerativeModel(
model: 'text-embedding-004',
apiKey: preferences.geminiApiKey,
final model = FirebaseVertexAI.instance.generativeModel(
model: 'text-embedding-004', // 'text-multilingual-embedding-002',
);
final content = Content.text(prompt);
final embeddingResult = await model.embedContent(content);
Expand Down
26 changes: 26 additions & 0 deletions lib/ai/service/firebase_mixin.dart
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
import 'dart:developer';

import 'package:firebase_auth/firebase_auth.dart';
import 'package:firebase_core/firebase_core.dart';
import 'package:inspector_gadget/firebase_options.dart';

mixin FirebaseMixin {
  /// Initializes the default Firebase app and signs the user in anonymously.
  ///
  /// The outcome of the anonymous sign-in attempt is logged either way;
  /// auth failures are caught and logged rather than propagated.
  static Future<void> initFirebase() async {
    await Firebase.initializeApp(
      options: DefaultFirebaseOptions.currentPlatform,
    );

    try {
      final credential = await FirebaseAuth.instance.signInAnonymously();
      final signedInUser = credential.user;
      if (signedInUser == null) {
        log('Error signing in anonymously: $credential');
      } else {
        log('Signed in anonymously with user ID: ${signedInUser.uid}');
      }
    } on FirebaseAuthException catch (e) {
      log('Error signing in anonymously: ${e.message}');
    }
  }
}
2 changes: 1 addition & 1 deletion lib/ai/service/generated_content_response.dart
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
import 'package:google_generative_ai/google_generative_ai.dart';
import 'package:firebase_vertexai/firebase_vertexai.dart';

extension GeneratedContentResponse on GenerateContentResponse {
String strippedText() {
Expand Down
2 changes: 1 addition & 1 deletion lib/ai/tools/alpha_vantage_tool.dart
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import 'package:dart_helper_utils/dart_helper_utils.dart';
import 'package:firebase_vertexai/firebase_vertexai.dart';
import 'package:fl_location/fl_location.dart';
import 'package:google_generative_ai/google_generative_ai.dart';
import 'package:http/http.dart' as http;
import 'package:inspector_gadget/ai/tools/function_tool.dart';
import 'package:inspector_gadget/preferences/service/preferences.dart';
Expand Down
2 changes: 1 addition & 1 deletion lib/ai/tools/exchange_tool.dart
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
import 'dart:convert';

import 'package:dart_helper_utils/dart_helper_utils.dart';
import 'package:firebase_vertexai/firebase_vertexai.dart';
import 'package:fl_location/fl_location.dart';
import 'package:google_generative_ai/google_generative_ai.dart';
import 'package:http/http.dart' as http;
import 'package:inspector_gadget/ai/tools/currency_request.dart';
import 'package:inspector_gadget/ai/tools/function_tool.dart';
Expand Down
2 changes: 1 addition & 1 deletion lib/ai/tools/function_tool.dart
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import 'package:firebase_vertexai/firebase_vertexai.dart';
import 'package:fl_location/fl_location.dart';
import 'package:google_generative_ai/google_generative_ai.dart';
import 'package:inspector_gadget/preferences/service/preferences.dart';

abstract class FunctionTool {
Expand Down
2 changes: 1 addition & 1 deletion lib/ai/tools/local_tool.dart
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import 'dart:convert';

import 'package:firebase_vertexai/firebase_vertexai.dart';
import 'package:fl_location/fl_location.dart';
import 'package:google_generative_ai/google_generative_ai.dart';
import 'package:inspector_gadget/ai/tools/function_tool.dart';
import 'package:inspector_gadget/preferences/service/preferences.dart';

Expand Down
2 changes: 1 addition & 1 deletion lib/ai/tools/lyrics_tool.dart
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
import 'dart:convert';

import 'package:dart_helper_utils/dart_helper_utils.dart';
import 'package:firebase_vertexai/firebase_vertexai.dart';
import 'package:fl_location/fl_location.dart';
import 'package:google_generative_ai/google_generative_ai.dart';
import 'package:http/http.dart' as http;
import 'package:inspector_gadget/ai/tools/function_tool.dart';
import 'package:inspector_gadget/preferences/service/preferences.dart';
Expand Down
Loading

0 comments on commit 03e0b40

Please sign in to comment.