From c79b0b7746d4ea3fdae3869002b0f625464e40f2 Mon Sep 17 00:00:00 2001
From: Cdreetz
Date: Thu, 22 Aug 2024 12:27:22 -0500
Subject: [PATCH] i forgot, hope it doesnt break

---
 README.md                    |   3 +
 app/api/chat/lambda/route.ts |  21 ++++
 app/audio-analytics/page.tsx | 223 +++++++++++++++++++++++++++++++++++
 app/layout.tsx               |  10 +-
 components/HeaderBar.tsx     |   3 +
 5 files changed, 258 insertions(+), 2 deletions(-)
 create mode 100644 app/api/chat/lambda/route.ts
 create mode 100644 app/audio-analytics/page.tsx

diff --git a/README.md b/README.md
index 842d19b..3add842 100644
--- a/README.md
+++ b/README.md
@@ -38,6 +38,9 @@ Transcribe audio
 - ~~stt page with Groq whisper~~
 - transcription optional arguments
 
+- lambda chat api support
+- hermes 3 models added
+
 v0.2.0
 - auth / sign up / login pages
 - save prompt collection

diff --git a/app/api/chat/lambda/route.ts b/app/api/chat/lambda/route.ts
new file mode 100644
index 0000000..303b91e
--- /dev/null
+++ b/app/api/chat/lambda/route.ts
@@ -0,0 +1,21 @@
+import { createOpenAI as createLambdaLabs } from '@ai-sdk/openai';
+import { convertToCoreMessages, streamText } from 'ai';
+
+const lambdalabs = createLambdaLabs({
+  // createOpenAI appends /chat/completions itself, so stop at the API base
+  baseURL: 'https://cloud.lambdalabs.com/api/v1',
+  apiKey: process.env.LAMBDALABS_API_KEY,
+});
+
+export const maxDuration = 30;
+
+export async function POST(req: Request) {
+  const { messages, model } = await req.json();
+
+  const result = await streamText({
+    model: lambdalabs(model || 'hermes-3-llama-3.1-405b-fp8'),
+    messages: convertToCoreMessages(messages),
+  });
+
+  return result.toAIStreamResponse();
+}

diff --git a/app/audio-analytics/page.tsx b/app/audio-analytics/page.tsx
new file mode 100644
index 0000000..772d4b3
--- /dev/null
+++ b/app/audio-analytics/page.tsx
@@ -0,0 +1,223 @@
+'use client'
+
+import React, { useState, useRef } from 'react';
+import { Button } from '@/components/ui/button';
+import { Input } from '@/components/ui/input';
+import { Textarea } from '@/components/ui/textarea';
+import { Alert, AlertDescription } from '@/components/ui/alert';
+import InfoButton from '@/components/InfoButton';
+import { Switch } from "@/components/ui/switch"
+import { Label } from "@/components/ui/label"
+import { ScrollArea } from "@/components/ui/scroll-area"
+
+export default function AudioAnalyticsPage() {
+  const [file, setFile] = useState<File | null>(null);
+  const [transcription, setTranscription] = useState('');
+  const [summary, setSummary] = useState('');
+  const [sentiment, setSentiment] = useState('');
+  const [entities, setEntities] = useState('');
+  const [customClassification, setCustomClassification] = useState('');
+  const [customPrompt, setCustomPrompt] = useState('');
+  const [isLoading, setIsLoading] = useState(false);
+  const [error, setError] = useState('');
+  const [fileDetails, setFileDetails] = useState<{ name: string; duration: string } | null>(null);
+  const audioRef = useRef<HTMLAudioElement | null>(null);
+
+  const [enableSummary, setEnableSummary] = useState(true);
+  const [enableSentiment, setEnableSentiment] = useState(true);
+  const [enableEntities, setEnableEntities] = useState(true);
+  const [enableCustom, setEnableCustom] = useState(false);
+
+  const handleFileChange = (event: React.ChangeEvent<HTMLInputElement>) => {
+    if (event.target.files && event.target.files[0]) {
+      const selectedFile = event.target.files[0];
+      setFile(selectedFile);
+      setFileDetails(null);
+
+      const audioUrl = URL.createObjectURL(selectedFile);
+      if (audioRef.current) {
+        audioRef.current.src = audioUrl;
+        audioRef.current.onloadedmetadata = () => {
+          const duration = audioRef.current?.duration || 0;
+          const minutes = Math.floor(duration / 60);
+          const seconds = Math.floor(duration % 60);
+          setFileDetails({
+            name: selectedFile.name,
+            duration: `${minutes}:${seconds.toString().padStart(2, '0')}`,
+          });
+        };
+      }
+    }
+  };
+
+  const handleTranscribe = async () => {
+    if (!file) {
+      setError('Please select an audio file.');
+      return;
+    }
+
+    setIsLoading(true);
+    setError('');
+
+    try {
+      const formData = new FormData();
+      formData.append('file', file);
+      formData.append('model', 'whisper-large-v3');
+
+      const response = await fetch('/api/transcribe', {
+        method: 'POST',
+        body: formData,
+      });
+
+      if (!response.ok) {
+        throw new Error('Transcription failed');
+      }
+
+      const data = await response.json();
+      setTranscription(data.text);
+
+      // Process the transcription
+      await processTranscription(data.text);
+    } catch (err) {
+      setError('An error occurred during processing. Please try again.');
+    } finally {
+      setIsLoading(false);
+    }
+  };
+
+  const processTranscription = async (text: string) => {
+    const tasks = [
+      { prompt: `Summarize the following text:\n\n${text}`, setter: setSummary, enabled: enableSummary },
+      { prompt: `Analyze the sentiment of the following text:\n\n${text}`, setter: setSentiment, enabled: enableSentiment },
+      { prompt: `Extract named entities from the following text:\n\n${text}`, setter: setEntities, enabled: enableEntities },
+      { prompt: customPrompt + `\n\n${text}`, setter: setCustomClassification, enabled: enableCustom },
+    ];
+
+    for (const task of tasks) {
+      if (!task.enabled) continue;
+
+      try {
+        const response = await fetch('/api/generate-text', {
+          method: 'POST',
+          headers: { 'Content-Type': 'application/json' },
+          body: JSON.stringify({
+            model: 'groq:llama-3.1-70b-versatile',
+            prompt: task.prompt,
+          }),
+        });
+
+        if (!response.ok) throw new Error('API response was not ok');
+
+        const data = await response.json();
+        task.setter(data.text);
+      } catch (error) {
+        console.error(`Error processing task:`, error);
+        task.setter('Error occurred');
+      }
+    }
+  };
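+
+  // Note: the loop above runs the enabled tasks one at a time. The requests
+  // are independent, so they could also be dispatched concurrently, e.g.
+  //
+  //   await Promise.all(tasks.filter((t) => t.enabled).map(runTask));
+  //
+  // with the fetch-and-set body of the loop factored into a runTask helper.
+  // Sketch only; runTask is hypothetical and not part of this patch.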
+
+  return (
+    <div className="container mx-auto p-4 space-y-4">
+      <InfoButton>
+        <ol className="list-decimal list-inside">
+          <li>Upload an audio file</li>
+          <li>Select analysis options</li>
+          <li>Click Process Audio</li>
+          <li>View the transcription and analysis results</li>
+        </ol>
+      </InfoButton>
+
+      <h1 className="text-2xl font-bold">Audio Analytics</h1>
+
+      {error && (
+        <Alert variant="destructive">
+          <AlertDescription>{error}</AlertDescription>
+        </Alert>
+      )}
+
+      <Input type="file" accept="audio/*" onChange={handleFileChange} />
+      <audio ref={audioRef} className="hidden" />
+
+      {fileDetails && (
+        <div className="text-sm">
+          <p className="font-semibold">File Details:</p>
+          <p>Name: {fileDetails.name}</p>
+          <p>Duration: {fileDetails.duration}</p>
+        </div>
+      )}
+
+      <div className="flex items-center space-x-2">
+        <Switch id="enable-summary" checked={enableSummary} onCheckedChange={setEnableSummary} />
+        <Label htmlFor="enable-summary">Summary</Label>
+      </div>
+      <div className="flex items-center space-x-2">
+        <Switch id="enable-sentiment" checked={enableSentiment} onCheckedChange={setEnableSentiment} />
+        <Label htmlFor="enable-sentiment">Sentiment</Label>
+      </div>
+      <div className="flex items-center space-x-2">
+        <Switch id="enable-entities" checked={enableEntities} onCheckedChange={setEnableEntities} />
+        <Label htmlFor="enable-entities">Entities</Label>
+      </div>
+      <div className="flex items-center space-x-2">
+        <Switch id="enable-custom" checked={enableCustom} onCheckedChange={setEnableCustom} />
+        <Label htmlFor="enable-custom">Custom prompt</Label>
+      </div>
+
+      {enableCustom && (
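
---

A minimal client-side sketch for exercising the new /api/chat/lambda route, using the useChat hook from the same ai package the route depends on. The component name and markup are illustrative, not part of the patch; only the api path and model id come from the route above.

'use client'

import { useChat } from 'ai/react';

// Hypothetical demo component; render it on any page to talk to the route.
export default function LambdaChatDemo() {
  const { messages, input, handleInputChange, handleSubmit } = useChat({
    // Route added in this patch. Extra `body` fields are merged into the
    // request JSON, so `model` lands in the { messages, model } the route
    // destructures.
    api: '/api/chat/lambda',
    body: { model: 'hermes-3-llama-3.1-405b-fp8' },
  });

  return (
    <form onSubmit={handleSubmit}>
      {messages.map((m) => (
        <p key={m.id}>{m.role}: {m.content}</p>
      ))}
      <input value={input} onChange={handleInputChange} placeholder="Say something" />
    </form>
  );
}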