diff --git a/.gitignore b/.gitignore
index facabaa08..1f3260375 100644
Binary files a/.gitignore and b/.gitignore differ
diff --git a/App_Function_Libraries/Article_Summarization_Lib.py b/App_Function_Libraries/Article_Summarization_Lib.py
index 510eff78a..07e0d7258 100644
--- a/App_Function_Libraries/Article_Summarization_Lib.py
+++ b/App_Function_Libraries/Article_Summarization_Lib.py
@@ -140,7 +140,7 @@ def scrape_and_summarize_multiple(urls, custom_prompt_arg, api_name, api_key, ke
return combined_output
-def scrape_and_summarize(url, custom_prompt_arg, api_name, api_key, keywords, custom_article_title):
+def scrape_and_summarize(url, custom_prompt_arg, api_name, api_key, keywords, custom_article_title, system_message=None):
try:
# Step 1: Scrape the article
article_data = scrape_article(url)
@@ -156,8 +156,10 @@ def scrape_and_summarize(url, custom_prompt_arg, api_name, api_key, keywords, cu
print(f"Title: {title}, Author: {author}, Content Length: {len(content)}") # Debugging statement
+ # Custom system prompt for the article
+ system_message = system_message or "Act as a professional summarizer and summarize this article."
# Custom prompt for the article
- article_custom_prompt = custom_prompt_arg or "Summarize this article."
+ article_custom_prompt = custom_prompt_arg or "Act as a professional summarizer and summarize this article."
# Step 2: Summarize the article
summary = None
@@ -175,64 +177,64 @@ def scrape_and_summarize(url, custom_prompt_arg, api_name, api_key, keywords, cu
try:
if api_name.lower() == 'openai':
# def summarize_with_openai(api_key, input_data, custom_prompt_arg)
- summary = summarize_with_openai(api_key, json_file_path, article_custom_prompt)
+                    summary = summarize_with_openai(api_key, json_file_path, article_custom_prompt, system_message=system_message)
elif api_name.lower() == "anthropic":
# def summarize_with_anthropic(api_key, input_data, model, custom_prompt_arg, max_retries=3, retry_delay=5):
- summary = summarize_with_anthropic(api_key, json_file_path, article_custom_prompt)
+                    summary = summarize_with_anthropic(api_key, json_file_path, article_custom_prompt, system_message=system_message)
elif api_name.lower() == "cohere":
# def summarize_with_cohere(api_key, input_data, model, custom_prompt_arg)
- summary = summarize_with_cohere(api_key, json_file_path, article_custom_prompt)
+                    summary = summarize_with_cohere(api_key, json_file_path, article_custom_prompt, system_message=system_message)
elif api_name.lower() == "groq":
logging.debug(f"MAIN: Trying to summarize with groq")
# def summarize_with_groq(api_key, input_data, model, custom_prompt_arg):
- summary = summarize_with_groq(api_key, json_file_path, article_custom_prompt)
+                    summary = summarize_with_groq(api_key, json_file_path, article_custom_prompt, system_message=system_message)
elif api_name.lower() == "openrouter":
logging.debug(f"MAIN: Trying to summarize with OpenRouter")
# def summarize_with_openrouter(api_key, input_data, custom_prompt_arg):
- summary = summarize_with_openrouter(api_key, json_file_path, article_custom_prompt)
+                    summary = summarize_with_openrouter(api_key, json_file_path, article_custom_prompt, system_message=system_message)
elif api_name.lower() == "deepseek":
logging.debug(f"MAIN: Trying to summarize with DeepSeek")
# def summarize_with_deepseek(api_key, input_data, custom_prompt_arg):
- summary = summarize_with_deepseek(api_key, json_file_path, article_custom_prompt)
+                    summary = summarize_with_deepseek(api_key, json_file_path, article_custom_prompt, system_message=system_message)
elif api_name.lower() == "mistral":
- summary = summarize_with_mistral(api_key, json_file_path, article_custom_prompt)
+                    summary = summarize_with_mistral(api_key, json_file_path, article_custom_prompt, system_message=system_message)
elif api_name.lower() == "llama.cpp":
logging.debug(f"MAIN: Trying to summarize with Llama.cpp")
# def summarize_with_llama(api_url, file_path, token, custom_prompt)
- summary = summarize_with_llama(json_file_path, article_custom_prompt)
+                    summary = summarize_with_llama(json_file_path, article_custom_prompt, system_message=system_message)
elif api_name.lower() == "kobold":
logging.debug(f"MAIN: Trying to summarize with Kobold.cpp")
# def summarize_with_kobold(input_data, kobold_api_token, custom_prompt_input, api_url):
- summary = summarize_with_kobold(json_file_path, api_key, article_custom_prompt)
+                    summary = summarize_with_kobold(json_file_path, api_key, article_custom_prompt, system_message=system_message)
elif api_name.lower() == "ooba":
# def summarize_with_oobabooga(input_data, api_key, custom_prompt, api_url):
- summary = summarize_with_oobabooga(json_file_path, api_key, article_custom_prompt)
+                    summary = summarize_with_oobabooga(json_file_path, api_key, article_custom_prompt, system_message=system_message)
elif api_name.lower() == "tabbyapi":
# def summarize_with_tabbyapi(input_data, tabby_model, custom_prompt_input, api_key=None, api_IP):
- summary = summarize_with_tabbyapi(json_file_path, article_custom_prompt)
+                    summary = summarize_with_tabbyapi(json_file_path, article_custom_prompt, system_message=system_message)
elif api_name.lower() == "vllm":
logging.debug(f"MAIN: Trying to summarize with VLLM")
# def summarize_with_vllm(api_key, input_data, custom_prompt_input):
- summary = summarize_with_vllm(json_file_path, article_custom_prompt)
+                    summary = summarize_with_vllm(json_file_path, article_custom_prompt, system_message=system_message)
elif api_name.lower() == "local-llm":
logging.debug(f"MAIN: Trying to summarize with Local LLM")
- summary = summarize_with_local_llm(json_file_path, article_custom_prompt)
+                    summary = summarize_with_local_llm(json_file_path, article_custom_prompt, system_message=system_message)
elif api_name.lower() == "huggingface":
logging.debug(f"MAIN: Trying to summarize with huggingface")
# def summarize_with_huggingface(api_key, input_data, custom_prompt_arg):
- summarize_with_huggingface(api_key, json_file_path, article_custom_prompt)
+                    summary = summarize_with_huggingface(api_key, json_file_path, article_custom_prompt, system_message=system_message)
# Add additional API handlers here...
except requests.exceptions.ConnectionError as e:
logging.error(f"Connection error while trying to summarize with {api_name}: {str(e)}")
@@ -259,7 +261,7 @@ def scrape_and_summarize(url, custom_prompt_arg, api_name, api_key, keywords, cu
return f"Failed to process URL {url}: {str(e)}"
-def ingest_unstructured_text(text, custom_prompt, api_name, api_key, keywords, custom_article_title):
+def ingest_unstructured_text(text, custom_prompt, api_name, api_key, keywords, custom_article_title, system_message=None):
title = custom_article_title.strip() if custom_article_title else "Unstructured Text"
author = "Unknown"
ingestion_date = datetime.now().strftime('%Y-%m-%d')
@@ -271,7 +273,7 @@ def ingest_unstructured_text(text, custom_prompt, api_name, api_key, keywords, c
json.dump([{'text': text}], json_file, indent=2)
if api_name.lower() == 'openai':
- summary = summarize_with_openai(api_key, json_file_path, custom_prompt)
+        summary = summarize_with_openai(api_key, json_file_path, custom_prompt, system_message=system_message)
# Add other APIs as needed
else:
summary = "Unsupported API."
diff --git a/App_Function_Libraries/Gradio_Related.py b/App_Function_Libraries/Gradio_Related.py
index 03c24742c..2455fabae 100644
--- a/App_Function_Libraries/Gradio_Related.py
+++ b/App_Function_Libraries/Gradio_Related.py
@@ -73,6 +73,27 @@
custom_prompt_input = None
server_mode = False
share_public = False
+custom_prompt_summarize_bulleted_notes = ("""
+    You are a bulleted notes specialist. [INST]```When creating comprehensive bulleted notes, you should follow these guidelines: Use multiple headings based on the referenced topics, not categories like quotes or terms. Headings should be surrounded by bold formatting and not be listed as bullet points themselves. Leave no space between headings and their corresponding list items underneath. Important terms within the content should be emphasized by setting them in bold font. Any text that ends with a colon should also be bolded. Before submitting your response, review the instructions, and make any corrections necessary to adhere to the specified format. Do not reference these instructions within the notes.``` \nBased on the content between backticks, create comprehensive bulleted notes.[/INST]
+ **Bulleted Note Creation Guidelines**
+
+ **Headings**:
+ - Based on referenced topics, not categories like quotes or terms
+ - Surrounded by **bold** formatting
+ - Not listed as bullet points
+ - No space between headings and list items underneath
+
+ **Emphasis**:
+ - **Important terms** set in bold font
+ - **Text ending in a colon**: also bolded
+
+ **Review**:
+ - Ensure adherence to specified format
+ - Do not reference these instructions in your response.[INST] {{ .Prompt }} [/INST]
+ """)
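+# NOTE: "{{ .Prompt }}" above appears to be a Go-template placeholder (Ollama Modelfile
+# style); backends that do not perform template substitution will receive it verbatim.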
+
def gradio_download_youtube_video(url):
try:
@@ -790,10 +809,14 @@ def process_videos_with_error_handling(inputs, start_time, end_time, diarize, wh
'language': chunk_language
} if chunking_options_checkbox else None
+            if not custom_prompt_checkbox:
+                # Fall back to the module-level bulleted-notes prompt defined above
+                custom_prompt = custom_prompt_summarize_bulleted_notes
+
logging.debug("Gradio_Related.py: process_url_with_metadata being called")
result = process_url_with_metadata(
input_item, 2, whisper_model,
- custom_prompt if custom_prompt_checkbox else None,
+ custom_prompt,
start_seconds, api_name, api_key,
False, False, False, False, 0.01, None, keywords, None, diarize,
end_time=end_seconds,
@@ -1212,8 +1253,8 @@ def create_audio_processing_tab():
with gr.Row():
custom_prompt_checkbox = gr.Checkbox(label="Use a Custom Prompt",
- value=False,
- visible=True)
+ visible=True,
+ value=False)
preset_prompt_checkbox = gr.Checkbox(label="Use a pre-set Prompt",
value=False,
visible=True)
@@ -1223,7 +1264,10 @@ def create_audio_processing_tab():
custom_prompt_input = gr.Textbox(label="Custom Prompt",
placeholder="Enter custom prompt here",
lines=3,
- visible=False)
+ visible=False,
+ value=custom_prompt_summarize_bulleted_notes
+ )
+
custom_prompt_checkbox.change(
fn=lambda x: gr.update(visible=x),
inputs=[custom_prompt_checkbox],
@@ -1311,7 +1355,8 @@ def create_podcast_tab():
podcast_custom_prompt_input = gr.Textbox(label="Custom Prompt",
placeholder="Enter custom prompt here",
lines=3,
- visible=False)
+ visible=False,
+ value=custom_prompt_summarize_bulleted_notes)
podcast_custom_prompt_checkbox.change(
fn=lambda x: gr.update(visible=x),
inputs=[podcast_custom_prompt_checkbox],
@@ -1413,7 +1458,8 @@ def create_website_scraping_tab():
website_custom_prompt_input = gr.Textbox(label="Custom Prompt",
placeholder="Enter custom prompt here",
lines=3,
- visible=False)
+ visible=False,
+ value=custom_prompt_summarize_bulleted_notes)
website_custom_prompt_checkbox.change(
fn=lambda x: gr.update(visible=x),
inputs=[website_custom_prompt_checkbox],
@@ -1470,7 +1516,8 @@ def create_pdf_ingestion_tab():
pdf_custom_prompt_input = gr.Textbox(label="Custom Prompt",
placeholder="Enter custom prompt here",
lines=3,
- visible=False)
+ visible=False,
+ value=custom_prompt_summarize_bulleted_notes)
pdf_custom_prompt_checkbox.change(
fn=lambda x: gr.update(visible=x),
inputs=[pdf_custom_prompt_checkbox],
@@ -1541,7 +1588,8 @@ def create_resummary_tab():
custom_prompt_input = gr.Textbox(label="Custom Prompt",
placeholder="Enter custom prompt here",
lines=3,
- visible=False)
+ visible=False,
+ value=custom_prompt_summarize_bulleted_notes)
preset_prompt.change(update_user_prompt, inputs=preset_prompt, outputs=custom_prompt_input)
resummarize_button = gr.Button("Re-Summarize")
@@ -2358,7 +2406,10 @@ def create_llamafile_advanced_inputs():
# FIXME - not adding content from selected item to query
-def chat(message, history, media_content, selected_parts, api_endpoint, api_key, prompt, temperature):
+def chat(message, history, media_content, selected_parts, api_endpoint, api_key, prompt, temperature,
+ system_message=None):
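+    # system_message is forwarded to the selected summarize_with_* backend as the
+    # system-role prompt; when None, each backend should fall back to its own default.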
try:
logging.info(f"Debug - Chat Function - Message: {message}")
logging.info(f"Debug - Chat Function - Media Content: {media_content}")
@@ -2385,6 +2434,9 @@ def chat(message, history, media_content, selected_parts, api_endpoint, api_key,
# Print first 500 chars
logging.info(f"Debug - Chat Function - Input Data: {input_data[:500]}...")
+ if system_message:
+ print(f"System message: {system_message}")
+ logging.debug(f"Debug - Chat Function - System Message: {system_message}")
temperature = float(temperature) if temperature else 0.7
temp = temperature
@@ -2395,31 +2447,31 @@ def chat(message, history, media_content, selected_parts, api_endpoint, api_key,
# Use the existing API request code based on the selected endpoint
logging.info(f"Debug - Chat Function - API Endpoint: {api_endpoint}")
if api_endpoint.lower() == 'openai':
- response = summarize_with_openai(api_key, input_data, prompt, temp)
+ response = summarize_with_openai(api_key, input_data, prompt, temp, system_message)
elif api_endpoint.lower() == "anthropic":
- response = summarize_with_anthropic(api_key, input_data, prompt, temp)
+ response = summarize_with_anthropic(api_key, input_data, prompt, temp, system_message)
elif api_endpoint.lower() == "cohere":
- response = summarize_with_cohere(api_key, input_data, prompt, temp)
+ response = summarize_with_cohere(api_key, input_data, prompt, temp, system_message)
elif api_endpoint.lower() == "groq":
- response = summarize_with_groq(api_key, input_data, prompt, temp)
+ response = summarize_with_groq(api_key, input_data, prompt, temp, system_message)
elif api_endpoint.lower() == "openrouter":
- response = summarize_with_openrouter(api_key, input_data, prompt, temp)
+ response = summarize_with_openrouter(api_key, input_data, prompt, temp, system_message)
elif api_endpoint.lower() == "deepseek":
- response = summarize_with_deepseek(api_key, input_data, prompt, temp)
+ response = summarize_with_deepseek(api_key, input_data, prompt, temp, system_message)
elif api_endpoint.lower() == "llama.cpp":
- response = summarize_with_llama(input_data, prompt, temp)
+ response = summarize_with_llama(input_data, prompt, temp, system_message)
elif api_endpoint.lower() == "kobold":
- response = summarize_with_kobold(input_data, api_key, prompt, temp)
+ response = summarize_with_kobold(input_data, api_key, prompt, temp, system_message)
elif api_endpoint.lower() == "ooba":
- response = summarize_with_oobabooga(input_data, api_key, prompt, temp)
+ response = summarize_with_oobabooga(input_data, api_key, prompt, temp, system_message)
elif api_endpoint.lower() == "tabbyapi":
- response = summarize_with_tabbyapi(input_data, prompt, temp)
+ response = summarize_with_tabbyapi(input_data, prompt, temp, system_message)
elif api_endpoint.lower() == "vllm":
- response = summarize_with_vllm(input_data, prompt, temp)
+ response = summarize_with_vllm(input_data, prompt, temp, system_message)
elif api_endpoint.lower() == "local-llm":
- response = summarize_with_local_llm(input_data, prompt, temp)
+ response = summarize_with_local_llm(input_data, prompt, temp, system_message)
elif api_endpoint.lower() == "huggingface":
- response = summarize_with_huggingface(api_key, input_data, prompt, temp)
+ response = summarize_with_huggingface(api_key, input_data, prompt, temp, system_message)
else:
raise ValueError(f"Unsupported API endpoint: {api_endpoint}")
diff --git a/App_Function_Libraries/Summarization_General_Lib.py b/App_Function_Libraries/Summarization_General_Lib.py
index 42b1d70cd..586c9feb0 100644
--- a/App_Function_Libraries/Summarization_General_Lib.py
+++ b/App_Function_Libraries/Summarization_General_Lib.py
@@ -63,7 +63,7 @@ def extract_text_from_segments(segments):
return text.strip()
-def summarize_with_openai(api_key, input_data, custom_prompt_arg, temp=None):
+def summarize_with_openai(api_key, input_data, custom_prompt_arg, temp=None, system_message=None):
loaded_config_data = load_and_log_configs()
try:
@@ -143,7 +143,7 @@ def summarize_with_openai(api_key, input_data, custom_prompt_arg, temp=None):
data = {
"model": openai_model,
"messages": [
- {"role": "system", "content": "You are a professional summarizer."},
+                {"role": "system", "content": system_message or "You are a professional summarizer."},
{"role": "user", "content": openai_prompt}
],
"max_tokens": 4096,
@@ -314,7 +314,7 @@ def summarize_with_anthropic(api_key, input_data, custom_prompt_arg, temp=None,
# Summarize with Cohere
-def summarize_with_cohere(api_key, input_data, custom_prompt_arg, temp=None):
+def summarize_with_cohere(api_key, input_data, custom_prompt_arg, temp=None, system_message=None):
logging.debug("Cohere: Summarization process starting...")
try:
logging.debug("Cohere: Loading and validating configurations")
@@ -341,6 +341,11 @@ def summarize_with_cohere(api_key, input_data, custom_prompt_arg, temp=None):
# You might want to raise an exception here or handle this case as appropriate for your application
# For example: raise ValueError("No valid Anthropic API key available")
+ if custom_prompt_arg is None:
+ custom_prompt_arg = ""
+
+ if system_message is None:
+ system_message = ""
logging.debug(f"Cohere: Using API Key: {cohere_api_key[:5]}...{cohere_api_key[-5:]}")
@@ -389,7 +394,7 @@ def summarize_with_cohere(api_key, input_data, custom_prompt_arg, temp=None):
"chat_history": [
{"role": "USER", "message": cohere_prompt}
],
- "message": "Please provide a summary.",
+        "message": system_message or "Please provide a summary.",
"model": cohere_model,
# "connectors": [{"id": "web-search"}],
"temperature": temp
@@ -420,7 +425,7 @@ def summarize_with_cohere(api_key, input_data, custom_prompt_arg, temp=None):
# https://console.groq.com/docs/quickstart
-def summarize_with_groq(api_key, input_data, custom_prompt_arg, temp=None):
+def summarize_with_groq(api_key, input_data, custom_prompt_arg, temp=None, system_message=None):
logging.debug("Groq: Summarization process starting...")
try:
logging.debug("Groq: Loading and validating configurations")
@@ -494,6 +499,10 @@ def summarize_with_groq(api_key, input_data, custom_prompt_arg, temp=None):
data = {
"messages": [
+ {
+ "role": "system",
+                "content": system_message or "You are a professional summarizer.",
+ },
{
"role": "user",
"content": groq_prompt,
@@ -528,7 +537,7 @@ def summarize_with_groq(api_key, input_data, custom_prompt_arg, temp=None):
return f"groq: Error occurred while processing summary with groq: {str(e)}"
-def summarize_with_openrouter(api_key, input_data, custom_prompt_arg, temp=None):
+def summarize_with_openrouter(api_key, input_data, custom_prompt_arg, temp=None, system_message=None):
import requests
import json
global openrouter_model, openrouter_api_key
@@ -612,6 +621,7 @@ def summarize_with_openrouter(api_key, input_data, custom_prompt_arg, temp=None)
data=json.dumps({
"model": openrouter_model,
"messages": [
+                    {"role": "system", "content": system_message or "You are a professional summarizer."},
{"role": "user", "content": openrouter_prompt}
],
"temperature": temp
@@ -729,7 +739,7 @@ def summarize_with_huggingface(api_key, input_data, custom_prompt_arg, temp=None
return None
-def summarize_with_deepseek(api_key, input_data, custom_prompt_arg, temp=None):
+def summarize_with_deepseek(api_key, input_data, custom_prompt_arg, temp=None, system_message=None):
logging.debug("DeepSeek: Summarization process starting...")
try:
logging.debug("DeepSeek: Loading and validating configurations")
@@ -804,7 +814,7 @@ def summarize_with_deepseek(api_key, input_data, custom_prompt_arg, temp=None):
data = {
"model": deepseek_model,
"messages": [
- {"role": "system", "content": "You are a professional summarizer."},
+                {"role": "system", "content": system_message or "You are a professional summarizer."},
{"role": "user", "content": deepseek_prompt}
],
"stream": False,
@@ -983,34 +993,32 @@ def update_progress(index, url, message):
for index, url in enumerate(url_list):
try:
logging.info(f"Starting to process video {index + 1}/{len(url_list)}: {url}")
- transcription, summary, json_file_path, summary_file_path, _, _ = process_url(
- url=url,
- num_speakers=num_speakers,
- whisper_model=whisper_model,
- custom_prompt_input=custom_prompt_input,
- offset=offset,
- api_name=api_name,
- api_key=api_key,
- vad_filter=vad_filter,
- download_video_flag=download_video_flag,
- download_audio=download_audio,
- rolling_summarization=rolling_summarization,
- detail_level=detail_level,
- question_box=question_box,
- keywords=keywords,
- chunk_text_by_words=chunk_text_by_words,
- max_words=max_words,
- chunk_text_by_sentences=chunk_text_by_sentences,
- max_sentences=max_sentences,
- chunk_text_by_paragraphs=chunk_text_by_paragraphs,
- max_paragraphs=max_paragraphs,
- chunk_text_by_tokens=chunk_text_by_tokens,
- max_tokens=max_tokens,
- chunk_by_semantic=chunk_by_semantic,
- semantic_chunk_size=semantic_chunk_size,
- semantic_chunk_overlap=semantic_chunk_overlap,
- recursive_summarization=recursive_summarization
- )
+            transcription, summary, json_file_path, summary_file_path, _, _ = process_url(url=url,
+                num_speakers=num_speakers,
+                whisper_model=whisper_model,
+                custom_prompt_input=custom_prompt_input,
+                offset=offset,
+                api_name=api_name,
+                api_key=api_key,
+                vad_filter=vad_filter,
+                download_video_flag=download_video_flag,
+                download_audio=download_audio,
+                rolling_summarization=rolling_summarization,
+                detail_level=detail_level,
+                question_box=question_box,
+                keywords=keywords,
+                chunk_text_by_words=chunk_text_by_words,
+                max_words=max_words,
+                chunk_text_by_sentences=chunk_text_by_sentences,
+                max_sentences=max_sentences,
+                chunk_text_by_paragraphs=chunk_text_by_paragraphs,
+                max_paragraphs=max_paragraphs,
+                chunk_text_by_tokens=chunk_text_by_tokens,
+                max_tokens=max_tokens,
+                chunk_by_semantic=chunk_by_semantic,
+                semantic_chunk_size=semantic_chunk_size,
+                semantic_chunk_overlap=semantic_chunk_overlap,
+                recursive_summarization=recursive_summarization)
# Update progress and transcription properly
current_progress, current_status = update_progress(index, url, "Video processed and ingested into the database.")
@@ -1120,37 +1128,37 @@ def save_transcription_and_summary(transcription_text, summary_text, download_pa
return None, None
-def summarize_chunk(api_name, text, custom_prompt_input, api_key, temp=None):
+def summarize_chunk(api_name, text, custom_prompt_input, api_key, temp=None, system_message=None):
logging.debug("Entered 'summarize_chunk' function")
try:
if api_name.lower() == 'openai':
- return summarize_with_openai(api_key, text, custom_prompt_input, temp)
+ return summarize_with_openai(api_key, text, custom_prompt_input, temp, system_message)
elif api_name.lower() == "anthropic":
- return summarize_with_anthropic(api_key, text, custom_prompt_input, temp)
+ return summarize_with_anthropic(api_key, text, custom_prompt_input, temp, system_message)
elif api_name.lower() == "cohere":
- return summarize_with_cohere(api_key, text, custom_prompt_input, temp)
+ return summarize_with_cohere(api_key, text, custom_prompt_input, temp, system_message)
elif api_name.lower() == "groq":
- return summarize_with_groq(api_key, text, custom_prompt_input, temp)
+ return summarize_with_groq(api_key, text, custom_prompt_input, temp, system_message)
elif api_name.lower() == "openrouter":
- return summarize_with_openrouter(api_key, text, custom_prompt_input, temp)
+ return summarize_with_openrouter(api_key, text, custom_prompt_input, temp, system_message)
elif api_name.lower() == "deepseek":
- return summarize_with_deepseek(api_key, text, custom_prompt_input, temp)
+ return summarize_with_deepseek(api_key, text, custom_prompt_input, temp, system_message)
elif api_name.lower() == "mistral":
- return summarize_with_mistral(api_key, text, custom_prompt_input, temp)
+ return summarize_with_mistral(api_key, text, custom_prompt_input, temp, system_message)
elif api_name.lower() == "llama.cpp":
- return summarize_with_llama(text, custom_prompt_input, temp)
+ return summarize_with_llama(text, custom_prompt_input, temp, system_message)
elif api_name.lower() == "kobold":
- return summarize_with_kobold(text, api_key, custom_prompt_input, temp)
+ return summarize_with_kobold(text, api_key, custom_prompt_input, temp, system_message)
elif api_name.lower() == "ooba":
- return summarize_with_oobabooga(text, api_key, custom_prompt_input, temp)
+ return summarize_with_oobabooga(text, api_key, custom_prompt_input, temp, system_message)
elif api_name.lower() == "tabbyapi":
- return summarize_with_tabbyapi(text, custom_prompt_input, temp)
+ return summarize_with_tabbyapi(text, custom_prompt_input, temp, system_message)
elif api_name.lower() == "vllm":
- return summarize_with_vllm(text, custom_prompt_input, temp)
+ return summarize_with_vllm(text, custom_prompt_input, temp, system_message)
elif api_name.lower() == "local-llm":
- return summarize_with_local_llm(text, custom_prompt_input, temp)
+ return summarize_with_local_llm(text, custom_prompt_input, temp, system_message)
elif api_name.lower() == "huggingface":
- return summarize_with_huggingface(api_key, text, custom_prompt_input, temp)
+ return summarize_with_huggingface(api_key, text, custom_prompt_input, temp, system_message)
else:
logging.warning(f"Unsupported API: {api_name}")
return None
@@ -1341,8 +1349,10 @@ def process_url(
semantic_chunk_overlap,
local_file_path=None,
diarize=False,
- recursive_summarization=False
-):
+ recursive_summarization=False,
+ temp=None,
+ system_message=None):
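+    # temp and system_message are threaded through to the per-chunk summarize_with_* calls below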
# Handle the chunk summarization options
set_chunk_txt_by_words = chunk_text_by_words
set_max_txt_chunk_words = max_words
@@ -1359,30 +1368,10 @@ def process_url(
progress = []
success_message = "All videos processed successfully. Transcriptions and summaries have been ingested into the database."
- if custom_prompt_input is None:
- custom_prompt_input = """
- You are a bulleted notes specialist. ```When creating comprehensive bulleted notes, you should follow these guidelines: Use multiple headings based on the referenced topics, not categories like quotes or terms. Headings should be surrounded by bold formatting and not be listed as bullet points themselves. Leave no space between headings and their corresponding list items underneath. Important terms within the content should be emphasized by setting them in bold font. Any text that ends with a colon should also be bolded. Before submitting your response, review the instructions, and make any corrections necessary to adhered to the specified format. Do not reference these instructions within the notes.``` \nBased on the content between backticks create comprehensive bulleted notes.
- **Bulleted Note Creation Guidelines**
-
- **Headings**:
- - Based on referenced topics, not categories like quotes or terms
- - Surrounded by **bold** formatting
- - Not listed as bullet points
- - No space between headings and list items underneath
-
- **Emphasis**:
- - **Important terms** set in bold font
- - **Text ending in a colon**: also bolded
-
- **Review**:
- - Ensure adherence to specified format
- - Do not reference these instructions in your response.[INST] {{ .Prompt }} [/INST]"""
-
# Validate input
if not url and not local_file_path:
return "Process_URL: No URL provided.", "No URL provided.", None, None, None, None, None, None
- # FIXME - Chatgpt again?
if isinstance(url, str):
urls = url.strip().split('\n')
if len(urls) > 1:
@@ -1477,25 +1466,25 @@ def process_url(
if api_name == "anthropic":
-                        summary = summarize_with_anthropic(api_key, chunk, custom_prompt_input)
+                        summary = summarize_with_anthropic(api_key, chunk, custom_prompt_input, temp, system_message)
elif api_name == "cohere":
- summary = summarize_with_cohere(api_key, chunk, custom_prompt_input)
+ summary = summarize_with_cohere(api_key, chunk, custom_prompt_input, temp, system_message)
elif api_name == "openai":
- summary = summarize_with_openai(api_key, chunk, custom_prompt_input)
+ summary = summarize_with_openai(api_key, chunk, custom_prompt_input, temp, system_message)
elif api_name == "Groq":
- summary = summarize_with_groq(api_key, chunk, custom_prompt_input)
+ summary = summarize_with_groq(api_key, chunk, custom_prompt_input, temp, system_message)
elif api_name == "DeepSeek":
- summary = summarize_with_deepseek(api_key, chunk, custom_prompt_input)
+ summary = summarize_with_deepseek(api_key, chunk, custom_prompt_input, temp, system_message)
elif api_name == "OpenRouter":
- summary = summarize_with_openrouter(api_key, chunk, custom_prompt_input)
+ summary = summarize_with_openrouter(api_key, chunk, custom_prompt_input, temp, system_message)
elif api_name == "Llama.cpp":
- summary = summarize_with_llama(chunk, custom_prompt_input)
+ summary = summarize_with_llama(chunk, custom_prompt_input, temp, system_message)
elif api_name == "Kobold":
- summary = summarize_with_kobold(chunk, custom_prompt_input)
+                        summary = summarize_with_kobold(chunk, api_key, custom_prompt_input, temp, system_message)
elif api_name == "Ooba":
- summary = summarize_with_oobabooga(chunk, custom_prompt_input)
+                        summary = summarize_with_oobabooga(chunk, api_key, custom_prompt_input, temp, system_message)
elif api_name == "Tabbyapi":
- summary = summarize_with_tabbyapi(chunk, custom_prompt_input)
+ summary = summarize_with_tabbyapi(chunk, custom_prompt_input, temp, system_message)
elif api_name == "VLLM":
- summary = summarize_with_vllm(chunk, custom_prompt_input)
+ summary = summarize_with_vllm(chunk, custom_prompt_input, temp, system_message)
summarized_chunk_transcriptions.append(summary)
# Combine chunked transcriptions into a single file
diff --git a/summarize.py b/summarize.py
index 6db67aa2f..e0a7eb0de 100644
--- a/summarize.py
+++ b/summarize.py
@@ -42,11 +42,28 @@
#
#############
# Global variables setup
-custom_prompt_input = ("Above is the transcript of a video. Please read through the transcript carefully. Identify the "
-"main topics that are discussed over the course of the transcript. Then, summarize the key points about each main "
-"topic in bullet points. The bullet points should cover the key information conveyed about each topic in the video, "
-"but should be much shorter than the full transcript. Please output your bullet point summary inside "
-"tags.")
+# FIXME
+custom_prompt_summarize_bulleted_notes = ("""
+    You are a bulleted notes specialist. [INST]```When creating comprehensive bulleted notes, you should follow these guidelines: Use multiple headings based on the referenced topics, not categories like quotes or terms. Headings should be surrounded by bold formatting and not be listed as bullet points themselves. Leave no space between headings and their corresponding list items underneath. Important terms within the content should be emphasized by setting them in bold font. Any text that ends with a colon should also be bolded. Before submitting your response, review the instructions, and make any corrections necessary to adhere to the specified format. Do not reference these instructions within the notes.``` \nBased on the content between backticks, create comprehensive bulleted notes.[/INST]
+ **Bulleted Note Creation Guidelines**
+
+ **Headings**:
+ - Based on referenced topics, not categories like quotes or terms
+ - Surrounded by **bold** formatting
+ - Not listed as bullet points
+ - No space between headings and list items underneath
+
+ **Emphasis**:
+ - **Important terms** set in bold font
+ - **Text ending in a colon**: also bolded
+
+ **Review**:
+ - Ensure adherence to specified format
+ - Do not reference these instructions in your response.[INST] {{ .Prompt }} [/INST]
+ """)
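+# NOTE: this prompt duplicates custom_prompt_summarize_bulleted_notes in
+# App_Function_Libraries/Gradio_Related.py; keep the two copies in sync.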
+
#
# Global variables
whisper_models = ["small", "medium", "small.en", "medium.en", "medium", "large", "large-v1", "large-v2", "large-v3",
@@ -429,8 +444,9 @@ def main(input_path, api_name=None, api_key=None,
chunk_overlap=100,
chunk_unit='tokens',
summarize_chunks=None,
- diarize=False
- ):
+ diarize=False,
+ system_message=None):
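+    # system_message is passed as a keyword to the chunk summarizers selected below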
global detail_level_number, summary, audio_file, transcription_text, info_dict
detail_level = detail
@@ -529,15 +544,15 @@ def main(input_path, api_name=None, api_key=None,
if summarize_chunks:
summary = None
if summarize_chunks == 'openai':
- summary = summarize_with_openai(api_key, chunk_text, custom_prompt)
+            summary = summarize_with_openai(api_key, chunk_text, custom_prompt, system_message=system_message)
elif summarize_chunks == 'anthropic':
- summary = summarize_with_anthropic(api_key, chunk_text, custom_prompt)
+            summary = summarize_with_anthropic(api_key, chunk_text, custom_prompt, system_message=system_message)
elif summarize_chunks == 'cohere':
- summary = summarize_with_cohere(api_key, chunk_text, custom_prompt)
+            summary = summarize_with_cohere(api_key, chunk_text, custom_prompt, system_message=system_message)
elif summarize_chunks == 'groq':
- summary = summarize_with_groq(api_key, chunk_text, custom_prompt)
+            summary = summarize_with_groq(api_key, chunk_text, custom_prompt, system_message=system_message)
elif summarize_chunks == 'local-llm':
- summary = summarize_with_local_llm(chunk_text, custom_prompt)
+            summary = summarize_with_local_llm(chunk_text, custom_prompt, system_message=system_message)
# FIXME - Add more summarization methods as needed
if summary:
@@ -766,6 +781,7 @@ def signal_handler(sig, frame):
set_max_txt_chunk_paragraphs = 0
set_chunk_txt_by_tokens = False
set_max_txt_chunk_tokens = 0
+ custom_prompt_input = args.custom_prompt
if args.server_mode:
server_mode = args.server_mode