Summary/save conversation using autogen #4055
glarunsingh
started this conversation in
General
Replies: 0 comments
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
-
Objective: I want to ask follow-up questions to the user to understand the process. Once I understand the process, I want to save the conversation and a summary of it in a variable to pass to another agent. I figured out how to have the conversation, but when I exit I can't find a way to view the conversation and the summary of the process. conversation_history returns null.
The "user_proxy_agent_result" doesn't show any attributes when I press Ctrl+Space in VS Code. Help me generate the response and save it in a variable to pass to another agent using LangGraph.
import os
import json
from langchain_openai import AzureChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from docx import Document as DocxDocument
from PyPDF2 import PdfReader
from dotenv import load_dotenv
from autogen import AssistantAgent, UserProxyAgent
import autogen
# -------------------- Load Environment Variables --------------------
# NOTE(review): the section headers in the pasted code had lost their `#`
# markers, which made the file a syntax error; restored here as comments.
load_dotenv()

api_version = os.getenv("AZURE_OPENAI_API_VERSION")
endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
api_key = os.getenv("AZURE_OPENAI_API_KEY")
deployment_name = os.getenv("AZURE_OPENAI_CHAT_DEPLOYMENT_NAME")
llm_model = os.getenv("LLM_MODEL")

# Fail fast if any required setting is absent rather than erroring later
# deep inside an LLM call.
if not all([api_version, endpoint, api_key, deployment_name, llm_model]):
    raise ValueError("Some environment variables are missing. Check your .env file.")

# -------------------- Configure Autogen LLM --------------------
llm_config = {
    "timeout": 600,  # seconds to wait on a single LLM request
    "config_list": autogen.config_list_from_json("OAI_CONFIG_LIST"),
    "temperature": 0,  # deterministic output for reproducible Q&A
}
# -------------------- Initialize Autogen Agents --------------------
# NOTE(review): this header line had lost its `#` marker in the paste,
# making the file a syntax error; restored as a comment.

# Assistant that interrogates the user's content with follow-up questions.
input_assistant = AssistantAgent(
    name="input_assistant",
    system_message="""You are a helpful AI assistant.
Your task is to analyze the content provided by the user, ask clarifying questions, and collect responses one by one.
Use follow-up questions to understand requirements thoroughly. Type 'TERMINATE' when the data collection is complete.
""",
    llm_config=llm_config,
)

# Proxy that relays every assistant turn to the human operator.
user_proxy = UserProxyAgent(
    "user_proxy",
    human_input_mode="ALWAYS",  # prompt the human on every turn
    llm_config=llm_config,
    code_execution_config=False,  # this proxy never executes code blocks
    system_message="You are a helpful assistant."
)
# -------------------- Helper Functions --------------------
def extract_text_from_docx(docx_path):
    """Return the text of a .docx file, one paragraph per line."""
    document = DocxDocument(docx_path)
    return "\n".join(paragraph.text for paragraph in document.paragraphs)
def extract_text_from_pdf(pdf_path):
    """Return the concatenated text of every page of a PDF.

    ``page.extract_text()`` can return ``None`` (e.g. for image-only
    pages); coerce that to an empty string so ``str.join`` cannot raise
    ``TypeError``.
    """
    reader = PdfReader(pdf_path)
    return "\n".join((page.extract_text() or "") for page in reader.pages)
def read_document(file_path):
    """Read *file_path* and return its text, or ``None`` if unreadable.

    The original version only checked that the path existed and then
    fell off the end, so it always returned ``None``.  Now dispatches on
    extension: ``.docx`` via python-docx, ``.pdf`` via PyPDF2, and
    anything else is read as plain text.
    """
    if not file_path or not os.path.exists(file_path):
        return None
    extension = os.path.splitext(file_path)[1].lower()
    if extension == ".docx":
        return extract_text_from_docx(file_path)
    if extension == ".pdf":
        return extract_text_from_pdf(file_path)
    # Fall back to plain text for .txt, .md, etc.
    with open(file_path, "r", encoding="utf-8", errors="replace") as f:
        return f.read()
# -------------------- Main Logic --------------------
def analyze_and_start_autogen_qa(content):
    """Run the Autogen Q&A session over *content*; return history + summary.

    The original body only created local variables and returned ``None``,
    which is why the caller saw an empty ``conversation_history``.
    ``initiate_chat`` returns a ChatResult whose ``chat_history`` holds
    the full message list and whose ``summary`` holds an LLM-generated
    summary; both are returned so they can be handed to another agent
    (e.g. a LangGraph node).
    """
    chat_result = user_proxy.initiate_chat(
        input_assistant,
        message=content,
        summary_method="reflection_with_llm",  # LLM writes the summary
    )
    return {
        "conversation_history": chat_result.chat_history,
        "summary": chat_result.summary,
    }
def process_document_or_summary(doc_path=None):
    """Load *doc_path* (or ask for a summary) and start the Q&A session.

    The original read the document and then discarded it, returning
    ``None``.  Now falls back to a user-typed summary when no readable
    document is available, and returns the Q&A result so the caller can
    forward the conversation and summary to another agent.
    """
    content = read_document(doc_path)
    if not content:
        # No readable document: let the user describe the process directly.
        content = input("No document found. Enter a short process summary: ").strip()
    return analyze_and_start_autogen_qa(content)
# -------------------- Execution --------------------
if __name__ == "__main__":
    # Was `if name == "main":` — a NameError; the dunder guard is required.
    doc_path = input("Enter the document path (or press Enter to skip): ").strip()
    process_document_or_summary(doc_path)
Beta Was this translation helpful? Give feedback.
All reactions