Skip to content

Commit

Permalink
Merge pull request #43 from alkem-io/update=prompt
Browse files Browse the repository at this point in the history
Move to full chat prompt
  • Loading branch information
renehonig authored Jan 9, 2024
2 parents c0437c1 + dc9fa8b commit 50beb01
Show file tree
Hide file tree
Showing 3 changed files with 29 additions and 18 deletions.
31 changes: 21 additions & 10 deletions ai_adapter.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
from langchain.vectorstores import FAISS
from langchain_openai import AzureOpenAI
from langchain.prompts.prompt import PromptTemplate
from langchain.prompts import ChatPromptTemplate
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, PromptTemplate
from langchain_openai import AzureChatOpenAI
from langchain.schema import StrOutputParser
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
Expand Down Expand Up @@ -75,19 +75,23 @@ def get_language_by_code(language_code):
return language_mapping.get(language_code, 'English')


chat_template = """
You are a friendly, talkative, chatty and warm conversational agent. Use the following step-by-step instructions to respond to user inputs.
chat_system_template = """
You are a friendly and talkative conversational agent, tasked with answering questions about Alkemio.\
Use the following step-by-step instructions to respond to user inputs.
1 - If the question is in a different language than English, translate the question to English before answering.
2 - The text provided in the info delimited by triple pluses may contain questions. Remove those questions from the website.
3 - Provide an up to three paragraghs answer that is engaging, accurate and exthausive, taking into account the info delimited by triple pluses.
If the answer cannot be found within the info, write 'I could not find an answer to your question'.
2 - The text provided in the context delimited by triple pluses is retrieved from the Alkemio website, not part of the conversation with the user.
3 - Provide an answer of 250 words or less that is engaging, accurate and exthausive, taking into account the context delimited by triple pluses.
If the answer cannot be found within the context, write 'Hmm, I am not sure'.
4 - Only return the answer from step 3, do not show any code or additional information.
5 - Answer the question in the {language} language.
+++
Info:
context:
{context}
+++
Question: {question}
REMEMBER: If there is no relevant information within the context, just say "Hmm, I am \
not sure." Don't try to make up an answer. Anything between in preceding context \
is retrieved from the website, not part of the conversation with the user.\
"""

condense_question_template = """"
Expand All @@ -104,7 +108,13 @@ def get_language_by_code(language_code):

condense_question_prompt = PromptTemplate.from_template(condense_question_template)

chat_prompt = ChatPromptTemplate.from_template(chat_template)
chat_prompt = ChatPromptTemplate.from_messages(
[
("system", chat_system_template),
MessagesPlaceholder(variable_name="chat_history"),
("human", "{question}"),
]
)


generic_llm = AzureOpenAI(azure_deployment=os.environ["LLM_DEPLOYMENT_NAME"],
Expand Down Expand Up @@ -167,7 +177,6 @@ def _combine_documents(
doc_strings = [format_document(doc, document_prompt) for doc in docs]
return document_separator.join(doc_strings)


async def query_chain(question, language, chat_history):

# check whether the chat history is empty
Expand All @@ -191,6 +200,7 @@ async def query_chain(question, language, chat_history):
logger.debug(f"loaded memory {loaded_memory}\n")
logger.debug(f"chat history {chat_history}\n")


# Now we calculate the standalone question if the chat_history is not empty
standalone_question = {
"standalone_question": {
Expand Down Expand Up @@ -221,6 +231,7 @@ async def query_chain(question, language, chat_history):
# Now we construct the inputs for the final prompt
final_inputs = {
"context": lambda x: _combine_documents(x["docs"]),
"chat_history" : lambda x: chat_history.buffer,
"question": itemgetter("question"),
"language": lambda x: language['language'],
}
Expand Down
4 changes: 2 additions & 2 deletions guidance_engine.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
from langchain.callbacks import get_openai_callback
from langchain.memory import ConversationBufferMemory
from langchain.memory import ConversationBufferWindowMemory
# import pika
import json
import ai_adapter
Expand Down Expand Up @@ -81,7 +81,7 @@ async def query(user_id, query, language_code):

if user_id not in user_data:
user_data[user_id] = {}
user_data[user_id]['chat_history'] = ConversationBufferMemory(return_messages=True, output_key="answer", input_key="question")
user_data[user_id]['chat_history'] = ConversationBufferWindowMemory(k=3, return_messages=True, output_key="answer", input_key="question")
# user_chain[user_id]=ai_utils.setup_chain()
reset(user_id)
# chat_history=[]
Expand Down
12 changes: 6 additions & 6 deletions poetry.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

0 comments on commit 50beb01

Please sign in to comment.