-
Notifications
You must be signed in to change notification settings - Fork 0
/
98.py
94 lines (79 loc) · 3.27 KB
/
98.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
import time
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import StreamlitChatMessageHistory
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
import os
import streamlit as st
import tempfile
# --- UI scaffolding: session state, sidebar help, API-key field, history replay ---

# Streamlit reruns the script top-to-bottom on every interaction, so the
# conversation is kept in session_state; create the list on first run only.
if "messages" not in st.session_state:
    st.session_state.messages = []

st.sidebar.markdown("""# How to use
1. Enter your [OpenAI API key](https://platform.openai.com/account/api-keys) below🔑
2. Run the provided code in your Python environment.
3. Fill in the "OpenAI API Key" field with your API key.
4. Enter your message in the chat input.
5. Wait for the response from Elon Musk.
6. View the generated response in the chat interface.""")

# Masked text field; pre-filled from the OPENAI_API_KEY env var when present.
openai_api_key = st.sidebar.text_input(
    "OpenAI API Key",
    placeholder="sk-...",
    value=os.getenv("OPENAI_API_KEY", ""),
    type="password",
)

st.sidebar.markdown("# About")
st.sidebar.markdown("Elon 2.0: Be Like Musk! is an innovative app that allows you to experience the brilliance of Elon Musk. With this app, you can explore his mindset, strategies, and achievements, and learn how to emulate his success in your own life.")
st.title('Elon 2.0: Be Like Musk!')

# Replay every stored message so the chat survives Streamlit's reruns.
for past_message in st.session_state.messages:
    with st.chat_message(past_message["role"]):
        st.markdown(past_message["content"])
# Capture a new user message (walrus binds it for the branches further down),
# echo it into the chat pane, and persist it in session state.
if message := st.chat_input("Enter the message"):
    with st.chat_message("user"):
        st.markdown(message)
    st.session_state.messages.append({"role": "user", "content": message})

# Streamlit-backed message history used as the LangChain chat memory store.
msgs = StreamlitChatMessageHistory()
def elon_musk_response(message):
    """Generate an Elon-Musk-persona reply to *message* via an LLMChain.

    Uses the module-level ``msgs`` Streamlit history as conversation memory
    and the sidebar-supplied ``openai_api_key`` for the ChatOpenAI call.
    Returns the model's reply as a string.
    """
    persona_prompt = PromptTemplate(
        input_variables=['chat_history', 'message'], template='''You are Elon Musk. Craft a response based on the given message.
{chat_history}
Message: {message}
Elon Musk:'''
    )
    # NOTE(review): return_messages=True yields message objects, which a plain
    # string PromptTemplate stringifies — confirm this renders as intended.
    conversation_memory = ConversationBufferMemory(
        memory_key="chat_history", input_key="message", chat_memory=msgs, return_messages=True)
    chat_model = ChatOpenAI(model_name="gpt-3.5-turbo-16k",
                            openai_api_key=openai_api_key, temperature=0.7)
    chain = LLMChain(
        llm=chat_model,
        prompt=persona_prompt,
        verbose=False,
        memory=conversation_memory
    )
    return chain.run(message=message)
# Decide whether a reply can be generated this run. Default to an empty
# response; only call the model when a key is set AND a message arrived.
elon_response = ""
if not openai_api_key.startswith('sk-'):
    st.warning('Please enter your OpenAI API key!', icon='⚠')
elif message:
    with st.spinner('DemoGPT is working on it. It takes less than 10 seconds...'):
        elon_response = elon_musk_response(message)
# Display the generated response with a simulated typing effect.
# Fix: only open an assistant chat bubble when there is actually a response;
# the original rendered an empty assistant message on every rerun where no
# input was given or the API key was missing.
if elon_response:
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""
        # Reveal the reply word by word with a short delay to mimic streaming.
        for chunk in elon_response.split():
            full_response += chunk + " "
            time.sleep(0.05)
            # Add a blinking cursor to simulate typing
            message_placeholder.markdown(full_response + "▌")
        # Final render without the cursor.
        message_placeholder.markdown(full_response)
        # Add assistant response to chat history
        if full_response:
            st.session_state.messages.append(
                {"role": "assistant", "content": full_response})