priceofdev committed on
Commit 7a0e422 · 1 Parent(s): a8b05cf

version upgrade

Files changed (2)
  1. app.py +131 -0
  2. requirements.txt +7 -0
app.py ADDED
@@ -0,0 +1,131 @@
+ import streamlit as st
+ import pinecone
+ from langchain.llms import OpenAI
+ from langchain.prompts import PromptTemplate
+ from langchain.vectorstores import Pinecone
+ from langchain.embeddings.openai import OpenAIEmbeddings
+ from langchain.chains.question_answering import load_qa_chain
+ from langchain.memory import ConversationBufferMemory, CombinedMemory, ConversationKGMemory
+
+ # Initialize session state containers for the conversation
+ if "generated" not in st.session_state:
+     st.session_state["generated"] = []
+ if "past" not in st.session_state:
+     st.session_state["past"] = []
+ if "input" not in st.session_state:
+     st.session_state["input"] = ""
+ if "stored_session" not in st.session_state:
+     st.session_state["stored_session"] = []
+
+ # Define function to get user input
+ def get_text():
+     """
+     Get user input text.
+     Returns:
+         str: The text entered by the user.
+     """
+     input_text = st.text_input("You:", st.session_state["input"], key="input",
+                                placeholder="Enter your message here...", label_visibility='hidden')
+     return input_text
+
+ def reset_entity_memory():
+     """
+     Resets the entity memory to its initial state.
+     """
+     # Drop the combined memory; it is rebuilt from fresh KG and buffer
+     # memories in the main body on the next run.
+     if "entity_memory" in st.session_state:
+         del st.session_state["entity_memory"]
+
+ def new_chat():
+     """
+     Clears session state and starts a new chat.
+     """
+     save = []
+     for i in range(len(st.session_state['generated'])-1, -1, -1):
+         save.append("User:" + st.session_state["past"][i])
+         save.append("Bot:" + st.session_state["generated"][i]['output_text'])
+     st.session_state["stored_session"].append(save)
+     st.session_state["generated"] = []
+     st.session_state["past"] = []
+     st.session_state["input"] = ""
+     if "entity_memory" in st.session_state:
+         reset_entity_memory()
+
+ # Set up the Streamlit app layout
+ st.title("ChatBot with Pinecone")
+ st.markdown(
+     '''
+     > :black[**A Chat Bot that queries your own corpus in Pinecone.**]
+     ''')
+ # st.markdown(" > Powered by - 🦜 LangChain + OpenAI + Streamlit")
+
+ openai_api = st.sidebar.text_input("OpenAI API Key", type="password")
+ pinecone_api = st.sidebar.text_input("Pinecone API Key", type="password")
+ pinecone_env = st.sidebar.text_input("Pinecone Environment")
+ pinecone_index = st.sidebar.text_input("Pinecone Index")
+ MODEL = st.sidebar.selectbox("Model", ["gpt-4", "gpt-3.5-turbo", "text-davinci-003"])
+
+ if openai_api and pinecone_api and pinecone_env and pinecone_index:
+
+     # Create Pinecone instance
+     pinecone.init(api_key=pinecone_api, environment=pinecone_env)
+
+     # Create OpenAI instance
+     llm = OpenAI(
+         temperature=0,
+         openai_api_key=openai_api,
+         model_name=MODEL,
+         max_tokens=2500,
+     )
+
+     # Create the combined conversation memory if not already created
+     if 'entity_memory' not in st.session_state:
+         KG = ConversationKGMemory(llm=llm, input_key="human_input")
+         CBM = ConversationBufferMemory(memory_key="chat_history", input_key="human_input")
+         st.session_state["entity_memory"] = CombinedMemory(memories=[KG, CBM])
+
+     # Set Template
+     # template = """You are a chatbot having a conversation with a human.
+     # Given the following extracted parts of a long document and a question, create a final answer.
+     # {context}
+     # Human: {human_input}
+     # Chatbot:"""
+
+     template = """As an advanced AI chatbot, you are engaged in a detailed conversation with a human user. You have access to a vast knowledge base stored in Pinecone's vector database. Your task is to leverage this information to provide comprehensive, accurate, and helpful responses. Given the following segments extracted from an extensive document and a user's question, analyze the context, draw specific information from the Pinecone memory, and formulate a well-rounded answer that not only addresses the query but also provides additional relevant information that could be beneficial to the user.
+ Context: {context}
+ Human: {human_input}
+ Your Response as Chatbot:"""
+
+     # Set the Prompt
+     prompt = PromptTemplate(
+         input_variables=["human_input", "context"],
+         template=template
+     )
+
+     # Get Context
+     embeddings = OpenAIEmbeddings(openai_api_key=openai_api)
+     docsearch = Pinecone.from_existing_index(pinecone_index, embedding=embeddings)
+
+ else:
+     st.error("Please enter your API keys in the sidebar.")
+
+ # Get user input
+ input_text = get_text()
+
+ # Only query the index and the LLM once all credentials have been provided
+ if input_text and openai_api and pinecone_api and pinecone_env and pinecone_index:
+     # Fetch docs using user input for cosine similarity
+     docs = docsearch.similarity_search(input_text, k=6)
+
+     # Get Response
+     chain = load_qa_chain(OpenAI(temperature=0, openai_api_key=openai_api), chain_type="stuff",
+                           memory=st.session_state["entity_memory"], prompt=prompt, verbose=True)
+     # Generate the output using user input and store it in the session state
+     output = chain({"input_documents": docs, "human_input": input_text}, return_only_outputs=True)
+
+     st.session_state.past.append(input_text)
+     st.session_state.generated.append(output)
+
+ with st.expander("Conversation"):
+     for i in range(len(st.session_state["generated"])-1, -1, -1):
+         st.info(st.session_state["past"][i])
+         st.success(st.session_state["generated"][i]['output_text'])
+
+ # Create button to start a new chat
+ # st.sidebar.button("New Chat", on_click=new_chat, type="primary")
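
Note that app.py only attaches to an index that already exists: Pinecone.from_existing_index assumes the corpus has already been embedded and upserted. Below is a minimal sketch of how such an index could be populated with the same pinned libraries; the index name, key placeholders, and sample chunks are illustrative and not part of this commit.

import pinecone
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Pinecone

# Placeholders -- substitute your own keys, environment, and corpus chunks.
pinecone.init(api_key="PINECONE_API_KEY", environment="PINECONE_ENV")
if "my-corpus" not in pinecone.list_indexes():
    # text-embedding-ada-002 produces 1536-dimensional vectors.
    pinecone.create_index("my-corpus", dimension=1536, metric="cosine")

embeddings = OpenAIEmbeddings(openai_api_key="OPENAI_API_KEY")
chunks = ["First document chunk...", "Second document chunk..."]
# Embed the chunks and upsert them into the index the app will query.
Pinecone.from_texts(chunks, embeddings, index_name="my-corpus")
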
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ langchain==0.0.180
+ openai==0.27.7
+ tiktoken==0.4.0
+ pinecone-client==2.2.2
+ pinecone-text==0.5.3
+ streamlit==1.22.0
+ networkx==3.1
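
With these pinned versions installed (for example, pip install -r requirements.txt), the app can be started locally with Streamlit's runner (streamlit run app.py); the OpenAI and Pinecone keys, environment, index name, and model are then entered in the sidebar before chatting.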