sakthi07 commited on
Commit
9935eea
·
verified ·
1 Parent(s): 1902e2d

Upload streamlit_app.py

Browse files

Modified Streamlit-related code.

Files changed (1) hide show
  1. streamlit_app.py +98 -0
streamlit_app.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# app.py
import os

# Configure Streamlit through environment variables BEFORE `streamlit` is
# imported, so the settings take effect.  Pointing config/log folders at the
# working directory avoids PermissionError on read-only home directories
# (e.g. Hugging Face Spaces).
# NOTE: the original set STREAMLIT_BROWSER_GATHER_USAGE_STATS twice with the
# same value; the duplicate assignment has been removed.
os.environ["STREAMLIT_BROWSER_GATHER_USAGE_STATS"] = "false"  # disable usage stats
os.environ["STREAMLIT_CONFIG_DIR"] = os.getcwd()  # store Streamlit configs locally
os.environ["STREAMLIT_LOG_FOLDER"] = ".streamlit"  # keep logs beside the app
14
+ import streamlit as st
15
+ # from dotenv import load_dotenv
16
+ from langchain.chains import ConversationalRetrievalChain
17
+ from langchain.memory import ConversationBufferMemory
18
+ from langchain_community.vectorstores import FAISS
19
+ from langchain.embeddings.openai import OpenAIEmbeddings
20
+ from langchain.chat_models import ChatOpenAI
# ------------------ Load environment variables ------------------
# BUG FIX: `os.environ` is a mapping and has no `.getenv` attribute (that is
# a module-level function), so the original `os.environ.getenv(...)` raised
# AttributeError at startup.  `os.getenv` returns None when the key is unset.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

# ------------------ Paths ------------------
# Folder containing index.faiss and index.pkl (produced by ingest.py).
VECTORSTORE_PATH = os.path.join("storage", "faiss_index")
28
+
29
# ------------------ Load vectorstore ------------------
@st.cache_resource
def load_vectorstore(path):
    """Load the FAISS index stored at *path*, or show an error and return None.

    Cached with st.cache_resource so the index is deserialized only once
    per server process.
    """
    if not os.path.exists(path):
        st.error(f"FAISS index not found at {path}. Please run ingest.py first.")
        return None
    # allow_dangerous_deserialization: the index was pickled locally by
    # ingest.py (trusted source), so loading it here is acceptable.
    embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
    return FAISS.load_local(path, embeddings, allow_dangerous_deserialization=True)
42
+
43
# Build (or fetch the cached) vectorstore; without it the app cannot answer.
vectorstore = load_vectorstore(VECTORSTORE_PATH)
if vectorstore is None:
    # load_vectorstore already rendered the error message; just halt this run.
    st.stop()
46
+
47
# ------------------ Load LLM ------------------
@st.cache_resource
def load_llm():
    """Create the chat model once per server process (Streamlit resource cache)."""
    # temperature=0 keeps answers deterministic for a factual Q&A bot.
    return ChatOpenAI(
        model_name="gpt-3.5-turbo",
        temperature=0,
        openai_api_key=OPENAI_API_KEY,
    )

llm = load_llm()
58
+
59
# ------------------ Memory ------------------
# Buffer memory accumulates the whole conversation under the "chat_history"
# key as message objects (return_messages=True) rather than one flat string.
memory = ConversationBufferMemory(return_messages=True, memory_key="chat_history")

# ------------------ Conversational Retrieval Chain ------------------
# The retriever surfaces the top-3 most similar chunks from the FAISS index.
retriever = vectorstore.as_retriever(search_kwargs={"k": 3})
qa_chain = ConversationalRetrievalChain.from_llm(
    llm=llm,
    retriever=retriever,
    memory=memory,
    output_key="answer",
)
72
+
73
# ------------------ Streamlit UI ------------------
st.title("💉 Diabetes Chatbot")
st.write("Chat with the bot about diabetes. It remembers your questions during this session!")

# Session-scoped chat history: list of (question, answer) tuples.
if "chat_history" not in st.session_state:
    st.session_state["chat_history"] = []

# ------------------ Chat Interface ------------------
user_input = st.chat_input("Type your question here...")

if user_input:
    # BUG FIX: the previous version appended a placeholder (question, None)
    # tuple to the history BEFORE running the chain.  If the chain raised, the
    # dangling None answer stayed in session state and was later rendered as
    # "None"; the malformed tuple was also passed in the chain's chat_history
    # input.  Run the chain first, then record the completed turn.
    with st.spinner("Bot is thinking..."):
        result = qa_chain({"question": user_input, "chat_history": st.session_state["chat_history"]})
    answer = result["answer"]
    st.session_state["chat_history"].append((user_input, answer))

# Render the full conversation so far using Streamlit chat messages.
for q, a in st.session_state["chat_history"]:
    st.chat_message("user").write(q)
    st.chat_message("assistant").write(a)