sakthi07 commited on
Commit
7af8516
·
verified ·
1 Parent(s): fbd8cfb

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +74 -0
app.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # app.py
2
+ import os
3
+ from dotenv import load_dotenv
4
+ from langchain.chains import ConversationalRetrievalChain
5
+ from langchain.memory import ConversationBufferMemory
6
+ from langchain_community.vectorstores import FAISS
7
+ from langchain_community.embeddings import OpenAIEmbeddings
8
+ from langchain_community.chat_models import ChatOpenAI
9
+ import gradio as gr
10
+
11
# ------------------ Load environment variables ------------------
# NOTE(review): load_dotenv is imported but deliberately not called here —
# presumably the key is injected as a platform secret (e.g. HF Spaces); confirm.
# load_dotenv()
# Returns None when unset; downstream OpenAI clients will then fail at request time.
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")

# ------------------ Paths ------------------
VECTORSTORE_PATH = os.path.join("storage", "faiss_index")  # folder containing index.faiss and index.pkl
17
+
18
# ------------------ Load vectorstore ------------------
def load_vectorstore(path):
    """Load a persisted FAISS index from *path* using OpenAI embeddings.

    Raises:
        ValueError: if *path* does not exist (ingest.py has not been run).
    """
    if not os.path.exists(path):
        raise ValueError(f"FAISS index not found at {path}. Please run ingest.py first.")
    # allow_dangerous_deserialization is required because the index .pkl is
    # unpickled; acceptable here since the file is produced by our own ingest.py.
    return FAISS.load_local(
        path,
        OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY),
        allow_dangerous_deserialization=True,
    )
29
+
30
# Module-level singleton: loaded once at import time; raises if the index is absent.
vectorstore = load_vectorstore(VECTORSTORE_PATH)
31
+
32
# ------------------ Load LLM ------------------
def load_llm():
    """Return a deterministic (temperature=0) GPT-3.5-turbo chat client."""
    return ChatOpenAI(
        model_name="gpt-3.5-turbo",
        temperature=0,
        openai_api_key=OPENAI_API_KEY,
    )
40
+
41
# Single shared LLM client for the whole app.
llm = load_llm()
42
+
43
# ------------------ Memory ------------------
# One module-level buffer, so history is shared across ALL visitors of this
# process (single global session). return_messages=True stores Message objects
# rather than a flattened string, as the chat model expects.
memory = ConversationBufferMemory(
    memory_key="chat_history",
    return_messages=True
)
48
+
49
# ------------------ Conversational Retrieval Chain ------------------
# Retrieval-augmented QA: pulls the top-3 (k=3) chunks from the FAISS store per
# question, tracks dialogue in the shared memory buffer, and exposes the model
# reply under the "answer" key.
qa_chain = ConversationalRetrievalChain.from_llm(
    llm=llm,
    retriever=vectorstore.as_retriever(search_kwargs={"k": 3}),
    memory=memory,
    output_key="answer"
)
56
+
57
# ------------------ Gradio Chat ------------------
def respond(user_message, chat_history):
    """Handle one chat turn for the Gradio UI.

    Args:
        user_message: raw text from the input box (may be empty/None).
        chat_history: list of (question, answer) tuples shown in the Chatbot.

    Returns:
        The updated history twice — the submit wiring targets the chatbot
        component in both output slots.
    """
    # Fix: on an empty submit the original never bound `answer` but still
    # reached chat_history.append(...), raising NameError. Guard and return
    # the history unchanged instead.
    if not user_message:
        return chat_history, chat_history
    try:
        # NOTE(review): chat_history is passed explicitly even though the chain
        # also has `memory` attached — redundant but harmless; confirm intent.
        result = qa_chain({"question": user_message, "chat_history": memory.chat_memory.messages})
        answer = result["answer"]
    except Exception as e:
        # Surface backend failures in-chat rather than crashing the UI.
        answer = f"Error: {str(e)}"
    chat_history.append((user_message, answer))
    return chat_history, chat_history
67
+
68
# Wire up the UI: a chatbot transcript plus a single-line question box.
with gr.Blocks() as demo:
    gr.Markdown("## 💉 Diabetes Chatbot\nChat with the bot about diabetes. It remembers your questions during this session!")
    transcript = gr.Chatbot()
    question_box = gr.Textbox(label="Type your question here...", placeholder="Ask anything about diabetes...", lines=1)
    # Enter in the textbox sends (text, history) to respond(); both outputs
    # feed the same chatbot component, matching respond()'s twin return.
    question_box.submit(respond, [question_box, transcript], [transcript, transcript])

demo.launch()