import gradio as gr
from langchain_community.vectorstores import Chroma
from langchain.prompts import ChatPromptTemplate
from langchain_community.llms.ollama import Ollama
from get_embedding_function import get_embedding_function
from gradio_pdf import PDF
import os
from huggingface_hub import HfApi
from huggingface_hub import snapshot_download
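# Pull the prebuilt Chroma index for iTrustAI from a Hugging Face dataset repo at startup.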
HF_TOKEN = os.environ.get("HF_TOKEN")
iTrustAI_DATA = os.environ.get("iTrustAI_DATA")
API = HfApi(token=HF_TOKEN)
downloaded_folder_path = snapshot_download(
repo_id=iTrustAI_DATA,
repo_type="dataset",
token=HF_TOKEN
)
CHROMA_PATH=f"{downloaded_folder_path}/chroma_db_itrustai"
############ Variables
CHAT_PROMPT_TEMPLATE = """
You are a helpful assistant.
Context information is below.
---------------------
{context}
---------------------
Conversation history:
{conversation_history}
Provide a helpful answer to the user's last question based on the above context ONLY.
Assistant:"""
ADVANCED_CHAT_PROMPT_TEMPLATE = """
You are a knowledgeable and helpful assistant dedicated to providing accurate and comprehensive answers.
Please utilize the context information provided below to inform your response. Ensure that your answer is based solely on this context, integrating relevant details to fully address the user's query.
---------------------
{context}
---------------------
Conversation history:
{conversation_history}
Provide a detailed and helpful answer to the user's last question, using the context above.
Assistant:"""
# SOURCE_ANSWER_TEMPLATE = """
# Answer the question based only on the following context:
# {context}
# ---
# Answer the question based on the above context: {question}
# """
SOURCE_ANSWER_TEMPLATE = """
You are a helpful assistant.
Context information is below.
---------------------
{context}
---------------------
Conversation history:
User: {question}
Provide a helpful answer to the user's last question based on the above context ONLY.
Assistant:"""
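# The Ollama endpoint and model name are supplied through environment variables.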
INVERTIBLEAI_SERVICE = os.getenv('INVERTIBLEAI_SERVICE')
MODEL_NAME = os.getenv('MODEL_NAME')
GENERATION_TEMPERATURE = 0.8
GENERATION_TOP_P = 0.9
######## Functions
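# Placeholder handler for the document-ingestion inputs; it currently returns canned status/summary text.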
def process_input(input_method, url, online_pdf_url, uploaded_pdf):
return "Document processed and stored successfully.", "Summary text here!"
def query_source_answer_rag(query_text: str, template=SOURCE_ANSWER_TEMPLATE, temperature=GENERATION_TEMPERATURE, top_p=GENERATION_TOP_P):
    # Prepare the DB.
    embedding_function = get_embedding_function()
    db = Chroma(persist_directory=CHROMA_PATH, embedding_function=embedding_function)
    # Search the DB.
    results = db.similarity_search_with_score(query_text, k=5)
    context_text = "\n\n---\n\n".join([doc.page_content for doc, _score in results])
    prompt_template = ChatPromptTemplate.from_template(template)
    prompt = prompt_template.format(context=context_text, question=query_text)
    # print(prompt)
    # Generation options are set on the client; Ollama expects num_predict/repeat_penalty
    # rather than max_tokens/repetition_penalty.
    model = Ollama(model=MODEL_NAME, base_url=INVERTIBLEAI_SERVICE,
                   temperature=temperature, top_p=top_p, num_predict=1024, repeat_penalty=1.0)
    response_text = model.invoke(prompt)
    # Build "<folder>/<file>#page=<n>" links from chunk ids shaped like ".../<folder>/<file>:<page>:<chunk>".
    sources = []
    for doc, _score in results:
        doc_id = doc.metadata.get("id", "")
        folder, filename = doc_id.split("/")[-2], doc_id.split("/")[-1]
        sources.append(f"{folder}/{filename.split(':')[0]}#page={filename.split(':')[1]}")
    formatted_response = f"Response: {response_text}\nSources: {sources}"
    print(formatted_response)
    return response_text, sources
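# Multi-turn RAG: retrieves the top-10 chunks and folds the running conversation history into the prompt.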
def query_rag(query_text: str, conversation_history=None, template=CHAT_PROMPT_TEMPLATE, temperature=GENERATION_TEMPERATURE, top_p=GENERATION_TOP_P):
# Prepare the DB.
embedding_function = get_embedding_function()
db = Chroma(persist_directory=CHROMA_PATH, embedding_function=embedding_function)
# Search the DB.
results = db.similarity_search_with_score(query_text, k=10)
context_text = "\n\n---\n\n".join([doc.page_content for doc, _score in results])
# Prepare the conversation history text
if conversation_history:
conversation_text = ""
for msg, resp in conversation_history:
conversation_text += f"User: {msg}\n"
if resp:
conversation_text += f"Assistant: {resp}\n"
else:
conversation_text = ""
    # Append the last user message; the template itself ends with the "Assistant:" cue.
    conversation_text += f"User: {query_text}"
# Prepare the prompt
prompt = template.format(
context=context_text,
conversation_history=conversation_text
)
print(prompt)
    # Generation options are set on the specified Ollama server client; Ollama uses num_predict rather than max_tokens.
    model = Ollama(model=MODEL_NAME, base_url=INVERTIBLEAI_SERVICE,
                   temperature=temperature, top_p=top_p, num_predict=1024, repeat_penalty=1.0)
    response_text = model.invoke(prompt)
sources = [doc.metadata.get("id", None) for doc, _score in results]
return response_text, sources
def get_response(sentence):
response, sources = query_rag(sentence)
return response, sources
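# Reset handlers that clear the corresponding output components.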
def clear_sentiment():
return "", ""
def clear_sentiment_explanation():
return "", "", ""
def clear_insights():
return None, None, [], None, None
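# Identity helper: returns its argument unchanged.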
def new_func(clear_sentiment_explanation):
return clear_sentiment_explanation
# Handler for the single-question Q&A tab: returns the answer plus formatted source links.
def get_response_response(query_text):
# Get the response from the model
response, sources = query_source_answer_rag(query_text, template=SOURCE_ANSWER_TEMPLATE, temperature=0.8, top_p=0.9)
    # Format the retrieved sources as a Markdown list.
    markdown_source = ""
    for idx, source in enumerate(sources):
        markdown_source += f"**Source#{idx+1} -** " + source.split("/")[-2] + "/" + source.split("/")[-1] + " (view) \n\n "
    return response, markdown_source
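# Chat tab handler: appends the latest exchange to the history and clears the input box.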
def chat_get_response(history, user_message):
if history is None:
history = []
    # Get the response from the model; the retrieved sources are not surfaced in this tab.
    response, _sources = query_rag(user_message, history, template=CHAT_PROMPT_TEMPLATE, temperature=0.8, top_p=0.9)
    history.append((user_message, response))
    return history, ""
# Function to handle chat interactions
def advanced_chat_get_response(history, user_message):
if history is None:
history = []
    # Get the response from the model; the retrieved sources are not surfaced in this tab.
    response, _sources = query_rag(user_message, history, template=ADVANCED_CHAT_PROMPT_TEMPLATE, temperature=0.8, top_p=0.9)
    history.append((user_message, response))
    return history, ""
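# Clears the chatbot history.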
def reset_chat():
return []
########### Gradio Interface
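# Custom CSS overrides for the tab headers, labels, and PDF viewer width.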
css = """
/* Style for active tab header
div[class*="gradio-container"] .contain button[role="tab"] {
background-color: #000000;
color: white;
font-size:16px;
}*/
.svelte-1tcem6n.selected {
border-color: #0f5b69;
background: #0f5b69;
color: white;
}
button.svelte-1uw5tnk {
margin-bottom: -1px;
border: 1px solid transparent;
border-color: transparent;
border-bottom: none;
border-top-right-radius: var(--container-radius);
border-top-left-radius: var(--container-radius);
padding: var(--size-1) var(--size-4);
color: var(--body-text-color-subdued);
font-weight: bold;
font-size: 16px;
}
div.svelte-19hvt5v {
display: flex;
font-size:16px;
position: relative;
border: 5px solid #0f5b69;
border-bottom-right-radius: var(--container-radius);
border-bottom-left-radius: var(--container-radius);
padding: var(--block-padding);
}
.hide-container.svelte-12cmxck {
margin: 0;
box-shadow: none;
--block-border-width: 0;
background: transparent;
padding: 0;
overflow: visible;
}
td.svelte-p5q82i.svelte-p5q82i.svelte-p5q82i{
text-align: left;
}
.label.svelte-p5q82i.svelte-p5q82i.svelte-p5q82i {
display: flex;
align-items: center;
margin-bottom: 5px;
background-color:#f87315;
color: #fff;
font-weight: bold;
font-size: 16px;
line-height: 50px;
}
#pdf_viewer {
width: 600px;
}
"""
# Build the Gradio UI.
with gr.Blocks(css=css) as demo:
gr.HTML("""
""")
# Adding the new Chat tab with chat interface
with gr.Tab("InterPARES-Chat"):
gr.HTML("