"""USC GPT — Streamlit chat app for finding the perfect USC class.

Renders time/unit filter sliders and a chat interface. Chat history is kept
in ``st.session_state`` so it survives Streamlit reruns. Vector search is
backed by a Pinecone index of class embeddings (``use-class-db``).
"""
import streamlit as st
from PIL import Image
from datetime import time as t
import time
from operator import itemgetter
import os
import json
import getpass
import openai
from langchain.vectorstores import Pinecone
from langchain.embeddings import OpenAIEmbeddings
import pinecone

from results import results_agent
from filter import filter_agent
from reranker import reranker
from utils import build_filter

# API keys come from Streamlit's secrets store (.streamlit/secrets.toml).
OPENAI_API = st.secrets["OPENAI_API"]
PINECONE_API = st.secrets["PINECONE_API"]

openai.api_key = OPENAI_API
pinecone.init(
    api_key=PINECONE_API,
    environment="gcp-starter",
)
index_name = "use-class-db"
embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API)
index = pinecone.Index(index_name)
k = 5  # number of nearest-neighbour results to retrieve per query

# Persist chat history across Streamlit reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []

st.title("USC GPT - Find the perfect class")

class_time = st.slider(
    "Filter Class Times:",
    value=(t(11, 30), t(12, 45)))
# st.write("You're scheduled for:", class_time)

units = st.slider(
    "Number of units",
    1, 4,
    value=(1, 4)
)

# Replay the prior conversation on each rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

prompt = st.chat_input("What kind of class are you looking for?")

### GPT Response
# Only respond once the user has actually submitted a prompt.
# (The original ran unconditionally and never recorded the user's message.)
if prompt:
    # Record and display the user's message.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Display assistant response in chat message container.
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""
        assistant_response = "Hello there! How can I assist you today?"
        # Simulate a stream of response chunks with a millisecond delay.
        for chunk in assistant_response.split():
            full_response += chunk + " "
            time.sleep(0.05)
            # Add a blinking cursor to simulate typing.
            message_placeholder.markdown(full_response + "▌")
        # BUG FIX: original referenced undefined `final_response` (NameError);
        # the accumulated text lives in `full_response`.
        message_placeholder.markdown(full_response)

    # Add assistant response to chat history.
    st.session_state.messages.append(
        {"role": "assistant", "content": full_response})