File size: 1,637 Bytes
c77bdbe
 
db9f296
c77bdbe
547da5b
c77bdbe
db9f296
c77bdbe
e08466a
47671b6
db9f296
 
 
 
47671b6
a700e33
47671b6
 
 
76d3ade
 
8dd3847
 
76d3ade
d1cac18
c77bdbe
 
 
 
556b474
c77bdbe
 
4435384
 
c77bdbe
8dd3847
 
c77bdbe
 
 
 
4435384
 
ec3f884
 
14a0222
4435384
 
8abb177
0ba26be
4435384
c77bdbe
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
import openai
import streamlit as st
from PIL import Image

def _escape_dollars(text: str) -> str:
    """Return *text* with every '$' escaped as '\\$' so Streamlit's
    markdown renders a literal dollar sign instead of opening a LaTeX
    math span. Already-escaped '\\$' is normalized first so escaping
    is idempotent.
    """
    return text.replace('\\$', '$').replace('$', '\\$')


st.title("Taco' Bout It - Addison")

# API key and the system prompt ("grounding") are supplied via
# Streamlit Secrets, never hard-coded in the source.
openai.api_key = st.secrets["OPENAI_API_KEY"]
grounding = st.secrets["GROUNDING"]

# App logo displayed under the title.
image = Image.open('logo.png')
st.image(image)

# Persist the model choice and the chat transcript across Streamlit
# reruns (every widget interaction re-executes this script).
if "openai_model" not in st.session_state:
    st.session_state["openai_model"] = "gpt-5-mini"

if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the stored transcript on each rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input("Type hello to start/ escribir hola para empezar:"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        message_placeholder = st.empty()

        # Stream the assistant reply via the Responses API, sending the
        # whole transcript as input and the grounding text as the
        # system-level instructions.
        response = openai.responses.create(
            model=st.session_state["openai_model"],
            instructions=grounding,
            input=[
                {"role": m["role"], "content": m["content"]}
                for m in st.session_state.messages
            ],
            stream=True,
        )

        # Accumulate the raw deltas and escape the *accumulated* text on
        # each render. Escaping per delta (as before) corrupts a '\$'
        # sequence that happens to be split across two deltas.
        raw_response = ""
        for event in response:
            if event.type == 'response.output_text.delta':
                raw_response += event.delta
                # Trailing block cursor signals that streaming is live.
                message_placeholder.markdown(_escape_dollars(raw_response) + "▌")

        full_response = _escape_dollars(raw_response)
        message_placeholder.markdown(full_response)

    # Store the escaped text so the replay loop above renders it the
    # same way on subsequent reruns.
    st.session_state.messages.append({"role": "assistant", "content": full_response})