import streamlit as st
from huggingface_hub import InferenceClient
from dotenv import load_dotenv
import os

# Load .env file
load_dotenv()

# Get API key from environment variable
api_key = os.getenv("HUGGINGFACEHUB_API_TOKEN")
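# Note: load_dotenv() assumes a local .env file containing a line such as
# HUGGINGFACEHUB_API_TOKEN=hf_xxx. On Hugging Face Spaces, the token can instead
# be set as a repository secret, which is exposed to the app as an environment variable.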
st.set_page_config(page_title="EssayEase AI Chat", page_icon="🤖")

# Add system prompt once at the start
system_prompt = {
    "role": "system",
    "content": (
        "You are EssayEASE, an intelligent and empathetic AI assistant designed to help students write compelling, "
        "authentic, and well-structured college essays, including personal statements, supplemental essays, and scholarship responses. "
        "You guide students in expressing their unique stories, values, and aspirations in a clear, reflective, and engaging manner.\n\n"
        "⚠️ Important: You do not encourage plagiarism or AI-generated content to be submitted without proper editing. "
        "All suggestions must serve as guidance or inspiration. Students should ensure the final work is entirely their own voice and experience.\n\n"
        "Your tone should be supportive, thoughtful, and motivational. Offer constructive feedback, ask reflective questions, and give helpful examples. "
        "Maintain the student's original voice and ensure your responses help them grow as a writer."
    )
}
# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []
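# Streamlit reruns this script on every interaction; st.session_state persists the
# conversation across those reruns so the full history can be replayed below.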
# Title
st.title("🧠 EssayEase – AI Essay Helper")

# User input
user_input = st.chat_input("Draft or ask anything essay-related...")

# Show past messages
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])
# If new input
if user_input:
    st.session_state.messages.append({"role": "user", "content": user_input})
    with st.chat_message("user"):
        st.markdown(user_input)

    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            try:
                client = InferenceClient(
                    model="nvidia/Llama-3_1-Nemotron-Ultra-253B-v1",
                    provider="nebius",
                    api_key=api_key,
                )
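                # provider="nebius" asks huggingface_hub to route the request through
                # the Nebius inference provider; another supported provider (or the
                # default routing) could be substituted here.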
                # Add system prompt only once at the start of the context
                full_context = [system_prompt] + st.session_state.messages

                completion = client.chat.completions.create(
                    model="nvidia/Llama-3_1-Nemotron-Ultra-253B-v1",
                    messages=full_context,
                    max_tokens=2048,
                )
                response = completion.choices[0].message.content
            except Exception as e:
                response = f"❌ Error: {e}"

            st.markdown(response)

    st.session_state.messages.append({"role": "assistant", "content": response})
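# To run locally (assuming this script is saved as app.py):
#   pip install streamlit huggingface_hub python-dotenv
#   streamlit run app.py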