# mcpOptimizer/src/app_enhanced.py
import streamlit as st
from engine import AdvancedPromptOptimizer
from llm_optimizer import optimize_with_llm, PERSONAS
from dotenv import load_dotenv
import os
load_dotenv()
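# Environment variables read later via os.getenv (loaded from .env above):
#   AIMLAPI_API_KEY - fallback key for the LLM optimizer path
#   TAVILY_API_KEY  - fallback Tavily key passed through to optimize_with_llm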
cost_model = {
"GPT-4": (0.01, 0.03),
"Claude Opus": (0.015, 0.075),
"Claude Sonnet": (0.003, 0.015),
"LLaMA 2": (0.012, 0.04),
"Custom": (None, None),
}
def format_cost(tokens, cost_per_k):
return f"${tokens * cost_per_k / 1000:.4f}"
def main():
st.set_page_config(
layout="wide",
page_title="PromptCraft - AI Prompt Optimizer",
page_icon="🚀",
initial_sidebar_state="expanded"
)
# Custom CSS for enhanced styling
st.markdown("""
<style>
.main {
padding-top: 1rem;
}
.stApp {
background: #f8f9fa;
}
.main .block-container {
padding-top: 2rem;
padding-bottom: 2rem;
background: white;
border-radius: 20px;
box-shadow: 0 10px 30px rgba(0,0,0,0.1);
margin-top: 2rem;
}
.header-container {
background: linear-gradient(90deg, #4facfe 0%, #00f2fe 100%);
padding: 2rem;
border-radius: 15px;
margin-bottom: 2rem;
text-align: center;
box-shadow: 0 5px 20px rgba(79, 172, 254, 0.3);
}
.stSelectbox > div > div {
background-color: #f8f9ff;
border-radius: 10px;
}
.stTextArea textarea {
background-color: #f8f9ff;
border-radius: 10px;
border: 2px solid #e1e8ff;
}
.stButton > button {
background: linear-gradient(45deg, #667eea, #764ba2);
color: white;
border-radius: 25px;
border: none;
padding: 0.75rem 2rem;
font-weight: 600;
transition: all 0.3s ease;
box-shadow: 0 5px 15px rgba(102, 126, 234, 0.4);
}
.stButton > button:hover {
transform: translateY(-2px);
box-shadow: 0 7px 20px rgba(102, 126, 234, 0.6);
}
.metric-card {
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
padding: 1.5rem;
border-radius: 15px;
color: white;
text-align: center;
box-shadow: 0 5px 20px rgba(102, 126, 234, 0.3);
margin-bottom: 1rem;
}
.feature-card {
background: #f8f9ff;
padding: 1.5rem;
border-radius: 15px;
border: 2px solid #e1e8ff;
margin-bottom: 1rem;
}
.cost-card {
background: linear-gradient(135deg, #11998e 0%, #38ef7d 100%);
padding: 1.5rem;
border-radius: 15px;
color: white;
text-align: center;
box-shadow: 0 5px 20px rgba(17, 153, 142, 0.3);
margin-bottom: 1rem;
}
</style>
""", unsafe_allow_html=True)
# Header Section
st.markdown("""
<div class="header-container">
<h1 style="color: white; font-size: 3rem; margin-bottom: 0.5rem; text-shadow: 2px 2px 4px rgba(0,0,0,0.3);">🚀 PromptCraft AI</h1>
<h3 style="color: white; margin-top: 0; opacity: 0.9; font-weight: 300;">✨ Optimize Your AI Prompts, Save Money & Time ✨</h3>
<p style="color: white; opacity: 0.8; font-size: 1.1rem;">Transform verbose prompts into efficient, cost-effective versions without losing meaning</p>
</div>
""", unsafe_allow_html=True)
col1, col2 = st.columns([0.65, 0.35], gap="large")
with col1:
st.markdown("""
<div class="feature-card">
<h3 style="color: #667eea; margin-top: 0;">⚙️ Configuration</h3>
</div>
""", unsafe_allow_html=True)
st.markdown("**πŸ’° LLM Cost Settings**")
model = st.selectbox("Select LLM Model", list(cost_model.keys()))
if model == "Custom":
input_cost = st.number_input("Input Cost ($/1K tokens)", 0.01, 1.0, 0.03)
output_cost = st.number_input("Output Cost ($/1K tokens)", 0.01, 1.0, 0.06)
else:
input_cost, output_cost = cost_model[model]
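# Preset models use the fixed (input, output) per-1K rates from cost_model;
# "Custom" falls back to the two number inputs above instead.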
st.markdown("**πŸ€– Optimization Model**")
# Create columns for the optimizer section
opt_col1, opt_col2 = st.columns([1, 1])
with opt_col1:
optimizer_model = st.selectbox("Choose Optimizer", ["spaCy + Lemminflect", "GPT-5"])
persona = "Default"
api_key_input = ""
tavily_api_key_input = ""
if optimizer_model == "GPT-5":
with opt_col2:
persona = st.selectbox("Choose Persona", list(PERSONAS.keys()))
# API Keys in the same row
api_col1, api_col2 = st.columns([1, 1])
with api_col1:
api_key_input = st.text_input("AIMLAPI API Key (optional)", type="password", help="If you don't provide a key, the one in your .env file will be used.")
with api_col2:
tavily_api_key_input = st.text_input("Tavily API Key (optional)", type="password", help="If you don't provide a key, the one in your .env file will be used.")
elif optimizer_model == "spaCy + Lemminflect":
with opt_col2:
aggressiveness = st.slider(
"Optimization Level",
0.0,
1.0,
0.7,
help="Higher = more aggressive shortening",
)
else:
aggressiveness = 1.0
st.markdown("**πŸ“ Your Prompt**")
prompt = st.text_area(
"Original Prompt",
height=200,
placeholder="✨ Paste your AI prompt here and watch the magic happen...\n\nExample: 'Please analyze this data very carefully and provide a comprehensive detailed report with all the advantages and disadvantages'",
help="Enter the prompt you want to optimize. The optimizer will reduce token count while preserving meaning."
)
col_btn1, col_btn2, col_btn3 = st.columns([1, 2, 1])
with col_btn2:
optimize_clicked = st.button("🚀 Optimize My Prompt", type="primary", use_container_width=True)
if optimize_clicked:
# Skip the run entirely when the prompt is empty, so nothing is sent to the optimizer or the LLM API
if not prompt or not prompt.strip():
st.warning("Please enter a valid prompt.")
return
if optimizer_model == "spaCy + Lemminflect":
optimizer = AdvancedPromptOptimizer()
optimized, orig_toks, new_toks = optimizer.optimize(prompt, aggressiveness)
else: # GPT-5
api_key = api_key_input if api_key_input else os.getenv("AIMLAPI_API_KEY")
tavily_api_key = tavily_api_key_input if tavily_api_key_input else os.getenv("TAVILY_API_KEY")
if not api_key or api_key == "<YOUR_API_KEY>":
st.error("Please set your AIMLAPI_API_KEY in the .env file or enter it above.")
return
optimized = optimize_with_llm(prompt, api_key, persona, tavily_api_key=tavily_api_key)
# Token counts for the LLM path are approximate: the exact tokenizer for GPT-5
# isn't available here, so tiktoken's cl100k_base encoding is used as a proxy.
import tiktoken
tokenizer = tiktoken.get_encoding("cl100k_base")
orig_toks = len(tokenizer.encode(prompt))
new_toks = len(tokenizer.encode(optimized))
if orig_toks == 0:
st.warning("Please enter a valid prompt.")
return
# Calculate savings
token_savings = orig_toks - new_toks
percent_savings = (token_savings / orig_toks) * 100 if orig_toks > 0 else 0
input_cost_savings = token_savings * input_cost / 1000
output_cost_savings = token_savings * output_cost / 1000
total_cost_savings = input_cost_savings + output_cost_savings
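# Note: the output-side saving applies the output rate to the same prompt-token
# delta, i.e. it assumes the reply shrinks roughly in step with the prompt.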
with col1:
st.markdown("""
<div style="background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); padding: 1rem; border-radius: 15px; margin-bottom: 1rem;">
<h3 style="color: white; text-align: center; margin: 0;">✨ Optimized Prompt</h3>
</div>
""", unsafe_allow_html=True)
st.code(optimized, language="text")
# Enhanced download button
col_dl1, col_dl2, col_dl3 = st.columns([1, 2, 1])
with col_dl2:
st.download_button(
"πŸ“₯ Download Optimized Prompt",
optimized,
file_name="optimized_prompt.txt",
use_container_width=True
)
with col2:
st.markdown("""
<div style="background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); padding: 1rem; border-radius: 15px; margin-bottom: 1rem;">
<h3 style="color: white; text-align: center; margin: 0;">📊 Optimization Results</h3>
</div>
""", unsafe_allow_html=True)
# Token Savings Card
st.markdown(
f"""
<div class="metric-card">
<h4 style="margin-top:0; opacity: 0.9;">🎯 Token Reduction</h4>
<div style="font-size:36px;font-weight:bold;margin:10px 0;">
{percent_savings:.1f}%
</div>
<div style="opacity: 0.8; font-size:16px;">
{token_savings} tokens saved
</div>
</div>
""",
unsafe_allow_html=True,
)
# Cost Savings Card
if orig_toks > 0 and (input_cost + output_cost) > 0:
cost_percent_savings = (
total_cost_savings
/ (orig_toks * (input_cost + output_cost) / 1000)
* 100
)
else:
cost_percent_savings = 0
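# The denominator is the original prompt priced at the combined input+output
# rate, so this percentage tracks the token-reduction figure shown above.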
st.markdown(
f"""
<div class="cost-card">
<h4 style="margin-top:0; opacity: 0.9;">💸 Cost Reduction</h4>
<div style="font-size:36px;font-weight:bold;margin:10px 0;">
{cost_percent_savings:.1f}%
</div>
<div style="opacity: 0.8; font-size:16px;">
${total_cost_savings:.4f} saved per call
</div>
</div>
""",
unsafe_allow_html=True,
)
# Visual Progress Indicator
progress_value = min(1.0, max(0.0, percent_savings / 100))
st.markdown("**πŸ“ˆ Optimization Progress**")
st.progress(progress_value)
st.markdown(f"<p style='text-align: center; color: #667eea; font-weight: 500;'>Prompt reduced to {100-percent_savings:.1f}% of original size</p>", unsafe_allow_html=True)
# Detailed Breakdown
with st.expander("📊 Cost Analysis"):
col_a, col_b = st.columns(2)
with col_a:
st.markdown(
f"**Input Cost**\n\n"
f"Original: {format_cost(orig_toks, input_cost)}\n\n"
f"Optimized: {format_cost(new_toks, input_cost)}\n\n"
f"Saved: {format_cost(token_savings, input_cost)}"
)
with col_b:
st.markdown(
f"**Output Cost**\n\n"
f"Original: {format_cost(orig_toks, output_cost)}\n\n"
f"Optimized: {format_cost(new_toks, output_cost)}\n\n"
f"Saved: {format_cost(token_savings, output_cost)}"
)
# Optimization report
with st.expander("🔍 Applied Optimizations"):
st.markdown("### Common Transformations")
st.json(
{
"Removed fillers": "e.g., 'very', 'carefully'",
"Shortened phrases": "'advantages/disadvantages' β†’ 'pros/cons'",
"Structural changes": "Simplified JSON formatting",
"Verb optimization": "Converted to base forms",
"Preposition removal": "Dropped non-essential connectors",
}
)
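# The list above is a static illustration of typical transformations; it is not
# generated from the specific prompt that was just optimized.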
st.markdown("### Share Your Savings")
st.code(
f"Saved {token_savings} tokens (${total_cost_savings:.4f}) with #PromptOptimizer\n"
f"Optimization level: {aggressiveness*100:.0f}%"
)
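
# A minimal sketch of driving the local optimizer outside Streamlit, assuming
# AdvancedPromptOptimizer.optimize keeps the (optimized, orig_toks, new_toks)
# return shape used in main() above; intended as an illustration only.
def _demo_optimize(prompt: str, aggressiveness: float = 0.7) -> dict:
    optimizer = AdvancedPromptOptimizer()
    optimized, orig_toks, new_toks = optimizer.optimize(prompt, aggressiveness)
    saved = orig_toks - new_toks
    return {
        "optimized": optimized,
        "original_tokens": orig_toks,
        "optimized_tokens": new_toks,
        "tokens_saved": saved,
        "input_cost_saved_gpt4": format_cost(saved, cost_model["GPT-4"][0]),
    }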
if __name__ == "__main__":
main()