# Prompt-optimization backend: persona-based single-call LLM optimization and
# search-enhanced ReAct-agent optimization (LangChain / LangGraph / Tavily).
import os

import tiktoken
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from langchain_tavily import TavilySearch
from langgraph.prebuilt import create_react_agent
# Persona-specific system prompts used by both optimization paths.
# Each value is an XML-tagged instruction block (<role>/<task>/<rules>/<example>)
# that steers the optimization style; "Default" is the fallback used whenever an
# unknown persona key is requested (see PERSONAS.get(..., PERSONAS["Default"])).
PERSONAS = {
    "Default": "<role>You are a helpful assistant that optimizes prompts for clarity, conciseness, and effectiveness.</role>\n<task>Your goal is to reduce token count while preserving the original intent.</task>\n<rules>\n1. Remove filler words.\n2. Simplify complex sentences.\n3. Use active voice.\n4. Convert jargon to plain language.\n</rules>",
    "UI/UX Designer": "<role>You are a UI/UX designer.</role>\n<task>Optimize the following prompt to be user-centric, clear, and focused on usability. Remove ambiguity and ensure the prompt is easy to understand for a general audience.</task>\n<example>\nUser: I want to find the settings page.\nAssistant: Locate settings page.\n</example>",
    "Software Engineer": "<role>You are a software engineer.</role>\n<task>Optimize the following prompt for technical accuracy, conciseness, and clarity. Remove any non-essential information and focus on the core technical request.</task>\n<example>\nUser: The app is crashing when I click the button.\nAssistant: Debug app crash on button click.\n</example>",
    "Marketing Copywriter": "<role>You are a marketing copywriter.</role>\n<task>Optimize the following prompt to be persuasive, engaging, and impactful. Focus on action-oriented language and a clear call to action.</task>\n<example>\nUser: Tell me about your new product.\nAssistant: Describe the new product's features and benefits.\n</example>",
    "Creative Writer": "<role>You are a creative writer.</role>\n<task>Optimize the following prompt to be imaginative, descriptive, and evocative. Enhance the original prompt to inspire a more creative response.</task>\n<example>\nUser: Write a story about a cat.\nAssistant: Write a story about a mischievous cat who goes on an adventure.\n</example>",
    "Technical Writer": "<role>You are a technical writer.</role>\n<task>Optimize the following prompt to be clear, concise, and easy to follow. Use simple language and avoid jargon.</task>\n<example>\nUser: How do I install the software?\nAssistant: Provide step-by-step instructions for installing the software.\n</example>",
    "Legal Advisor": "<role>You are a legal advisor.</role>\n<task>Optimize the following prompt for legal accuracy and clarity. Ensure the prompt is unambiguous and does not contain any misleading information.</task>\n<example>\nUser: What are the terms of the contract?\nAssistant: Summarize the key terms and conditions of the contract.\n</example>",
    "Medical Professional": "<role>You are a medical professional.</role>\n<task>Optimize the following prompt for medical accuracy and clarity. Use precise medical terminology and avoid generalizations.</task>\n<example>\nUser: What are the symptoms of the flu?\nAssistant: List the common symptoms of influenza.\n</example>",
    "Financial Analyst": "<role>You are a financial analyst.</role>\n<task>Optimize the following prompt for financial accuracy and clarity. Use precise financial terminology and avoid speculation.</task>\n<example>\nUser: What is the company's financial performance?\nAssistant: Analyze the company's key financial metrics from the latest earnings report.\n</example>",
}
| # ============================================================================ | |
| # SIMPLE LLM OPTIMIZATION (No Search) | |
| # ============================================================================ | |
def optimize_with_llm(prompt: str, api_key: str, persona: str = "Default") -> str:
    """Optimize a prompt with a single LLM call (no search enhancement).

    Args:
        prompt: The prompt text to shorten and optimize.
        api_key: API key for the aimlapi.com OpenAI-compatible endpoint.
        persona: Key into PERSONAS selecting the optimization style;
            unknown keys fall back to the "Default" persona.

    Returns:
        The optimized prompt text returned by the model.
    """
    persona_instructions = PERSONAS.get(persona, PERSONAS["Default"])
    system_prompt = (
        persona_instructions
        + "\n\nIMPORTANT: Return ONLY the optimized prompt without any explanations, prefixes, markdown formatting, or additional text."
    )

    # Deterministic output (temperature 0) against the aimlapi gateway.
    llm = ChatOpenAI(
        base_url="https://api.aimlapi.com/v1",
        api_key=api_key,
        model="openai/gpt-5-chat-latest",
        temperature=0,
    )

    template = ChatPromptTemplate.from_messages(
        [
            ("system", system_prompt),
            ("human", "Shorten and optimize the following prompt: {prompt}"),
        ]
    )

    return (template | llm).invoke({"prompt": prompt}).content
| # ============================================================================ | |
| # AGENT-BASED OPTIMIZATION WITH SEARCH ENHANCEMENT | |
| # ============================================================================ | |
def get_accurate_token_count(text: str, model_name: str = "gpt-4") -> int:
    """Count the tokens in *text* with the tokenizer matching *model_name*.

    Args:
        text: The text to tokenize.
        model_name: Model identifier used to pick the encoding.

    Returns:
        Number of tokens produced by the selected encoding.
    """
    try:
        lowered = model_name.lower()
        if "gpt-5" in lowered or "gpt-4o" in lowered:
            # o200k_base is the encoding for the gpt-4o / gpt-5 family.
            enc = tiktoken.get_encoding("o200k_base")
        else:
            # Resolve the encoding from the model name itself
            # (e.g. plain "gpt-4" correctly maps to cl100k_base, not o200k_base).
            enc = tiktoken.encoding_for_model(model_name)
        return len(enc.encode(text))
    except Exception:
        # Unknown model or tokenizer failure: fall back to cl100k_base.
        enc = tiktoken.get_encoding("cl100k_base")
        return len(enc.encode(text))
def create_prompt_enhancement_agent(api_key: str, tavily_api_key: str, persona: str = "Default"):
    """Create a ReAct agent with a persona-specific system prompt and Tavily search.

    Args:
        api_key: API key for the aimlapi.com OpenAI-compatible endpoint.
        tavily_api_key: Tavily API key; exported to the environment for the
            search tool when provided.
        persona: Key into PERSONAS; unknown keys fall back to "Default".

    Returns:
        A LangGraph ReAct agent. When the installed langgraph version does not
        accept ``state_modifier``, the system prompt is stashed on the agent as
        ``_system_prompt`` so enhance_and_optimize_prompt can inline it.
    """
    if tavily_api_key:
        os.environ["TAVILY_API_KEY"] = tavily_api_key

    # Get persona-specific instructions
    persona_prompt = PERSONAS.get(persona, PERSONAS["Default"])

    # Create comprehensive system prompt that includes persona details
    system_prompt = f"""You are an intelligent prompt enhancement and optimization agent.
{persona_prompt}
WORKFLOW:
1. First, analyze the user's prompt to determine if it needs current information
2. If current information would improve the prompt, use the tavily_search tool to find relevant, up-to-date data
3. Enhance the original prompt by incorporating valuable current information (if found)
4. Apply your persona-specific optimization approach to make the prompt clear and concise
5. Return ONLY the final optimized prompt - no explanations or metadata
SEARCH GUIDELINES:
- Only search when the prompt would genuinely benefit from current information
- Look for keywords like "latest", "current", "2024", "2025", "recent", "new"
- Consider if your persona typically needs up-to-date information
- Use 3-6 relevant search terms
- Evaluate search results for relevance before incorporating
OPTIMIZATION PRINCIPLES:
- Preserve the user's original intent
- Apply persona-specific expertise
- Balance enhancement with conciseness
- Use clear, actionable language
- Remove unnecessary words while maintaining meaning"""

    # Set up LLM (slight temperature for the agent's reasoning steps)
    llm = ChatOpenAI(
        base_url="https://api.aimlapi.com/v1",
        api_key=api_key,
        model="openai/gpt-5-chat-latest",
        temperature=0.1
    )

    # Configure search tool
    tavily_search_tool = TavilySearch(
        max_results=3,
        topic="general",
        include_answer=True,
        search_depth="basic"
    )

    # Create ReAct agent with system prompt
    try:
        agent = create_react_agent(
            llm,
            [tavily_search_tool],
            state_modifier=system_prompt
        )
    except TypeError:
        # Fallback if state_modifier is not supported.
        agent = create_react_agent(llm, [tavily_search_tool])
        # Bug fix: store the system prompt ONLY in this fallback branch.
        # Previously it was stored unconditionally, so enhance_and_optimize_prompt
        # duplicated the full system prompt inside the user message even when
        # state_modifier had already attached it to the agent.
        agent._system_prompt = system_prompt
    return agent
def enhance_and_optimize_prompt(prompt: str, persona: str, agent):
    """Run *agent* on *prompt* and return the final optimized prompt text.

    The *persona* argument is unused here (persona behavior is baked into the
    agent at construction time) but kept for interface compatibility.

    Args:
        prompt: The original prompt to enhance and optimize.
        persona: Persona key (unused in this function).
        agent: A ReAct agent exposing ``invoke``; may carry a ``_system_prompt``
            attribute when the system prompt could not be attached at creation.

    Returns:
        The content of the agent's final message.
    """
    if not hasattr(agent, '_system_prompt'):
        # System prompt was attached at agent creation; send a short instruction.
        user_instruction = f"Enhance and optimize this prompt: {prompt}"
    else:
        # Fallback path: fold the stored system prompt into the user message.
        user_instruction = (
            f"{agent._system_prompt}\nOriginal prompt to enhance and optimize: {prompt}"
        )

    # The agent handles the entire search/optimize flow autonomously.
    reply = agent.invoke({"messages": [{"role": "user", "content": user_instruction}]})
    return reply["messages"][-1].content
def optimize_with_agent(prompt: str, api_key: str, persona: str = "Default", tavily_api_key: str = None) -> str:
    """Optimize *prompt* with a search-capable ReAct agent.

    Args:
        prompt: The prompt text to enhance and optimize.
        api_key: API key for the aimlapi.com endpoint.
        persona: Key into PERSONAS selecting the optimization style.
        tavily_api_key: Optional Tavily key enabling web search.

    Returns:
        The optimized prompt produced by the agent.
    """
    return enhance_and_optimize_prompt(
        prompt,
        persona,
        create_prompt_enhancement_agent(api_key, tavily_api_key, persona),
    )
| # ============================================================================ | |
| # UTILITY FUNCTIONS | |
| # ============================================================================ | |
def compare_optimization_methods(prompt: str, api_key: str, persona: str = "Default", tavily_api_key: str = None) -> dict:
    """Compare simple LLM optimization against agent-based optimization.

    Args:
        prompt: The original prompt to optimize.
        api_key: API key for the aimlapi.com endpoint.
        persona: Key into PERSONAS selecting the optimization style.
        tavily_api_key: Optional Tavily key; without it the agent comparison
            is skipped and reported as "N/A".

    Returns:
        A dict with token metrics for the original prompt and both methods.
    """
    original_tokens = get_accurate_token_count(prompt)

    # Single-call LLM optimization (always runs).
    simple_prompt = optimize_with_llm(prompt, api_key, persona)
    simple_tokens = get_accurate_token_count(simple_prompt)

    # Agent-based optimization only when a Tavily key is available.
    if tavily_api_key:
        agent_prompt = optimize_with_agent(prompt, api_key, persona, tavily_api_key)
        agent_tokens = get_accurate_token_count(agent_prompt)
    else:
        agent_prompt = "N/A - Tavily API key required"
        agent_tokens = 0

    return {
        "original": {"prompt": prompt, "tokens": original_tokens},
        "simple_optimization": {
            "prompt": simple_prompt,
            "tokens": simple_tokens,
            "reduction": original_tokens - simple_tokens,
        },
        "agent_optimization": {
            "prompt": agent_prompt,
            "tokens": agent_tokens,
            # Zero tokens means the agent did not run (or returned nothing).
            "reduction": original_tokens - agent_tokens if agent_tokens > 0 else "N/A",
        },
        "persona": persona,
    }
def batch_optimize(prompts: list, api_key: str, persona: str = "Default", method: str = "simple") -> list:
    """Optimize multiple prompts with the chosen method.

    Args:
        prompts: Prompts to optimize.
        api_key: API key for the aimlapi.com endpoint.
        persona: Key into PERSONAS selecting the optimization style.
        method: "simple" for direct LLM optimization; "agent" is reported as
            unsupported here (it needs a Tavily key not accepted by this API).

    Returns:
        One dict per prompt with the original, the result, persona, and method.
    """
    def _optimize_one(single_prompt):
        # Dispatch on the requested method for a single prompt.
        if method == "simple":
            return optimize_with_llm(single_prompt, api_key, persona)
        if method == "agent":
            # Note: This would require tavily_api_key for agent method
            return "Agent method requires tavily_api_key parameter"
        return "Invalid method. Use 'simple' or 'agent'"

    return [
        {
            "original": single_prompt,
            "optimized": _optimize_one(single_prompt),
            "persona": persona,
            "method": method,
        }
        for single_prompt in prompts
    ]