| """ | |
| app_optimized.py - Memory-Optimized ID Agents | |
| ---------------------------------------------- | |
| Streamlined version of the complex multi-agent system optimized for HF Spaces memory limits. | |
| Includes core agent builder functionality while reducing memory footprint. | |
| """ | |
| # --- Core Imports --- | |
| import gradio as gr | |
| import json | |
| import re | |
| import os | |
| import asyncio | |
| import logging | |
| from typing import Dict, cast | |
# Essential imports with graceful fallbacks
try:
    import openai
    from openai import RateLimitError, APIError, APIConnectionError, OpenAI
    OPENAI_AVAILABLE = True
except ImportError:
    OPENAI_AVAILABLE = False

try:
    from core.agents.agent_utils import linkify_citations, build_agent, load_prefilled, prepare_download
    from config import agents_config, skills_library, prefilled_agents
    from core.agents.chat_orchestrator import simulate_agent_response_stream
    CORE_MODULES_AVAILABLE = True
except ImportError:
    CORE_MODULES_AVAILABLE = False
    # Fallback configurations
    agents_config = {}
    skills_library = {
        "🛡️ Antimicrobial Stewardship": ["recommend_deescalation", "alert_prolonged_antibiotic_use"],
        "🦠 Infection Prevention and Control": ["calculate_infection_rate", "assess_outbreak_risk"],
        "🔬 Research Assistant": ["search_pubmed", "summarize_guidelines"],
        "🏥 Clinical Assistant": ["differential_diagnosis", "treatment_recommendations"],
        "📚 Education Assistant": ["create_quiz", "explain_concepts"],
        "🎼 Orchestrator": ["coordinate_agents", "synthesize_responses"]
    }
    prefilled_agents = {
        "Stewardship Expert": {
            "agent_type": "🛡️ Antimicrobial Stewardship",
            "agent_name": "StewardshipBot",
            "agent_mission": "Optimize antibiotic use and prevent resistance",
            "skills": ["recommend_deescalation", "alert_prolonged_antibiotic_use"]
        }
    }
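
# NOTE: The fallback skills_library / prefilled_agents above are illustrative defaults;
# when the core modules import successfully, the real definitions from `config` are used instead.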
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# OpenAI setup
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
if OPENAI_API_KEY and OPENAI_AVAILABLE:
    client = OpenAI(api_key=OPENAI_API_KEY)
else:
    client = None
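
# If no API key is set, `client` stays None and the Simple Chat tab returns a
# configuration warning instead of calling the API.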
def simple_chat_response(user_message, history):
    """Simple chat response for fallback mode"""
    if not client:
        return history + [[user_message, "⚠️ OpenAI not configured. Please check API keys."]], ""

    # Convert the Gradio tuple-format history to OpenAI chat messages
    messages = []
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": user_message})

    try:
        completion = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=messages,
            temperature=0.7,
        )
        reply = (completion.choices[0].message.content or "").strip()
        return history + [[user_message, reply]], ""
    except Exception as e:
        error_msg = f"Error: {str(e)}"
        return history + [[user_message, error_msg]], ""
def convert_messages_to_gradio(messages):
    """Convert OpenAI messages to Gradio format"""
    if not messages:
        return []
    result = []
    for i, msg in enumerate(messages):
        if msg["role"] == "user":
            # Look ahead for assistant response
            assistant_response = ""
            if i + 1 < len(messages) and messages[i + 1]["role"] == "assistant":
                assistant_response = messages[i + 1]["content"]
            result.append([msg["content"], assistant_response])
        elif msg["role"] == "assistant" and (i == 0 or messages[i - 1]["role"] != "user"):
            # Standalone assistant message (like greeting)
            result.append([None, msg["content"]])
    return result
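
# Example (illustrative, assuming OpenAI-style message dicts):
#   convert_messages_to_gradio([
#       {"role": "assistant", "content": "Hi, how can I help?"},
#       {"role": "user", "content": "Define MIC."},
#       {"role": "assistant", "content": "Minimum inhibitory concentration ..."},
#   ])
#   -> [[None, "Hi, how can I help?"], ["Define MIC.", "Minimum inhibitory concentration ..."]]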
def safe_agent_chat(agent_name, user_text, histories):
    """Safe agent chat with fallback"""
    if not CORE_MODULES_AVAILABLE:
        response = f"🔧 **Demo Mode**: Agent '{agent_name}' would normally provide specialized infectious disease assistance here."
        history = histories.get(agent_name, [])
        new_history = history + [[user_text, response]]
        histories[agent_name] = new_history
        return new_history, histories, ""

    try:
        # Use existing chat logic if available
        from app import chatpanel_handle
        return chatpanel_handle(agent_name, user_text, histories)
    except Exception as e:
        # Fallback to simple response
        logger.warning(f"Falling back to simple agent response: {e}")
        response = f"🤖 Agent '{agent_name}' received: {user_text}\n\nThis is a simplified response due to system limitations."
        history = histories.get(agent_name, [])
        new_history = history + [[user_text, response]]
        histories[agent_name] = new_history
        return new_history, histories, ""
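
# `histories` maps agent name -> list of [user, assistant] pairs (Gradio tuple format),
# so each agent keeps its own conversation across turns.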
def build_optimized_ui():
    """Build memory-optimized UI"""
    # Streamlined CSS - reduced from original
    css = """
    :root {
        --id-primary: #1e40af;
        --id-secondary: #3b82f6;
        --id-accent: #06d6a0;
        --id-bg: #f8fafc;
        --id-surface: #ffffff;
        --id-text: #1e293b;
    }
    .gradio-container { background: var(--id-bg) !important; }
    .id-header {
        background: linear-gradient(90deg, var(--id-primary), var(--id-secondary));
        color: white;
        padding: 1.5rem;
        border-radius: 12px;
        margin-bottom: 1.5rem;
    }
    .generate-btn {
        background: linear-gradient(90deg, var(--id-secondary), var(--id-accent));
        color: white !important;
        border: none !important;
        font-weight: 600;
    }
    """
    with gr.Blocks(title="🦠 ID Agents - Optimized", css=css, theme=gr.themes.Soft()) as app:
        # State management
        builder_chat_histories = gr.State({})

        # Header
        gr.HTML("""
        <div class="id-header">
            <h1 style="margin: 0;">🦠 ID Agents - Multi-Agent Builder</h1>
            <p style="margin: 8px 0 0 0;">Optimized for HF Spaces - Build specialized infectious disease agents</p>
        </div>
        """)

        with gr.Tabs():
            # Tab 1: Simple Chat
            with gr.TabItem("💬 Simple Chat"):
                gr.Markdown("### Quick GPT-3.5 Chat")
                simple_chatbot = gr.Chatbot()
                simple_input = gr.Textbox(placeholder="Ask anything...", show_label=False)
                simple_send = gr.Button("Send")
                simple_reset = gr.Button("Reset")

                # Wire up simple chat
                simple_send.click(
                    fn=simple_chat_response,
                    inputs=[simple_input, simple_chatbot],
                    outputs=[simple_chatbot, simple_input]
                )
                simple_input.submit(
                    fn=simple_chat_response,
                    inputs=[simple_input, simple_chatbot],
                    outputs=[simple_chatbot, simple_input]
                )
                simple_reset.click(lambda: ([], ""), outputs=[simple_chatbot, simple_input])
            # Tab 2: Agent Builder (Streamlined)
            with gr.TabItem("🛠️ Agent Builder"):
                gr.Markdown("### Build Your ID Agent")
                with gr.Row():
                    with gr.Column(scale=1):
                        # Simplified form
                        prefilled = gr.Dropdown(
                            choices=["None"] + list(prefilled_agents.keys()),
                            label="Template",
                            value="None"
                        )
                        agent_type = gr.Radio(
                            choices=list(skills_library.keys()),
                            label="Agent Type"
                        )
                        agent_name = gr.Textbox(label="Agent Name", placeholder="e.g., StewardBot")
                        agent_mission = gr.Textbox(label="Mission", lines=3)
                        skills = gr.CheckboxGroup(choices=[], label="Skills")
                        generate_button = gr.Button("✨ Generate Agent", elem_classes="generate-btn")

                        # Generated config
                        agent_output = gr.Code(label="Agent Config", language="json")

                    with gr.Column(scale=2):
                        # Agent management
                        gr.Markdown("### Active Agents")
                        active_agents = gr.Markdown("_(None yet)_")
                        agent_dropdown = gr.Dropdown(label="Select Agent", choices=[])
                        with gr.Row():
                            chat_button = gr.Button("💬 Chat")
                            remove_button = gr.Button("❌ Remove")

                        # Chat interface
                        agent_chatbot = gr.Chatbot(label="Agent Chat")
                        chat_input = gr.Textbox(placeholder="Chat with your agent...", show_label=False)
                        chat_send = gr.Button("Send")
                        reset_chat = gr.Button("🔄 Reset")
                # Update skills when agent type changes
                def update_skills(agent_type):
                    if agent_type:
                        return gr.update(choices=skills_library.get(agent_type, []))
                    return gr.update(choices=[])

                agent_type.change(update_skills, inputs=[agent_type], outputs=[skills])
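
                # --- Illustrative addition (not in the original file) ---
                # The Template dropdown above is otherwise unwired; assuming the
                # prefilled_agents shape defined in the fallback config, a minimal
                # handler could prefill the form like this. Skills are left for the
                # user to tick, since the agent_type change above refreshes the choices.
                def apply_template(template_name):
                    template = prefilled_agents.get(template_name)
                    if not template:
                        return gr.update(), gr.update(), gr.update()
                    return (
                        gr.update(value=template.get("agent_type")),
                        gr.update(value=template.get("agent_name", "")),
                        gr.update(value=template.get("agent_mission", "")),
                    )

                prefilled.change(
                    apply_template,
                    inputs=[prefilled],
                    outputs=[agent_type, agent_name, agent_mission],
                )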
                # Generate agent
                def generate_agent(prefilled_choice, agent_type, agent_name, agent_mission, skills):
                    if not agent_name.strip():
                        return "❌ Please provide an agent name", gr.update(), gr.update()
                    agent_data = {
                        "agent_type": agent_type,
                        "agent_name": agent_name,
                        "agent_mission": agent_mission,
                        "skills": skills,
                        "web_access": True,
                        "allow_fallback": True
                    }
                    agents_config[agent_name] = json.dumps(agent_data)

                    # Update UI
                    config_json = json.dumps(agent_data, indent=2)
                    active_list = "### Active Agents\n" + "\n".join(f"- {name}" for name in agents_config.keys())
                    choices = list(agents_config.keys())
                    return config_json, active_list, gr.update(choices=choices)

                generate_button.click(
                    generate_agent,
                    inputs=[prefilled, agent_type, agent_name, agent_mission, skills],
                    outputs=[agent_output, active_agents, agent_dropdown]
                )
                # Chat with agent
                def start_agent_chat(agent_name):
                    if agent_name:
                        greeting = f"👋 Hello! I'm {agent_name}. How can I assist you with infectious diseases?"
                        return [[None, greeting]], ""
                    return [], ""

                chat_button.click(
                    start_agent_chat,
                    inputs=[agent_dropdown],
                    outputs=[agent_chatbot, chat_input]
                )
                # Send message to agent
                def send_to_agent(agent_name, message, history, histories):
                    if not agent_name or not message.strip():
                        return history, histories, ""
                    # safe_agent_chat returns (new_history, histories, cleared_input)
                    return safe_agent_chat(agent_name, message, histories)

                chat_send.click(
                    send_to_agent,
                    inputs=[agent_dropdown, chat_input, agent_chatbot, builder_chat_histories],
                    outputs=[agent_chatbot, builder_chat_histories, chat_input]
                )
                chat_input.submit(
                    send_to_agent,
                    inputs=[agent_dropdown, chat_input, agent_chatbot, builder_chat_histories],
                    outputs=[agent_chatbot, builder_chat_histories, chat_input]
                )
                # Reset chat
                reset_chat.click(lambda: ([], ""), outputs=[agent_chatbot, chat_input])
                # Remove agent
                def remove_agent(agent_name):
                    if agent_name in agents_config:
                        del agents_config[agent_name]
                    active_list = (
                        "### Active Agents\n" + "\n".join(f"- {name}" for name in agents_config.keys())
                        if agents_config
                        else "### Active Agents\n_(None yet)_"
                    )
                    choices = list(agents_config.keys())
                    return active_list, gr.update(choices=choices, value=None)

                remove_button.click(
                    remove_agent,
                    inputs=[agent_dropdown],
                    outputs=[active_agents, agent_dropdown]
                )
        # Status info
        status_md = "✅ **Optimized Mode**: Core functionality available"
        if CORE_MODULES_AVAILABLE:
            status_md += "\n\n✅ **Full modules loaded**"
        else:
            status_md += "\n\n⚠️ **Demo mode**: Limited functionality"
        gr.Markdown(f"### System Status\n{status_md}")

    return app
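
# Illustrative usage (not part of the original launch flow): for a quick local
# smoke test without authentication, the UI can be launched directly, e.g.
#     build_optimized_ui().launch()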
# --- Main Application Entry Point ---
if __name__ == "__main__":
    try:
        print("🚀 Launching Optimized ID Agents...")

        # Authentication credentials
        auth_credentials = [
            ("dr_smith", "idweek2025"),
            ("id_fellow", "hello"),
            ("pharmacist", "stewardship"),
            ("ipc_nurse", "infection"),
            ("researcher", "research"),
            ("educator", "education"),
            ("student", "learning"),
            ("admin", "idagents2025"),
            ("guest1", "guest123"),
            ("guest2", "guest456")
        ]

        auth_message = """
🦠 **ID Agents Beta Testing - Optimized Version**

Welcome to the memory-optimized ID Agents environment!

**Available Test Accounts:**
• **dr_smith** / idweek2025 (ID Physician)
• **id_fellow** / hello (ID Fellow)
• **pharmacist** / stewardship (Clinical Pharmacist)
• **ipc_nurse** / infection (IPC Coordinator)
• **researcher** / research (Clinical Researcher)
• **educator** / education (Medical Educator)
• **student** / learning (Medical Student)
• **admin** / idagents2025 (Administrator)
• **guest1** / guest123 (Guest Access)
• **guest2** / guest456 (Guest Access)

This optimized version provides core agent building functionality
while staying within HF Spaces memory limits.
"""

        # Configure launch
        try:
            from hf_config import configure_hf_environment, get_hf_launch_config
            if configure_hf_environment():
                launch_config = get_hf_launch_config()
                launch_config["auth"] = auth_credentials
                launch_config["auth_message"] = auth_message
                print("🚀 HF Spaces optimized deployment")
            else:
                launch_config = {
                    "auth": auth_credentials,
                    "auth_message": auth_message,
                    "share": False,
                    "server_name": "127.0.0.1",
                    "server_port": 7860
                }
                print("🚀 Local optimized deployment")
        except ImportError:
            launch_config = {
                "auth": auth_credentials,
                "auth_message": auth_message,
                "share": False,
                "server_name": "127.0.0.1",
                "server_port": 7860
            }
            print("🚀 Fallback optimized deployment")

        # Launch optimized app
        build_optimized_ui().launch(**launch_config)

    except Exception as e:
        logger.error(f"Failed to launch optimized ID Agents: {e}")
        print(f"❌ Launch failed: {e}")
        import traceback
        traceback.print_exc()