IDAgents Developer committed on
Commit
8120936
·
1 Parent(s): 2379938

Deploy COMPLETE ID Agents - Medical AI system (clean, no cache files)

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitignore +56 -0
  2. __init__.py +1 -0
  3. app.py +0 -0
  4. assets/style.css +0 -0
  5. config.py +102 -0
  6. core/__init__.py +1 -0
  7. core/agents/__init__.py +1 -0
  8. core/agents/agent_utils.py +84 -0
  9. core/agents/chat_orchestrator.py +352 -0
  10. core/agents/orchestrator.py +670 -0
  11. core/config/settings.py +0 -0
  12. core/ui/__init__.py +1 -0
  13. core/ui/ui.py +111 -0
  14. core/utils/__init__.py +1 -0
  15. core/utils/llm_connector.py +467 -0
  16. core/utils/rag.py +85 -0
  17. core/utils/skills_registry.py +132 -0
  18. prompts/alert_prolonged_antibiotic_use.j2 +9 -0
  19. prompts/deescalation.j2 +9 -0
  20. prompts/diagnostic_recommendation.j2 +12 -0
  21. prompts/draft_critique_enhance_board_exam.j2 +96 -0
  22. prompts/empiric_therapy.j2 +13 -0
  23. prompts/evaluate_nhsn_definition.j2 +19 -0
  24. prompts/extract_nhsn_fields.j2 +12 -0
  25. prompts/extract_nhsn_logic.j2 +12 -0
  26. prompts/extract_reporting_fields.j2 +17 -0
  27. prompts/extract_reporting_format.j2 +23 -0
  28. prompts/final_enhancement_board_exam.j2 +143 -0
  29. prompts/generate_board_exam_vignette.j2 +75 -0
  30. prompts/generate_comparison_table.j2 +77 -0
  31. prompts/generate_presentation_slide.j2 +65 -0
  32. prompts/generate_question_blueprint.j2 +87 -0
  33. prompts/history_taking.j2 +9 -0
  34. prompts/ipc_reporting.j2 +14 -0
  35. prompts/ipc_reporting_followup.j2 +18 -0
  36. prompts/isolation_precautions.j2 +13 -0
  37. prompts/nhsn_criteria_evaluator_followup.j2 +17 -0
  38. prompts/nhsn_criteria_evaluator_start.j2 +16 -0
  39. prompts/quality_review_board_exam.j2 +83 -0
  40. prompts/summarize_antibiotic_duration.j2 +13 -0
  41. requirements.txt +59 -0
  42. tools/__init__.py +0 -0
  43. tools/alert_prolonged_antibiotic_use.py +119 -0
  44. tools/base.py +56 -0
  45. tools/create_educational_presentation.py +1308 -0
  46. tools/explain_in_layman_language.py +370 -0
  47. tools/fhir_patient.py +50 -0
  48. tools/format_references.py +423 -0
  49. tools/generate_board_exam_question.py +0 -0
  50. tools/generate_flash_cards.py +324 -0
.gitignore ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+ *.so
6
+ .Python
7
+ build/
8
+ develop-eggs/
9
+ dist/
10
+ downloads/
11
+ eggs/
12
+ .eggs/
13
+ lib/
14
+ lib64/
15
+ parts/
16
+ sdist/
17
+ var/
18
+ wheels/
19
+ *.egg-info/
20
+ .installed.cfg
21
+ *.egg
22
+ MANIFEST
23
+
24
+ # PyInstaller
25
+ *.manifest
26
+ *.spec
27
+
28
+ # Environments
29
+ .env
30
+ .venv
31
+ env/
32
+ venv/
33
+ ENV/
34
+ env.bak/
35
+ venv.bak/
36
+
37
+ # IDEs
38
+ .vscode/
39
+ .idea/
40
+ *.swp
41
+ *.swo
42
+
43
+ # OS
44
+ .DS_Store
45
+ Thumbs.db
46
+
47
+ # Logs
48
+ *.log
49
+ logs/
50
+
51
+ # Temporary files
52
+ *.tmp
53
+ *.temp
54
+
55
+ # HF Spaces specific
56
+ .git/
__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # This file marks the tools directory as a Python package.
app.py CHANGED
The diff for this file is too large to render. See raw diff
 
assets/style.css ADDED
File without changes
config.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ config.py
3
+ ---------
4
+ Global configuration, shared state, and constants for the modular AI agent system.
5
+ """
6
+
7
+ # Global configuration, shared state, and constants for the modular AI agent system.
8
+ import os
9
+
10
+ agents_config = {}
11
+ rag_retriever = None
12
+
13
+ skills_library = {
14
+ "🛡️ Antimicrobial Stewardship": [
15
+ "recommend_deescalation",
16
+ "recommend_empiric_therapy",
17
+ "alert_prolonged_antibiotic_use"
18
+ ],
19
+ "🦠 Infection Prevention and Control": [
20
+ "IPC_reporting",
21
+ "NHSN_criteria_evaluator",
22
+ "recommend_isolation_precautions"
23
+ ],
24
+ "🔬 Research Assistant": [
25
+ "search_pubmed",
26
+ "suggest_journals_for_submission",
27
+ "format_references"
28
+ ],
29
+ "🏥 Clinical Assistant": [
30
+ "retrieve_guidelines",
31
+ "explain_in_layman_language",
32
+ "history_taking"
33
+ ],
34
+ "📚 Education Assistant": [
35
+ "generate_board_exam_question",
36
+ "generate_flash_cards",
37
+ "create_educational_presentation"
38
+ ],
39
+ "🎼 Orchestrator": [
40
+ # Note: Orchestrator capabilities are built-in and don't require skill selection
41
+ ]
42
+ }
43
+
44
+ prefilled_agents = {
45
+ "Example Stewardship Agent": {
46
+ "agent_name": "SmartSteward",
47
+ "agent_type": "🛡️ Antimicrobial Stewardship",
48
+ "agent_mission": "Assist stewardship team in optimizing antibiotic use.",
49
+ "skills": [
50
+ "recommend_deescalation",
51
+ "alert_prolonged_antibiotic_use",
52
+ "recommend_empiric_therapy"
53
+ ]
54
+ },
55
+ "Example Infection Control Agent": {
56
+ "agent_name": "InfectoGuard",
57
+ "agent_type": "🦠 Infection Prevention and Control",
58
+ "agent_mission": "Support IPC team in monitoring outbreaks and adherence to precautions. When users ask about reportable diseases or reporting requirements, offer to help with the specific reporting process using available tools.",
59
+ "skills": [
60
+ "IPC_reporting",
61
+ "NHSN_criteria_evaluator",
62
+ "recommend_isolation_precautions"
63
+ ]
64
+ },
65
+ "Example Research Assistant Agent": {
66
+ "agent_name": "ResearchRanger",
67
+ "agent_type": "🔬 Research Assistant",
68
+ "agent_mission": "Assist in literature search, article summarization, and citation formatting.",
69
+ "skills": [
70
+ "search_pubmed",
71
+ "suggest_journals_for_submission",
72
+ "format_references"
73
+ ]
74
+ },
75
+ "Example Clinical Assistant Agent": {
76
+ "agent_name": "ClinicoPilot",
77
+ "agent_type": "🏥 Clinical Assistant",
78
+ "agent_mission": "Assist clinicians in diagnostic workflows and patient education by translating complex medical information into layman terms.",
79
+ "skills": [
80
+ "retrieve_guidelines",
81
+ "explain_in_layman_language"
82
+ ]
83
+ },
84
+ "Example Education Assistant Agent": {
85
+ "agent_name": "EduMedCoach",
86
+ "agent_type": "📚 Education Assistant",
87
+ "agent_mission": "Generate educational materials like board exam questions, flash cards, and comprehensive presentations.",
88
+ "skills": [
89
+ "generate_board_exam_question",
90
+ "generate_flash_cards",
91
+ "create_educational_presentation"
92
+ ]
93
+ },
94
+ "Example Orchestrator Agent": {
95
+ "agent_name": "ID Maestro",
96
+ "agent_type": "🎼 Orchestrator",
97
+ "agent_mission": "Intelligently coordinate multiple ID agents to provide comprehensive analysis and recommendations.",
98
+ "skills": [
99
+ # Orchestrator uses built-in coordination capabilities
100
+ ]
101
+ }
102
+ }
core/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # Core module for ID Agents application
core/agents/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # Agent orchestration and management components
core/agents/agent_utils.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import re
3
+ import gradio as gr
4
+
5
+ def linkify_citations(answer_text: str, titles: dict) -> str:
6
+ """
7
+ Replace every literal [n] in the answer with:
8
+ <a href="#srcn" title="snippet...">[n]</a>
9
+ where titles[n] holds the hover text.
10
+ """
11
+ def _sub(match):
12
+ n = match.group(1) # the number between brackets
13
+ title = titles.get(int(n), "")
14
+ return f'<a href="#src{n}" title="{title}">[{n}]</a>'
15
+
16
+ return re.sub(r"\[(\d+)\]", _sub, answer_text)
17
+
18
+ def build_agent(agent_type, agent_name, agent_mission, selected_skills, web_access, allow_fallback, uploaded_files, link1, link2, link3, link4):
19
+ trusted_links = [link for link in [link1, link2, link3, link4] if link]
20
+ agent = {
21
+ "agent_name": agent_name,
22
+ "agent_type": agent_type,
23
+ "agent_mission": agent_mission,
24
+ "skills": selected_skills,
25
+ "web_access": web_access,
26
+ "allow_fallback": allow_fallback,
27
+ "grounded_files": [file.name for file in uploaded_files] if uploaded_files else [],
28
+ "trusted_links": trusted_links
29
+ }
30
+ return json.dumps(agent, indent=2)
31
+
32
+ def load_prefilled(prefilled_name, prefilled_agents):
33
+ if prefilled_name in prefilled_agents:
34
+ data = prefilled_agents[prefilled_name]
35
+ return (data["agent_type"], data["agent_name"], data["agent_mission"], data["skills"])
36
+ else:
37
+ return (None, "", "", [])
38
+
39
+ def prepare_download(agent_json):
40
+ return ("agent_config.json", agent_json.encode())
41
+
42
+ def preload_demo_chat(agent_json):
43
+ """Show instruction message instead of agent greeting to avoid confusion"""
44
+ return [{"role": "assistant", "content": "✅ **Agent generated successfully!**\n\nTo start chatting:\n1. Select an agent from the dropdown menu above\n2. Click '💬 Chat with Selected Agent'\n3. Then you can type your questions here\n\n*Please select an agent to begin testing.*"}]
45
+
46
+ def _safe_title(text, limit=140):
47
+ txt = text.replace('"', "'").replace("\n", " ")
48
+ return (txt[:limit] + "…") if len(txt) > limit else txt
49
+
50
+ def extract_clinical_variables_from_history(history, variable_names):
51
+ """
52
+ Extract the most recent value for each variable from the chat history (list of dicts).
53
+ Returns a dict of variable_name: value.
54
+ """
55
+ import re
56
+ result = {var: None for var in variable_names}
57
+ # Search from most recent to oldest
58
+ for message in reversed(history):
59
+ if message["role"] != "user" and message["role"] != "assistant":
60
+ continue
61
+ content = message.get("content", "")
62
+ for var in variable_names:
63
+ # Simple regex: look for 'var: value' or '- var: value' or 'var = value'
64
+ pattern = rf"(?:^|\n|\-)\s*{re.escape(var.replace('_', ' '))}\s*[:=]\s*(.+)"
65
+ match = re.search(pattern, content, re.IGNORECASE)
66
+ if match and not result[var]:
67
+ value = match.group(1).strip()
68
+ # Remove trailing punctuation
69
+ value = re.sub(r"[\.,;\n\r]+$", "", value)
70
+ result[var] = value
71
+ return result
72
+
73
+ def validate_and_reference_recommendation(reply_text: str) -> str:
74
+ """
75
+ Checks the reply for common clinical errors and attaches references/guidelines.
76
+ Returns the possibly modified reply with references appended.
77
+ """
78
+ # No hardcoded clinical rules or references here. If clinical validation is needed, use a knowledge base or guideline-driven system.
79
+ return reply_text
80
+ """
81
+ agent_utils.py
82
+ -------------
83
+ Utility functions for agent configuration, citation linking, and other helpers.
84
+ """
core/agents/chat_orchestrator.py ADDED
@@ -0,0 +1,352 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ chat_orchestrator.py
3
+ -------------------
4
+ Agent orchestration, chat streaming, and related logic.
5
+ """
6
+
7
+ import json
8
+ from typing import Dict, cast
9
+ from core.utils.llm_connector import AgentLLMConnector
10
+ from core.utils.skills_registry import get_tool_by_name
11
+ from core.agents.agent_utils import validate_and_reference_recommendation
12
+
13
+ # --- Orchestrator state ---
14
+ orchestrators: Dict[str, object] = {}
15
+
16
+ # --- Constants ---
17
+ MAX_HISTORY = 20
18
+
19
+ # --- Build log utility ---
20
+ def build_log(conn):
21
+ """Return a formatted log of tool invocations if available."""
22
+ if hasattr(conn, 'invocations'):
23
+ invocations = getattr(conn, 'invocations', [])
24
+ if not invocations:
25
+ return ''
26
+ log = '--- Tool Invocation Log ---\n'
27
+ for i, inv in enumerate(invocations, 1):
28
+ log += f"{i}. {inv}\n"
29
+ return log
30
+ return ''
31
+
32
+ # --- Streaming to agent (for child agents) ---
33
+ def _stream_to_agent(cfg, history, user_input, debug_flag, active_children):
34
+ """Yield streaming responses for a child agent (sync generator)."""
35
+ skill_objs = []
36
+ if cfg.get("web_access", False):
37
+ web_tool = get_tool_by_name("search_internet", {"user_query": user_input})
38
+ if web_tool:
39
+ skill_objs.append(web_tool)
40
+ for skill_name in cfg.get("skills", []):
41
+ tool = get_tool_by_name(skill_name, {"user_query": user_input})
42
+ if tool:
43
+ skill_objs.append(tool)
44
+ # Pass allow_fallback, trusted_links, grounded_files to AgentLLMConnector
45
+ allow_fallback = cfg.get("allow_fallback", True)
46
+ trusted_links = cfg.get("trusted_links", [])
47
+ grounded_files = cfg.get("grounded_files", [])
48
+
49
+ # Get global RAG retriever if available
50
+ rag_retriever = None
51
+ try:
52
+ import sys
53
+ if 'app' in sys.modules:
54
+ app_module = sys.modules['app']
55
+ rag_retriever = getattr(app_module, 'rag_retriever', None)
56
+ except:
57
+ pass # No RAG retriever available
58
+
59
+ conn = AgentLLMConnector(
60
+ api_key=cast(str, cfg.get("api_key")),
61
+ skills=skill_objs,
62
+ allow_fallback=allow_fallback,
63
+ trusted_links=trusted_links,
64
+ grounded_files=grounded_files,
65
+ rag_retriever=rag_retriever
66
+ )
67
+ model = conn.agent_model_mapping.get(cfg.get("agent_type", ""), "gpt-5-mini")
68
+
69
+ # Build enhanced system message with tool-specific guidance
70
+ system_content = f"You are {cfg.get('agent_name', 'Agent')}. {cfg.get('agent_mission', '')}"
71
+
72
+ # Add general concise response guidelines for child agents
73
+ system_content += (
74
+ "\n\nRESPONSE GUIDELINES:\n"
75
+ "- Be concise and clinically focused\n"
76
+ "- Provide clear, actionable recommendations\n"
77
+ "- Avoid excessive reasoning or explanation unless specifically requested\n"
78
+ "- Structure responses with clear sections when appropriate\n"
79
+ "- Use bullet points or numbered lists for multiple recommendations"
80
+ )
81
+
82
+ # Add specific guidance for IPC reporting
83
+ has_ipc_reporting = any(skill.name == "IPC_reporting" for skill in skill_objs)
84
+ if has_ipc_reporting:
85
+ system_content += (
86
+ "\n\nWhen users ask about reportable diseases, reporting requirements, or infection control reporting, "
87
+ "always offer to help with the specific reporting process. Use the IPC_reporting tool to provide "
88
+ "jurisdiction-specific requirements and generate formatted reports. "
89
+ "IMPORTANT: When calling IPC_reporting, include ALL conversation context in the case_summary parameter, "
90
+ "especially the specific organism/pathogen mentioned by the user (e.g., 'User asked about typhus fever reporting')."
91
+ )
92
+
93
+ system_msg = {"role": "system", "content": system_content}
94
+ from collections import deque
95
+ recent = deque(history, maxlen=MAX_HISTORY)
96
+ history_msgs = [ {"role": m["role"], "content": m["content"]} for m in recent if m["role"] in ("user", "assistant")]
97
+ messages = [system_msg] + history_msgs
98
+ history.append({"role": "assistant", "content": ""})
99
+ buf = ""
100
+ # This is a sync generator for child agents; in real use, adapt to async if needed
101
+ # Fix: call the async generator and iterate with asyncio
102
+ import asyncio
103
+ async def run_stream():
104
+ async for token in conn.chat_with_agent_stream(model_name=model, messages=messages):
105
+ yield token
106
+ loop = asyncio.new_event_loop()
107
+ asyncio.set_event_loop(loop)
108
+ gen = run_stream()
109
+ try:
110
+ while True:
111
+ token = loop.run_until_complete(gen.__anext__())
112
+ buf += token
113
+ history[-1]["content"] = buf
114
+ invocation_log = build_log(conn)
115
+ yield history, "", invocation_log, active_children, None
116
+ except StopAsyncIteration:
117
+ pass
118
+ history[-1]["content"] = buf.strip()
119
+ invocation_log = build_log(conn)
120
+ yield history, "", invocation_log, active_children, None
121
+
122
+ # --- Main async chat orchestrator ---
123
+ async def simulate_agent_response_stream(agent_json, history, user_input, debug_flag, active_children):
124
+ """
125
+ Streams agent replies with sliding window, multi-agent routing,
126
+ and invocation logging under orchestrator flows.
127
+ Yields: history, cleared input, invocation log, active_children, challenger_info.
128
+ """
129
+ if not agent_json or not agent_json.strip():
130
+ history.append({"role": "assistant", "content": "⚠️ No agent configuration found. Please Generate or Load an agent first."})
131
+ yield history, "", "", active_children, None
132
+ return
133
+
134
+ try:
135
+ cfg = json.loads(agent_json)
136
+ except json.JSONDecodeError:
137
+ history.append({"role": "assistant", "content": "⚠️ Invalid agent configuration. Please regenerate or reload the agent."})
138
+ yield history, "", "", active_children, None
139
+ return
140
+
141
+ name = cfg.get("agent_name", "Agent")
142
+ mission = cfg.get("agent_mission", "")
143
+ agent_type = cfg.get("agent_type", "")
144
+
145
+ if not history:
146
+ history.append({"role": "assistant", "content": f"👋 Hello! I'm {name}. How can I assist today?"})
147
+ yield history, "", "", active_children, None
148
+
149
+ # Add user message to history and display it immediately
150
+ history.append({"role": "user", "content": user_input})
151
+ yield history, "", "", active_children, None
152
+
153
+ if agent_type == "🎼 Orchestrator":
154
+ try:
155
+ from core.agents.orchestrator import OrchestratorAgent
156
+ global orchestrators
157
+ name = cfg.get("agent_name", "orchestrator")
158
+
159
+ # Create a fresh orchestrator instance for new conversations,
160
+ # but reuse existing instance to maintain state for execution
161
+ orch = orchestrators.get(name)
162
+ if orch is None:
163
+ # Import the runtime agents_config from app.py where it's dynamically updated
164
+ import sys
165
+ if 'app' in sys.modules:
166
+ # Get the runtime agents_config that contains all deployed agents
167
+ app_module = sys.modules['app']
168
+ runtime_agents_config = getattr(app_module, 'agents_config', {})
169
+ else:
170
+ # Fallback to config if app module not available
171
+ from config import agents_config as runtime_agents_config
172
+
173
+ # Create new orchestrator instance only if none exists
174
+ orch = OrchestratorAgent(runtime_agents_config, cast(str, cfg.get("api_key", "")))
175
+ orchestrators[name] = orch
176
+
177
+ history.append({"role": "assistant", "content": ""})
178
+ orch_agent = cast(OrchestratorAgent, orch)
179
+ answer_gen = orch_agent.answer(history, user_input, debug=debug_flag)
180
+ async for msg in answer_gen:
181
+ if isinstance(msg, dict):
182
+ chunk = msg.get("content", "")
183
+ if chunk:
184
+ history[-1]["content"] += chunk
185
+ else:
186
+ history[-1]["content"] += str(msg)
187
+ invocation_log = build_log(orch) if hasattr(orch, 'invocations') and isinstance(orch, OrchestratorAgent) else ""
188
+ yield history, "", invocation_log, active_children, None
189
+ except ImportError:
190
+ history.append({"role": "assistant", "content": "Orchestrator not available."})
191
+ yield history, "", "", active_children, None
192
+ return
193
+
194
+ if active_children:
195
+ for child_json in active_children:
196
+ child_cfg = json.loads(child_json)
197
+ for output in _stream_to_agent(child_cfg, history, user_input, debug_flag, active_children):
198
+ yield output
199
+ return
200
+
201
+ skill_objs = []
202
+ if cfg.get("web_access", False):
203
+ web_tool = get_tool_by_name("search_internet", {"user_query": user_input})
204
+ if web_tool:
205
+ skill_objs.append(web_tool)
206
+ for skill_name in cfg.get("skills", []):
207
+ tool = get_tool_by_name(skill_name, {"user_query": user_input})
208
+ if tool:
209
+ skill_objs.append(tool)
210
+
211
+ # Pass allow_fallback, trusted_links, grounded_files to AgentLLMConnector
212
+ allow_fallback = cfg.get("allow_fallback", True)
213
+ trusted_links = cfg.get("trusted_links", [])
214
+ grounded_files = cfg.get("grounded_files", [])
215
+
216
+ # Get global RAG retriever if available
217
+ rag_retriever = None
218
+ try:
219
+ import sys
220
+ if 'app' in sys.modules:
221
+ app_module = sys.modules['app']
222
+ rag_retriever = getattr(app_module, 'rag_retriever', None)
223
+ except:
224
+ pass # No RAG retriever available
225
+
226
+ conn = AgentLLMConnector(
227
+ api_key=cast(str, cfg.get("api_key", "")),
228
+ skills=skill_objs,
229
+ allow_fallback=allow_fallback,
230
+ trusted_links=trusted_links,
231
+ grounded_files=grounded_files,
232
+ rag_retriever=rag_retriever
233
+ )
234
+ model = conn.agent_model_mapping.get(agent_type, "gpt-5-mini")
235
+ has_history_tool = any(t.name == "history_taking" for t in skill_objs)
236
+ has_ipc_reporting = any(t.name == "IPC_reporting" for t in skill_objs)
237
+
238
+ if agent_type == "🏥 Clinical Assistant" and has_history_tool:
239
+ system_content = (
240
+ f"You are {name}. {mission}\n\n"
241
+ "Before giving any advice, gather all necessary patient history by calling the "
242
+ "`history_taking` function. A JSON-schema has been provided with each question as a "
243
+ "parameter description. Ask questions, wait for the user's answer, and only "
244
+ "once every required field is filled will you then provide your final recommendation.\n\n"
245
+ "RESPONSE FORMAT: Keep responses concise and clinical. Avoid lengthy explanations unless specifically asked. "
246
+ "Focus on actionable recommendations and key clinical points."
247
+ )
248
+ else:
249
+ system_content = f"You are {name}." + (f" {mission}" if mission else "")
250
+
251
+ # Add general instruction to keep responses focused and concise
252
+ system_content += (
253
+ "\n\nRESPONSE GUIDELINES:\n"
254
+ "- Be concise and clinically focused\n"
255
+ "- Provide clear, actionable recommendations\n"
256
+ "- Avoid excessive reasoning or explanation unless specifically requested\n"
257
+ "- Structure responses with clear sections when appropriate\n"
258
+ "- Use bullet points or numbered lists for multiple recommendations"
259
+ )
260
+
261
+ # Add specific guidance for IPC reporting
262
+ if has_ipc_reporting:
263
+ system_content += (
264
+ "\n\nWhen users ask about reportable diseases, reporting requirements, or infection control reporting, "
265
+ "always offer to help with the specific reporting process. Use the IPC_reporting tool to provide "
266
+ "jurisdiction-specific requirements and generate formatted reports. "
267
+ "IMPORTANT: When calling IPC_reporting, include ALL conversation context in the case_summary parameter, "
268
+ "especially the specific organism/pathogen mentioned by the user (e.g., 'User asked about typhus fever reporting')."
269
+ )
270
+
271
+ system_msg = {"role": "system", "content": system_content}
272
+ from collections import deque
273
+ recent = deque(history, maxlen=MAX_HISTORY)
274
+ history_msgs = [ {"role": m["role"], "content": m["content"]} for m in recent if m["role"] in ("user", "assistant")]
275
+ messages = [system_msg] + history_msgs
276
+ history.append({"role": "assistant", "content": ""})
277
+ buf = ""
278
+ tool_invoked = False
279
+
280
+ async for token in conn.chat_with_agent_stream(model_name=model, messages=messages):
281
+ buf += token
282
+ history[-1]["content"] = buf
283
+ invocation_log = build_log(conn)
284
+ # Detect if a tool was invoked (by tool name in the reply)
285
+ for tool in skill_objs:
286
+ if tool.name in buf:
287
+ tool_invoked = True
288
+ yield history, "", invocation_log, active_children, None
289
+
290
+ # Apply clinical validation and references to the final reply
291
+ original_reply = validate_and_reference_recommendation(buf.strip())
292
+
293
+ # --- Challenger step: adversarial critique if enabled ---
294
+
295
+ challenger_enabled = cfg.get("challenger_enabled", False)
296
+ critique = None
297
+ final_reply = original_reply
298
+
299
+ # --- Only run challenger if the required fields for the tool actually invoked are present, or if FORCE_CHALLENGE is present ---
300
+ def required_fields_present_for_invoked_tool():
301
+ # Try to infer which tool was actually invoked (last tool in skill_objs with a matching name in the reply)
302
+ from core.utils.skills_registry import get_tool_by_name
303
+ invoked_tool = None
304
+ for skill_name in cfg.get("skills", []):
305
+ if skill_name in original_reply:
306
+ invoked_tool = get_tool_by_name(skill_name, {"user_query": user_input})
307
+ break
308
+ if not invoked_tool and skill_objs:
309
+ invoked_tool = skill_objs[-1] # fallback: last tool
310
+ if not invoked_tool:
311
+ return True # fallback: allow
312
+ required_fields = invoked_tool.args_schema.get("required", [])
313
+ if not required_fields:
314
+ return True
315
+ for field in required_fields:
316
+ found = False
317
+ for m in history[::-1]:
318
+ if m["role"] == "user" and (field.replace("_", " ") in m["content"].lower() or field in m["content"].lower()):
319
+ found = True
320
+ break
321
+ if not found:
322
+ return False
323
+ return True
324
+
325
+ force_challenge = "FORCE_CHALLENGE" in user_input or "FORCE_CHALLENGE" in original_reply
326
+
327
+ # Always run challenger if enabled (or forced)
328
+ if challenger_enabled or force_challenge:
329
+ try:
330
+ from core.utils.llm_connector import challenge_agent_response, refine_final_answer
331
+ user_message = user_input
332
+ agent_reply = original_reply
333
+ # Pass conversation history to challenger for better context awareness
334
+ critique = await challenge_agent_response(user_message, agent_reply, history)
335
+ # If critique is None or empty, treat as OK
336
+ if not critique or critique.strip().upper() == "OK":
337
+ critique = "OK"
338
+ final_reply = original_reply
339
+ else:
340
+ # Use a refiner LLM to produce a clean, user-facing answer
341
+ final_reply = await refine_final_answer(user_message, original_reply, critique)
342
+ except Exception as e:
343
+ critique = f"[Challenger error: {e}]"
344
+ final_reply = original_reply
345
+
346
+ history[-1]["content"] = final_reply
347
+ invocation_log = build_log(conn)
348
+ yield history, "", invocation_log, active_children, {
349
+ "original_reply": original_reply,
350
+ "challenger_critique": critique,
351
+ "final_reply": final_reply
352
+ }
core/agents/orchestrator.py ADDED
@@ -0,0 +1,670 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ orchestrator.py
3
+ ---------------
4
+
5
+ Coordinates multi-agent planning, tool invocation, if context_results:
6
+ # Replanning m system = (
7
+ "You are an orchestration assistant. A clinician's request may contain multiple distinct tasks "
8
+ "(e.g. antibiotic choice, duration, isolation, orders, literature search, quiz). "
9
+ "Here are your available agents:\n\n"
10
+ f"{json.dumps(avail, indent=2)}\n\n"
11
+ "Return *only* a JSON array of objects, each with exactly three keys:\n"
12
+ " • action: always \"invoke\"\n"
13
+ " • agent: one of the agent names above\n"
14
+ " • prompt: the exact question to send to that agent\n"
15
+ "Do not include any extra text.\n\n"
16
+ "RESPONSE GUIDELINES:\n"
17
+ "- Be concise and clinically focused\n"
18
+ "- Provide clear, actionable recommendations\n"
19
+ "- Avoid excessive reasoning or explanation unless specifically requested"
20
+ ) system = (
21
+ "You are an expert replanning agent. Based on intermediate results, create an updated "
22
+ "execution plan that adapts to new information while maintaining original objectives.\n\n"
23
+ "Focus on:\n"
24
+ "1. Incorporating lessons from completed phases\n"
25
+ "2. Adjusting remaining phases based on i summary = await call_llm(
26
+ [{"role": "user", "content": synthesis_prompt}],
27
+ model=SUMMARY_MODEL
28
+ )ediate findings\n"
29
+ "3. Optimizing parallel execution for efficiency\n"
30
+ "4. Setting new replanning conditions\n\n"
31
+ f"Available agents:\n{json.dumps(avail, indent=2)}\n\n"
32
+ f"Context from previous phases:\n{json.dumps(context_results, indent=2)}\n\n"
33
+ "Return updated plan in enhanced format (JSON):\n"
34
+ "{\n"
35
+ ' "execution_strategy": "description",\n'
36
+ ' "phases": [{"phase_id": "id", "description": "desc", "parallel_tasks": [{"task_id": "id", "agent": "name", "prompt": "task", "priority": "high", "dependencies": []}]}],\n'
37
+ ' "replanning_conditions": [{"condition": "desc", "trigger_phase": "id", "evaluation_prompt": "prompt"}]\n'
38
+ '}'
39
+ )
40
+ else:
41
+ # Initial planning mode
42
+ system = (
43
+ "You are an expert planning agent for an Infectious Disease consultation system. "
44
+ "Create sophisticated execution plans with parallel agent coordination and dynamic replanning.\n\n"
45
+ f"Available agents:\n{json.dumps(avail, indent=2)}\n\n"
46
+ "Output format (JSON):\n"
47
+ "{\n"
48
+ ' "execution_strategy": "description of approach",\n'
49
+ ' "phases": [\n'
50
+ ' {\n'
51
+ ' "phase_id": "phase_1",\n'
52
+ ' "description": "phase description",\n'
53
+ ' "parallel_tasks": [\n'
54
+ ' {\n'
55
+ ' "task_id": "task_1",\n'
56
+ ' "agent": "agent_name",\n'
57
+ ' "prompt": "specific task",\n'
58
+ ' "priority": "high|medium|low",\n'
59
+ ' "dependencies": ["task_id"]\n'
60
+ ' }\n'
61
+ ' ]\n'
62
+ ' }\n'
63
+ ' ],\n'
64
+ ' "replanning_conditions": [\n'
65
+ ' {\n'
66
+ ' "condition": "description",\n'
67
+ ' "trigger_phase": "phase_id",\n'
68
+ ' "evaluation_prompt": "LLM prompt for evaluation"\n'
69
+ ' }\n'
70
+ ' ]\n'
71
+ '}'
72
+ )se synthesis for the AI agent system.
73
+
74
+ - Defines the OrchestratorAgent class, which manages agent selection, planning, and execution.
75
+ - Handles user input decomposition, tool invocation, and error handling.
76
+ - Integrates with OpenAI and the tool registry for dynamic agent workflows.
77
+ - Provides robust logging, request tracking, and user-friendly error messages.
78
+ - Designed for future compatibility with Model Context Protocol (MCP) and Agent-to-Agent (A2A) standards.
79
+
80
+ """
81
+
82
+ # orchestrator.py
83
+
84
+ import os
85
+ import json
86
+ import re
87
+ import asyncio
88
+ from typing import Any, Dict, List, Optional
89
+ from openai import OpenAI
90
+ from core.utils.llm_connector import AgentLLMConnector, call_llm
91
+ from core.utils.skills_registry import get_tool_by_name
92
+
93
+ GREETING_RE = re.compile(r"^(hi|hello|hey|good\s(morning|afternoon|evening))\b", re.I)
94
+ PLANNER_MODEL = "gpt-5" # Use GPT-5 for planning
95
+ SUMMARY_MODEL = "gpt-5" # Use GPT-5 for summaries
96
+
97
+ import uuid
98
+ import datetime
99
+
100
+ class OrchestratorAgent:
101
+ """
102
+ The OrchestratorAgent coordinates multiple AI agent tools to fulfill complex user requests.
103
+
104
+ Attributes:
105
+ cfg (Dict[str, str]): Configuration for available agents.
106
+ client (OpenAI): OpenAI client for LLM calls.
107
+ state (dict): Tracks the current plan, subtask progress, and results.
108
+ invocations (List[Dict[str, Any]]): Audit log of tool invocations and results.
109
+ """
110
+ def __init__(self, agents_cfg: Dict[str, str], api_key: str):
111
+ """
112
+ Initialize the OrchestratorAgent.
113
+
114
+ Args:
115
+ agents_cfg (Dict[str, str]): Configuration for available agents.
116
+ api_key (str): API key for OpenAI.
117
+ """
118
+ self.cfg = agents_cfg
119
+ self.client = OpenAI(api_key=api_key)
120
+ self.state = {
121
+ "plan": [], # list of {"action","agent","prompt","priority","dependencies"}
122
+ "current_phase": 0, # which execution phase we're on
123
+ "subtask_convos": {}, # maps step index -> convo so far
124
+ "results": {}, # agent_key -> [final outputs]
125
+ "intermediate_results": {}, # step_id -> intermediate results for replanning
126
+ "execution_phases": [], # list of phases with parallel execution groups
127
+ "replanning_triggers": [] # conditions that trigger replanning
128
+ }
129
+ self.invocations: List[Dict[str,Any]] = []
130
+
131
+ async def planning_agent(self, user_input: str, context_results: Optional[Dict] = None) -> Dict:
132
+ """
133
+ Enhanced planning agent that creates parallel execution plans with replanning conditions.
134
+
135
+ Args:
136
+ user_input (str): The user's request or question.
137
+ context_results (Optional[Dict]): Context from previous phases for replanning.
138
+
139
+ Returns:
140
+ Dict: Enhanced plan with phases, parallel tasks, and replanning conditions.
141
+ """
142
+ agent_keys = list(self.cfg.keys())
143
+ avail = []
144
+ history_agent = None
145
+ for name in agent_keys:
146
+ cfg = json.loads(self.cfg[name])
147
+ avail.append({
148
+ "name": name,
149
+ "mission": cfg.get("agent_mission",""),
150
+ "skills": cfg.get("skills",[])
151
+ })
152
+ if history_agent is None and "history_taking" in cfg.get("skills", []):
153
+ history_agent = name
154
+
155
+ system = (
156
+ "You are an orchestration assistant. A clinician’s request may contain multiple distinct tasks "
157
+ "(e.g. antibiotic choice, duration, isolation, orders, literature search, quiz). "
158
+ "Here are your available agents:\n\n"
159
+ f"{json.dumps(avail, indent=2)}\n\n"
160
+ "Return *only* a JSON array of objects, each with exactly three keys:\n"
161
+ " • action: always \"invoke\"\n"
162
+ " • agent: one of the agent names above\n"
163
+ " • prompt: the exact question to send to that agent\n"
164
+ "Do not include any extra text."
165
+ )
166
+ # **critical fix**: pass the model name
167
+ plan_str = await call_llm(
168
+ [{"role":"system","content":system},
169
+ {"role":"user","content":user_input}],
170
+ model=PLANNER_MODEL
171
+ )
172
+ self.invocations.append({
173
+ "tool":"enhanced_planning_agent",
174
+ "args":{"user_input":user_input, "context_results": context_results},
175
+ "result":plan_str
176
+ })
177
+
178
+ try:
179
+ plan = json.loads(plan_str)
180
+
181
+ # Validate and convert plan format if needed
182
+ if isinstance(plan, list):
183
+ # Old format: convert list to enhanced dictionary format
184
+ enhanced_plan = {
185
+ "execution_strategy": "sequential_legacy",
186
+ "phases": [{
187
+ "phase_id": "phase_1",
188
+ "description": "Legacy sequential execution",
189
+ "parallel_tasks": []
190
+ }],
191
+ "replanning_conditions": []
192
+ }
193
+
194
+ # Convert each list item to a parallel task
195
+ for i, task in enumerate(plan):
196
+ if isinstance(task, dict) and "agent" in task:
197
+ enhanced_plan["phases"][0]["parallel_tasks"].append({
198
+ "task_id": f"task_{i+1}",
199
+ "agent": task.get("agent", "stewardship_agent"),
200
+ "prompt": task.get("prompt", user_input),
201
+ "priority": "medium",
202
+ "dependencies": []
203
+ })
204
+
205
+ return enhanced_plan
206
+
207
+ elif isinstance(plan, dict):
208
+ # Enhanced format: validate required keys
209
+ if "phases" not in plan:
210
+ plan["phases"] = [self._create_default_phase(user_input, avail, history_agent)]
211
+ if "execution_strategy" not in plan:
212
+ plan["execution_strategy"] = "enhanced_parallel"
213
+ if "replanning_conditions" not in plan:
214
+ plan["replanning_conditions"] = []
215
+
216
+ return plan
217
+
218
+ else:
219
+ # Invalid format: use fallback
220
+ return self._create_fallback_plan(user_input, avail, history_agent)
221
+
222
+ except json.JSONDecodeError:
223
+ # Fallback to simple plan
224
+ return self._create_fallback_plan(user_input, avail, history_agent)
225
+
226
+ def _create_fallback_plan(self, user_input: str, avail: List[Dict], history_agent: Optional[str] = None) -> Dict[str, Any]:
227
+ """Create a simple fallback plan when advanced planning fails."""
228
+ fallback_plan = {
229
+ "execution_strategy": "sequential",
230
+ "phases": [{
231
+ "phase_id": 1,
232
+ "description": "Fallback sequential execution",
233
+ "parallel_tasks": []
234
+ }],
235
+ "replanning_conditions": [],
236
+ "success_criteria": "Complete all agent responses"
237
+ }
238
+
239
+ # Add history agent first if available
240
+ if history_agent:
241
+ fallback_plan["phases"][0]["parallel_tasks"].append({
242
+ "task_id": "history_task",
243
+ "action": "invoke",
244
+ "agent": history_agent,
245
+ "prompt": "Gather necessary patient history before proceeding",
246
+ "priority": "high",
247
+ "estimated_duration": "medium",
248
+ "dependencies": [],
249
+ "replanning_triggers": []
250
+ })
251
+
252
+ # Add a general agent task
253
+ if avail:
254
+ default_agent = avail[0]["name"]
255
+ fallback_plan["phases"][0]["parallel_tasks"].append({
256
+ "task_id": "main_task",
257
+ "action": "invoke",
258
+ "agent": default_agent,
259
+ "prompt": user_input,
260
+ "priority": "high",
261
+ "estimated_duration": "medium",
262
+ "dependencies": ["history_task"] if history_agent else [],
263
+ "replanning_triggers": []
264
+ })
265
+
266
+ return fallback_plan
267
+
268
+ def _validate_and_organize_plan(self, plan: Dict[str, Any], agent_keys: List[str], history_agent: Optional[str] = None) -> Dict[str, Any]:
269
+ """Validate and organize the enhanced plan."""
270
+ validated_plan = {
271
+ "execution_strategy": plan.get("execution_strategy", "sequential"),
272
+ "phases": [],
273
+ "replanning_conditions": plan.get("replanning_conditions", []),
274
+ "success_criteria": plan.get("success_criteria", "Complete all tasks successfully")
275
+ }
276
+
277
+ # Validate each phase
278
+ for phase in plan.get("phases", []):
279
+ validated_phase = {
280
+ "phase_id": phase.get("phase_id", 1),
281
+ "description": phase.get("description", "Execution phase"),
282
+ "parallel_tasks": []
283
+ }
284
+
285
+ # Validate each task in the phase
286
+ for task in phase.get("parallel_tasks", []):
287
+ if (task.get("agent") in agent_keys and
288
+ task.get("action") == "invoke" and
289
+ isinstance(task.get("prompt"), str)):
290
+
291
+ validated_task = {
292
+ "task_id": task.get("task_id", f"task_{len(validated_phase['parallel_tasks'])}"),
293
+ "action": "invoke",
294
+ "agent": task["agent"],
295
+ "prompt": task["prompt"],
296
+ "priority": task.get("priority", "medium"),
297
+ "estimated_duration": task.get("estimated_duration", "medium"),
298
+ "dependencies": task.get("dependencies", []),
299
+ "replanning_triggers": task.get("replanning_triggers", [])
300
+ }
301
+ validated_phase["parallel_tasks"].append(validated_task)
302
+
303
+ if validated_phase["parallel_tasks"]:
304
+ validated_plan["phases"].append(validated_phase)
305
+
306
+ # Ensure we have at least one phase
307
+ if not validated_plan["phases"]:
308
+ validated_plan = self._create_fallback_plan("", [{"name": list(agent_keys)[0] if agent_keys else "default"}], history_agent)
309
+
310
+ return validated_plan
311
+
312
+ async def _execute_parallel_tasks(self, tasks: List[Dict[str, Any]], history: List[Dict]) -> Dict[str, Any]:
313
+ """Execute a group of tasks in parallel."""
314
+ import asyncio
315
+ from core.utils.llm_connector import AgentLLMConnector
316
+ from core.utils.skills_registry import get_tool_by_name
317
+
318
+ # Create coroutines for each task
319
+ task_coroutines = []
320
+ task_ids = []
321
+
322
+ for task in tasks:
323
+ # Check dependencies
324
+ dependencies_met = True
325
+ for dep in task.get("dependencies", []):
326
+ if dep not in self.state["intermediate_results"]:
327
+ dependencies_met = False
328
+ break
329
+
330
+ if not dependencies_met:
331
+ continue # Skip this task for now
332
+
333
+ task_ids.append(task["task_id"])
334
+ task_coroutines.append(self._execute_single_task(task, history))
335
+
336
+ # Execute tasks in parallel
337
+ if task_coroutines:
338
+ results = await asyncio.gather(*task_coroutines, return_exceptions=True)
339
+
340
+ # Process results
341
+ phase_results = {}
342
+ for i, result in enumerate(results):
343
+ task_id = task_ids[i]
344
+ if isinstance(result, Exception):
345
+ phase_results[task_id] = {"error": str(result), "success": False}
346
+ else:
347
+ phase_results[task_id] = {"result": result, "success": True}
348
+ # Store for potential replanning
349
+ self.state["intermediate_results"][task_id] = result
350
+
351
+ return phase_results
352
+
353
+ return {}
354
+
355
+ async def _execute_single_task(self, task: Dict[str, Any], history: List[Dict]) -> str:
356
+ """Execute a single agent task."""
357
+ from core.utils.llm_connector import AgentLLMConnector
358
+ from core.utils.skills_registry import get_tool_by_name
359
+
360
+ agent_key = task["agent"]
361
+ prompt = task["prompt"]
362
+
363
+ # Get agent configuration
364
+ cfg = json.loads(self.cfg[agent_key])
365
+ skills = [get_tool_by_name(s, {"user_query": prompt}) for s in cfg.get("skills", [])]
366
+ skills = [t for t in skills if t]
367
+
368
+ # Create agent connector
369
+ child = AgentLLMConnector(api_key=self.client.api_key, skills=skills)
370
+
371
+ # Prepare messages
372
+ sys_msg = {"role": "system", "content": f"You are {cfg['agent_name']}, a {cfg['agent_type']} agent."}
373
+ user_msg = {"role": "user", "content": prompt}
374
+
375
+ # Execute and collect response
376
+ response_buffer = ""
377
+ async for token in child.chat_with_agent_stream(
378
+ model_name=child.agent_model_mapping[cfg["agent_type"]],
379
+ messages=[sys_msg, user_msg]
380
+ ):
381
+ response_buffer += token
382
+
383
+ return response_buffer.strip()
384
+
385
+ async def _execute_single_task_with_timeout(self, task: Dict[str, Any], history: List[Dict], timeout: int = 45) -> Dict[str, Any]:
386
+ """Execute a single task with timeout and simplified error handling."""
387
+ import asyncio
388
+
389
+ try:
390
+ # Execute with timeout
391
+ result = await asyncio.wait_for(
392
+ self._execute_single_task_fast(task, history),
393
+ timeout=timeout
394
+ )
395
+ return {"success": True, "result": result}
396
+ except asyncio.TimeoutError:
397
+ return {"success": False, "error": f"Task timed out after {timeout} seconds"}
398
+ except Exception as e:
399
+ return {"success": False, "error": str(e)}
400
+
401
+ async def _execute_single_task_fast(self, task: Dict[str, Any], history: List[Dict]) -> str:
402
+ """Fast execution with minimal tool usage."""
403
+ from core.utils.llm_connector import AgentLLMConnector
404
+
405
+ agent_key = task["agent"]
406
+ prompt = task["prompt"]
407
+
408
+ # Get agent configuration
409
+ cfg = json.loads(self.cfg[agent_key])
410
+
411
+ # Use simplified agent with minimal tools for speed
412
+ child = AgentLLMConnector(api_key=self.client.api_key, skills=[])
413
+
414
+ # Add concise response guidelines
415
+ system_content = f"You are {cfg['agent_name']}, a {cfg['agent_type']} agent. {cfg.get('agent_mission', '')}"
416
+ system_content += (
417
+ "\n\nRESPONSE GUIDELINES:\n"
418
+ "- Be concise and clinically focused\n"
419
+ "- Provide clear, actionable recommendations\n"
420
+ "- Avoid excessive reasoning or explanation\n"
421
+ "- Structure responses with bullet points when appropriate\n"
422
+ "- Maximum 300 words"
423
+ )
424
+
425
+ sys_msg = {"role": "system", "content": system_content}
426
+ user_msg = {"role": "user", "content": prompt}
427
+
428
+ # Execute and collect response
429
+ response_buffer = ""
430
+ async for token in child.chat_with_agent_stream(
431
+ model_name=child.agent_model_mapping.get(cfg["agent_type"], "gpt-4o-mini"),
432
+ messages=[sys_msg, user_msg]
433
+ ):
434
+ response_buffer += token
435
+
436
+ return response_buffer.strip()[:2000] # Limit response length
437
+
438
+ async def _generate_updated_synthesis(self, new_context: str, results: Dict) -> str:
439
+ """Generate updated synthesis incorporating new user context."""
440
+ from core.utils.llm_connector import call_llm
441
+
442
+ synthesis_prompt = (
443
+ "You are an expert clinical synthesizer. The user has provided additional context to an existing clinical recommendation. "
444
+ "Create a focused update that incorporates this new information.\n\n"
445
+ f"New context from user: {new_context}\n\n"
446
+ f"Previous recommendations: {json.dumps(results, indent=2)}\n\n"
447
+ "RESPONSE GUIDELINES:\n"
448
+ "- Focus specifically on how the new context changes recommendations\n"
449
+ "- Be concise and clinically focused\n"
450
+ "- Provide clear, actionable updates\n"
451
+ "- Use bullet points for specific changes\n"
452
+ "- Maximum 400 words\n\n"
453
+ "Provide only the updated/modified recommendations based on the new context:"
454
+ )
455
+
456
+ try:
457
+ updated_response = await call_llm(
458
+ [{"role": "user", "content": synthesis_prompt}],
459
+ model="gpt-4o-mini"
460
+ )
461
+ return updated_response
462
+ except Exception as e:
463
+ return f"Unable to generate updated synthesis: {str(e)}"
464
+
465
+ async def _check_replanning_conditions(self, phase_results: Dict[str, Any], current_phase: Dict[str, Any]) -> bool:
466
+ """Check if replanning is needed based on intermediate results."""
467
+ # Check global replanning conditions
468
+ for condition in self.state.get("replanning_conditions", []):
469
+ if await self._evaluate_condition(condition, phase_results):
470
+ return True
471
+
472
+ # Check task-specific replanning triggers
473
+ for task in current_phase.get("parallel_tasks", []):
474
+ task_id = task["task_id"]
475
+ if task_id in phase_results:
476
+ result = phase_results[task_id]
477
+ for trigger in task.get("replanning_triggers", []):
478
+ if await self._evaluate_trigger(trigger, result):
479
+ return True
480
+
481
+ return False
482
+
483
+ async def _evaluate_condition(self, condition: str, results: Dict[str, Any]) -> bool:
484
+ """Evaluate a replanning condition using LLM."""
485
+ evaluation_prompt = (
486
+ f"Evaluate if this condition is met based on the results:\n"
487
+ f"Condition: {condition}\n"
488
+ f"Results: {json.dumps(results, indent=2)}\n"
489
+ f"Return only 'true' or 'false'."
490
+ )
491
+
492
+ response = await call_llm(
493
+ [{"role": "user", "content": evaluation_prompt}],
494
+ model="gpt-5-mini"
495
+ )
496
+
497
+ return response.strip().lower() == "true"
498
+
499
+ async def _evaluate_trigger(self, trigger: str, result: Dict[str, Any]) -> bool:
500
+ """Evaluate a specific replanning trigger."""
501
+ evaluation_prompt = (
502
+ f"Evaluate if this trigger condition is met:\n"
503
+ f"Trigger: {trigger}\n"
504
+ f"Task Result: {json.dumps(result, indent=2)}\n"
505
+ f"Return only 'true' or 'false'."
506
+ )
507
+
508
+ response = await call_llm(
509
+ [{"role": "user", "content": evaluation_prompt}],
510
+ model="gpt-5-mini"
511
+ )
512
+
513
+ return response.strip().lower() == "true"
514
+
515
+ def _create_default_phase(self, user_input, avail, history_agent):
516
+ """Create a default phase when validation fails."""
517
+ return {
518
+ "phase_id": "default_phase",
519
+ "description": "Default sequential execution",
520
+ "parallel_tasks": [{
521
+ "task_id": "default_task",
522
+ "agent": "stewardship_agent",
523
+ "prompt": user_input,
524
+ "priority": "medium",
525
+ "dependencies": []
526
+ }]
527
+ }
528
+
529
+ import uuid
530
+ import datetime
531
+ async def answer(self, history, user_input, debug=False):
532
+ """
533
+ Enhanced main entry point with parallel execution and dynamic replanning.
534
+
535
+ Args:
536
+ history (list): Conversation history for context.
537
+ user_input (str): The user's request or question.
538
+ debug (bool): If True, enables debug output.
539
+
540
+ Yields:
541
+ dict: Assistant responses, including streaming output and error messages.
542
+ """
543
+ from tools.utils import ToolExecutionError, logger
544
+ request_id = str(uuid.uuid4())
545
+ request_time = datetime.datetime.utcnow().isoformat()
546
+
547
+ try:
548
+ if GREETING_RE.match(user_input.strip()):
549
+ logger.info(f"[request_id={request_id}] Greeting detected. user_input={user_input!r}")
550
+ yield {"role":"assistant","content":"👋 Hello! How can I help?"}
551
+ return
552
+
553
+ # Enhanced planning with parallel execution support
554
+ if not self.state.get("execution_phases"):
555
+ logger.info(f"[request_id={request_id}] Starting enhanced planning...")
556
+ enhanced_plan = await self.planning_agent(user_input)
557
+ self.state["execution_phases"] = enhanced_plan["phases"]
558
+ self.state["replanning_conditions"] = enhanced_plan["replanning_conditions"]
559
+
560
+ logger.info(f"[request_id={request_id}] Enhanced plan created with {len(enhanced_plan['phases'])} phases")
561
+ yield {"role":"assistant", "content":f"🎯 **Enhanced Execution Plan ({enhanced_plan['execution_strategy']}):**\n```json\n{json.dumps(enhanced_plan, indent=2)}\n```\n\n**Ready to execute!** Say 'proceed' to start execution or provide additional context."}
562
+ return
563
+
564
+ # Check if user wants to proceed with execution
565
+ proceed_keywords = ["proceed", "execute", "start", "go", "continue", "run"]
566
+ if any(keyword in user_input.lower() for keyword in proceed_keywords):
567
+ # Fast execution mode - execute tasks sequentially with timeout
568
+ yield {"role":"assistant", "content":"🚀 **Executing plan...**"}
569
+
570
+ all_results = []
571
+ total_tasks = sum(len(phase["parallel_tasks"]) for phase in self.state["execution_phases"])
572
+ completed_tasks = 0
573
+
574
+ for phase in self.state["execution_phases"]:
575
+ for task in phase["parallel_tasks"]:
576
+ completed_tasks += 1
577
+ yield {"role":"assistant", "content":f"⚡ **Task {completed_tasks}/{total_tasks}:** {task['agent']} - {task['prompt'][:100]}..."}
578
+
579
+ # Execute task with timeout
580
+ try:
581
+ result = await self._execute_single_task_with_timeout(task, history, timeout=45)
582
+ if result.get("success"):
583
+ all_results.append({
584
+ "agent": task["agent"],
585
+ "result": result["result"]
586
+ })
587
+ yield {"role":"assistant", "content":f"✅ **{task['agent']} completed**"}
588
+ else:
589
+ yield {"role":"assistant", "content":f"⚠️ **{task['agent']} failed** - {result.get('error', 'Unknown error')}"}
590
+ except Exception as e:
591
+ yield {"role":"assistant", "content":f"⚠️ **{task['agent']} timeout/error** - {str(e)[:100]}"}
592
+
593
+ # Mark all phases as complete
594
+ self.state["current_phase"] = len(self.state["execution_phases"])
595
+ self.state["results"] = {result["agent"]: [result["result"]] for result in all_results}
596
+ else:
597
+ # User provided additional context - update the plan to incorporate it
598
+ yield {"role":"assistant", "content":"📝 **Processing additional context...**"}
599
+
600
+ # Check if execution is complete and user wants updated synthesis
601
+ if self.state["current_phase"] >= len(self.state["execution_phases"]) and self.state.get("results"):
602
+ # Generate updated synthesis with new context
603
+ updated_synthesis = await self._generate_updated_synthesis(user_input, self.state["results"])
604
+ yield {"role":"assistant", "content":f"📋 **Updated recommendation incorporating your context:**\n\n{updated_synthesis}"}
605
+ return
606
+
607
+ # If execution not started, update the plan with new context
608
+ if not self.state.get("results"):
609
+ # Add context to existing plan
610
+ self.state["additional_context"] = self.state.get("additional_context", [])
611
+ self.state["additional_context"].append(user_input)
612
+
613
+ # Update task prompts to include new context
614
+ context_update = f"Additional context: {user_input}"
615
+ for phase in self.state["execution_phases"]:
616
+ for task in phase["parallel_tasks"]:
617
+ if not task["prompt"].endswith(context_update):
618
+ task["prompt"] += f"\n\n{context_update}"
619
+
620
+ yield {"role":"assistant", "content":"✅ **Context incorporated into execution plan.** Say 'proceed' to start execution with updated context."}
621
+ else:
622
+ yield {"role":"assistant", "content":"📝 **Additional context noted.** Say 'proceed' to continue or provide more details."}
623
+ return
624
+
625
+ # Enhanced synthesis with intermediate results
626
+ all_results = {
627
+ "agent_results": self.state["results"],
628
+ "intermediate_results": self.state["intermediate_results"],
629
+ "execution_summary": {
630
+ "total_phases": len(self.state["execution_phases"]),
631
+ "parallel_execution": True,
632
+ "replanning_occurred": len(self.state.get("replanning_conditions", [])) > 0
633
+ }
634
+ }
635
+
636
+ synthesis_prompt = (
637
+ "You are an expert clinical synthesizer. Create a comprehensive, actionable response by combining:\n"
638
+ "1. All agent outputs into coherent clinical recommendations\n"
639
+ "2. Highlight any conflicting advice and provide resolution\n"
640
+ "3. Prioritize recommendations by clinical urgency\n"
641
+ "4. Include specific next steps for the clinician\n\n"
642
+ "RESPONSE GUIDELINES:\n"
643
+ "- Be concise and clinically focused\n"
644
+ "- Provide clear, actionable recommendations\n"
645
+ "- Avoid excessive reasoning or explanation unless specifically requested\n"
646
+ "- Structure responses with clear sections when appropriate\n"
647
+ "- Use bullet points or numbered lists for multiple recommendations\n\n"
648
+ f"Results to synthesize:\n{json.dumps(all_results, indent=2)}"
649
+ )
650
+
651
+ summary = await call_llm(
652
+ [{"role":"system","content": synthesis_prompt}],
653
+ model=SUMMARY_MODEL
654
+ )
655
+
656
+ logger.info(f"[request_id={request_id}] Enhanced synthesis complete. Summary length: {len(summary)}")
657
+ yield {"role":"assistant","content":f"🎯 **Comprehensive Clinical Recommendation:**\n\n{summary}"}
658
+
659
+ except ToolExecutionError as te:
660
+ logger.error(f"[request_id={request_id}] ToolExecutionError [{te.code}]: {te.message} | user_input={user_input!r}")
661
+ yield {
662
+ "role": "assistant",
663
+ "content": f"❗ Error ({te.code}) [Request ID: {request_id} | {request_time}]: {te.user_message}"
664
+ }
665
+ except Exception as e:
666
+ logger.exception(f"[request_id={request_id}] Unexpected error in enhanced orchestrator | user_input={user_input!r}")
667
+ yield {
668
+ "role": "assistant",
669
+ "content": f"❗ An unexpected error occurred in enhanced orchestrator. [Request ID: {request_id} | {request_time}] Please try again or contact support."
670
+ }
core/config/settings.py ADDED
File without changes
core/ui/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # UI components for the ID Agents application
core/ui/ui.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import json
3
+ from core.agents.agent_utils import build_agent, preload_demo_chat, prepare_download, load_prefilled
4
+ from core.agents.chat_orchestrator import simulate_agent_response_stream
5
+ import logging
6
+ from config import agents_config, skills_library, prefilled_agents
7
+
8
+ def show_landing():
9
+ return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False)
10
+
11
+ def show_builder():
12
+ return gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)
13
+
14
+ def show_chat():
15
+ return gr.update(visible=False), gr.update(visible=False), gr.update(visible=True)
16
+
17
+ def build_active_agents_markdown_and_dropdown():
18
+ if not agents_config:
19
+ return "### 🧠 Active Agents\n_(None yet)_", []
20
+ output = "### 🧠 Active Agents\n"
21
+ dropdown_choices = []
22
+ for name, agent_json in agents_config.items():
23
+ agent_type = json.loads(agent_json)["agent_type"]
24
+ output += f"- {name} ({agent_type})\n"
25
+ dropdown_choices.append(name)
26
+ return output, dropdown_choices
27
+
28
+ def refresh_active_agents_widgets():
29
+ md, dd = build_active_agents_markdown_and_dropdown()
30
+ return gr.update(value=md), gr.update(choices=dd, value=None)
31
+
32
+ # All other UI callback functions from app.py
33
+ def build_ui():
34
+ def _convert_history_for_gradio(history):
35
+ # Convert OpenAI-style history to Gradio Chatbot format: [(user, assistant), ...]
36
+ result = []
37
+ last_user = None
38
+ for msg in history:
39
+ if msg["role"] == "user":
40
+ last_user = msg["content"]
41
+ elif msg["role"] == "assistant":
42
+ if last_user is not None:
43
+ result.append((last_user, msg["content"]))
44
+ last_user = None
45
+ else:
46
+ # orphan assistant message (e.g. greeting)
47
+ result.append((None, msg["content"]))
48
+ if last_user is not None:
49
+ result.append((last_user, None))
50
+ return result
51
+
52
+ # --- App Layout ---
53
+ with gr.Blocks() as app:
54
+ chat_histories = gr.State({})
55
+ with gr.Row():
56
+ with gr.Column(scale=3):
57
+ # Use unique elem_id for each chatbox
58
+ builder_chatbox = gr.Chatbot(elem_id="builder-chatbox")
59
+ # ...other chat UI elements (inputs, buttons, etc.)...
60
+ with gr.Column(scale=3):
61
+ # If you have a deployed chatbox elsewhere, ensure it also uses a unique elem_id, e.g., "deployed-chatbox"
62
+ deployed_chatbox = gr.Chatbot(elem_id="deployed-chatbox")
63
+ with gr.Column(scale=1):
64
+ gr.HTML(
65
+ """
66
+ <div id=\"sidebar\" style=\"width:100%;height:600px;border:1px solid #ccc;\">
67
+ <iframe id=\"sidebar-iframe\" src=\"\" style=\"width:100%;height:100%;border:none;\"></iframe>
68
+ </div>
69
+ """,
70
+ elem_id="sidebar-html"
71
+ )
72
+
73
+ # Add a shared class to all chatboxes by elem_id, then intercept links for all with that class
74
+ gr.HTML(
75
+ """
76
+ <script>
77
+ // Add shared class to all chatboxes by elem_id
78
+ function addChatboxSharedClass() {
79
+ ["builder-chatbox", "deployed-chatbox"].forEach(function(id) {
80
+ var el = document.getElementById(id);
81
+ if (el && !el.classList.contains("chatbox-shared")) {
82
+ el.classList.add("chatbox-shared");
83
+ }
84
+ });
85
+ }
86
+ // Run on load and after each Gradio update
87
+ document.addEventListener("DOMContentLoaded", addChatboxSharedClass);
88
+ document.addEventListener("gradio:updated", addChatboxSharedClass);
89
+
90
+ // Intercept chat links for all chatboxes with shared class
91
+ document.addEventListener("click", function(e) {
92
+ let target = e.target;
93
+ if (target.tagName === "A" && target.closest(".chatbox-shared")) {
94
+ e.preventDefault();
95
+ let iframe = document.getElementById("sidebar-iframe");
96
+ if (iframe) {
97
+ iframe.src = target.href;
98
+ }
99
+ }
100
+ }, true);
101
+ </script>
102
+ """,
103
+ elem_id="sidebar-js"
104
+ )
105
+ # ...rest of your UI and callback wiring...
106
+ return app
107
+ """
108
+ ui.py
109
+ ------
110
+ Gradio UI layout, callbacks, and event wiring for the modular AI agent system.
111
+ """
core/utils/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # Utility modules for the ID Agents application
core/utils/llm_connector.py ADDED
@@ -0,0 +1,467 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ async def challenge_agent_response(user_message: str, agent_reply: str, conversation_history: list = None, challenge_prompt: str = None, model: str = None) -> str:
2
+ """
3
+ Uses an LLM to adversarially critique the agent's draft reply before it is shown to the user.
4
+ Returns either the original reply, a corrected reply, or a warning if unsafe/incomplete.
5
+
6
+ Args:
7
+ user_message: The current user input
8
+ agent_reply: The agent's draft response
9
+ conversation_history: List of previous messages for context (optional)
10
+ challenge_prompt: Optional custom prompt template
11
+ model: LLM model to use
12
+ """
13
+
14
+ # Skip challenger if the agent is properly asking for required information for tool calling
15
+ # These are legitimate information-gathering responses that should not be critiqued
16
+ info_gathering_patterns = [
17
+ "I need some additional information",
18
+ "I need some specific information",
19
+ "To recommend.*I need",
20
+ "To provide.*I need",
21
+ "Please provide.*required information",
22
+ "required information.*for.*therapy",
23
+ "following required information",
24
+ "need.*following.*information",
25
+ "Could you please provide",
26
+ "Patient age.*allergies.*laboratory.*results", # Specific empiric therapy pattern
27
+ "age.*known drug allergies.*laboratory results", # Alternative pattern
28
+ "provide.*information.*and.*assist.*determining", # Tool workflow pattern
29
+ "Patient age or age bracket.*allergies.*laboratory results.*Culture.*sensitivity", # Exact empiric therapy pattern
30
+ "Please provide this information.*assist.*determining.*treatment" # Tool conclusion pattern
31
+ ]
32
+
33
+ # Additional patterns for ongoing tool workflows that should not be critiqued
34
+ ongoing_workflow_patterns = [
35
+ "🔔 Tool.*invoked", # Tool invocation indicator
36
+ "still need.*following.*information", # Continuing info gathering
37
+ "appears.*issue.*reporting.*process", # IPC reporting continuation
38
+ "ensure.*report.*completed.*correctly", # Report completion workflow
39
+ "provide.*following.*details.*clear.*format", # Structured data collection
40
+ "formatted.*correctly.*proceed.*report", # Report finalization
41
+ "Sorry.*ran into.*issue.*tool", # Tool error messages that should not be critiqued
42
+ "Could you please clarify or try again", # Tool error recovery
43
+ "No current reporting requirements found", # IPC search failures
44
+ "Unable to find current reporting requirements" # IPC search failures
45
+ ]
46
+
47
+ # Check conversation history for ongoing tool workflows
48
+ is_ongoing_workflow = False
49
+ if conversation_history:
50
+ recent_messages = conversation_history[-10:] # Check last 10 messages
51
+ for msg in recent_messages:
52
+ if msg.get("role") == "assistant" and msg.get("content"):
53
+ content = msg["content"]
54
+ # Check if recent conversation involved tool usage
55
+ if any(pattern in content for pattern in ["🔔 Tool", "invoked", "reporting", "provide the following"]):
56
+ is_ongoing_workflow = True
57
+ break
58
+
59
+ # Skip challenger for ongoing workflows unless there's a clear safety issue
60
+ if is_ongoing_workflow:
61
+ # Only challenge if there are obvious safety concerns
62
+ safety_keywords = ["dosage", "contraindicated", "allergy", "toxic", "dangerous", "fatal"]
63
+ has_safety_concern = any(keyword in agent_reply.lower() for keyword in safety_keywords)
64
+ if not has_safety_concern:
65
+ with open("challenger_debug.log", "a", encoding="utf-8") as f:
66
+ f.write("\n--- Challenger SKIPPED (Ongoing Tool Workflow) ---\n")
67
+ f.write(f"Agent reply: {agent_reply[:200]}...\n")
68
+ return "OK"
69
+
70
+ import re
71
+ for pattern in info_gathering_patterns + ongoing_workflow_patterns:
72
+ if re.search(pattern, agent_reply, re.IGNORECASE):
73
+ # This is a legitimate info-gathering response, don't critique it
74
+ # DEBUG: Log when challenger is skipped
75
+ with open("challenger_debug.log", "a", encoding="utf-8") as f:
76
+ f.write("\n--- Challenger SKIPPED (Info Gathering Response) ---\n")
77
+ f.write(f"Pattern matched: {pattern}\n")
78
+ f.write(f"Agent reply: {agent_reply[:200]}...\n")
79
+ return "OK"
80
+
81
+ if not challenge_prompt:
82
+ # Build context from conversation history if available
83
+ context_str = ""
84
+ if conversation_history:
85
+ recent_context = conversation_history[-6:] # Last 6 messages for context
86
+ context_lines = []
87
+ for msg in recent_context:
88
+ role = msg.get("role", "")
89
+ content = msg.get("content", "")[:200] # Limit length
90
+ if role and content:
91
+ context_lines.append(f"{role}: {content}")
92
+ if context_lines:
93
+ context_str = f"\n\nConversation context:\n" + "\n".join(context_lines) + "\n"
94
+
95
+ challenge_prompt = (
96
+ "You are a clinical safety and accuracy reviewer for an infectious diseases AI assistant. "
97
+ "Given the user's question and the agent's draft reply, identify any clinical errors, unsafe advice, missing references, or unsupported statements. "
98
+ "Consider the conversation context to understand if this is part of an ongoing workflow (like data collection for reporting or treatment planning). "
99
+ "If the reply is safe and accurate, or if it's part of a legitimate ongoing process, respond with 'OK'. "
100
+ "If you find a problem, respond with a short warning or correction, and suggest a safer or more accurate reply if possible. "
101
+ f"{context_str}"
102
+ "\n\nUser question: {user_message}\n\nAgent reply: {agent_reply}"
103
+ )
104
+ prompt = challenge_prompt.format(user_message=user_message, agent_reply=agent_reply)
105
+ # --- FORCE a challenger suggestion for testing ---
106
+ # If the agent reply contains a special marker, always suggest a new answer
107
+ if "FORCE_CHALLENGE" in agent_reply or "FORCE_CHALLENGE" in user_message:
108
+ # Instead of a canned string, return a realistic sample answer for testing
109
+ return (
110
+ "Micafungin is an effective antifungal for Candida species, including C. albicans, but it achieves low concentrations in urine. "
111
+ "For urinary tract infections caused by C. albicans, fluconazole is generally preferred due to its high urinary excretion. "
112
+ "If the isolate is susceptible, consider switching to fluconazole. If fluconazole cannot be used (e.g., resistance or intolerance), amphotericin B deoxycholate is an alternative. "
113
+ "Always tailor antifungal therapy to the patient's clinical status and susceptibility results."
114
+ )
115
+ critique = await call_llm(prompt, model=model)
116
+ # DEBUG: Log the prompt and critique for challenger step
117
+ with open("challenger_debug.log", "a", encoding="utf-8") as f:
118
+ f.write("\n--- Challenger Call ---\n")
119
+ f.write(f"Prompt:\n{prompt}\n")
120
+ f.write(f"Critique:\n{critique}\n")
121
+ return critique
122
+
123
async def refine_final_answer(user_message: str, original_reply: str, critique: str, model: str = None) -> str:
    """Produce the final user-facing answer after the challenger review step.

    Combines the user's question, the agent's original reply, and the
    challenger critique (which may contain warnings and a suggested revision)
    into a single refinement prompt. If the critique includes a suggested
    revision, the model is instructed to use it. The returned text contains
    no warnings, critique, or meta-commentary — only the best final answer.
    """
    instructions = (
        "You are an expert clinical assistant. Given the user's question, the agent's original answer, and the critique (which may include warnings and a suggested revision), "
        "draft a clean, precise, and accurate reply for the user. If a suggested revision is present, use it. "
        "Do not include any warnings, critique, or meta-commentary—just the best final answer.\n\n"
    )
    details = (
        f"User question: {user_message}\n\n"
        f"Agent original answer: {original_reply}\n\n"
        f"Critique: {critique}\n\n"
        "Final user-facing answer:"
    )
    return await call_llm(instructions + details, model=model)
136
+ """
137
+ llm_connector.py
138
+ ---------------
139
+
140
+ Abstraction layer for LLM (OpenAI) API calls, with retry logic, streaming, and agent skill integration.
141
+
142
+ - Provides AgentLLMConnector for managing LLM chat, streaming, and function/tool calls.
143
+ - Implements robust retry/backoff for API reliability.
144
+ - Supports skill registration and OpenAI function-calling interface.
145
+ - Used by orchestrator and app for all LLM interactions.
146
+ - Designed for future integration with Model Context Protocol (MCP) and Agent-to-Agent (A2A) standards.
147
+
148
+ """
149
+
150
+ import os
151
+ import json
152
+ import time
153
+ import random
154
+ import asyncio
155
+
156
+
157
# --- OpenAI Client Setup ---
import openai
from openai import RateLimitError, APIError, APIConnectionError, OpenAI
# Fail fast at import time: the shared `client` below is created once and
# every call in this module depends on it.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
if not OPENAI_API_KEY:
    raise RuntimeError("OPENAI_API_KEY environment variable is not set. Please set it before running the application.")
client = OpenAI(api_key=OPENAI_API_KEY)
164
+
165
+
166
+
167
def call_with_backoff(fn, *args, max_retries=5, base_delay=1.0, **kwargs):
    """Retry helper for OpenAI API calls.

    Invokes ``fn(*args, **kwargs)``, retrying up to ``max_retries`` times on
    rate-limit / API / connection errors with exponential backoff plus
    jitter. After the retries are exhausted, one final attempt is made and
    any failure is surfaced to the caller as a ``ConnectionError`` with a
    user-friendly message.
    """
    delay = base_delay
    attempts_left = max_retries
    while attempts_left:
        try:
            return fn(*args, **kwargs)
        except (RateLimitError, APIError, APIConnectionError) as e:
            # Log the retry attempt, then back off (with jitter) and double
            # the delay for the next round.
            print(f"API call failed, retrying in {delay:.1f}s: {type(e).__name__}")
            time.sleep(delay + random.random())
            delay *= 2
            attempts_left -= 1

    # Final attempt: translate failures into a single exception type that
    # callers (e.g. the streaming handler) can catch and present nicely.
    try:
        return fn(*args, **kwargs)
    except APIConnectionError:
        # Network connection error
        raise ConnectionError("Unable to connect to OpenAI API. Please check your internet connection.")
    except RateLimitError:
        # Rate limit exceeded
        raise ConnectionError("OpenAI API rate limit exceeded. Please wait a moment and try again.")
    except APIError as e:
        # Other API errors (invalid key, etc.)
        raise ConnectionError(f"OpenAI API error: {str(e)}")
    except Exception as e:
        # Catch any other unexpected errors
        raise ConnectionError(f"Unexpected API error: {str(e)}")
196
+
197
class AgentLLMConnector:
    """Per-agent wrapper around the OpenAI chat API.

    Holds the agent's registered skills (tools), grounding configuration,
    and an optional RAG retriever, and exposes streaming / non-streaming
    chat entry points that transparently execute OpenAI tool calls.
    """

    def __init__(self, api_key: str, skills: list = None, allow_fallback: bool = True, trusted_links=None, grounded_files=None, rag_retriever=None):
        """
        api_key: your OpenAI API key
        skills: list of Tool instances (each with .name and .openai_spec())
        allow_fallback: if False, do not allow fallback to LLM general knowledge
        trusted_links: list of trusted source URLs (if any)
        grounded_files: list of uploaded RAG files (if any)
        rag_retriever: SimpleRAGRetriever instance for knowledge retrieval
        """
        # API key is set globally via client above; this arg is kept for compatibility
        self.skills = skills or []
        # Audit log of tool calls made while streaming: dicts with keys
        # "tool", "args", "result" (appended in chat_with_agent_stream).
        self.invocations = []

        self.allow_fallback = allow_fallback
        self.trusted_links = trusted_links or []
        self.grounded_files = grounded_files or []
        self.rag_retriever = rag_retriever

        # map agent_type (from your builder) → model name
        self.agent_model_mapping = {
            "🛡️ Antimicrobial Stewardship": "gpt-5-mini",
            "🦠 Infection Prevention and Control": "gpt-5-mini",
            "🔬 Research Assistant": "gpt-5-mini",
            "🏥 Clinical Assistant": "gpt-5-mini",
            "📚 Education Assistant": "gpt-5-mini",
            "🎼 Orchestrator": "gpt-5"
        }

    def _chat_create(self, **kwargs):
        """
        Internal wrapper around client.chat.completions.create with backoff.
        """
        return call_with_backoff(client.chat.completions.create, **kwargs)

    async def chat_with_agent_stream(self,
                                     model_name: str,
                                     messages: list[dict]):
        """
        Async generator: streams the assistant's response, handling function calls.

        Yields the reply character by character. Tool calls requested by the
        model are executed inline (and logged to ``self.invocations``) before
        the model is asked again; errors are yielded as user-facing "⚠️"
        messages rather than raised.
        """
        try:
            # 1) Gather function specs from skills
            tools = []
            if self.skills:
                for tool in self.skills:
                    spec = tool.openai_spec(legacy=False)
                    tools.append({
                        "type": "function",
                        "function": spec
                    })

            # 2) RAG retrieval if available
            if self.rag_retriever and messages:
                # Get the latest user message for RAG query
                user_messages = [msg for msg in messages if msg.get("role") == "user"]
                if user_messages:
                    latest_query = user_messages[-1].get("content", "")
                    relevant_chunks = self.rag_retriever.retrieve_relevant_chunks(latest_query)

                    if relevant_chunks:
                        # Add knowledge context to the conversation
                        knowledge_context = "📚 **Knowledge Base Context:**\n\n" + "\n\n---\n\n".join(relevant_chunks)

                        # Insert knowledge before the latest user message.
                        # NOTE(review): this assumes the last message in the
                        # list IS the latest user message; if the history ends
                        # with an assistant turn it would be dropped — confirm
                        # against callers.
                        messages = list(messages[:-1])  # All messages except the last
                        messages.append({"role": "system", "content": knowledge_context})
                        messages.append(user_messages[-1])  # Add back the latest user message

            # 3) Local copy of the conversation
            convo = list(messages)

            # 4) Recursive loop: handle tool_calls until normal reply
            while True:
                # Prepare call arguments, including functions if any
                call_args = {
                    "model": model_name,
                    "messages": convo,
                    "stream": False
                }
                if tools:
                    call_args["tools"] = tools
                    call_args["tool_choice"] = "auto"

                try:
                    # Call the API with error handling
                    resp = self._chat_create(**call_args)
                    choice = resp.choices[0]
                    msg = choice.message
                except ConnectionError as e:
                    # Network or API connection error - yield user-friendly message
                    error_message = f"⚠️ Connection Error: {str(e)}\n\nPlease check your internet connection and try again."
                    for char in error_message:
                        yield char
                        await asyncio.sleep(0.01)  # Small delay for visual effect
                    return
                except Exception as e:
                    # Any other unexpected error
                    error_message = f"⚠️ Unexpected Error: {str(e)}\n\nPlease try again or contact support if the issue persists."
                    for char in error_message:
                        yield char
                        await asyncio.sleep(0.01)
                    return

                # a) If model wants to call a tool, execute it
                if msg.tool_calls:
                    for tool_call in msg.tool_calls:
                        fname = tool_call.function.name
                        raw_args = tool_call.function.arguments or "{}"
                        try:
                            func_args = json.loads(raw_args)
                        except json.JSONDecodeError:
                            # Malformed arguments from the model: run the tool
                            # with no arguments rather than failing outright.
                            func_args = {}

                        # Find and run the correct tool
                        tool = next((t for t in self.skills if t.name == fname), None)
                        if not tool:
                            err = f"[Error] Tool '{fname}' not found."
                            for ch in err:
                                yield ch
                            return

                        # Execute tool (may be async)
                        result = await tool.run(**func_args)

                        # Record both the call and its output for the invocation log
                        self.invocations.append({
                            "tool": fname,
                            "args": func_args,
                            "result": result
                        })

                        # Handle tool-level errors gracefully
                        if isinstance(result, dict) and "error" in result:
                            apology = (
                                f"⚠️ Sorry, I ran into an issue with the `{fname}` tool: {result['error']}. "
                                "Could you please clarify or try again?"
                            )
                            for ch in apology:
                                yield ch
                            return

                        # Surface partial tool results to the user
                        partial = f"🔔 Tool `{fname}` invoked\n"
                        for ch in partial:
                            yield ch

                        # Inject the tool call message and its output using modern format
                        convo.append({
                            "role": "assistant",
                            "content": None,
                            "tool_calls": [{
                                "id": tool_call.id,
                                "type": "function",
                                "function": {
                                    "name": fname,
                                    "arguments": json.dumps(func_args)
                                }
                            }]
                        })
                        convo.append({
                            "role": "tool",
                            "tool_call_id": tool_call.id,
                            "content": json.dumps(result)
                        })

                    # After processing all tool calls, loop to let the model respond to the results
                    continue

                # b) Normal assistant reply: stream content then exit
                content = msg.content or ""
                # --- ENFORCE NO FALLBACK TO LLM IF NOT ALLOWED ---
                # Allow clarifying questions and tool result summaries, only block unsupported final answers
                if not self.allow_fallback and (self.trusted_links or self.grounded_files):
                    # Heuristic: allow clarifying questions (contains '?', 'please provide', 'missing', etc.)
                    clarifying_phrases = [
                        '?',
                        'please provide',
                        'missing',
                        'required information',
                        'what is',
                        'could you',
                        'specify',
                        'enter',
                        'need to know',
                        'tell me',
                        'which',
                        'select',
                    ]
                    content_lower = content.lower()
                    is_clarifying = any(phrase in content_lower for phrase in clarifying_phrases)
                    # Heuristic: allow tool result summaries (contains 'according to', 'the following', 'results found', etc.)
                    tool_summary_phrases = [
                        'according to',
                        'the following',
                        'results found',
                        'based on',
                        'search result',
                        'tool',
                        'invoked',
                        'here are',
                        'found at',
                    ]
                    is_tool_summary = any(phrase in content_lower for phrase in tool_summary_phrases)
                    # If content is empty, or is a clarifying question, or is a tool summary, allow it
                    if not content.strip() or is_clarifying or is_tool_summary:
                        for ch in content:
                            yield ch
                        break
                    # Otherwise, block with default message
                    default_msg = "⚠️ No information found in the provided sources. Please try rephrasing your question or upload more relevant documents."
                    for ch in default_msg:
                        yield ch
                    break
                # Otherwise, allow normal LLM answer
                for ch in content:
                    yield ch
                break

        except Exception as e:
            # Catch any remaining unhandled errors at the top level
            error_message = f"⚠️ System Error: {str(e)}\n\nPlease try again or contact support if the issue persists."
            for char in error_message:
                yield char
                await asyncio.sleep(0.01)

    async def chat_with_agent(self,
                              model_name: str,
                              messages: list[dict]) -> str:
        """
        Convenience: collect and return the full assistant response.
        """
        out = ""
        async for tok in self.chat_with_agent_stream(model_name, messages):
            out += tok
        return out
435
+
436
async def call_llm(prompt: str | list[dict],
                   model: str = None) -> str:
    """
    Helper for tools: run a simple prompt or message array through the LLM.

    Args:
        prompt: either a ready-made OpenAI messages list, a JSON string
            encoding such a list, or a plain user prompt string.
        model: model name override; defaults to the OPENAI_MODEL env var,
            then "gpt-5-mini".

    Returns:
        str: the assistant message content of the first completion choice.
    """
    # Normalize prompt to a messages list
    if isinstance(prompt, list):
        messages = prompt
    else:
        try:
            # If prompt is a JSON-stringified message list
            parsed = json.loads(prompt)
            # Only accept a list whose items are all dicts; a bare JSON value
            # (e.g. "42") or a list of non-dicts would otherwise be sent to
            # the API as malformed messages.
            if isinstance(parsed, list) and all(isinstance(m, dict) for m in parsed):
                messages = parsed
            else:
                messages = []
        except Exception:
            messages = []

    if not messages:
        # Fallback to a simple system+user framing
        messages = [
            {"role": "system", "content": "You are an assistant."},
            {"role": "user", "content": prompt}
        ]

    # Call the API.
    # NOTE(review): call_with_backoff is synchronous and sleeps between
    # retries, which blocks the event loop — consider asyncio.to_thread.
    response = call_with_backoff(
        client.chat.completions.create,
        model=model or os.getenv("OPENAI_MODEL", "gpt-5-mini"),
        messages=messages
    )

    return response.choices[0].message.content
core/utils/rag.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ """
3
+ rag.py
4
+ ------
5
+ KnowledgeLoader and SimpleRAGRetriever classes for document ingestion and retrieval.
6
+ """
7
+
8
+ import os
9
+ import pdfplumber
10
+ import docx
11
+ import numpy as np
12
+ from openai import OpenAI
13
+
14
class KnowledgeLoader:
    """Extract plain text from uploaded PDF / DOCX / CSV / TXT file objects."""

    def load_text_from_file(self, file_obj):
        """Dispatch on the file extension and return the extracted text.

        Args:
            file_obj: a binary file-like object with a ``.name`` attribute
                (e.g. an upload handle).

        Returns:
            str: the extracted text; a "⚠️ ..." message on failure; or ""
            for unsupported extensions.
        """
        ext = file_obj.name.split(".")[-1].lower()
        if ext == "pdf":
            return self._load_pdf(file_obj)
        elif ext == "docx":
            return self._load_docx(file_obj)
        elif ext == "csv" or ext == "txt":
            # Read the raw bytes ONCE. The previous implementation called
            # file_obj.read() again after a failed UTF-8 decode, but the
            # stream was already exhausted, so the latin-1 fallback silently
            # returned an empty string.
            raw = file_obj.read()
            try:
                return raw.decode("utf-8")
            except UnicodeDecodeError:
                try:
                    return raw.decode("latin-1")
                except Exception as e:
                    return f"⚠️ Failed to decode file: {str(e)}"
        else:
            return ""

    def _load_pdf(self, file_obj):
        """Extract text from every page of a PDF via pdfplumber."""
        try:
            with pdfplumber.open(file_obj) as pdf:
                # extract_text() may return None for image-only pages.
                text = "\n".join([page.extract_text() or "" for page in pdf.pages])
                return text
        except Exception as e:
            return f"⚠️ Failed to read PDF: {str(e)}"

    def _load_docx(self, file_obj):
        """Extract paragraph text from a DOCX document via python-docx."""
        try:
            doc = docx.Document(file_obj)
            text = "\n".join([p.text for p in doc.paragraphs])
            return text
        except Exception as e:
            return f"⚠️ Failed to read DOCX: {str(e)}"
47
+
48
class SimpleRAGRetriever:
    """Minimal in-memory embedding retriever over fixed-size text chunks."""

    def __init__(self, openai_api_key, chunk_size=600):
        # Parallel int-keyed stores: embeddings[i] is the vector for
        # text_chunks[i].
        self.chunk_size = chunk_size
        self.embeddings = {}
        self.text_chunks = {}
        self.client = OpenAI(api_key=openai_api_key)

    def split_text(self, text):
        """Split *text* into consecutive chunks of at most ``chunk_size`` chars."""
        size = self.chunk_size
        chunks = []
        start = 0
        while start < len(text):
            chunks.append(text[start:start + size])
            start += size
        return chunks

    def embed_text(self, texts):
        """Return one numpy embedding vector per input string (or [] for no input)."""
        if not texts:
            return []
        response = self.client.embeddings.create(
            input=texts,
            model="text-embedding-ada-002"
        )
        return [np.array(item.embedding) for item in response.data]

    def add_knowledge(self, file_obj):
        """Extract, chunk, and embed a document into the in-memory store."""
        full_text = KnowledgeLoader().load_text_from_file(file_obj)
        chunks = self.split_text(full_text)
        vectors = self.embed_text(chunks)
        for chunk, vector in zip(chunks, vectors):
            self.embeddings[len(self.embeddings)] = vector
            self.text_chunks[len(self.text_chunks)] = chunk

    def retrieve_relevant_chunks(self, query, top_k=2):
        """Return up to *top_k* stored chunks most cosine-similar to *query*."""
        if not self.embeddings:
            return []
        query_vec = self.embed_text([query])[0]
        scored = []
        for key, vector in self.embeddings.items():
            sim = float(np.dot(query_vec, vector) / (np.linalg.norm(query_vec) * np.linalg.norm(vector)))
            scored.append((sim, key))
        # Stable sort keeps insertion order among equal scores, matching the
        # original dict-based ranking.
        scored.sort(key=lambda pair: pair[0], reverse=True)
        return [self.text_chunks[key] for _, key in scored[:top_k]]
core/utils/skills_registry.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import re
4
+ import math
5
+ from typing import Optional
6
+ from openai import OpenAI, OpenAIError
7
+
8
+ from tools.registry import TOOL_REGISTRY
9
+
10
+ from typing import Any, Callable, Dict, Optional, Type, Union
11
+ from tools.base import Tool
12
+
13
+
14
+
15
+
16
# Instantiate stateless tools once and store in a dict.
# history_taking is excluded: it needs a per-query syndrome key and is
# instantiated lazily in get_tool_by_name.
STATIC_TOOL_INSTANCES = {name: cls() for name, cls in TOOL_REGISTRY.items() if name != "history_taking"}

"""
skills_registry.py
------------------
Registry and helper functions for tool instantiation, syndrome matching, and embedding lookup.
"""
# NOTE(review): the string above is not a real module docstring — code
# precedes it, so it is evaluated and discarded. Consider moving it to the
# very top of the file.

#match for KB
# 1) Initialize the OpenAI client
openai_api_key = os.getenv("OPENAI_API_KEY")
if not openai_api_key:
    # Fail fast at import time: the embedding lookups below need the key.
    raise RuntimeError("OPENAI_API_KEY environment variable is not set. Please set it before running the application.")
_client = OpenAI(api_key=openai_api_key)

# 2) Load your precomputed syndrome embeddings
#    Make sure syndrome_embeddings.json is in your working dir
with open("syndrome_embeddings.json", "r") as f:
    SYNDROME_EMBS = json.load(f)  # { syndrome_key: [float, ...], … }
36
+
37
+
38
+ def _cosine_sim(a: list[float], b: list[float]) -> float:
39
+ """
40
+ Compute the cosine similarity between two vectors.
41
+
42
+ Args:
43
+ a (list[float]): First vector.
44
+ b (list[float]): Second vector.
45
+
46
+ Returns:
47
+ float: Cosine similarity between a and b.
48
+ """
49
+ dot = sum(x*y for x, y in zip(a, b))
50
+ norm_a = math.sqrt(sum(x*x for x in a))
51
+ norm_b = math.sqrt(sum(y*y for y in b))
52
+ return dot / (norm_a * norm_b) if norm_a and norm_b else 0.0
53
+
54
def _match_syndrome(user_query: str,
                    model: str = "text-embedding-ada-002",
                    threshold: float = 0.7
                    ) -> Optional[str]:
    """
    Semantically match the user_query to the best syndrome_key
    via cosine similarity against precomputed embeddings.

    Args:
        user_query (str): The user's query string.
        model (str, optional): Embedding model to use. Defaults to "text-embedding-ada-002".
        threshold (float, optional): Minimum similarity threshold. Defaults to 0.7.

    Returns:
        Optional[str]: The best-matching syndrome key, or None if no match
        meets the threshold (or the embedding call fails).
    """
    # NOTE: the previous version started with two dead `pass` statements and
    # stale "TODO: implement" comments, which made the function look
    # unimplemented and demoted this docstring to a discarded expression.
    # Execution was unaffected (`pass` is a no-op), so behavior is unchanged.

    # Normalize: lowercase, strip punctuation, collapse whitespace so the
    # embedding input is stable across superficial query variations.
    q = user_query.lower()
    q = re.sub(r"[^a-z0-9\s]", " ", q)
    q = re.sub(r"\s+", " ", q).strip()

    # embed the query
    try:
        resp = _client.embeddings.create(model=model, input=[q])
        q_emb = resp.data[0].embedding
    except OpenAIError as e:
        # if embedding fails, fall back to no match
        print(f"[Embedding error] {e}")
        return None

    # find best cosine similarity
    best_key, best_score = None, -1.0
    for key, emb in SYNDROME_EMBS.items():
        score = _cosine_sim(q_emb, emb)
        if score > best_score:
            best_key, best_score = key, score

    return best_key if best_score >= threshold else None
96
+
97
+
98
# Unified tool registry: uses TOOL_REGISTRY for class references and STATIC_TOOL_INSTANCES for stateless tools
from tools.history_taking import HistoryTakingTool

# Maps tool name -> {"fn": ..., "args_schema": ...}.
# For "history_taking", "fn" is the CLASS (instantiated later with a
# syndrome key); for every other tool, "fn" is a ready-to-use instance.
tool_registry: Dict[str, Dict[str, Any]] = {}
for name, cls in TOOL_REGISTRY.items():
    if name == "history_taking":
        tool_registry[name] = {"fn": HistoryTakingTool}
    else:
        instance = STATIC_TOOL_INSTANCES[name]
        tool_registry[name] = {"fn": instance, "args_schema": instance.args_schema}
108
+
109
def get_tool_by_name(
    name: str,
    context: Dict[str, Any]
) -> Optional[Tool]:
    """
    Retrieve a tool instance by name, optionally using context for dynamic instantiation.

    Args:
        name (str): The tool key, e.g. "history_taking".
        context (Dict[str, Any]): Must include "user_query" for dynamic tools.

    Returns:
        Optional[Tool]: The tool instance, or None if not found or not instantiable.
    """
    # Honor the documented contract: unknown tool names return None instead
    # of raising KeyError (the previous direct indexing raised).
    entry = tool_registry.get(name)
    if entry is None:
        return None
    fn = entry["fn"]
    if name == "history_taking":
        syndrome_key = _match_syndrome(context["user_query"])
        if not syndrome_key:
            return None
        return fn(syndrome_key)  # instantiate with dynamic key
    else:
        # static tools: fn is already an instance
        return fn
prompts/alert_prolonged_antibiotic_use.j2 ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {% raw %}
2
+ [
3
+ {"role": "system", "content":
4
+ "You are an infectious diseases consultant. Based on all provided clinical data and guidelines, determine if the patient is at risk for prolonged or unnecessary antibiotic use and summarize recommended durations for the specified condition. Provide rationale and cite relevant guidelines if possible."},
5
+
6
+ {"role": "user", "content":
7
+ "Clinical condition: {{ condition }}\nSite of infection: {{ site_of_infection }}\nRisk or presence of biofilm: {{ risk_of_biofilm }}\nCurrent response to antibiotics: {{ current_response }}\nCreatinine clearance: {{ creatinine_clearance }}\nSeverity of infection: {{ severity_of_infection }}\nKnown drug allergies: {{ known_allergies }}\n\nPlease:\n1. Summarize recommended antibiotic duration for this condition.\n2. Indicate if current therapy may be prolonged.\n3. Provide rationale and cite guidelines if possible."}
8
+ ]
9
+ {% endraw %}
prompts/deescalation.j2 ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {% raw %}
2
+ [
3
+ {"role": "system", "content":
4
+ "You are an infectious diseases consultant. Based on all provided clinical data, recommend appropriate antibiotic de‐escalation."},
5
+
6
+ {"role": "user", "content":
7
+ "Patient culture results: {{ culture }}\nCurrent antibiotics: {{ meds }}\nSite of infection: {{ site_of_infection }}\nRisk or presence of biofilm: {{ risk_of_biofilm }}\nCurrent response to antibiotics: {{ current_response }}\nCreatinine clearance: {{ creatinine_clearance }}\nSeverity of infection: {{ severity_of_infection }}\nKnown drug allergies: {{ known_allergies }}\n\nPlease:\n1. Identify if de‐escalation is safe.\n2. Recommend the narrowest effective agent.\n3. Provide rationale in 2–3 sentences."}
8
+ ]
9
+ {% endraw %}
prompts/diagnostic_recommendation.j2 ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {% raw %}
2
+ [
3
+ {"role": "system", "content":
4
+ "You are an infectious diseases consultant who has gathered the patient’s clinical history and now will provide a diagnostic and management recommendation."},
5
+
6
+ {"role": "user", "content":
7
+ "The user’s concern: '{{ syndrome_query }}'.\n\n"
8
+ "Collected History:\n"
9
+ "{% for question_key, answer in history.items() %}- {{ question_key.replace('_', ' ').capitalize() }}: {{ answer }}\n{% endfor %}\n\n"
10
+ "Based on this information, please provide your diagnostic impression and recommended management plan, including appropriate empiric therapy, isolation precautions, and any other relevant considerations."}
11
+ ]
12
+ {% endraw %}
prompts/draft_critique_enhance_board_exam.j2 ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ You are an expert Infectious Diseases fellowship board exam question writer and critic.
2
+
3
+ Your task is to DRAFT an initial question, CRITIQUE it thoroughly, and then ENHANCE it to board-level excellence.
4
+
5
+ **BLUEPRINT TO IMPLEMENT:**
6
+ {{ blueprint }}
7
+
8
+ **TOPIC:** {{ topic }}
9
+ **DIFFICULTY LEVEL:** {{ difficulty_level }}
10
+ **QUESTION TYPE:** {{ question_type }}
11
+
12
+ **YOUR 3-PHASE MISSION:**
13
+
14
+ ## PHASE 1: DRAFT INITIAL QUESTION
15
+ Based on the blueprint, create:
16
+ 1. **Clinical Vignette:** Rich clinical scenario implementing the blueprint strategy
17
+ 2. **Question Stem:** Clear, specific question
18
+ 3. **Answer Choices:** 5 options matching the blueprint differential diagnoses
19
+ 4. **Explanations:** Detailed explanations for correct and incorrect answers
20
+
21
+ ## PHASE 2: CRITIQUE THE DRAFT
22
+ Analyze your draft question for:
23
+ - **Diagnostic Giveaways:** Any obvious clues that make the answer too easy?
24
+ - **Wrong Answer Quality:** Are the distractors plausible for ID specialists?
25
+ - **Clinical Realism:** Does the vignette reflect real-world presentations?
26
+ - **Difficulty Level:** Will this challenge ID fellowship trainees?
27
+ - **Blueprint Adherence:** Does it follow the planned strategy?
28
+
29
+ ## PHASE 3: ENHANCE BASED ON CRITIQUE
30
+ Revise the question to:
31
+ - Eliminate any diagnostic giveaways identified
32
+ - Strengthen weak distractors
33
+ - Add clinical complexity and sophistication
34
+ - Ensure blueprint strategy is perfectly executed
35
+ - Achieve ID fellowship-level difficulty
36
+
37
+ **ENHANCEMENT REQUIREMENTS:**
38
+
39
+ 1. **Vignette Excellence:**
40
+ - Include specific lab values, imaging findings
41
+ - Add clinical complexity (comorbidities, medications)
42
+ - Use sophisticated medical terminology
43
+ - Implement all blueprint clues naturally
44
+
45
+ 2. **Question Sophistication:**
46
+ - Avoid obvious diagnostic language
47
+ - Focus on clinical reasoning
48
+ - Require expert-level differentiation
49
+
50
+ 3. **Answer Choice Quality:**
51
+ - Each distractor must be plausible for ID specialists
52
+ - Implement the blueprint's wrong answer reasoning
53
+ - Ensure choices require clinical expertise to differentiate
54
+
55
+ 4. **Explanation Depth:**
56
+ - Detailed reasoning for correct answer
57
+ - Specific reasons why each wrong answer is incorrect
58
+ - Educational value for ID trainees
59
+
60
+ **CRITICAL RULES:**
61
+ - NO classic travel + classic symptoms combinations that give away answers
62
+ - Each wrong answer must have the specific reasoning from the blueprint
63
+ - Include 3+ supporting clues for correct diagnosis as planned
64
+ - Add misleading clues to increase complexity
65
+ - Target ID fellowship-level difficulty
66
+
67
+ **OUTPUT FORMAT:**
68
+ Return a JSON object with this exact structure:
69
+
70
+ ```json
71
+ {
72
+ "vignette": "Enhanced clinical vignette with sophisticated details",
73
+ "question_stem": "Clear, specific question requiring ID expertise",
74
+ "answer_choices": [
75
+ "Correct answer - first choice",
76
+ "Plausible distractor 2",
77
+ "Plausible distractor 3",
78
+ "Plausible distractor 4",
79
+ "Plausible distractor 5"
80
+ ],
81
+ "explanations": {
82
+ "correct": "Detailed explanation of why this is correct with ID-specific reasoning",
83
+ "incorrect": "Detailed explanation of why other choices are incorrect, implementing blueprint reasoning"
84
+ },
85
+ "enhancement_notes": "Summary of key enhancements made during critique phase",
86
+ "critique_summary": "Brief summary of issues identified and how they were resolved"
87
+ }
88
+ ```
89
+
90
+ **EXAMPLE OF BLUEPRINT IMPLEMENTATION:**
91
+
92
+ If blueprint says "Blastomyces wrong because yeast description will be small 2-4 microns":
93
+ - Vignette should describe: "BAL fluid microscopy reveals small budding yeasts measuring 2-4 micrometers"
94
+ - Explanation should state: "Blastomyces dermatitidis typically shows broad-based budding yeasts that are 8-15 micrometers, not the small 2-4 micrometer yeasts described"
95
+
96
+ **Remember:** Follow the blueprint strategy exactly while creating a sophisticated question that challenges ID specialists!
prompts/empiric_therapy.j2 ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {% raw %}
2
+ [
3
+ {
4
+ "role": "system",
5
+ "content": "You are a clinical decision support agent. Suggest empiric antibiotic therapy per standard guidelines, considering all provided clinical variables."
6
+ },
7
+ {
8
+ "role": "user",
9
+ "content": "Patient profile:\n- Age: {{ age }}\n- Allergies: {{ allergies }}\n- Recent labs: {{ labs }}\n- Culture & Sensitivity Results: {{ culture }}\n- Current Antibiotic Regimen: {{ meds }}\n- Site of Infection: {{ site_of_infection }}\n- Risk or Presence of Biofilm: {{ risk_of_biofilm }}\n- Current Response to Antibiotics: {{ current_response }}\n- Creatinine Clearance: {{ creatinine_clearance }}\n- Severity of Infection: {{ severity_of_infection }}\n- Known Drug Allergies: {{ known_allergies }}\n\nBased on this, recommend:\n1. One or two empiric antibiotic regimens.\n2. Dosing and route.\n3. Brief justification."
10
+ }
11
+ ]
12
+
13
+ {% endraw %}
prompts/evaluate_nhsn_definition.j2 ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "role": "system",
4
+ "content": "You are an expert at applying NHSN surveillance definitions to patient cases. \n\n"
5
+ "Here is the formal logic extracted from the NHSN site:\n"
6
+ "<JSON>{{ definition_logic | tojson }}</JSON>\n\n"
7
+ "And here are the user-provided field values:\n"
8
+ "<JSON>{{ values | tojson }}</JSON>\n\n"
9
+ "Based on this logic and these values, decide whether the case **meets** the definition. \n"
10
+ "Return **only** a JSON object wrapped in <JSON>…</JSON> tags with exactly two keys:\n"
11
+ " • `meets_definition`: true or false\n"
12
+ " • `reasoning`: a brief justification referencing the logic.\n"
13
+ "Do not output any additional text."
14
+ },
15
+ {
16
+ "role": "user",
17
+ "content": "Case: {{ case_description }}"
18
+ }
19
+ ]
prompts/extract_nhsn_fields.j2 ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "role": "system",
4
+ "content": "You are an expert in NHSN surveillance definitions. \n"
5
+ "From the following webpage snippets, extract ONLY the list of required data elements (field names) that a user must collect to apply the definition. \n"
6
+ "Return **only** a JSON array wrapped in <JSON>…</JSON> tags—no extra text."
7
+ },
8
+ {
9
+ "role": "user",
10
+ "content": "Snippets:\n\n{{ snippets }}"
11
+ }
12
+ ]
prompts/extract_nhsn_logic.j2 ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "role": "system",
4
+ "content": "You are an expert in NHSN surveillance definitions. \n"
5
+ "From the same webpage snippets, extract the core logic or rule text that describes how to decide the definition (e.g., “central line in place >2 calendar days…”, “no alternate source of infection…”, etc.). \n"
6
+ "Return **only** a JSON object wrapped in <JSON>…</JSON> tags with a single key `logic` whose value is that text."
7
+ },
8
+ {
9
+ "role": "user",
10
+ "content": "Snippets:\n\n{{ snippets }}"
11
+ }
12
+ ]
prompts/extract_reporting_fields.j2 ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {% raw %}
2
+
3
+ [
4
+ {
5
+ "role": "system",
6
+ "content": "You are an expert at extracting field names from public health reporting requirement snippets."
7
+ },
8
+ {
9
+ "role": "user",
10
+ "content": "From these snippets, extract only the list of required reporting fields. "
11
+ "Return **only** a JSON array wrapped between <JSON> and </JSON> tags, for example:\n\n"
12
+ "<JSON>\n[\"Name\",\"Age\",…]\n</JSON>\n\n"
13
+ "Snippets:\n\n{{ snippets }}"
14
+ }
15
+ ]
16
+
17
+ {% endraw %}
prompts/extract_reporting_format.j2 ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {% raw %}
2
+
3
+ [
4
+ {
5
+ "role": "system",
6
+ "content": "You are an expert at understanding state public health reporting templates."
7
+ },
8
+ {
9
+ "role": "user",
10
+ "content": "From these snippets, extract only the JSON object describing the report format (columns, column_headers, file_name). "
11
+ "Return **only** that object wrapped between <JSON> and </JSON> tags, for example:\n\n"
12
+ "<JSON>\n"
13
+ "{\n"
14
+ " \"columns\": [\"Name\",\"DOB\",…],\n"
15
+ " \"column_headers\": {\"Name\":\"Full Name\",…},\n"
16
+ " \"file_name\":\"Typhoid_Report_Dallas_TX.csv\"\n"
17
+ "}\n"
18
+ "</JSON>\n\n"
19
+ "Snippets:\n\n{{ snippets }}"
20
+ }
21
+ ]
22
+
23
+ {% endraw %}
prompts/final_enhancement_board_exam.j2 ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ You are an expert Infectious Diseases fellowship board exam question reviewer and enhancer.
2
+
3
+ Your task is to take a drafted board exam question and enhance it to ensure it meets the highest standards for ID fellowship board examinations.
4
+
5
+ **CURRENT DRAFTED QUESTION:**
6
+
7
+ **Topic:** {{ topic }}
8
+
9
+ **Vignette:**
10
+ {{ current_vignette }}
11
+
12
+ **Question Stem:**
13
+ {{ current_question }}
14
+
15
+ **Answer Choices:**
16
+ {{ current_choices }}
17
+
18
+ **Explanations:**
19
+ {{ current_explanations }}
20
+
21
+ **QUALITY REVIEW FEEDBACK:**
22
+ {{ quality_feedback }}
23
+
24
+ **YOUR ENHANCEMENT MISSION:**
25
+
26
+ 🚨 **IMMEDIATE ACTION REQUIRED:**
27
+ The quality review shows this question FAILS board exam standards. You MUST follow these mandatory steps:
28
+
29
+ **STEP 1: EMERGENCY QUALITY CHECK**
30
+ - Quality score: {{ quality_feedback.percentage_score }}%
31
+ - Board readiness: {{ quality_feedback.board_exam_readiness }}
32
+ - **TARGET REQUIRED: 80%+ score AND board_readiness = True**
33
+ - If score <80% OR board_readiness = False → **EMERGENCY PROTOCOL ACTIVATED**
34
+
35
+ **STEP 2: EMERGENCY PROTOCOL (MANDATORY)**
36
+ When quality review shows problems, you MUST:
37
+ 1. **COMPLETELY DISCARD** the current vignette
38
+ 2. **START OVER** with an entirely new clinical scenario
39
+ 3. **REMOVE ALL** forbidden combinations identified in quality review
40
+ 4. **CREATE** a genuinely difficult scenario requiring fellowship-level expertise
41
+ 5. **ENSURE 3+ EQUALLY PLAUSIBLE diagnoses** with overlapping features
42
+
43
+ **FORBIDDEN ACTIONS** (Will result in continued failure):
44
+ - ❌ Making minor modifications to existing vignette
45
+ - ❌ Keeping any elements that quality review flagged as "too obvious"
46
+ - ❌ Maintaining travel history that gives away diagnosis
47
+ - ❌ Using any combination identified as "strongly suggests" the correct answer
48
+
49
+ **MANDATORY ACTIONS** (Required for passing):
50
+ - ✅ Completely new patient scenario
51
+ - ✅ Remove or mislead all geographic/exposure clues
52
+ - ✅ Create multiple equally plausible diagnoses
53
+ - ✅ Require expert-level ID reasoning to differentiate
54
+
55
+ 2. **CRITICAL: Eliminate All Diagnostic Giveaways**
56
+ - **Question stem**: NEVER mention suspected diagnosis, pathogen type, or disease category
57
+ - **Vignette**: REMOVE obvious combinations:
58
+ * Classic geographic exposure + classic symptoms = REMOVE one or both
59
+ * Specific travel history + pathognomonic findings = REMOVE or change
60
+ * Immunocompromised + classic opportunistic infection signs = ADD confounding factors
61
+ - **Create genuine uncertainty**: Each differential diagnosis should have 2-3 supporting clues
62
+
63
+ 3. **CRITICAL: Implement 3-Clue Rule**
64
+ - **Correct diagnosis**: Must have EXACTLY 3 clear but subtle diagnostic clues
65
+ - **Each distractor**: Should have 1-2 misleading clues suggesting that diagnosis
66
+ - **Ensure diagnostic confusion**: Clinical picture should suggest multiple possibilities
67
+
68
+ 4. **Maximize difficulty for ID specialists:**
69
+ - Create atypical presentations requiring expert reasoning
70
+ - Include overlapping features between differential diagnoses
71
+ - Add confounding clinical information that misleads
72
+ - Use complex clinical scenarios (immunocompromised, nosocomial, drug interactions)
73
+
74
+ 5. **Include ID-specific sophisticated details:**
75
+ - Antimicrobial resistance patterns
76
+ - Specific pathogens and their characteristics
77
+ - Complex clinical scenarios (immunocompromised patients, healthcare settings, etc.)
78
+ - Laboratory interpretations (cultures, molecular diagnostics, serology)
79
+ - Treatment decisions and drug interactions
80
+
81
+ **ENHANCEMENT REQUIREMENTS:**
82
+
83
+ - **Vignette Enhancement:** Add clinical complexity, specific lab values, imaging findings, patient history that increases difficulty
84
+ - **Question Refinement:** Make the question stem more sophisticated and ID-specific
85
+ - **Answer Choice Improvement:** Ensure all distractors are plausible for an ID specialist
86
+ - **Explanation Enhancement:** Provide detailed ID-specific reasoning that teaches key concepts
87
+
88
+ **CRITICAL RULES for MAXIMUM DIFFICULTY:**
89
+ - **MANDATORY: If quality review score <75%** → COMPLETELY REWRITE the vignette from scratch
90
+ - **MANDATORY: If "board_exam_readiness = False"** → ABANDON current approach and create entirely new scenario
91
+ - **3-Clue Rule**: Correct answer has exactly 3 diagnostic clues, distractors have 1-2 clues each
92
+ - **NO obvious combinations**: Classic exposure + classic symptoms + confirmatory tests = TOO EASY
93
+ - **Geographic clues**: Either REMOVE entirely or make misleading (wrong endemic area)
94
+ - **Diagnostic confusion**: Include findings that could suggest 3+ different diagnoses
95
+ - **ID expertise required**: Should challenge infectious disease attendings
96
+ - **Atypical presentations**: Completely avoid textbook classic cases
97
+ - **Clinical complexity**: Multiple confounding factors that mislead from correct diagnosis
98
+
99
+ **EXAMPLES OF PROPER DIFFICULTY:**
100
+
101
+ ❌ **FAILING APPROACH** (What you must NOT do):
102
+ - "Patient with HIV traveled to Southeast Asia and has skin lesions" → Obviously Penicillium marneffei
103
+ - Minor tweaks: "traveled to multiple regions in Southeast Asia" → Still obvious
104
+ - Cosmetic changes: "atypical skin findings" → Diagnosis still clear
105
+
106
+ ✅ **PASSING APPROACH** (What you MUST do):
107
+ - **Option A**: Completely different scenario: "Hospital-acquired infection in ICU patient with complex comorbidities and atypical presentation suggesting multiple resistant pathogens"
108
+ - **Option B**: Misleading scenario: "Recent travel to Europe, pneumonia-like symptoms, multiple possible bacterial/viral/fungal etiologies requiring expert differentiation"
109
+ - **Option C**: Complex presentation: "Immunocompromised patient with overlapping symptoms suggesting 3+ different opportunistic infections, requiring laboratory interpretation skills"
110
+
111
+ **TRANSFORMATION EXAMPLES:**
112
+ - **Before**: HIV + Southeast Asia travel + skin lesions → Penicillium marneffei
113
+ - **After**: HIV + Recent European travel + respiratory symptoms + complex lab findings → Could be PCP, bacterial pneumonia, atypical mycobacteria, or resistant fungal infection
114
+
115
+ **EMERGENCY REWRITE TRIGGERS:**
116
+ - Any mention of Southeast Asia in context of HIV + skin lesions
117
+ - Any "classic" exposure + symptoms combination
118
+ - Any scenario where quality review says "diagnosis too obvious"
119
+ - Quality score <75% or board_exam_readiness = False
120
+
121
+ **OUTPUT FORMAT:**
122
+ Return a JSON object with this exact structure:
123
+
124
+ ```json
125
+ {
126
+ "vignette": "Enhanced clinical vignette with ID-specific complexity",
127
+ "question_stem": "Enhanced question that requires ID expertise",
128
+ "answer_choices": [
129
+ "Correct answer - first choice",
130
+ "Plausible distractor 2",
131
+ "Plausible distractor 3",
132
+ "Plausible distractor 4",
133
+ "Plausible distractor 5"
134
+ ],
135
+ "explanations": {
136
+ "correct": "Detailed explanation of why this is correct with ID-specific reasoning",
137
+ "incorrect": "Detailed explanation of why other choices are incorrect, with teaching points"
138
+ },
139
+ "enhancement_notes": "Summary of key enhancements made to increase difficulty and ID-specificity"
140
+ }
141
+ ```
142
+
143
+ **Remember:** This question should challenge an Infectious Diseases fellow or attending physician. Make it clinically sophisticated and ID-specialty specific!
prompts/generate_board_exam_vignette.j2 ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ You are an expert medical education specialist creating advanced board exam questions.
2
+
3
+ Using the following comparison table, generate a complete board exam question including vignette, question stem, answer choices, and explanations.
4
+
5
+ **Comparison Table:**
6
+ {{ comparison_table }}
7
+
8
+ **Requirements for CHALLENGING Board Exam Questions:**
9
+
10
+ 1. **CRITICAL Diagnostic Clue Distribution:**
11
+ - **Correct diagnosis ONLY**: Must have exactly 3 clear but subtle diagnostic clues
12
+ - **Each distractor**: Should have 1-2 misleading clues suggesting that diagnosis
13
+ - **Create confusion**: Include overlapping symptoms, labs, imaging between diagnoses
14
+ - **Avoid giveaways**: NO obvious exposure + classic symptoms + confirmatory tests
15
+
16
+ 2. **Clinical Vignette Complexity**: Create a realistic, challenging clinical scenario that:
17
+ - Uses specific patient demographics, medical history, and presentation timeline
18
+ - Includes detailed vital signs, physical exam findings, and laboratory results
19
+ - Incorporates specific imaging findings and diagnostic test results
20
+ - **Includes confounding elements**: Red herrings from differential diagnoses
21
+ - **Atypical presentations**: Avoid textbook classic presentations
22
+ - **Expert-level reasoning required**: Should challenge ID specialists
23
+ - Uses realistic medical details (specific lab values, medication names, dosages)
24
+ - **Multiple comorbidities/factors** that complicate the clinical picture
25
+
26
+ 3. **Question Stem**:
27
+ - For diagnosis questions: "Which of the following is the most likely diagnosis?"
28
+ - For treatment questions: "Which of the following is the most appropriate next step in management?"
29
+ - For identification questions: "Which of the following is the most likely causative organism?"
30
+
31
+ 4. **Answer Choices**: Extract the condition names from the comparison table:
32
+ - A. [Correct answer condition]
33
+ - B. [Distractor 1 condition]
34
+ - C. [Distractor 2 condition]
35
+ - D. [Distractor 3 condition]
36
+ - E. [Distractor 4 condition]
37
+
38
+ 5. **Explanations**:
39
+ - **Correct Answer**: Explain why this diagnosis is most consistent with the clinical presentation, citing specific discriminating features
40
+ - **Each Distractor**: Explain why this diagnosis is less likely, highlighting key differences in presentation, lab findings, or clinical context
41
+
42
+ **Quality Standards:**
43
+ - Board exam level complexity and clinical accuracy
44
+ - Specific medical details that demonstrate expertise
45
+ - Clear clinical reasoning that teaches key concepts
46
+ - Appropriate difficulty for advanced learners
47
+
48
+ **Topic**: {{ topic }}
49
+ **Question Type**: {{ question_type }}
50
+ **Difficulty**: {{ difficulty_level }}
51
+
52
+ **Output Format:**
53
+ Return a JSON object with this exact structure:
54
+ ```json
55
+ {
56
+ "vignette": "Detailed clinical scenario with specific medical details...",
57
+ "question_stem": "Complete question including vignette + question",
58
+ "answer_choices": [
59
+ "A. Correct condition name",
60
+ "B. Distractor 1 condition name",
61
+ "C. Distractor 2 condition name",
62
+ "D. Distractor 3 condition name",
63
+ "E. Distractor 4 condition name"
64
+ ],
65
+ "explanations": {
66
+ "correct_explanation": "Why A is correct with specific clinical reasoning...",
67
+ "distractor_explanations": [
68
+ "Why B is incorrect with specific differences...",
69
+ "Why C is incorrect with specific differences...",
70
+ "Why D is incorrect with specific differences...",
71
+ "Why E is incorrect with specific differences..."
72
+ ]
73
+ }
74
+ }
75
+ ```
prompts/generate_comparison_table.j2 ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ You are an expert medical education specialist creating advanced board exam questions for healthcare professionals.
2
+
3
+ Generate a comprehensive comparison table for differential diagnosis of: {{ topic }}
4
+
5
+ {% if guideline_context and guideline_context != "No specific guidelines found. Use standard medical knowledge." %}
6
+ **EVIDENCE-BASED CONTEXT:**
7
+ {{ guideline_context }}
8
+
9
+ **Important:** Base your comparison table on the above evidence-based guidelines and current medical literature.
10
+ {% else %}
11
+ **Context:** No specific guidelines available. Use current standard medical knowledge and evidence-based practice.
12
+ {% endif %}
13
+
14
+ **Instructions:**
15
+ 1. Create a comparison table with 5 conditions:
16
+ - 1 CORRECT answer (the actual condition)
17
+ - 4 plausible DISTRACTORS (similar conditions that could be confused)
18
+
19
+ 2. For each condition, provide specific clinical details:
20
+ - **Clinical Presentation**: Specific symptoms, timeline, severity
21
+ - **Epidemiology/Risk Factors**: Patient demographics, exposures, comorbidities
22
+ - **Laboratory Findings**: Specific lab values, patterns, biomarkers
23
+ - **Imaging Characteristics**: Specific radiologic findings, patterns
24
+ - **Diagnostic Tests**: Confirmatory tests, sensitivity/specificity
25
+ - **Treatment**: First-line therapy, dosing, duration
26
+ - **Clinical Reasoning**: Why this diagnosis fits or doesn't fit
27
+
28
+ 3. **Quality Requirements for DIFFICULT Board Questions:**
29
+ - Use specific medical details (lab values, imaging findings, medications)
30
+ - Include discriminating features that distinguish conditions
31
+ - **CRITICAL: Create diagnostic complexity** - include misleading information from differential diagnoses
32
+ - **Clue Distribution Rule**: Correct diagnosis should have exactly 3 clear diagnostic clues, distractors should have 1-2 misleading clues
33
+ - Ensure clinical accuracy and board exam relevance
34
+ - Make distractors plausible with overlapping presentations
35
+ - **Avoid obvious giveaways** (no classic exposure history + classic symptoms + confirmatory tests)
36
+ - Reference current guidelines and evidence-based practice when available
37
+
38
+ 4. **Difficulty Enhancement Requirements:**
39
+ - **Confusing Clinical Picture**: Include symptoms/findings that could suggest multiple diagnoses
40
+ - **Overlapping Presentations**: Each distractor should share some features with the correct answer
41
+ - **Subtle Distinguishing Features**: The correct diagnosis should be evident only through careful analysis
42
+ - **Expert-Level Reasoning Required**: Should challenge ID fellows/attendings, not be obvious to general internists
43
+
44
+ 5. **Difficulty Level**: {{ difficulty_level }}
45
+ 6. **Question Type**: {{ question_type }}
46
+
47
+ **Output Format:**
48
+ Return a JSON object with this exact structure:
49
+ ```json
50
+ {
51
+ "correct_answer": {
52
+ "condition": "Exact condition name",
53
+ "clinical_presentation": "Detailed clinical features",
54
+ "epidemiology_risk_factors": "Specific risk factors and demographics",
55
+ "laboratory_findings": "Specific lab values and patterns",
56
+ "imaging_characteristics": "Detailed imaging findings",
57
+ "diagnostic_tests": "Confirmatory diagnostic approaches",
58
+ "treatment": "Specific treatment recommendations",
59
+ "clinical_reasoning": "Why this is the most likely diagnosis"
60
+ },
61
+ "distractor_1": {
62
+ "condition": "Alternative diagnosis 1",
63
+ "clinical_presentation": "How this condition typically presents",
64
+ "epidemiology_risk_factors": "Risk factors for this condition",
65
+ "laboratory_findings": "Lab pattern for this condition",
66
+ "imaging_characteristics": "Imaging findings for this condition",
67
+ "diagnostic_tests": "How to diagnose this condition",
68
+ "treatment": "Treatment for this condition",
69
+ "clinical_reasoning": "Why this could be considered but is less likely"
70
+ },
71
+ "distractor_2": { ... },
72
+ "distractor_3": { ... },
73
+ "distractor_4": { ... }
74
+ }
75
+ ```
76
+
77
+ Focus on creating clinically accurate, educationally valuable content that tests advanced medical reasoning.
prompts/generate_presentation_slide.j2 ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ You are an expert medical educator creating comprehensive educational presentation slides. Your task is to generate detailed, educational content for a specific slide based on research information.
2
+
3
+ **Presentation Context:**
4
+ - Topic: {{ topic }}
5
+ - Target Audience: {{ target_audience }}
6
+ - Slide Title: {{ slide_title }}
7
+ - Section: {{ section }}
8
+ - Content Description: {{ content_description }}
9
+
10
+ **Research Information Available:**
11
+ {{ research_report }}
12
+
13
+ **Slide Content Requirements:**
14
+
15
+ 1. **Educational Value**: Content should be appropriate for {{ target_audience }} level
16
+ 2. **Clinical Relevance**: Include practical, clinically applicable information
17
+ 3. **Evidence-Based**: Use the research information to support all claims
18
+ 4. **Structured Format**: Organize content in clear, digestible bullet points
19
+ 5. **Engagement**: Make content engaging and memorable
20
+
21
+ **Content Guidelines:**
22
+ - Use 4-6 main bullet points per slide
23
+ - Each bullet point should be concise but informative (1-2 sentences)
24
+ - Include specific clinical details, dosages, guidelines when appropriate
25
+ - Add sub-bullets for important details or examples
26
+ - Ensure content flows logically and builds knowledge
27
+
28
+ **Special Instructions by Slide Type:**
29
+ - **Learning Objectives**: Focus on measurable, achievable goals
30
+ - **Case Vignettes**: Create realistic, detailed clinical scenarios
31
+ - **Pathophysiology**: Explain mechanisms clearly with clinical correlation
32
+ - **Diagnosis**: Include specific tests, criteria, and interpretation
33
+ - **Treatment**: Provide evidence-based protocols, dosages, monitoring
34
+ - **Guidelines**: Reference current society recommendations
35
+ - **Clinical Pearls**: Share practical tips and expert insights
36
+
37
+ **Output Format:**
38
+ Return a JSON object with this exact structure:
39
+ ```json
40
+ {
41
+ "slide_title": "{{ slide_title }}",
42
+ "main_content": [
43
+ "First main bullet point with clinical detail",
44
+ "Second main bullet point with evidence-based information",
45
+ "Third main bullet point with practical application",
46
+ "Fourth main bullet point with specific examples",
47
+ "Fifth main bullet point with clinical correlation",
48
+ "Sixth main bullet point with key takeaways"
49
+ ],
50
+ "sub_bullets": {
51
+ "First main bullet point": [
52
+ "Supporting detail or example",
53
+ "Additional clinical correlation"
54
+ ],
55
+ "Third main bullet point": [
56
+ "Specific dosage or protocol",
57
+ "Monitoring parameters"
58
+ ]
59
+ },
60
+ "clinical_notes": "Key clinical insights, warnings, or pearls for this slide content",
61
+ "references_used": "Brief mention of which research sources informed this content"
62
+ }
63
+ ```
64
+
65
+ Generate comprehensive, educationally valuable content that advances the learner's understanding of {{ topic }}.
prompts/generate_question_blueprint.j2 ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ You are an expert Infectious Diseases fellowship board exam question designer.
2
+
3
+ Your task is to create a strategic blueprint for a challenging ID board exam question.
4
+
5
+ **TOPIC:** {{ topic }}
6
+ **DIFFICULTY LEVEL:** {{ difficulty_level }}
7
+ **QUESTION TYPE:** {{ question_type }}
8
+
9
+ **YOUR MISSION:**
10
+ Create a detailed blueprint that outlines the strategy for a challenging ID fellowship-level question.
11
+ Think like an expert test writer planning how to create a question that will challenge ID specialists.
12
+
13
+ **BLUEPRINT REQUIREMENTS:**
14
+
15
+ 1. **Scenario Description:** Describe the clinical scenario you will use (e.g., "immunocompromised patient with new onset fever and pulmonary nodules")
16
+
17
+ 2. **Primary Diagnosis:** The correct answer you want to test
18
+
19
+ 3. **Differential Diagnoses:** List exactly 5 differential diagnoses that will become your answer choices:
20
+ - Position 1: The correct diagnosis
21
+ - Positions 2-5: Plausible but incorrect differential diagnoses
22
+
23
+ 4. **Diagnostic Clues Strategy:** Plan your clues carefully:
24
+ - **Supporting Primary:** List 3-4 clues that support the correct diagnosis
25
+ - **Misleading Clues:** List 2-3 clues that might initially mislead toward other diagnoses
26
+ - **Geographic/Exposure Clues:** If using travel/exposure, specify how they support or mislead
27
+
28
+ 5. **Wrong Answer Reasoning:** For each wrong answer, explain exactly why it will be wrong:
29
+ - Example: "Blastomyces will be wrong because the yeast description will be small 2-4 microns"
30
+ - Example: "Coccidioidomycosis will be wrong based on the patient living location"
31
+ - Example: "Aspergillus and mucor will be wrong because the BAL growing yeast not mold"
32
+
33
+ **EXAMPLE BLUEPRINT FORMAT:**
34
+
35
+ For the topic "Penicillium marneffei":
36
+ - **Scenario:** "Immunocompromised patient with new onset fever and pulmonary nodules"
37
+ - **Primary Diagnosis:** Histoplasma capsulatum
38
+ - **Differentials:** Histoplasma, Blastomyces, Coccidioidomycosis, Aspergillus, Mucormycosis
39
+ - **Supporting Clues:** Patient lives in Mississippi, has a parrot pet, BAL culture growing yeast
40
+ - **Wrong Answer Logic:**
41
+ - Blastomyces: Wrong because yeast description will be small 2-4 microns
42
+ - Cocci: Wrong based on patient living location
43
+ - Aspergillus/Mucor: Wrong because BAL growing yeast not mold
44
+
45
+ **CRITICAL RULES:**
46
+ - NO obvious diagnostic giveaways (avoid classic travel + classic symptoms)
47
+ - Each wrong answer must have a clear, specific reason why it's wrong
48
+ - Include 3+ supporting clues for correct answer
49
+ - Add 1-2 misleading clues to increase difficulty
50
+ - Ensure ID fellowship-level sophistication
51
+
52
+ **OUTPUT FORMAT:**
53
+ Return a JSON object with this exact structure:
54
+
55
+ ```json
56
+ {
57
+ "scenario_description": "Brief description of the clinical scenario",
58
+ "primary_diagnosis": "The correct diagnosis",
59
+ "differential_diagnoses": [
60
+ "Correct diagnosis (position 1)",
61
+ "Wrong diagnosis 2",
62
+ "Wrong diagnosis 3",
63
+ "Wrong diagnosis 4",
64
+ "Wrong diagnosis 5"
65
+ ],
66
+ "diagnostic_clues": {
67
+ "supporting_primary": [
68
+ "Clue 1 supporting correct diagnosis",
69
+ "Clue 2 supporting correct diagnosis",
70
+ "Clue 3 supporting correct diagnosis"
71
+ ],
72
+ "misleading_clues": [
73
+ "Misleading clue 1",
74
+ "Misleading clue 2"
75
+ ]
76
+ },
77
+ "wrong_answer_reasoning": {
78
+ "wrong_diagnosis_2": "Specific reason why this diagnosis is wrong",
79
+ "wrong_diagnosis_3": "Specific reason why this diagnosis is wrong",
80
+ "wrong_diagnosis_4": "Specific reason why this diagnosis is wrong",
81
+ "wrong_diagnosis_5": "Specific reason why this diagnosis is wrong"
82
+ },
83
+ "reasoning_strategy": "Overall strategy for how this question will challenge ID specialists"
84
+ }
85
+ ```
86
+
87
+ Create a sophisticated blueprint that will result in a challenging ID fellowship-level question!
prompts/history_taking.j2 ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {% raw %}
2
+ [
3
+ {"role": "system", "content":
4
+ "You are an infectious diseases consultant preparing to advise a clinician. But first, you must gather the appropriate clinical history."},
5
+
6
+ {"role": "user", "content":
7
+ "The user described the concern as: '{{ syndrome_query }}'.\n\nHere is a list of questions to ask the user based on your knowledge base:\n\n{{ questions | join('\\n') }}\n\nNow, ask the user these questions one at a time, in a conversational and efficient way. Do not give advice yet."}
8
+ ]
9
+ {% endraw %}
prompts/ipc_reporting.j2 ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {% raw %}
2
+
3
+ [
4
+ {
5
+ "role": "system",
6
+ "content": "You are an IPC reporting assistant. Your only way to get reporting requirements is by calling the `IPC_reporting` function. Do NOT free-text beyond gathering inputs for that function call."
7
+ },
8
+ {
9
+ "role": "user",
10
+ "content": "Case summary: {{ case_summary }}\nJurisdiction: {{ jurisdiction }}"
11
+ }
12
+ ]
13
+
14
+ {% endraw %}
prompts/ipc_reporting_followup.j2 ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {% raw %}
2
+ [
3
+ {
4
+ "role": "system",
5
+ "content": "You have collected these fields: {{ collected_fields }}.\n\n"
6
+ "• If any required fields are still missing, ask the user one at a time:\n"
7
+ " “Please provide the <field>:”\n\n"
8
+ "• Once **all** fields are collected, output **only** a JSON object wrapped in <JSON>…</JSON> tags, for example:\n\n"
9
+ "<JSON>\n"
10
+ "{\n"
11
+ " \"meets_definition\": true,\n"
12
+ " \"reasoning\": \"The patient had a central line in place for more than 2 days and no other sources identified.\"\n"
13
+ "}\n"
14
+ "</JSON>\n\n"
15
+ "Do **not** output any additional text."
16
+ }
17
+ ]
18
+ {% endraw %}
prompts/isolation_precautions.j2 ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {% raw %}
2
+ [
3
+ {
4
+ "role": "system",
5
+ "content": "You are an infection prevention specialist. Interpret isolation precautions guidelines."
6
+ },
7
+ {
8
+ "role": "user",
9
+ "content": "Patient diagnosis: {{ diagnosis }}\nSymptoms: {{ symptoms }}\nKnown pathogens: {{ pathogen_list }}\n\nUsing CDC/APIC recommendations:\n- List required precautions (e.g., Contact, Droplet, Airborne).\n- Mention any special room or PPE requirements."
10
+ }
11
+ ]
12
+
13
+ {% endraw %}
prompts/nhsn_criteria_evaluator_followup.j2 ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ [
3
+ {
4
+ "role": "system",
5
+ "content": "I have these required fields: {{ required_fields }} \n"
6
+ "Ask the user for **each** missing field one at a time, using exactly:\n\n"
7
+ "“Please provide the <field>:”\n\n"
8
+ "When you have all values, output a single JSON object wrapped in <JSON>…</JSON> with keys:\n"
9
+ "<JSON>\n"
10
+ "{\n"
11
+ " \"meets_definition\": true|false,\n"
12
+ " \"reasoning\": \"...\"\n"
13
+ "}\n"
14
+ "</JSON>\n"
15
+ "Do **not** output any other text."
16
+ }
17
+ ]
prompts/nhsn_criteria_evaluator_start.j2 ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ [
3
+ {
4
+ "role": "system",
5
+ "content": "You are an expert in NHSN surveillance definitions. \n"
6
+ "Your **only** output must be a JSON array of required data elements, wrapped in <JSON>…</JSON> tags. \n"
7
+ "Do **not** output any other text."
8
+ },
9
+ {
10
+ "role": "user",
11
+ "content": "Patient case: {{ case_description }} \n"
12
+ "Extract and return only the array of required NHSN fields."
13
+ }
14
+ ]
15
+
16
+
prompts/quality_review_board_exam.j2 ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ You are a stringent medical education quality assurance expert with a focus on creating MAXIMUM DIFFICULTY board exam questions. Your job is to be highly critical and identify every flaw that makes questions too easy or gives away answers.
2
+
3
+ **Question to Review:**
4
+ **Topic**: {{ topic }}
5
+ **Vignette**: {{ vignette }}
6
+ **Question**: {{ question_stem }}
7
+ **Answer Choices**: {{ answer_choices }}
8
+ **Explanations**: {{ explanations }}
9
+
10
+ **CRITICAL FLAW DETECTION - BE RUTHLESS:**
11
+
12
+ 🚨 **ANSWER GIVEAWAYS IN VIGNETTE** (Automatic score reduction):
13
+ - Does vignette mention specific test results revealing diagnosis? (e.g., "positive Coccidioides serology", "fungal pathogen antibodies")
14
+ - Are there overly specific findings that make answer obvious?
15
+ - Does travel history + specific test results = obvious diagnosis?
16
+ - Are there unnecessary diagnostic details that eliminate differential?
17
+
18
+ 🚨 **ANSWER GIVEAWAYS IN QUESTION STEM** (Automatic score reduction):
19
+ - Does question mention suspected diagnosis? (e.g., "suspected coccidioidomycosis", "this TB patient")
20
+ - Does wording hint at correct answer category?
21
+ - Is question diagnostically neutral or biased?
22
+
23
+ 🚨 **INSUFFICIENT DISTRACTORS IN VIGNETTE** (Major weakness):
24
+ - Could misleading clinical findings support alternative diagnoses?
25
+ - Are there missing symptoms that would make other conditions plausible?
26
+ - Could lab results include red herrings or be more ambiguous?
27
+ - Does vignette need MORE confounding factors?
28
+
29
+ 🚨 **TOO EASY FOR BOARD LEVEL** (Difficulty failure):
30
+ - Can residents easily eliminate distractors?
31
+ - Does this require fellowship-level reasoning or just pattern recognition?
32
+ - Are distractors clinically implausible to experienced physicians?
33
+
34
+ **Quality Assessment Criteria:**
35
+
36
+ 1. **Clinical Accuracy (0-5 points)**: Medical facts, dosages, procedures correct
37
+ 2. **Educational Value (0-5 points)**: Tests important clinical reasoning
38
+ 3. **Difficulty Appropriateness (0-5 points)**: Requires advanced clinical reasoning, fellowship-level knowledge
39
+ 4. **Vignette Quality (0-5 points)**: Complex, realistic, includes distractors, NO giveaways
40
+ 5. **Answer Choice Quality (0-5 points)**: All options plausible to experienced physicians
41
+
42
+ **SCORING PENALTIES:**
43
+ - Automatic -2 points if vignette gives away diagnosis
44
+ - Automatic -2 points if question stem mentions suspected diagnosis
45
+ - Automatic -1 point if distractors are too easily eliminated
46
+ - Automatic -1 point if insufficient clinical complexity
47
+
48
+ **Assessment Instructions:**
49
+ - BE HIGHLY CRITICAL - assume most questions are too easy
50
+ - Demand maximum difficulty appropriate for ID fellowship/board certification
51
+ - Suggest adding MORE distractors and confounding factors
52
+ - Eliminate ANY details that make diagnosis obvious
53
+
54
+ **Output Format:**
55
+ Return a JSON object with this exact structure:
56
+ ```json
57
+ {
58
+ "clinical_accuracy_score": 4,
59
+ "educational_value_score": 5,
60
+ "difficulty_score": 3,
61
+ "vignette_quality_score": 4,
62
+ "answer_choice_quality_score": 4,
63
+ "total_score": 20,
64
+ "percentage_score": 80,
65
+ "quality_level": "EXCELLENT/GOOD/ADEQUATE/NEEDS_IMPROVEMENT/POOR",
66
+ "strengths": [
67
+ "Specific strength 1",
68
+ "Specific strength 2"
69
+ ],
70
+ "weaknesses": [
71
+ "Specific weakness 1",
72
+ "Specific weakness 2"
73
+ ],
74
+ "improvement_suggestions": [
75
+ "Specific suggestion 1",
76
+ "Specific suggestion 2"
77
+ ],
78
+ "board_exam_readiness": true/false,
79
+ "overall_assessment": "Detailed summary of question quality and recommendations"
80
+ }
81
+ ```
82
+
83
+ Provide honest, constructive feedback to ensure the highest quality medical education content.
prompts/summarize_antibiotic_duration.j2 ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {% raw %}
2
+ [
3
+ {
4
+ "role": "system",
5
+ "content": "You are an expert in infectious diseases guidelines. Summarize the recommended antibiotic duration for {{ condition }} using the IDSA search results first, then any other society guidelines."
6
+ },
7
+ {
8
+ "role": "user",
9
+ "content": "IDSA results:\n{{ idsa_results }}\n\nOther society results:\n{{ other_results }}\n\nPlease provide:\n1. The recommended duration(s) with your source for each.\n2. A brief rationale if durations differ."
10
+ }
11
+ ]
12
+
13
+ {% endraw %}
requirements.txt CHANGED
@@ -1 +1,60 @@
 
 
 
 
 
 
 
 
 
1
  gradio==4.40.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ID Agents Production Requirements
2
+ # Updated: 2025-08-10 15:54:20
3
+ # Python 3.12.7
4
+ #
5
+ # This file lists version constraints for reproducible deployments
6
+ # Tested and verified working in development environment
7
+
8
+ # Core AI/ML Framework
9
+ openai>=1.3.0
10
  gradio==4.40.0
11
+
12
+ # Data Processing & Scientific Computing
13
+ pandas>=2.0.0
14
+ numpy>=1.24.0
15
+
16
+ # Document Processing
17
+ pdfplumber
18
+ python-docx
19
+
20
+ # Web Requests & Search
21
+ requests>=2.31.0
22
+ duckduckgo-search>=4.0.0
23
+
24
+ # Configuration & Environment Management
25
+ python-dotenv>=1.0.0
26
+
27
+ # Authentication & Security
28
+ bcrypt>=4.0.0
29
+ PyJWT>=2.8.0
30
+
31
+ # Logging & Monitoring
32
+ structlog>=23.0.0
33
+
34
+ # Template Processing
35
+ jinja2
36
+
37
+ # Analytics & Visualization (optional)
38
+ matplotlib>=3.5.0
39
+
40
+ # Advanced AI Libraries (optional - enable for enhanced features)
41
+ # Uncomment the following if you need advanced AI capabilities:
42
+ # transformers
43
+ # langchain
44
+ # autogen
45
+ # llama_index
46
+ # farm-haystack
47
+ # faiss-cpu
48
+
49
+ # Development & Testing (optional - remove for production)
50
+ # pytest
51
+
52
+ # Load Testing Dependencies
53
+ aiohttp>=3.8.0
54
+ psutil>=5.9.0
55
+
56
+ # Notes:
57
+ # - Most core dependencies use minimum version pinning (>=); gradio is pinned exactly (==)
58
+ # - Advanced AI libraries are commented out to reduce deployment size
59
+ # - Uncomment additional libraries as needed for specific features
60
+ # - For strict version pinning, replace >= with == and specific versions
tools/__init__.py ADDED
File without changes
tools/alert_prolonged_antibiotic_use.py ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from tools.base import Tool
2
+
3
+
4
+ from tools.utils import ToolExecutionError, logger
5
+ from typing import Any
6
+
7
+ class AlertProlongedAntibioticUseTool(Tool):
8
+
9
+ def openai_spec(self, legacy=False):
10
+ return {
11
+ "name": self.name,
12
+ "description": self.description,
13
+ "parameters": self.args_schema
14
+ }
15
+ """
16
+ Tool to summarize recommended antibiotic durations for a given condition based on guidelines.
17
+
18
+ This tool searches IDSA and other society guidelines to provide a summary of recommended antibiotic durations for a specified clinical condition.
19
+ """
20
+ def __init__(self) -> None:
21
+ """
22
+ Initialize the AlertProlongedAntibioticUseTool with its name, description, and argument schema.
23
+ """
24
+ super().__init__()
25
+ self.name = "alert_prolonged_antibiotic_use"
26
+ self.description = (
27
+ "Search IDSA (and then other society) guidelines to summarize "
28
+ "recommended antibiotic durations for a given condition, considering all relevant clinical variables."
29
+ )
30
+ self.args_schema = {
31
+ "type": "object",
32
+ "properties": {
33
+ "condition": {
34
+ "type": "string",
35
+ "description": "Clinical condition to lookup antibiotic duration for"
36
+ },
37
+ "site_of_infection": {
38
+ "type": "string",
39
+ "description": "Site of infection (e.g., lung, urine, blood, etc.)"
40
+ },
41
+ "risk_of_biofilm": {
42
+ "type": "string",
43
+ "description": "Risk or presence of biofilm (e.g., prosthetic material, indwelling devices)"
44
+ },
45
+ "current_response": {
46
+ "type": "string",
47
+ "description": "Current response to antibiotics (e.g., improving, stable, worsening)"
48
+ },
49
+ "creatinine_clearance": {
50
+ "type": "string",
51
+ "description": "Creatinine clearance or renal function (e.g., 60 mL/min, ESRD, etc.)"
52
+ },
53
+ "severity_of_infection": {
54
+ "type": "string",
55
+ "description": "Severity of infection (e.g., mild, moderate, severe, septic shock)"
56
+ },
57
+ "known_allergies": {
58
+ "type": "string",
59
+ "description": "Known drug allergies (e.g., penicillin, sulfa, none)"
60
+ }
61
+ },
62
+ "required": [
63
+ "condition",
64
+ "site_of_infection",
65
+ "risk_of_biofilm",
66
+ "current_response",
67
+ "creatinine_clearance",
68
+ "severity_of_infection",
69
+ "known_allergies"
70
+ ]
71
+ }
72
+
73
+ async def run(
74
+ self,
75
+ condition: str,
76
+ site_of_infection: str,
77
+ risk_of_biofilm: str,
78
+ current_response: str,
79
+ creatinine_clearance: str,
80
+ severity_of_infection: str,
81
+ known_allergies: str
82
+ ) -> str:
83
+ """
84
+ Summarize recommended antibiotic duration for a given clinical condition, considering all relevant clinical variables.
85
+
86
+ Args:
87
+ condition (str): Clinical condition to lookup antibiotic duration for.
88
+ site_of_infection (str): Site of infection.
89
+ risk_of_biofilm (str): Risk or presence of biofilm.
90
+ current_response (str): Current response to antibiotics.
91
+ creatinine_clearance (str): Renal function.
92
+ severity_of_infection (str): Severity of infection.
93
+ known_allergies (str): Known drug allergies.
94
+
95
+ Returns:
96
+ str: The antibiotic duration summary (placeholder).
97
+
98
+ Raises:
99
+ ToolExecutionError: If the tool fails to summarize duration.
100
+ """
101
+ try:
102
+ # Placeholder for actual search and summarization logic
103
+ return (
104
+ f"Antibiotic duration summary for: {condition}\n"
105
+ f"Site of infection: {site_of_infection}\n"
106
+ f"Risk of biofilm: {risk_of_biofilm}\n"
107
+ f"Current response: {current_response}\n"
108
+ f"Creatinine clearance: {creatinine_clearance}\n"
109
+ f"Severity: {severity_of_infection}\n"
110
+ f"Allergies: {known_allergies}"
111
+ )
112
+ except Exception as e:
113
+ logger.error(f"AlertProlongedAntibioticUseTool failed: {e}", exc_info=True)
114
+ raise ToolExecutionError(
115
+ message=f"AlertProlongedAntibioticUseTool failed: {e}",
116
+ code="ANTIBIOTIC_DURATION_ERROR",
117
+ user_message="Unable to summarize antibiotic duration. Please try again or contact support.",
118
+ original_exception=e
119
+ )
tools/base.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ from typing import Any, Dict
3
+ from tools.utils import ToolExecutionError, logger
4
+
5
+ class Tool:
6
+ def openai_spec(self, legacy=False):
7
+ """
8
+ Return the OpenAI function calling spec for this tool.
9
+ Subclasses should override this if they support function calling.
10
+ """
11
+ return {
12
+ "name": self.name,
13
+ "description": self.description,
14
+ "parameters": self.args_schema
15
+ }
16
+ """
17
+ Base class for all tools in the agent framework.
18
+
19
+ Attributes:
20
+ name (str): The unique name of the tool.
21
+ description (str): A short description of the tool's purpose.
22
+ args_schema (dict): The schema describing the tool's input arguments.
23
+ """
24
+ name: str
25
+ description: str
26
+ args_schema: Dict[str, Any]
27
+
28
+ def __init__(self) -> None:
29
+ """
30
+ Initialize the base Tool with default values.
31
+ Subclasses should override these attributes as needed.
32
+ """
33
+ self.name = "tool"
34
+ self.description = ""
35
+ self.args_schema = {}
36
+
37
+ async def run(self, **kwargs: Any) -> Any:
38
+ """
39
+ Run the tool with the provided arguments.
40
+ Subclasses must implement this method.
41
+
42
+ Args:
43
+ **kwargs: Arbitrary keyword arguments for the tool.
44
+
45
+ Returns:
46
+ Any: The result of the tool's execution.
47
+
48
+ Raises:
49
+ ToolExecutionError: If the method is not implemented by a subclass.
50
+ """
51
+ logger.error(f"Tool '{self.name}' does not implement the run method.")
52
+ raise ToolExecutionError(
53
+ message=f"Tool '{self.name}' does not implement the run method.",
54
+ code="TOOL_NOT_IMPLEMENTED",
55
+ user_message="This tool is not available. Please contact support.",
56
+ )
tools/create_educational_presentation.py ADDED
@@ -0,0 +1,1308 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ create_educational_presentation.py
3
+ ---------------------------------
4
+
5
+ Tool for creating comprehensive educational presentations through iterative research.
6
+
7
+ This tool conducts deep research on medical topics, creates detailed reports, and converts
8
+ them into structured slide presentations for educational purposes. It uses an iterative
9
+ research approach with user confirmation before finalizing the presentation.
10
+
11
+ Key Features:
12
+ - Iterative internet research with 4-5 rounds of 3-5 pages each
13
+ - User clarification questions before research
14
+ - Comprehensive report generation
15
+ - Structured slide presentation creation
16
+ - Educational flow: objectives → vignette → education → application → Q&A
17
+ """
18
+
19
+ import asyncio
20
+ import json
21
+ from typing import Any, Dict, List, Union
22
+ from tools.base import Tool
23
+ from tools.utils import ToolExecutionError, logger, load_prompt
24
+ from core.utils.llm_connector import call_llm
25
+ from tools.internet_search import InternetSearchTool
26
+
27
+ class CreateEducationalPresentationTool(Tool):
28
+ """
29
+ Tool for creating comprehensive educational presentations through iterative research.
30
+
31
+ This tool conducts deep research, creates detailed reports, and converts them into
32
+ structured slide presentations for educational purposes.
33
+ """
34
+
35
+ def __init__(self) -> None:
36
+ """Initialize the CreateEducationalPresentationTool."""
37
+ super().__init__()
38
+ self.name = "create_educational_presentation"
39
+ self.description = "Create comprehensive educational presentations through AI-powered dynamic research and content generation."
40
+ self.internet_search = InternetSearchTool()
41
+ self.args_schema = {
42
+ "type": "object",
43
+ "properties": {
44
+ "topic": {
45
+ "type": "string",
46
+ "description": "The medical topic for the educational presentation (e.g., 'sepsis management', 'heart failure diagnosis', 'antibiotic stewardship')"
47
+ },
48
+ "target_audience": {
49
+ "type": "string",
50
+ "description": "The target audience for the presentation",
51
+ "enum": ["medical_students", "residents", "attendings", "nurses", "pharmacists", "multidisciplinary"],
52
+ "default": "medical_students"
53
+ },
54
+ "presentation_duration": {
55
+ "type": "integer",
56
+ "description": "Expected duration of presentation in minutes",
57
+ "default": 45,
58
+ "minimum": 15,
59
+ "maximum": 120
60
+ },
61
+ "focus_area": {
62
+ "type": "string",
63
+ "description": "Specific focus area within the topic",
64
+ "default": "comprehensive_overview"
65
+ },
66
+ "aspects_to_emphasize": {
67
+ "type": "string",
68
+ "description": "What specific aspects to emphasize (e.g., 'pathophysiology, diagnosis, treatment')"
69
+ },
70
+ "guidelines_to_include": {
71
+ "type": "string",
72
+ "description": "Specific guidelines or evidence to include (e.g., 'IDSA guidelines')"
73
+ },
74
+ "learning_objectives": {
75
+ "type": "string",
76
+ "description": "What should the audience learn (e.g., 'diagnostic skills, treatment decisions')"
77
+ },
78
+ "clinical_scenarios": {
79
+ "type": "string",
80
+ "description": "Specific clinical scenarios to highlight (e.g., 'common presentations')"
81
+ },
82
+ "takeaway_message": {
83
+ "type": "string",
84
+ "description": "Key clinical pearl or takeaway message (e.g., 'early recognition saves lives')"
85
+ }
86
+ },
87
+ "required": ["topic"]
88
+ }
89
+
90
+ def openai_spec(self, legacy=False):
91
+ """Return OpenAI function specification."""
92
+ return {
93
+ "name": self.name,
94
+ "description": self.description,
95
+ "parameters": self.args_schema
96
+ }
97
+
98
+ async def run(
99
+ self,
100
+ topic: str,
101
+ target_audience: str = "medical_students",
102
+ presentation_duration: int = 45,
103
+ focus_area: str = "comprehensive_overview",
104
+ aspects_to_emphasize: Union[str, None] = None,
105
+ guidelines_to_include: Union[str, None] = None,
106
+ learning_objectives: Union[str, None] = None,
107
+ clinical_scenarios: Union[str, None] = None,
108
+ takeaway_message: Union[str, None] = None
109
+ ) -> Dict[str, Any]:
110
+ """
111
+ Create a comprehensive educational presentation through iterative research.
112
+
113
+ Args:
114
+ topic (str): The medical topic for the presentation
115
+ target_audience (str): The target audience
116
+ presentation_duration (int): Duration in minutes
117
+ focus_area (str): Specific focus area
118
+ aspects_to_emphasize (str): What specific aspects to emphasize
119
+ guidelines_to_include (str): Specific guidelines or evidence to include
120
+ learning_objectives (str): What should the audience learn
121
+ clinical_scenarios (str): Specific clinical scenarios to highlight
122
+ takeaway_message (str): Key clinical pearl or takeaway message
123
+
124
+ Returns:
125
+ Dict[str, Any]: Complete presentation with research, report, and slides
126
+ """
127
+ try:
128
+ logger.info(f"Starting educational presentation creation for topic: {topic}")
129
+
130
+ # Build clarification responses from provided parameters
131
+ clarification_responses = {}
132
+
133
+ # Check if we have enough information to proceed
134
+ if aspects_to_emphasize and guidelines_to_include and learning_objectives and clinical_scenarios and takeaway_message:
135
+ clarification_responses = {
136
+ "aspects": aspects_to_emphasize,
137
+ "guidelines": guidelines_to_include,
138
+ "learning_objectives": learning_objectives,
139
+ "clinical_scenarios": clinical_scenarios,
140
+ "takeaway_message": takeaway_message
141
+ }
142
+ else:
143
+ # Use intelligent defaults based on the topic and focus area
144
+ clarification_responses = self._generate_intelligent_defaults(topic, target_audience, focus_area)
145
+ logger.info(f"Using intelligent defaults for presentation creation")
146
+
147
+ # Proceed with full presentation creation
148
+ logger.info(f"Proceeding with presentation creation using responses")
149
+
150
+ # Step 2: Conduct iterative research
151
+ research_results = await self._conduct_iterative_research(topic, clarification_responses)
152
+
153
+ # Step 3: Generate comprehensive report
154
+ research_report = self._generate_research_report(topic, research_results, clarification_responses)
155
+
156
+ # Step 4: Create presentation structure
157
+ presentation_structure = self._create_presentation_structure(
158
+ topic, target_audience, presentation_duration, research_report
159
+ )
160
+
161
+ # Step 5: Create final presentation using existing method
162
+ final_presentation = await self.create_final_presentation(
163
+ topic, target_audience, presentation_duration, research_report,
164
+ presentation_structure, ""
165
+ )
166
+
167
+ logger.info(f"Successfully created educational presentation for {topic}")
168
+ return final_presentation
169
+
170
+ except Exception as e:
171
+ logger.error(f"CreateEducationalPresentationTool failed: {e}", exc_info=True)
172
+ raise ToolExecutionError(f"Failed to create educational presentation: {e}")
173
+
174
+ async def continue_with_research(
175
+ self,
176
+ topic: str,
177
+ target_audience: str,
178
+ presentation_duration: int,
179
+ focus_area: str,
180
+ clarification_responses: Dict[str, str]
181
+ ) -> Dict[str, Any]:
182
+ """
183
+ Continue with research phase after receiving clarification responses.
184
+
185
+ Args:
186
+ topic (str): The medical topic
187
+ target_audience (str): Target audience
188
+ presentation_duration (int): Duration in minutes
189
+ focus_area (str): Focus area
190
+ clarification_responses (Dict[str, str]): User responses to clarification questions
191
+
192
+ Returns:
193
+ Dict[str, Any]: Research results and next steps
194
+ """
195
+ try:
196
+ logger.info(f"Continuing with research for topic: {topic}")
197
+
198
+ # Step 2: Conduct iterative research
199
+ research_results = await self._conduct_iterative_research(topic, clarification_responses)
200
+
201
+ # Step 3: Generate comprehensive report
202
+ research_report = self._generate_research_report(topic, research_results, clarification_responses)
203
+
204
+ # Step 4: Create presentation structure
205
+ presentation_structure = self._create_presentation_structure(
206
+ topic, target_audience, presentation_duration, research_report
207
+ )
208
+
209
+ return {
210
+ "status": "research_complete",
211
+ "topic": topic,
212
+ "target_audience": target_audience,
213
+ "presentation_duration": presentation_duration,
214
+ "research_results": research_results,
215
+ "research_report": research_report,
216
+ "proposed_structure": presentation_structure,
217
+ "next_step": "Please review the research report and presentation structure. Confirm to proceed with slide creation."
218
+ }
219
+
220
+ except Exception as e:
221
+ logger.error(f"Research phase failed: {e}", exc_info=True)
222
+ raise ToolExecutionError(f"Failed to complete research: {e}")
223
+
224
+ async def create_final_presentation(
225
+ self,
226
+ topic: str,
227
+ target_audience: str,
228
+ presentation_duration: int,
229
+ research_report: str,
230
+ presentation_structure: Dict[str, Any],
231
+ user_feedback: str = ""
232
+ ) -> Dict[str, Any]:
233
+ """
234
+ Create the final presentation slides.
235
+
236
+ Args:
237
+ topic (str): The medical topic
238
+ target_audience (str): Target audience
239
+ presentation_duration (int): Duration in minutes
240
+ research_report (str): The research report
241
+ presentation_structure (Dict): Presentation structure
242
+ user_feedback (str): User feedback on structure
243
+
244
+ Returns:
245
+ Dict[str, Any]: Complete presentation with slides
246
+ """
247
+ try:
248
+ logger.info(f"Creating final presentation for topic: {topic}")
249
+
250
+ # Adjust structure based on user feedback if provided
251
+ if user_feedback:
252
+ presentation_structure = self._adjust_structure_based_on_feedback(
253
+ presentation_structure, user_feedback
254
+ )
255
+
256
+ # Generate all slides
257
+ slides = await self._generate_all_slides(
258
+ topic, target_audience, research_report, presentation_structure
259
+ )
260
+
261
+ # Create speaker notes
262
+ speaker_notes = self._generate_speaker_notes(slides, research_report)
263
+
264
+ # Generate presentation metadata
265
+ presentation_metadata = self._generate_presentation_metadata(
266
+ topic, target_audience, presentation_duration, len(slides)
267
+ )
268
+
269
+ return {
270
+ "status": "presentation_complete",
271
+ "topic": topic,
272
+ "target_audience": target_audience,
273
+ "presentation_duration": presentation_duration,
274
+ "total_slides": len(slides),
275
+ "slides": slides,
276
+ "speaker_notes": speaker_notes,
277
+ "metadata": presentation_metadata,
278
+ "research_report": research_report,
279
+ "created_date": "2025-07-18"
280
+ }
281
+
282
+ except Exception as e:
283
+ logger.error(f"Final presentation creation failed: {e}", exc_info=True)
284
+ raise ToolExecutionError(f"Failed to create final presentation: {e}")
285
+
286
+ def _generate_clarification_questions(self, topic: str, target_audience: str, focus_area: str) -> List[Dict[str, str]]:
287
+ """Generate 3-5 clarification questions for the user."""
288
+
289
+ questions = [
290
+ {
291
+ "question": f"What specific aspects of {topic} would you like to emphasize in this presentation?",
292
+ "purpose": "To focus the research on the most relevant areas",
293
+ "examples": "e.g., pathophysiology, diagnosis, treatment, recent advances, guidelines"
294
+ },
295
+ {
296
+ "question": f"Are there any specific guidelines, studies, or evidence you want to include?",
297
+ "purpose": "To ensure important references are included",
298
+ "examples": "e.g., specific society guidelines, landmark studies, recent publications"
299
+ },
300
+ {
301
+ "question": f"What learning objectives should the {target_audience} achieve after this presentation?",
302
+ "purpose": "To structure the educational content appropriately",
303
+ "examples": "e.g., diagnostic skills, treatment decisions, understanding pathophysiology"
304
+ },
305
+ {
306
+ "question": f"Are there any specific clinical scenarios or patient populations you want to highlight?",
307
+ "purpose": "To create relevant clinical vignettes",
308
+ "examples": "e.g., pediatric patients, elderly, specific comorbidities, severity levels"
309
+ },
310
+ {
311
+ "question": f"What should be the takeaway message or key clinical pearl from this presentation?",
312
+ "purpose": "To ensure the presentation has a clear, memorable message",
313
+ "examples": "e.g., early recognition saves lives, personalized treatment approach, guideline adherence"
314
+ }
315
+ ]
316
+
317
+ return questions
318
+
319
+ def _generate_intelligent_defaults(self, topic: str, target_audience: str, focus_area: str) -> Dict[str, str]:
320
+ """
321
+ Generate intelligent default responses based on topic and focus area.
322
+
323
+ Args:
324
+ topic (str): The medical topic
325
+ target_audience (str): Target audience
326
+ focus_area (str): Focus area
327
+
328
+ Returns:
329
+ Dict[str, str]: Intelligent default responses
330
+ """
331
+ try:
332
+ # Topic-specific intelligent defaults
333
+ topic_lower = topic.lower()
334
+
335
+ if "dimorphic fungi" in topic_lower or "fungal" in topic_lower:
336
+ return {
337
+ "aspects": "comprehensive coverage including pathophysiology, diagnosis, treatment, epidemiology, and clinical presentations",
338
+ "guidelines": "IDSA guidelines and recent evidence-based recommendations",
339
+ "learning_objectives": "comprehensive understanding of diagnosis, treatment, and key clinical presentations for board exam preparation",
340
+ "clinical_scenarios": "common clinical presentations of each dimorphic fungus including histoplasmosis, coccidioidomycosis, blastomycosis, and others",
341
+ "takeaway_message": "systematic approach to diagnosis and management with focus on board exam question patterns"
342
+ }
343
+ elif "sepsis" in topic_lower:
344
+ return {
345
+ "aspects": "pathophysiology, early recognition, diagnosis, management, and outcomes",
346
+ "guidelines": "Surviving Sepsis Campaign guidelines and recent updates",
347
+ "learning_objectives": "early recognition, appropriate management, and outcome improvement",
348
+ "clinical_scenarios": "emergency department presentations, ICU management, and complications",
349
+ "takeaway_message": "early recognition and prompt treatment save lives"
350
+ }
351
+ elif "heart failure" in topic_lower:
352
+ return {
353
+ "aspects": "pathophysiology, classification, diagnosis, management, and prognosis",
354
+ "guidelines": "ACC/AHA heart failure guidelines",
355
+ "learning_objectives": "diagnostic skills, treatment optimization, and guideline adherence",
356
+ "clinical_scenarios": "acute decompensated heart failure, chronic management, and comorbidities",
357
+ "takeaway_message": "guideline-directed medical therapy improves outcomes"
358
+ }
359
+ else:
360
+ # Generic intelligent defaults
361
+ return {
362
+ "aspects": "comprehensive coverage including pathophysiology, diagnosis, treatment, and recent advances",
363
+ "guidelines": "latest evidence-based guidelines from relevant professional societies",
364
+ "learning_objectives": "comprehensive understanding of diagnosis, treatment, and key clinical pearls",
365
+ "clinical_scenarios": "common clinical presentations and real-world case studies",
366
+ "takeaway_message": "evidence-based approach to diagnosis and management"
367
+ }
368
+
369
+ except Exception as e:
370
+ logger.warning(f"Failed to generate intelligent defaults: {e}")
371
+ # Fallback to basic defaults
372
+ return {
373
+ "aspects": "comprehensive overview of the topic",
374
+ "guidelines": "current evidence-based guidelines",
375
+ "learning_objectives": "understanding of key concepts",
376
+ "clinical_scenarios": "common clinical presentations",
377
+ "takeaway_message": "evidence-based clinical approach"
378
+ }
379
+
380
async def _conduct_iterative_research(self, topic: str, clarification_responses: Dict[str, str]) -> Dict[str, Any]:
    """Run four sequential internet-research rounds for *topic* and aggregate the results.

    Each round has its own focus (overview, clarification-driven focus,
    clinical evidence, guidelines) and five search queries. Returns a dict
    with per-round results, total source count, and extracted key themes.
    """
    # Local import keeps the tool dependency out of module import time.
    from tools.internet_search import InternetSearchTool
    search_tool = InternetSearchTool()

    # Focus keywords come from the user's clarification answers and only
    # influence the second round's queries.
    focus_keywords = self._extract_focus_keywords(clarification_responses)

    round_plan = [
        ("General Overview", [
            f"{topic} overview clinical guidelines",
            f"{topic} pathophysiology mechanisms",
            f"{topic} diagnosis treatment current evidence",
            f"{topic} management recommendations 2024",
            f"{topic} clinical practice guidelines",
        ]),
        ("Focused Research", [
            f"{topic} {focus_keywords[0]} latest research",
            f"{topic} {focus_keywords[1]} clinical studies",
            f"{topic} {focus_keywords[0]} best practices",
            f"{topic} guidelines {focus_keywords[1]}",
            f"{topic} evidence based {focus_keywords[0]}",
        ]),
        ("Clinical Evidence", [
            f"{topic} randomized controlled trials",
            f"{topic} systematic review meta-analysis",
            f"{topic} clinical outcomes studies",
            f"{topic} evidence quality assessment",
            f"{topic} landmark studies",
        ]),
        ("Guidelines & Recommendations", [
            f"{topic} society guidelines recommendations",
            f"{topic} international consensus statements",
            f"{topic} practice guidelines updates",
            f"{topic} expert consensus recommendations",
            f"{topic} clinical practice standards",
        ]),
    ]

    aggregated: Dict[str, Any] = {
        "rounds": [],
        "total_sources": 0,
        "key_themes": [],
        "evidence_summary": {},
    }

    # Rounds run strictly in sequence; each awaits its own searches.
    for round_number, (focus, queries) in enumerate(round_plan, start=1):
        round_outcome = await self._conduct_research_round(search_tool, queries, round_number, focus)
        aggregated["rounds"].append(round_outcome)

    aggregated["total_sources"] = sum(len(r["sources"]) for r in aggregated["rounds"])
    aggregated["key_themes"] = self._extract_key_themes(aggregated["rounds"])

    return aggregated
450
+
451
async def _conduct_research_round(self, internet_tool, queries: List[str], round_number: int, round_focus: str) -> Dict[str, Any]:
    """Run one research round: execute each query and collect at most 5 parsed sources.

    Args:
        internet_tool: search tool exposing an async ``run(query)`` method.
        queries: search queries to issue in order.
        round_number: 1-based index of this round.
        round_focus: human-readable focus label for the round.

    Returns:
        Dict with ``round_number``, ``focus``, ``queries``, ``sources``
        (capped at 5), and a markdown ``summary``.
    """
    round_results: Dict[str, Any] = {
        "round_number": round_number,
        "focus": round_focus,
        "queries": queries,
        "sources": [],
        "summary": ""
    }

    for query in queries:
        try:
            search_results = await internet_tool.run(query)
            if search_results:
                # Parse and extract key information from the raw result text.
                parsed_sources = self._parse_search_results(search_results, query)
                round_results["sources"].extend(parsed_sources)

            # Enforce the per-round cap of 5 sources. Previously the cap was
            # only checked after extend(), so a single query returning many
            # entries could push the list well past 5.
            if len(round_results["sources"]) >= 5:
                round_results["sources"] = round_results["sources"][:5]
                break

        except Exception as e:
            # Best-effort: a failed query should not abort the round.
            logger.warning(f"Search failed for query '{query}': {e}")
            continue

    # Generate summary for this round
    round_results["summary"] = self._generate_round_summary(round_results["sources"], round_focus)

    return round_results
482
+
483
+ def _parse_search_results(self, search_results: str, query: str) -> List[Dict[str, str]]:
484
+ """Parse search results string into structured sources."""
485
+
486
+ sources = []
487
+
488
+ # Split by entries (each entry starts with **)
489
+ import re
490
+ entries = re.split(r'\*\*([^*]+)\*\*', search_results)
491
+
492
+ for i in range(1, len(entries), 2):
493
+ if i + 1 < len(entries):
494
+ title = entries[i].strip()
495
+ content_and_link = entries[i + 1].strip()
496
+
497
+ # Extract the link
498
+ link_match = re.search(r'\[Read more\]\(([^)]+)\)', content_and_link)
499
+ url = link_match.group(1) if link_match else ""
500
+
501
+ # Extract the content
502
+ content = re.sub(r'\[Read more\]\([^)]+\)', '', content_and_link).strip()
503
+
504
+ if title and content:
505
+ sources.append({
506
+ "title": title,
507
+ "url": url,
508
+ "content": content,
509
+ "query": query,
510
+ "relevance": "high" # Could be improved with actual relevance scoring
511
+ })
512
+
513
+ return sources
514
+
515
+ def _extract_focus_keywords(self, clarification_responses: Dict[str, str]) -> List[str]:
516
+ """Extract focus keywords from clarification responses."""
517
+
518
+ keywords = ["diagnosis", "treatment", "management", "pathophysiology", "guidelines"]
519
+
520
+ # Extract keywords from user responses
521
+ for response in clarification_responses.values():
522
+ if response:
523
+ # Simple keyword extraction - could be improved
524
+ if "diagnosis" in response.lower():
525
+ keywords.insert(0, "diagnosis")
526
+ elif "treatment" in response.lower():
527
+ keywords.insert(0, "treatment")
528
+ elif "management" in response.lower():
529
+ keywords.insert(0, "management")
530
+
531
+ return keywords[:2] # Return top 2 keywords
532
+
533
+ def _generate_round_summary(self, sources: List[Dict], round_focus: str) -> str:
534
+ """Generate a summary for a research round."""
535
+
536
+ if not sources:
537
+ return f"No relevant sources found for {round_focus}."
538
+
539
+ # Extract key points from sources
540
+ key_points = []
541
+ for source in sources:
542
+ content = source.get("content", "")
543
+ if len(content) > 50:
544
+ # Extract first sentence or key point
545
+ first_sentence = content.split('.')[0]
546
+ if len(first_sentence) > 20:
547
+ key_points.append(first_sentence)
548
+
549
+ summary = f"**{round_focus}** ({len(sources)} sources):\n"
550
+ for i, point in enumerate(key_points[:3], 1):
551
+ summary += f"{i}. {point}\n"
552
+
553
+ return summary
554
+
555
+ def _extract_key_themes(self, rounds: List[Dict]) -> List[str]:
556
+ """Extract key themes from all research rounds."""
557
+
558
+ themes = []
559
+
560
+ for round_data in rounds:
561
+ summary = round_data.get("summary", "")
562
+ if "diagnosis" in summary.lower():
563
+ themes.append("Diagnostic Approach")
564
+ if "treatment" in summary.lower():
565
+ themes.append("Treatment Strategies")
566
+ if "management" in summary.lower():
567
+ themes.append("Clinical Management")
568
+ if "guidelines" in summary.lower():
569
+ themes.append("Evidence-Based Guidelines")
570
+ if "pathophysiology" in summary.lower():
571
+ themes.append("Pathophysiology")
572
+
573
+ # Remove duplicates and return unique themes
574
+ return list(set(themes))
575
+
576
+ def _generate_research_report(self, topic: str, research_results: Dict, clarification_responses: Dict) -> str:
577
+ """Generate a comprehensive research report."""
578
+
579
+ report = f"# Comprehensive Research Report: {topic.title()}\n\n"
580
+
581
+ # Executive summary
582
+ report += "## Executive Summary\n"
583
+ report += f"This report synthesizes findings from {research_results['total_sources']} sources across {len(research_results['rounds'])} research rounds.\n\n"
584
+
585
+ # Key themes
586
+ report += "## Key Themes Identified\n"
587
+ for theme in research_results["key_themes"]:
588
+ report += f"- {theme}\n"
589
+ report += "\n"
590
+
591
+ # Research rounds summary
592
+ report += "## Research Findings by Round\n"
593
+ for round_data in research_results["rounds"]:
594
+ report += f"### Round {round_data['round_number']}: {round_data['focus']}\n"
595
+ report += f"{round_data['summary']}\n\n"
596
+
597
+ # Evidence synthesis
598
+ report += "## Evidence Synthesis\n"
599
+ report += f"Based on the research conducted, the following key points emerge about {topic}:\n\n"
600
+
601
+ # Add synthesized content based on themes
602
+ for theme in research_results["key_themes"]:
603
+ report += f"**{theme}**: [Evidence-based summary for {theme}]\n\n"
604
+
605
+ # Clinical implications
606
+ report += "## Clinical Implications\n"
607
+ report += f"The research findings have the following implications for clinical practice:\n"
608
+ report += "- [Key clinical implication 1]\n"
609
+ report += "- [Key clinical implication 2]\n"
610
+ report += "- [Key clinical implication 3]\n\n"
611
+
612
+ # Recommendations
613
+ report += "## Recommendations\n"
614
+ report += "Based on the evidence review:\n"
615
+ report += "1. [Recommendation 1]\n"
616
+ report += "2. [Recommendation 2]\n"
617
+ report += "3. [Recommendation 3]\n\n"
618
+
619
+ return report
620
+
621
+ def _create_presentation_structure(self, topic: str, target_audience: str, duration: int, research_report: str) -> Dict[str, Any]:
622
+ """Create the presentation structure."""
623
+
624
+ # Calculate approximate slides based on duration
625
+ slides_estimate = max(10, duration // 3) # ~3 minutes per slide
626
+
627
+ structure = {
628
+ "title": f"{topic.title()}: A Comprehensive Review",
629
+ "estimated_slides": slides_estimate,
630
+ "estimated_duration": duration,
631
+ "sections": [
632
+ {
633
+ "section": "Introduction",
634
+ "slides": [
635
+ {"title": "Title Slide", "content": f"{topic.title()}", "duration": 1},
636
+ {"title": "Learning Objectives", "content": "What you will learn today", "duration": 2},
637
+ {"title": "Case Vignette", "content": "Clinical scenario introduction", "duration": 3}
638
+ ]
639
+ },
640
+ {
641
+ "section": "Educational Content",
642
+ "slides": [
643
+ {"title": "Definition & Overview", "content": f"What is {topic}?", "duration": 5},
644
+ {"title": "Pathophysiology", "content": "Understanding the mechanisms", "duration": 7},
645
+ {"title": "Clinical Presentation", "content": "Recognition and diagnosis", "duration": 7},
646
+ {"title": "Diagnostic Approach", "content": "Evidence-based diagnosis", "duration": 8},
647
+ {"title": "Treatment Strategies", "content": "Management options", "duration": 8},
648
+ {"title": "Guidelines & Evidence", "content": "Current recommendations", "duration": 5}
649
+ ]
650
+ },
651
+ {
652
+ "section": "Application",
653
+ "slides": [
654
+ {"title": "Case Application", "content": "Applying knowledge to the vignette", "duration": 5},
655
+ {"title": "Clinical Pearls", "content": "Key takeaways", "duration": 3}
656
+ ]
657
+ },
658
+ {
659
+ "section": "Assessment",
660
+ "slides": [
661
+ {"title": "Rapid Fire Questions", "content": "Quick knowledge check", "duration": 5},
662
+ {"title": "Discussion", "content": "Open discussion and Q&A", "duration": 5}
663
+ ]
664
+ }
665
+ ]
666
+ }
667
+
668
+ return structure
669
+
670
+ def _adjust_structure_based_on_feedback(self, structure: Dict, feedback: str) -> Dict:
671
+ """Adjust presentation structure based on user feedback."""
672
+
673
+ # Simple feedback processing - could be enhanced
674
+ if "more slides" in feedback.lower():
675
+ # Add more detail slides
676
+ for section in structure["sections"]:
677
+ if section["section"] == "Educational Content":
678
+ section["slides"].append({
679
+ "title": "Advanced Topics",
680
+ "content": "Additional detailed information",
681
+ "duration": 5
682
+ })
683
+
684
+ if "shorter" in feedback.lower():
685
+ # Remove some slides
686
+ for section in structure["sections"]:
687
+ if len(section["slides"]) > 2:
688
+ section["slides"] = section["slides"][:2]
689
+
690
+ return structure
691
+
692
async def _generate_all_slides(self, topic: str, target_audience: str, research_report: str, structure: Dict) -> List[Dict[str, Any]]:
    """Generate every slide described by *structure* with the AI pipeline.

    Any per-slide failure is logged and replaced by a static fallback
    slide, so one bad slide never aborts the whole deck.
    """
    logger.info(f"Starting AI-powered slide generation for {topic}")

    slides: List[Dict[str, Any]] = []
    number = 1

    for section in structure["sections"]:
        section_name = section["section"]
        for template in section["slides"]:
            try:
                generated = await self._create_ai_slide(
                    number,
                    template["title"],
                    template["content"],
                    topic,
                    target_audience,
                    research_report,
                    section_name
                )
                slides.append(generated)
                logger.info(f"Generated slide {number}: {template['title']}")
            except Exception as e:
                logger.error(f"Failed to generate slide {number}: {e}")
                # Keep the deck intact with a basic placeholder slide.
                slides.append(self._create_fallback_slide(number, template["title"], section_name))
            number += 1

    logger.info(f"Completed slide generation: {len(slides)} slides created")
    return slides
725
+
726
async def _create_ai_slide(self, slide_number: int, title: str, content_desc: str, topic: str,
                           target_audience: str, research_report: str, section: str) -> Dict[str, Any]:
    """Create an individual slide with AI-generated content based on research.

    Renders the slide-generation prompt, calls the LLM with a 30s timeout,
    parses the JSON reply, and returns a slide dict. Any failure (timeout,
    malformed JSON, prompt error) falls back to a static template slide.
    """
    import re  # local import: only needed for code-fence stripping below

    try:
        # Load the slide generation prompt
        logger.info(f"Generating AI content for slide: {title}")
        prompt = load_prompt('generate_presentation_slide.j2',
                             topic=topic,
                             target_audience=target_audience.replace('_', ' '),
                             slide_title=title,
                             section=section,
                             content_description=content_desc,
                             research_report=research_report[:3000]  # Limit research content to avoid token limits
                             )

        # Generate slide content with OpenAI, bounded by a hard timeout
        response = await asyncio.wait_for(
            call_llm(prompt),
            timeout=30.0
        )

        # Strip optional Markdown code fences. The previous fixed-offset
        # slicing (strip()[7:-3] / [3:-3]) truncated the payload whenever
        # the closing fence was missing or the language tag length differed,
        # which then broke json.loads on an otherwise valid reply.
        cleaned = response.strip()
        if cleaned.startswith('```'):
            cleaned = re.sub(r'^```[A-Za-z]*\s*', '', cleaned)
            cleaned = re.sub(r'\s*```\s*$', '', cleaned)

        slide_content = json.loads(cleaned)

        # Construct the slide with AI-generated content
        slide = {
            "slide_number": slide_number,
            "title": slide_content.get("slide_title", title),
            "section": section,
            "content": {
                "bullet_points": slide_content.get("main_content", []),
                "sub_bullets": slide_content.get("sub_bullets", {}),
                "clinical_notes": slide_content.get("clinical_notes", ""),
                "references_used": slide_content.get("references_used", ""),
                "generation_method": "AI-powered with research integration"
            }
        }

        logger.info(f"Successfully generated AI slide: {title} ({len(slide['content']['bullet_points'])} main points)")
        return slide

    except Exception as e:
        logger.error(f"AI slide generation failed for {title}: {e}")
        # Return fallback slide so deck generation can continue
        return self._create_fallback_slide(slide_number, title, section)
777
+
778
+ def _create_fallback_slide(self, slide_number: int, title: str, section: str) -> Dict[str, Any]:
779
+ """Create a basic fallback slide if AI generation fails."""
780
+ return {
781
+ "slide_number": slide_number,
782
+ "title": title,
783
+ "section": section,
784
+ "content": {
785
+ "bullet_points": [
786
+ f"Content for {title} slide",
787
+ "Key points to be covered",
788
+ "Clinical applications",
789
+ "Important considerations"
790
+ ],
791
+ "sub_bullets": {},
792
+ "clinical_notes": "Fallback content - consider manual review",
793
+ "generation_method": "Fallback template"
794
+ }
795
+ }
796
+
797
+ def _create_slide(self, slide_number: int, title: str, content_desc: str, topic: str, target_audience: str, research_report: str, section: str) -> Dict[str, Any]:
798
+ """Create an individual slide with detailed, presentation-ready content."""
799
+
800
+ slide = {
801
+ "slide_number": slide_number,
802
+ "title": title,
803
+ "section": section,
804
+ "content": {
805
+ "bullet_points": [],
806
+ "images": [],
807
+ "notes": ""
808
+ }
809
+ }
810
+
811
+ # Generate detailed content based on slide type and topic
812
+ if "Title Slide" in title:
813
+ slide["content"]["bullet_points"] = [
814
+ f"{topic.title()}: A Comprehensive Review",
815
+ f"For {target_audience.replace('_', ' ').title()}",
816
+ f"Date: July 18, 2025"
817
+ ]
818
+ elif "Learning Objectives" in title:
819
+ slide["content"]["bullet_points"] = self._generate_learning_objectives_content(topic)
820
+ elif "Case Vignette" in title:
821
+ slide["content"]["bullet_points"] = self._generate_case_vignette_content(topic)
822
+ elif "Definition" in title or "Overview" in title:
823
+ slide["content"]["bullet_points"] = self._generate_definition_overview_content(topic)
824
+ elif "Pathophysiology" in title:
825
+ slide["content"]["bullet_points"] = self._generate_pathophysiology_content(topic)
826
+ elif "Clinical Presentation" in title:
827
+ slide["content"]["bullet_points"] = self._generate_clinical_presentation_content(topic)
828
+ elif "Diagnostic" in title:
829
+ slide["content"]["bullet_points"] = self._generate_diagnostic_content(topic)
830
+ elif "Treatment" in title:
831
+ slide["content"]["bullet_points"] = self._generate_treatment_content(topic)
832
+ elif "Guidelines" in title:
833
+ slide["content"]["bullet_points"] = self._generate_guidelines_content(topic)
834
+ elif "Case Application" in title:
835
+ slide["content"]["bullet_points"] = self._generate_case_application_content(topic)
836
+ elif "Clinical Pearls" in title:
837
+ slide["content"]["bullet_points"] = self._generate_clinical_pearls_content(topic)
838
+ elif "Rapid Fire" in title:
839
+ slide["content"]["bullet_points"] = self._generate_rapid_fire_content(topic)
840
+ elif "Discussion" in title:
841
+ slide["content"]["bullet_points"] = self._generate_discussion_content(topic)
842
+ else:
843
+ # Fallback for other slide types
844
+ slide["content"]["bullet_points"] = self._generate_generic_content(title, topic)
845
+
846
+ return slide
847
+
848
+ def _generate_learning_objectives_content(self, topic: str) -> List[str]:
849
+ """Generate specific learning objectives based on topic."""
850
+
851
+ if "dimorphic fungi" in topic.lower():
852
+ return [
853
+ "Identify the three major endemic dimorphic fungi in the United States",
854
+ "Describe the unique morphological characteristics of dimorphic fungi",
855
+ "Recognize geographic distribution patterns and epidemiologic risk factors",
856
+ "Differentiate clinical presentations of histoplasmosis, blastomycosis, and coccidioidomycosis",
857
+ "Apply appropriate diagnostic testing strategies and interpret results",
858
+ "Implement evidence-based antifungal treatment protocols per IDSA guidelines"
859
+ ]
860
+ elif "pneumonia" in topic.lower():
861
+ return [
862
+ "Classify pneumonia by etiology and clinical setting (CAP, HAP, VAP)",
863
+ "Recognize clinical presentation and physical examination findings",
864
+ "Select appropriate diagnostic tests and interpret chest imaging",
865
+ "Apply severity scoring systems (CURB-65, PSI) for risk stratification",
866
+ "Implement evidence-based antibiotic therapy based on guidelines",
867
+ "Identify complications and indications for hospitalization"
868
+ ]
869
+ else:
870
+ return [
871
+ f"Define key concepts related to {topic}",
872
+ f"Recognize clinical manifestations of {topic}",
873
+ f"Apply diagnostic approaches for {topic}",
874
+ f"Implement evidence-based treatment strategies",
875
+ f"Integrate current guidelines into clinical practice"
876
+ ]
877
+
878
+ def _generate_case_vignette_content(self, topic: str) -> List[str]:
879
+ """Generate specific case vignette based on topic."""
880
+
881
+ if "dimorphic fungi" in topic.lower():
882
+ return [
883
+ "45-year-old construction worker from Ohio River Valley",
884
+ "Recent spelunking activities in Kentucky caves (6 weeks ago)",
885
+ "3-week history: fever, nonproductive cough, 15-pound weight loss",
886
+ "Physical exam: erythema nodosum, bilateral hilar lymphadenopathy",
887
+ "Labs: lymphopenia, elevated ESR, positive Histoplasma urine antigen",
888
+ "Question: What is the most likely diagnosis and treatment?"
889
+ ]
890
+ elif "pneumonia" in topic.lower():
891
+ return [
892
+ "68-year-old man with COPD and diabetes",
893
+ "Recent cruise ship travel, acute onset (48 hours)",
894
+ "Productive cough with rust-colored sputum, pleuritic chest pain",
895
+ "Physical exam: dullness to percussion, bronchial breath sounds",
896
+ "Labs: elevated WBC with left shift, positive pneumococcal antigen",
897
+ "Question: What is the most appropriate treatment approach?"
898
+ ]
899
+ else:
900
+ return [
901
+ f"Clinical scenario presenting with {topic}",
902
+ "Relevant patient history and risk factors",
903
+ "Physical examination findings",
904
+ "Initial diagnostic workup results",
905
+ "Clinical decision-making challenge"
906
+ ]
907
+
908
+ def _generate_definition_overview_content(self, topic: str) -> List[str]:
909
+ """Generate definition and overview content."""
910
+
911
+ if "dimorphic fungi" in topic.lower():
912
+ return [
913
+ "Dimorphic fungi: organisms that exist in two distinct morphological forms",
914
+ "Yeast form at body temperature (37°C) - pathogenic phase",
915
+ "Mold form at room temperature (25°C) - environmental phase",
916
+ "Three major endemic fungi in US: Histoplasma, Blastomyces, Coccidioides",
917
+ "Cause significant morbidity in immunocompromised and healthy hosts",
918
+ "Geographic distribution correlates with environmental factors"
919
+ ]
920
+ elif "pneumonia" in topic.lower():
921
+ return [
922
+ "Pneumonia: infection of the lung parenchyma and alveolar spaces",
923
+ "Leading cause of infectious disease mortality worldwide",
924
+ "Classification: Community-acquired (CAP), Healthcare-associated (HAP/VAP)",
925
+ "Etiology: bacterial, viral, fungal, or atypical pathogens",
926
+ "Risk factors: age, comorbidities, immunosuppression, aspiration",
927
+ "Clinical spectrum: mild outpatient to severe septic shock"
928
+ ]
929
+ else:
930
+ return [
931
+ f"Definition and key characteristics of {topic}",
932
+ f"Epidemiology and prevalence of {topic}",
933
+ f"Clinical significance in medical practice",
934
+ f"Risk factors and predisposing conditions"
935
+ ]
936
+
937
+ def _generate_pathophysiology_content(self, topic: str) -> List[str]:
938
+ """Generate pathophysiology content."""
939
+
940
+ if "dimorphic fungi" in topic.lower():
941
+ return [
942
+ "Inhalation of microconidia from contaminated soil or bird/bat droppings",
943
+ "Conversion to yeast form in lung alveoli at body temperature",
944
+ "Phagocytosis by alveolar macrophages - intracellular survival",
945
+ "Hematogenous dissemination to reticuloendothelial system",
946
+ "Host immune response: cell-mediated immunity crucial for control",
947
+ "Granulomatous inflammation with potential for reactivation"
948
+ ]
949
+ elif "pneumonia" in topic.lower():
950
+ return [
951
+ "Pathogen invasion of lower respiratory tract via inhalation or aspiration",
952
+ "Overwhelm of normal host defense mechanisms (mucociliary clearance, alveolar macrophages)",
953
+ "Inflammatory response: neutrophil recruitment, cytokine release",
954
+ "Alveolar filling with inflammatory exudate and impaired gas exchange",
955
+ "Systemic inflammatory response syndrome (SIRS) in severe cases",
956
+ "Complications: pleural effusion, empyema, respiratory failure"
957
+ ]
958
+ else:
959
+ return [
960
+ f"Underlying mechanisms of {topic}",
961
+ f"Pathophysiologic pathways involved",
962
+ f"Host response and immune system involvement",
963
+ f"Disease progression and complications"
964
+ ]
965
+
966
+ def _generate_clinical_presentation_content(self, topic: str) -> List[str]:
967
+ """Generate clinical presentation content."""
968
+
969
+ if "dimorphic fungi" in topic.lower():
970
+ return [
971
+ "Histoplasmosis: fever, cough, weight loss, erythema nodosum",
972
+ "Blastomycosis: skin lesions, pulmonary symptoms, bone involvement",
973
+ "Coccidioidomycosis: Valley fever, arthralgias, desert rheumatism",
974
+ "Pulmonary manifestations: nodules, cavitation, hilar lymphadenopathy",
975
+ "Disseminated disease: CNS, skin, bone, adrenal involvement",
976
+ "Chronic forms: progressive pulmonary fibrosis, cavitary disease"
977
+ ]
978
+ elif "pneumonia" in topic.lower():
979
+ return [
980
+ "Classic triad: fever, cough, and dyspnea",
981
+ "Productive cough with purulent sputum (bacterial)",
982
+ "Pleuritic chest pain and decreased breath sounds",
983
+ "Physical signs: dullness to percussion, crackles, bronchial breath sounds",
984
+ "Systemic symptoms: malaise, myalgias, headache",
985
+ "Severe cases: sepsis, altered mental status, respiratory failure"
986
+ ]
987
+ else:
988
+ return [
989
+ f"Common signs and symptoms of {topic}",
990
+ f"Physical examination findings",
991
+ f"Disease spectrum and severity variations",
992
+ f"Complications and warning signs"
993
+ ]
994
+
995
+ def _generate_diagnostic_content(self, topic: str) -> List[str]:
996
+ """Generate diagnostic approach content."""
997
+
998
+ if "dimorphic fungi" in topic.lower():
999
+ return [
1000
+ "Urine antigen testing: rapid, sensitive for Histoplasma",
1001
+ "Serology: complement fixation, EIA antibodies (takes weeks)",
1002
+ "Culture: gold standard but requires 2-6 weeks for growth",
1003
+ "Histopathology: special stains (GMS, PAS) for tissue diagnosis",
1004
+ "Molecular testing: PCR increasingly available",
1005
+ "Imaging: chest CT for pulmonary nodules, lymphadenopathy"
1006
+ ]
1007
+ elif "pneumonia" in topic.lower():
1008
+ return [
1009
+ "Chest X-ray: first-line imaging for consolidation",
1010
+ "Laboratory: CBC with differential, procalcitonin, blood cultures",
1011
+ "Sputum culture: if good quality specimen available",
1012
+ "Urinary antigens: pneumococcal and Legionella",
1013
+ "Severity assessment: CURB-65, PSI scoring systems",
1014
+ "Advanced imaging: chest CT if complicated or atypical"
1015
+ ]
1016
+ else:
1017
+ return [
1018
+ f"Laboratory tests for {topic}",
1019
+ f"Imaging studies and interpretation",
1020
+ f"Differential diagnosis considerations",
1021
+ f"Confirmatory diagnostic procedures"
1022
+ ]
1023
+
1024
+ def _generate_treatment_content(self, topic: str) -> List[str]:
1025
+ """Generate treatment strategies content."""
1026
+
1027
+ if "dimorphic fungi" in topic.lower():
1028
+ return [
1029
+ "Mild-moderate disease: Itraconazole 200 mg BID × 6-12 weeks",
1030
+ "Severe disease: Amphotericin B 0.7-1.0 mg/kg/day × 1-2 weeks",
1031
+ "Step-down therapy: Itraconazole after amphotericin stabilization",
1032
+ "CNS disease: Amphotericin B × 4-6 weeks, then fluconazole",
1033
+ "Duration: 6-12 months for pulmonary, 12-24 months for disseminated",
1034
+ "Monitoring: drug levels, hepatic function, treatment response"
1035
+ ]
1036
+ elif "pneumonia" in topic.lower():
1037
+ return [
1038
+ "Outpatient CAP: Amoxicillin or macrolide monotherapy",
1039
+ "Hospitalized CAP: Beta-lactam + macrolide or fluoroquinolone",
1040
+ "Severe CAP: Broad-spectrum beta-lactam + macrolide",
1041
+ "Duration: 5-7 days for most cases, longer if complications",
1042
+ "Supportive care: oxygen, fluids, bronchodilators if needed",
1043
+ "Prevention: pneumococcal and influenza vaccination"
1044
+ ]
1045
+ else:
1046
+ return [
1047
+ f"First-line treatment options for {topic}",
1048
+ f"Alternative therapies and second-line agents",
1049
+ f"Treatment duration and monitoring parameters",
1050
+ f"Management of complications"
1051
+ ]
1052
+
1053
+ def _generate_guidelines_content(self, topic: str) -> List[str]:
1054
+ """Generate guidelines and evidence content."""
1055
+
1056
+ if "dimorphic fungi" in topic.lower():
1057
+ return [
1058
+ "IDSA 2007 Guidelines for Endemic Mycoses (updated recommendations)",
1059
+ "Treatment recommendations based on disease severity and location",
1060
+ "Antifungal drug selection considers penetration and efficacy",
1061
+ "Monitoring guidelines for drug toxicity and therapeutic response",
1062
+ "Prevention strategies for high-risk populations",
1063
+ "Quality indicators for optimal clinical outcomes"
1064
+ ]
1065
+ elif "pneumonia" in topic.lower():
1066
+ return [
1067
+ "IDSA/ATS 2019 Guidelines for Community-Acquired Pneumonia",
1068
+ "Antimicrobial selection based on severity and risk factors",
1069
+ "Biomarker-guided therapy duration (procalcitonin)",
1070
+ "Quality measures: appropriate antibiotic selection and timing",
1071
+ "Prevention: vaccination recommendations and smoking cessation",
1072
+ "Stewardship: narrow-spectrum therapy when possible"
1073
+ ]
1074
+ else:
1075
+ return [
1076
+ f"Current clinical practice guidelines for {topic}",
1077
+ f"Evidence-based recommendations and quality indicators",
1078
+ f"Emerging research and future directions",
1079
+ f"Implementation strategies in clinical practice"
1080
+ ]
1081
+
1082
+ def _generate_case_application_content(self, topic: str) -> List[str]:
1083
+ """Generate case application content."""
1084
+
1085
+ if "dimorphic fungi" in topic.lower():
1086
+ return [
1087
+ "Case diagnosis: Acute pulmonary histoplasmosis",
1088
+ "Rationale: Geographic exposure + clinical presentation + positive urine antigen",
1089
+ "Treatment plan: Itraconazole 200 mg BID × 6-12 weeks",
1090
+ "Monitoring: Clinical response, itraconazole levels, hepatic function",
1091
+ "Patient education: Prognosis, medication adherence, follow-up",
1092
+ "Prevention: Avoid high-risk activities in endemic areas"
1093
+ ]
1094
+ elif "pneumonia" in topic.lower():
1095
+ return [
1096
+ "Case diagnosis: Community-acquired pneumonia, moderate severity",
1097
+ "CURB-65 score: 2 points (age > 65, confusion absent)",
1098
+ "Treatment: Ceftriaxone 2g IV daily + azithromycin 500mg IV daily",
1099
+ "Expected response: Clinical improvement within 48-72 hours",
1100
+ "Discharge criteria: Stable vital signs, tolerating oral therapy",
1101
+ "Follow-up: Chest X-ray in 6 weeks if high-risk patient"
1102
+ ]
1103
+ else:
1104
+ return [
1105
+ f"Application of diagnostic criteria for {topic}",
1106
+ f"Treatment decision-making based on evidence",
1107
+ f"Monitoring response and adjusting therapy",
1108
+ f"Patient education and follow-up planning"
1109
+ ]
1110
+
1111
+ def _generate_clinical_pearls_content(self, topic: str) -> List[str]:
1112
+ """Generate clinical pearls content."""
1113
+
1114
+ if "dimorphic fungi" in topic.lower():
1115
+ return [
1116
+ "Geographic history is crucial - ask about travel to endemic areas",
1117
+ "Urine antigen testing provides rapid diagnosis for Histoplasma",
1118
+ "Lymphopenia is characteristic of histoplasmosis vs. bacterial infections",
1119
+ "Erythema nodosum suggests acute infection with good prognosis",
1120
+ "Itraconazole levels should be checked after 2 weeks of therapy",
1121
+ "Immunocompromised patients require longer, more intensive treatment"
1122
+ ]
1123
+ elif "pneumonia" in topic.lower():
1124
+ return [
1125
+ "Procalcitonin > 0.5 ng/mL suggests bacterial etiology",
1126
+ "Positive urinary antigens guide targeted antibiotic therapy",
1127
+ "CURB-65 score helps determine site of care (outpatient vs. hospital)",
1128
+ "Atypical pathogens require macrolide or fluoroquinolone coverage",
1129
+ "Clinical response expected within 48-72 hours of appropriate therapy",
1130
+ "Chest X-ray may lag behind clinical improvement by several days"
1131
+ ]
1132
+ else:
1133
+ return [
1134
+ f"Key clinical insights for {topic}",
1135
+ f"Common pitfalls to avoid in diagnosis",
1136
+ f"Practical tips for optimal patient management",
1137
+ f"Important prognostic factors to consider"
1138
+ ]
1139
+
1140
+ def _generate_rapid_fire_content(self, topic: str) -> List[str]:
1141
+ """Generate rapid fire questions content."""
1142
+
1143
+ if "dimorphic fungi" in topic.lower():
1144
+ return [
1145
+ "Q: Which dimorphic fungus is associated with spelunking? A: Histoplasma",
1146
+ "Q: What is the most sensitive test for histoplasmosis? A: Urine antigen",
1147
+ "Q: Which form is pathogenic at body temperature? A: Yeast form",
1148
+ "Q: What skin finding suggests acute coccidioidomycosis? A: Erythema nodosum",
1149
+ "Q: First-line treatment for mild histoplasmosis? A: Itraconazole",
1150
+ "Q: How long should treatment continue? A: 6-12 weeks for pulmonary disease"
1151
+ ]
1152
+ elif "pneumonia" in topic.lower():
1153
+ return [
1154
+ "Q: What is the most common cause of CAP? A: Streptococcus pneumoniae",
1155
+ "Q: Which score predicts 30-day mortality? A: CURB-65 or PSI",
1156
+ "Q: When should blood cultures be obtained? A: Before antibiotics in hospitalized patients",
1157
+ "Q: First-line outpatient treatment for CAP? A: Amoxicillin or macrolide",
1158
+ "Q: What biomarker helps guide antibiotic duration? A: Procalcitonin",
1159
+ "Q: How soon should clinical improvement occur? A: Within 48-72 hours"
1160
+ ]
1161
+ else:
1162
+ return [
1163
+ f"Quick review questions about {topic}",
1164
+ f"Key facts and figures to remember",
1165
+ f"High-yield testing points",
1166
+ f"Clinical scenarios for practice"
1167
+ ]
1168
+
1169
+ def _generate_discussion_content(self, topic: str) -> List[str]:
1170
+ """Generate discussion content."""
1171
+
1172
+ return [
1173
+ "Questions and answers session",
1174
+ "Case-based discussion and clinical experiences",
1175
+ "Challenging scenarios and problem-solving",
1176
+ "Summary of key learning points",
1177
+ "Resources for further learning",
1178
+ "Contact information for follow-up questions"
1179
+ ]
1180
+
1181
+ def _generate_generic_content(self, title: str, topic: str) -> List[str]:
1182
+ """Generate generic content for unspecified slide types."""
1183
+
1184
+ return [
1185
+ f"Key concepts related to {title.lower()} in {topic}",
1186
+ f"Clinical significance and practical applications",
1187
+ f"Evidence-based approaches and recommendations",
1188
+ f"Integration with current clinical practice"
1189
+ ]
1190
+
1191
+ def _generate_speaker_notes(self, slides: List[Dict], research_report: str) -> Dict[str, str]:
1192
+ """Generate detailed speaker notes for each slide."""
1193
+
1194
+ speaker_notes = {}
1195
+
1196
+ for slide in slides:
1197
+ slide_number = slide["slide_number"]
1198
+ title = slide["title"]
1199
+
1200
+ # Generate specific speaker notes based on slide content
1201
+ if "Title Slide" in title:
1202
+ notes = f"**Speaker Notes for Slide {slide_number}: {title}**\n\n"
1203
+ notes += "Welcome the audience and introduce the topic.\n"
1204
+ notes += "Mention the importance of understanding dimorphic fungi in clinical practice.\n"
1205
+ notes += "Preview the learning objectives and interactive elements.\n"
1206
+ notes += "Encourage questions throughout the presentation.\n"
1207
+ elif "Learning Objectives" in title:
1208
+ notes = f"**Speaker Notes for Slide {slide_number}: {title}**\n\n"
1209
+ notes += "Review each learning objective with the audience.\n"
1210
+ notes += "Explain how these objectives relate to clinical practice.\n"
1211
+ notes += "Ask: 'What is your current experience with diagnosing fungal infections?'\n"
1212
+ notes += "Set expectations for active participation.\n"
1213
+ elif "Case Vignette" in title:
1214
+ notes = f"**Speaker Notes for Slide {slide_number}: {title}**\n\n"
1215
+ notes += "Present the case systematically, pausing for audience input.\n"
1216
+ notes += "Ask: 'What additional history would you want to obtain?'\n"
1217
+ notes += "Highlight key clinical clues that point to the diagnosis.\n"
1218
+ notes += "Build suspense - we'll return to this case later.\n"
1219
+ elif "Definition" in title or "Overview" in title:
1220
+ notes = f"**Speaker Notes for Slide {slide_number}: {title}**\n\n"
1221
+ notes += "Explain the unique characteristics of dimorphic fungi.\n"
1222
+ notes += "Use the temperature-dependent morphology as a key teaching point.\n"
1223
+ notes += "Emphasize the geographic distribution and clinical significance.\n"
1224
+ notes += "Ask: 'Which endemic areas are you familiar with?'\n"
1225
+ elif "Pathophysiology" in title:
1226
+ notes = f"**Speaker Notes for Slide {slide_number}: {title}**\n\n"
1227
+ notes += "Walk through the infection process step by step.\n"
1228
+ notes += "Emphasize the importance of cell-mediated immunity.\n"
1229
+ notes += "Explain why immunocompromised patients are at higher risk.\n"
1230
+ notes += "Connect pathophysiology to clinical presentation.\n"
1231
+ elif "Clinical Presentation" in title:
1232
+ notes = f"**Speaker Notes for Slide {slide_number}: {title}**\n\n"
1233
+ notes += "Describe the spectrum of disease for each fungus.\n"
1234
+ notes += "Highlight distinguishing features between organisms.\n"
1235
+ notes += "Use clinical images if available to illustrate skin findings.\n"
1236
+ notes += "Ask: 'What clinical clues help differentiate these infections?'\n"
1237
+ elif "Diagnostic" in title:
1238
+ notes = f"**Speaker Notes for Slide {slide_number}: {title}**\n\n"
1239
+ notes += "Discuss the pros and cons of each diagnostic method.\n"
1240
+ notes += "Emphasize the rapid turnaround time of urine antigen testing.\n"
1241
+ notes += "Explain when to use each test based on clinical scenario.\n"
1242
+ notes += "Address common pitfalls in diagnosis.\n"
1243
+ elif "Treatment" in title:
1244
+ notes = f"**Speaker Notes for Slide {slide_number}: {title}**\n\n"
1245
+ notes += "Review IDSA guidelines for treatment recommendations.\n"
1246
+ notes += "Explain rationale for drug selection and duration.\n"
1247
+ notes += "Discuss monitoring parameters and side effects.\n"
1248
+ notes += "Address when to consult infectious disease specialists.\n"
1249
+ elif "Guidelines" in title:
1250
+ notes = f"**Speaker Notes for Slide {slide_number}: {title}**\n\n"
1251
+ notes += "Highlight key recommendations from IDSA guidelines.\n"
1252
+ notes += "Discuss recent updates and changes in recommendations.\n"
1253
+ notes += "Emphasize evidence-based approach to treatment.\n"
1254
+ notes += "Provide resources for accessing current guidelines.\n"
1255
+ elif "Case Application" in title:
1256
+ notes = f"**Speaker Notes for Slide {slide_number}: {title}**\n\n"
1257
+ notes += "Return to the opening case vignette.\n"
1258
+ notes += "Walk through the diagnostic reasoning process.\n"
1259
+ notes += "Explain treatment selection and monitoring plan.\n"
1260
+ notes += "Ask: 'What would you do differently in this case?'\n"
1261
+ elif "Clinical Pearls" in title:
1262
+ notes = f"**Speaker Notes for Slide {slide_number}: {title}**\n\n"
1263
+ notes += "Emphasize practical tips for clinical practice.\n"
1264
+ notes += "Share memorable mnemonics or decision aids.\n"
1265
+ notes += "Highlight common mistakes to avoid.\n"
1266
+ notes += "Encourage audience to share their own pearls.\n"
1267
+ elif "Rapid Fire" in title:
1268
+ notes = f"**Speaker Notes for Slide {slide_number}: {title}**\n\n"
1269
+ notes += "Engage the audience with quick questions.\n"
1270
+ notes += "Encourage rapid responses to build confidence.\n"
1271
+ notes += "Provide immediate feedback and explanations.\n"
1272
+ notes += "Use this as a knowledge check before concluding.\n"
1273
+ elif "Discussion" in title:
1274
+ notes = f"**Speaker Notes for Slide {slide_number}: {title}**\n\n"
1275
+ notes += "Open the floor for questions and discussion.\n"
1276
+ notes += "Encourage sharing of clinical experiences.\n"
1277
+ notes += "Address any remaining questions or concerns.\n"
1278
+ notes += "Provide contact information and additional resources.\n"
1279
+ notes += "Thank the audience for their participation.\n"
1280
+ else:
1281
+ notes = f"**Speaker Notes for Slide {slide_number}: {title}**\n\n"
1282
+ notes += f"Key talking points for {title}.\n"
1283
+ notes += "Connect to research findings and clinical evidence.\n"
1284
+ notes += "Engage audience with relevant questions.\n"
1285
+ notes += "Ensure smooth transition to next slide.\n"
1286
+
1287
+ speaker_notes[str(slide_number)] = notes
1288
+
1289
+ return speaker_notes
1290
+
1291
+ def _generate_presentation_metadata(self, topic: str, target_audience: str, duration: int, total_slides: int) -> Dict[str, Any]:
1292
+ """Generate presentation metadata."""
1293
+
1294
+ metadata = {
1295
+ "topic": topic,
1296
+ "target_audience": target_audience,
1297
+ "duration_minutes": duration,
1298
+ "total_slides": total_slides,
1299
+ "created_date": "2025-07-18",
1300
+ "presentation_type": "Educational",
1301
+ "format": "PowerPoint/Slides",
1302
+ "estimated_time_per_slide": duration / total_slides if total_slides > 0 else 3,
1303
+ "learning_level": "Intermediate",
1304
+ "prerequisites": f"Basic knowledge of {topic}",
1305
+ "materials_needed": "Projector, handouts (optional)"
1306
+ }
1307
+
1308
+ return metadata
tools/explain_in_layman_language.py ADDED
@@ -0,0 +1,370 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ explain_in_layman_language.py
3
+ -----------------------------
4
+
5
+ Tool for explaining medical assessment and plans in patient-friendly language with educational resources.
6
+
7
+ This tool takes complex medical terminology from assessments and plans and translates them into
8
+ easy-to-understand language for patients. It also searches for reliable educational resources
9
+ to help patients better understand their conditions and treatments.
10
+ """
11
+
12
+ import asyncio
13
+ from typing import Dict, List, Union, Any
14
+ from tools.base import Tool
15
+ from tools.utils import ToolExecutionError, logger
16
+ from tools.internet_search import InternetSearchTool
17
+
18
+
19
class ExplainInLaymanLanguageTool(Tool):
    """
    Tool for explaining medical assessments and plans in patient-friendly language.

    This tool:
    1. Takes medical assessment and plan text
    2. Translates complex medical terminology into layman terms
    3. Searches for 2-3 reliable educational links on the topics
    4. Provides a comprehensive patient-friendly explanation
    """

    def __init__(self) -> None:
        """Initialize the tool's name, description, argument schema, and search helper."""
        super().__init__()
        self.name = "explain_in_layman_language"
        self.description = "Explain medical assessment and plan in patient-friendly language with educational resources"
        self.args_schema = {
            "type": "object",
            "properties": {
                "assessment_and_plan": {
                    "type": "string",
                    "description": "The medical assessment and plan text to explain in layman terms"
                },
                "patient_context": {
                    "type": "string",
                    "description": "Additional context about the patient (age, relevant history, etc.)",
                    "default": ""
                }
            },
            "required": ["assessment_and_plan"]
        }

        # Internet search tool used to locate patient-education links.
        self.internet_search = InternetSearchTool()

    def openai_spec(self, legacy=False):
        """Return the OpenAI function-calling specification for this tool."""
        return {
            "name": self.name,
            "description": self.description,
            "parameters": self.args_schema
        }

    async def run(
        self,
        assessment_and_plan: str,
        patient_context: str = ""
    ) -> Dict[str, Any]:
        """
        Explain a medical assessment and plan in layman language with educational resources.

        Args:
            assessment_and_plan (str): The medical assessment and plan to explain.
            patient_context (str): Additional patient context.

        Returns:
            Dict[str, Any]: Keys ``layman_explanation``, ``educational_resources``,
            ``key_topics_covered`` and ``patient_context``.

        Raises:
            ToolExecutionError: If any step of the pipeline fails.
        """
        try:
            logger.info("Explaining medical assessment in layman language")

            # Step 1: Extract key medical topics from the assessment and plan
            key_topics = self._extract_medical_topics(assessment_and_plan)

            # Step 2: Create patient-friendly explanation
            layman_explanation = self._create_layman_explanation(
                assessment_and_plan,
                patient_context,
                key_topics
            )

            # Step 3: Search for educational resources
            educational_links = await self._find_educational_resources(key_topics)

            # Step 4: Combine everything into a comprehensive response
            result = {
                "layman_explanation": layman_explanation,
                "educational_resources": educational_links,
                "key_topics_covered": key_topics,
                "patient_context": patient_context
            }

            # Lazy %-args: the message is only built when INFO is enabled.
            logger.info(
                "Successfully created layman explanation with %d educational resources",
                len(educational_links)
            )
            return result

        except Exception as e:
            logger.error(f"ExplainInLaymanLanguageTool failed: {e}", exc_info=True)
            raise ToolExecutionError(f"Failed to explain assessment in layman language: {e}")

    def _extract_medical_topics(self, assessment_and_plan: str) -> List[str]:
        """Extract key medical topics from the assessment and plan.

        Performs a case-insensitive substring scan against a fixed
        term -> category map and returns at most five unique categories.
        """
        # Common medical terms and their patient-facing categories.
        medical_terms = {
            # Infections
            "pneumonia": "lung infection",
            "sepsis": "blood infection",
            "cellulitis": "skin infection",
            "uti": "urinary tract infection",
            "urinary tract infection": "urinary tract infection",
            "meningitis": "brain infection",
            "endocarditis": "heart infection",

            # Antibiotics
            "antibiotic": "antibiotic treatment",
            "vancomycin": "antibiotic treatment",
            "ceftriaxone": "antibiotic treatment",
            "azithromycin": "antibiotic treatment",
            "levofloxacin": "antibiotic treatment",
            "piperacillin": "antibiotic treatment",
            "meropenem": "antibiotic treatment",

            # Conditions
            "diabetes": "diabetes",
            "hypertension": "high blood pressure",
            "copd": "chronic lung disease",
            "asthma": "asthma",
            "heart failure": "heart failure",
            "kidney disease": "kidney disease",
            "liver disease": "liver disease",

            # Procedures
            "blood culture": "blood tests",
            "chest x-ray": "chest imaging",
            "ct scan": "CT scan",
            "mri": "MRI scan",
            "lumbar puncture": "spinal fluid test",
            "biopsy": "tissue sample",

            # Symptoms
            "fever": "fever",
            "shortness of breath": "breathing difficulty",
            "chest pain": "chest pain",
            "abdominal pain": "stomach pain",
            "nausea": "nausea",
            "vomiting": "vomiting",
            "diarrhea": "diarrhea"
        }

        topics = []
        assessment_lower = assessment_and_plan.lower()

        # NOTE(review): plain substring matching — "uti" also matches inside
        # words like "caution"; acceptable for a best-effort topic scan.
        for term, category in medical_terms.items():
            if term in assessment_lower:
                if category not in topics:
                    topics.append(category)

        # If no specific topics found, fall back to broad categories.
        if not topics:
            if "infection" in assessment_lower:
                topics.append("infection")
            if "antibiotic" in assessment_lower:
                topics.append("antibiotic treatment")
            if "treatment" in assessment_lower:
                topics.append("treatment")

        return topics[:5]  # Limit to top 5 topics

    def _create_layman_explanation(
        self,
        assessment_and_plan: str,
        patient_context: str,
        key_topics: List[str]
    ) -> str:
        """Create a patient-friendly explanation of the assessment and plan.

        Builds a markdown-style message: optional patient context, the
        assessment text with medical jargon substituted for plain language,
        the key topics, and an encouraging closing section.
        """
        # Medical terminology -> layman translations. Order matters slightly:
        # e.g. "hypotension" is replaced before "hypertension".
        translations = {
            # Infections
            "pneumonia": "a lung infection",
            "sepsis": "a serious blood infection",
            "bacteremia": "bacteria in the blood",
            "cellulitis": "a skin and soft tissue infection",
            "uti": "a urinary tract infection (bladder infection)",
            "urinary tract infection": "a bladder infection",
            "pyelonephritis": "a kidney infection",
            "meningitis": "an infection of the brain and spinal cord lining",
            "endocarditis": "an infection of the heart valves",

            # Antibiotics
            "vancomycin": "a strong antibiotic medication",
            "ceftriaxone": "an antibiotic medication",
            "azithromycin": "an antibiotic medication (Z-pack)",
            "levofloxacin": "an antibiotic medication",
            "piperacillin-tazobactam": "a combination antibiotic medication",
            "meropenem": "a powerful antibiotic medication",
            "clindamycin": "an antibiotic medication",

            # Medical procedures
            "blood culture": "blood tests to check for bacteria",
            "chest x-ray": "a picture of your lungs",
            "ct scan": "a detailed scan using X-rays",
            "mri": "a detailed scan using magnets",
            "lumbar puncture": "a procedure to test spinal fluid",
            "biopsy": "taking a small sample of tissue for testing",

            # Conditions
            "copd": "chronic obstructive pulmonary disease (long-term lung condition)",
            "chf": "congestive heart failure (heart not pumping well)",
            "dm": "diabetes mellitus (diabetes)",
            "htn": "hypertension (high blood pressure)",
            "ckd": "chronic kidney disease (long-term kidney problems)",

            # Symptoms
            "dyspnea": "shortness of breath",
            "tachycardia": "fast heart rate",
            "hypotension": "low blood pressure",
            "hypertension": "high blood pressure",
            "pyrexia": "fever",
            "malaise": "feeling unwell",
            "lethargy": "extreme tiredness"
        }

        # Start with a friendly introduction
        explanation = "Here's what your medical team has found and what the plan is, explained in simple terms:\n\n"

        # Add patient context if provided
        if patient_context:
            explanation += f"**About You:** {patient_context}\n\n"

        # Process the assessment and plan
        explanation += "**What's Happening:**\n"

        # NOTE(review): the whole text is lower-cased before substitution and
        # then only the first character re-capitalized; substitutions are
        # plain substring replacements (no word boundaries). Preserved as-is
        # to keep output identical — confirm before tightening.
        simplified_text = assessment_and_plan.lower()
        for medical_term, layman_term in translations.items():
            simplified_text = simplified_text.replace(medical_term, layman_term)

        # Add the simplified explanation
        explanation += simplified_text.capitalize() + "\n\n"

        # Add key points section
        if key_topics:
            explanation += "**Key Points to Remember:**\n"
            for topic in key_topics:
                explanation += f"• {topic.capitalize()}\n"
            explanation += "\n"

        # Add encouragement and next steps
        explanation += "**What This Means for You:**\n"
        explanation += "Your medical team is working to give you the best care possible. "
        explanation += "It's important to follow the treatment plan and ask questions if anything is unclear. "
        explanation += "Your health and understanding are our top priorities.\n\n"

        return explanation

    async def _find_educational_resources(self, key_topics: List[str]) -> List[Dict[str, str]]:
        """Find 2-3 reliable educational resources about the key topics.

        Searches trusted patient-education sites for up to three topics; if
        fewer than two topic-specific links are found (or search fails
        entirely), general fallback resources are returned instead.
        """
        import re  # hoisted: was previously re-imported inside the result loop

        educational_links = []

        # Trusted medical education websites
        trusted_sites = [
            "mayoclinic.org",
            "webmd.com",
            "healthline.com",
            "medlineplus.gov",
            "cdc.gov",
            "nih.gov",
            "patient.info"
        ]

        try:
            # Search for educational resources for each topic
            for topic in key_topics[:3]:  # Limit to 3 topics
                search_queries = [
                    f"{topic} patient education site:mayoclinic.org",
                    f"{topic} patient information site:medlineplus.gov",
                    f"what is {topic} patient guide site:healthline.com"
                ]

                for query in search_queries:
                    try:
                        search_results = await self.internet_search.run(q=query, max_results=3)

                        # Parse the formatted search-result string for links.
                        if search_results and isinstance(search_results, str):
                            # Pattern to match [Read more](url) links
                            link_pattern = r'\[Read more\]\(([^)]+)\)'
                            title_pattern = r'\*\*([^*]+)\*\*'

                            links = re.findall(link_pattern, search_results)
                            titles = re.findall(title_pattern, search_results)

                            # Take the first trusted-domain link in this batch.
                            for title, link in zip(titles, links):
                                if any(site in link.lower() for site in trusted_sites):
                                    educational_links.append({
                                        "title": title,
                                        "url": link,
                                        "topic": topic,
                                        "description": f"Learn more about {topic}"
                                    })
                                    break  # Found a good link for this topic

                        if len(educational_links) >= 3:
                            break

                    except Exception as e:
                        logger.warning(f"Failed to search for {topic}: {e}")
                        continue

                if len(educational_links) >= 3:
                    break

            # Top up with general reliable resources when search came up short.
            if len(educational_links) < 2:
                general_resources = [
                    {
                        "title": "MedlinePlus Health Information",
                        "url": "https://medlineplus.gov/",
                        "topic": "general health",
                        "description": "Reliable health information from the National Library of Medicine"
                    },
                    {
                        "title": "Mayo Clinic Patient Education",
                        "url": "https://www.mayoclinic.org/patient-visitor-guide/patient-education",
                        "topic": "general health",
                        "description": "Comprehensive patient education resources from Mayo Clinic"
                    },
                    {
                        "title": "CDC Health Information",
                        "url": "https://www.cdc.gov/healthypeople/",
                        "topic": "general health",
                        "description": "Health information and resources from the CDC"
                    }
                ]

                # Add general resources to reach at least 2-3 links
                for resource in general_resources:
                    if len(educational_links) < 3:
                        educational_links.append(resource)

        except Exception as e:
            logger.warning(f"Failed to find educational resources: {e}")
            # Provide fallback resources
            educational_links = [
                {
                    "title": "MedlinePlus - Easy-to-Read Health Information",
                    "url": "https://medlineplus.gov/",
                    "topic": "general health",
                    "description": "Trusted health information from the National Library of Medicine"
                },
                {
                    "title": "Mayo Clinic - Patient Education",
                    "url": "https://www.mayoclinic.org/diseases-conditions",
                    "topic": "general health",
                    "description": "Comprehensive information about diseases and conditions"
                }
            ]

        return educational_links[:3]  # Return maximum 3 links
tools/fhir_patient.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from tools.base import Tool
2
+
3
+
4
+ from tools.utils import ToolExecutionError, logger
5
+ from typing import Any, Dict
6
+
7
class FHIRPatientTool(Tool):
    """
    Tool to fetch synthetic patient labs and vitals from a FHIR server by patient ID.

    This tool queries a FHIR API for a given patient ID and returns labs and
    vitals (placeholder implementation).
    """
    # Fix: this docstring previously sat AFTER ``openai_spec``, where it was
    # a no-op string statement rather than the class ``__doc__``.

    def __init__(self) -> None:
        """
        Initialize the FHIRPatientTool with its name, description, and argument schema.
        """
        super().__init__()
        self.name = "synthetic_patient_lookup"
        self.description = "Fetch synthetic patient labs / vitals from FHIR by patient_id."
        self.args_schema = {
            "type": "object",
            "properties": {
                "patient_id": {"type": "string", "description": "Patient ID to query"}
            },
            "required": ["patient_id"]
        }

    def openai_spec(self, legacy=False):
        """Return the OpenAI function-calling specification for this tool."""
        return {
            "name": self.name,
            "description": self.description,
            "parameters": self.args_schema
        }

    async def run(self, patient_id: str) -> Dict[str, Any]:
        """
        Fetch synthetic patient labs and vitals for a given patient ID.

        Args:
            patient_id (str): The patient ID to query.

        Returns:
            Dict[str, Any]: The patient data (placeholder: empty labs/vitals).

        Raises:
            ToolExecutionError: If the lookup fails.
        """
        try:
            # Placeholder for actual FHIR API call
            return {"patient_id": patient_id, "labs": [], "vitals": []}
        except Exception as e:
            logger.error(f"FHIRPatientTool failed: {e}", exc_info=True)
            raise ToolExecutionError(f"FHIRPatientTool failed: {e}")
tools/format_references.py ADDED
@@ -0,0 +1,423 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Format References Tool
3
+ Smart reference formatting for any journal's requirements
4
+ """
5
+
6
+ import json
7
+ import re
8
+ import requests
9
+ import os
10
+ from tools.base import Tool
11
+
12
+
13
+ class FormatReferencesTool(Tool):
14
+ """
15
+ Smart reference formatting tool that can format references according to any journal's specific requirements.
16
+ Asks for the target journal, looks up formatting instructions, and applies them to user-provided references.
17
+ """
18
+
19
    def __init__(self):
        """Initialize the tool; all shared state lives on the Tool base class."""
        super().__init__()
21
+
22
    def execute(self, references_text, target_journal=None, max_length=2000):
        """
        Format references according to target journal requirements.

        Pipeline: validate inputs -> look up journal guidelines -> parse the
        raw references -> apply journal-specific formatting -> trim the
        response to ``max_length``.

        Args:
            references_text (str): Raw references to format
            target_journal (str, optional): Target journal name
            max_length (int): Maximum response length (serialized JSON chars)

        Returns:
            dict: Always contains ``status`` plus ``formatted_references`` and
            ``formatting_guidelines``; on success also ``journal``,
            ``reference_count`` and ``message``. Errors are reported via
            ``status`` rather than raised.
        """
        try:
            # Step 1: If no journal specified, ask for it (interactive flow).
            if not target_journal or not target_journal.strip():
                return {
                    "status": "journal_required",
                    "message": "Please specify the target journal for reference formatting. For example: 'Clinical Infectious Diseases', 'The Lancet', 'Nature Medicine', etc.",
                    "formatted_references": "",
                    "formatting_guidelines": ""
                }

            # Step 2: Clean and validate input references
            if not references_text or not references_text.strip():
                return {
                    "status": "references_required",
                    "message": "Please provide the references you'd like to format.",
                    "formatted_references": "",
                    "formatting_guidelines": ""
                }

            # Step 3: Search for journal-specific formatting guidelines
            formatting_guidelines = self._get_journal_formatting_guidelines(target_journal)

            # Step 4: Parse input references
            parsed_references = self._parse_references(references_text)

            # Step 5: Apply journal-specific formatting
            formatted_references = self._apply_journal_formatting(
                parsed_references,
                target_journal,
                formatting_guidelines
            )

            # Step 6: Prepare response
            response = {
                "status": "success",
                "journal": target_journal,
                "formatted_references": formatted_references,
                "formatting_guidelines": formatting_guidelines,
                "reference_count": len(parsed_references),
                "message": f"Successfully formatted {len(parsed_references)} references for {target_journal}"
            }

            # Trim response if too long. Budget is estimated by serializing
            # the response WITHOUT the formatted references, so only that
            # field gets truncated; the "-50" leaves headroom for the
            # "...[truncated]" marker and JSON escaping.
            response_str = json.dumps(response)
            if len(response_str) > max_length:
                # Truncate formatted references if needed
                available_space = max_length - len(json.dumps({**response, "formatted_references": ""}))
                if available_space > 100:
                    response["formatted_references"] = response["formatted_references"][:available_space-50] + "...[truncated]"

            return response

        except Exception as e:
            # Errors are surfaced in-band so the caller always gets a dict.
            return {
                "status": "error",
                "message": f"Error formatting references: {str(e)}",
                "formatted_references": "",
                "formatting_guidelines": ""
            }
93
+
94
+ def _get_journal_formatting_guidelines(self, journal_name):
95
+ """Search for journal-specific reference formatting guidelines"""
96
+ try:
97
+ # Search for journal reference formatting guidelines
98
+ search_queries = [
99
+ f"{journal_name} reference format guidelines",
100
+ f"{journal_name} citation style requirements",
101
+ f"{journal_name} author guidelines references",
102
+ f"how to format references for {journal_name}"
103
+ ]
104
+
105
+ guidelines = ""
106
+ for query in search_queries:
107
+ try:
108
+ # Use a simple synchronous approach with requests for now
109
+ import requests
110
+ import os
111
+
112
+ print(f"Searching for: {query}") # Debug
113
+
114
+ # Use Serper API directly
115
+ api_key = os.getenv("SERPER_API_KEY")
116
+ if api_key:
117
+ payload = {"q": query, "num": 3}
118
+ headers = {"X-API-KEY": api_key, "Content-Type": "application/json"}
119
+ resp = requests.post("https://google.serper.dev/search",
120
+ json=payload, headers=headers, timeout=5)
121
+ print(f"Search response status: {resp.status_code}") # Debug
122
+
123
+ if resp.status_code == 200:
124
+ results = resp.json().get("organic", [])
125
+ print(f"Found {len(results)} results") # Debug
126
+
127
+ for result in results:
128
+ snippet = result.get("snippet", "")
129
+ title = result.get("title", "")
130
+ content = f"{title} {snippet}"
131
+ if self._contains_formatting_info(content, journal_name):
132
+ guidelines += content + "\n\n"
133
+ print(f"Found relevant guidelines") # Debug
134
+ break
135
+ if guidelines:
136
+ break
137
+ except Exception:
138
+ continue
139
+
140
+ if not guidelines:
141
+ guidelines = self._get_fallback_guidelines(journal_name)
142
+
143
+ return guidelines[:1500] # Limit length
144
+
145
+ except Exception as e:
146
+ return f"Error retrieving guidelines: {str(e)}"
147
+
148
+ def _contains_formatting_info(self, content, journal_name):
149
+ """Check if content contains relevant formatting information"""
150
+ keywords = [
151
+ "reference", "citation", "format", "style", "bibliography",
152
+ "author", "title", "journal", "volume", "page", "doi",
153
+ "vancouver", "ama", "chicago", "harvard", "numbered"
154
+ ]
155
+
156
+ content_lower = content.lower()
157
+ journal_lower = journal_name.lower()
158
+
159
+ # Must contain journal name and at least 3 formatting keywords
160
+ return (journal_lower in content_lower and
161
+ sum(1 for keyword in keywords if keyword in content_lower) >= 3)
162
+
163
+ def _parse_references(self, references_text):
164
+ """Parse input references into structured format"""
165
+ # Split references by common delimiters
166
+ references = []
167
+
168
+ # Split by numbered patterns (1., 2., etc.) or line breaks
169
+ ref_parts = re.split(r'\n+|\d+\.\s+', references_text.strip())
170
+ ref_parts = [part.strip() for part in ref_parts if part.strip()]
171
+
172
+ for i, ref_text in enumerate(ref_parts, 1):
173
+ parsed_ref = self._extract_reference_components(ref_text)
174
+ parsed_ref["original"] = ref_text
175
+ parsed_ref["number"] = str(i)
176
+ references.append(parsed_ref)
177
+
178
+ return references
179
+
180
+ def _extract_reference_components(self, ref_text):
181
+ """Extract components from a single reference"""
182
+ components = {
183
+ "authors": "",
184
+ "title": "",
185
+ "journal": "",
186
+ "year": "",
187
+ "volume": "",
188
+ "issue": "",
189
+ "pages": "",
190
+ "doi": "",
191
+ "pmid": "",
192
+ "url": ""
193
+ }
194
+
195
+ # Extract DOI
196
+ doi_match = re.search(r'doi:\s*([^\s,;]+)', ref_text, re.IGNORECASE)
197
+ if doi_match:
198
+ components["doi"] = doi_match.group(1)
199
+
200
+ # Extract PMID
201
+ pmid_match = re.search(r'pmid:\s*(\d+)', ref_text, re.IGNORECASE)
202
+ if pmid_match:
203
+ components["pmid"] = pmid_match.group(1)
204
+
205
+ # Extract year (4 digits)
206
+ year_match = re.search(r'\b(19|20)\d{2}\b', ref_text)
207
+ if year_match:
208
+ components["year"] = year_match.group(0)
209
+
210
+ # Extract volume and pages pattern like "2023;45(3):123-130"
211
+ vol_pages_match = re.search(r'(\d+)\((\d+)\):(\d+[-–]\d+)', ref_text)
212
+ if vol_pages_match:
213
+ components["volume"] = vol_pages_match.group(1)
214
+ components["issue"] = vol_pages_match.group(2)
215
+ components["pages"] = vol_pages_match.group(3)
216
+
217
+ # Extract URL
218
+ url_match = re.search(r'https?://[^\s,;]+', ref_text)
219
+ if url_match:
220
+ components["url"] = url_match.group(0)
221
+
222
+ # Simple author extraction (everything before first period if present)
223
+ if '.' in ref_text:
224
+ potential_authors = ref_text.split('.')[0]
225
+ if len(potential_authors) < 100: # Reasonable author length
226
+ components["authors"] = potential_authors.strip()
227
+
228
+ return components
229
+
230
def _apply_journal_formatting(self, references, journal_name, guidelines):
    """Render each parsed reference in the citation style appropriate for the
    target journal, returning the numbered citations joined by blank lines.
    """
    style = self._determine_formatting_style(journal_name, guidelines)

    # Dispatch table for the supported styles; anything unrecognized falls
    # back to the generic formatter.
    formatters = {
        "vancouver": self._format_vancouver_style,
        "ama": self._format_ama_style,
        "chicago": self._format_chicago_style,
    }
    render = formatters.get(style, self._format_generic_style)

    numbered = [f"{ref['number']}. {render(ref)}" for ref in references]
    return "\n\n".join(numbered)
250
+
251
+ def _determine_formatting_style(self, journal_name, guidelines):
252
+ """Determine the appropriate formatting style for the journal"""
253
+ journal_lower = journal_name.lower()
254
+ guidelines_lower = guidelines.lower()
255
+
256
+ # Medical journals often use Vancouver or AMA
257
+ medical_journals = [
258
+ "clinical infectious diseases", "journal of infectious diseases",
259
+ "the lancet", "new england journal of medicine", "jama",
260
+ "nature medicine", "bmj", "plos"
261
+ ]
262
+
263
+ if any(j in journal_lower for j in medical_journals):
264
+ if "vancouver" in guidelines_lower:
265
+ return "vancouver"
266
+ elif "ama" in guidelines_lower:
267
+ return "ama"
268
+ else:
269
+ return "vancouver" # Default for medical journals
270
+
271
+ # Check guidelines for specific style mentions
272
+ if "vancouver" in guidelines_lower:
273
+ return "vancouver"
274
+ elif "ama" in guidelines_lower:
275
+ return "ama"
276
+ elif "chicago" in guidelines_lower:
277
+ return "chicago"
278
+
279
+ return "generic"
280
+
281
def _format_vancouver_style(self, ref):
    """Render one parsed reference as a Vancouver-style citation string."""
    segments = []

    if ref["authors"]:
        # Authors are normalized by the Vancouver author helper.
        segments.append(self._format_authors_vancouver(ref["authors"]))

    if ref["title"]:
        segments.append(ref["title"].strip().rstrip('.') + ".")

    if ref["journal"]:
        # Assemble "Journal Year;Volume(Issue):Pages."
        cite = ref["journal"]
        if ref["year"]:
            cite += f" {ref['year']}"
        if ref["volume"]:
            cite += f";{ref['volume']}"
        if ref["issue"]:
            cite += f"({ref['issue']})"
        if ref["pages"]:
            cite += f":{ref['pages']}"
        segments.append(cite + ".")

    if ref["doi"]:
        segments.append(f"doi:{ref['doi']}")

    return " ".join(segments)
310
+
311
def _format_ama_style(self, ref):
    """Render one parsed reference as an AMA-style citation string."""
    segments = []

    if ref["authors"]:
        segments.append(self._format_authors_ama(ref["authors"]))

    if ref["title"]:
        segments.append(ref["title"].strip().rstrip('.') + ".")

    if ref["journal"]:
        # AMA keeps a period after the journal name, then "Year;Volume(Issue):Pages".
        cite = f"{ref['journal']}."
        if ref["year"] and ref["volume"]:
            cite += f" {ref['year']};{ref['volume']}"
            if ref["issue"]:
                cite += f"({ref['issue']})"
            if ref["pages"]:
                cite += f":{ref['pages']}"
        segments.append(cite)

    if ref["doi"]:
        segments.append(f"doi:{ref['doi']}")

    return " ".join(segments)
337
+
338
def _format_chicago_style(self, ref):
    """Render one parsed reference as a Chicago-style citation string."""
    segments = []

    if ref["authors"]:
        segments.append(self._format_authors_chicago(ref["authors"]))

    if ref["title"]:
        # Chicago places article titles in double quotes.
        segments.append(f'"{ref["title"].strip().rstrip(".")}"')

    if ref["journal"]:
        # Assemble "Journal Volume, no. Issue (Year): Pages."
        cite = ref["journal"]
        if ref["volume"]:
            cite += f" {ref['volume']}"
        if ref["issue"]:
            cite += f", no. {ref['issue']}"
        if ref["year"]:
            cite += f" ({ref['year']})"
        if ref["pages"]:
            cite += f": {ref['pages']}"
        segments.append(cite + ".")

    return " ".join(segments)
363
+
364
def _format_generic_style(self, ref):
    """Format reference in generic academic style.

    Used when no journal-specific style could be determined; falls back to
    Vancouver, the most common biomedical citation style.
    """
    return self._format_vancouver_style(ref)  # Default to Vancouver
367
+
368
+ def _format_authors_vancouver(self, authors_str):
369
+ """Format authors in Vancouver style"""
370
+ # Simple formatting - could be enhanced
371
+ if "," in authors_str:
372
+ return authors_str.strip().rstrip('.') + "."
373
+ return authors_str.strip() + "."
374
+
375
def _format_authors_ama(self, authors_str):
    """Format authors in AMA style.

    AMA author formatting is currently handled identically to Vancouver,
    so this simply delegates to the Vancouver helper.
    """
    return self._format_authors_vancouver(authors_str)
378
+
379
+ def _format_authors_chicago(self, authors_str):
380
+ """Format authors in Chicago style"""
381
+ return authors_str.strip().rstrip('.') + "."
382
+
383
+ def _get_fallback_guidelines(self, journal_name):
384
+ """Provide fallback formatting guidelines when internet search fails"""
385
+ journal_lower = journal_name.lower()
386
+
387
+ # Known guidelines for major medical journals
388
+ known_guidelines = {
389
+ "clinical infectious diseases": "Uses numbered Vancouver style: Author(s). Title. Journal Name. Year;Volume(Issue):Pages. doi:xxx",
390
+ "the lancet": "Uses numbered references in Vancouver style with specific formatting requirements",
391
+ "nature medicine": "Uses numbered references with Nature style formatting",
392
+ "jama": "Uses AMA style numbered references with author-year format",
393
+ "new england journal of medicine": "Uses numbered Vancouver style references",
394
+ "plos one": "Uses numbered references with specific PLOS formatting requirements",
395
+ "infection control": "Uses Vancouver style for infection control journals",
396
+ "antimicrobial": "Uses medical journal Vancouver style formatting"
397
+ }
398
+
399
+ # Check for exact or partial matches
400
+ for known_journal, guideline in known_guidelines.items():
401
+ if known_journal in journal_lower:
402
+ return f"Formatting guidelines for {journal_name}: {guideline}"
403
+
404
+ # Generic medical journal guidelines
405
+ return f"Standard medical journal formatting for {journal_name}: Uses numbered Vancouver style references with Author(s). Title. Journal Name. Year;Volume(Issue):Pages. doi:xxx format."
406
+
407
+
408
# Tool metadata
# Registry entry describing the format_references tool: the public callable
# name, the two string parameters it accepts, and the category used to group
# it in the tool catalog.
TOOL_METADATA = {
    "name": "format_references",
    "description": "Smart reference formatting tool that formats citations according to any journal's specific requirements",
    "parameters": {
        "references_text": {
            "type": "string",
            "description": "The references to format (can be in any format)"
        },
        "target_journal": {
            "type": "string",
            "description": "The target journal name (e.g., 'Clinical Infectious Diseases', 'Nature Medicine')"
        }
    },
    "category": "research"
}
tools/generate_board_exam_question.py ADDED
The diff for this file is too large to render. See raw diff
 
tools/generate_flash_cards.py ADDED
@@ -0,0 +1,324 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ generate_flash_cards.py
3
+ -----------------------
4
+
5
+ Tool for generating educational flash cards for medical topics.
6
+
7
+ This tool creates flash card style educational content to help with memorization and
8
+ quick review of key medical concepts, facts, and clinical pearls.
9
+
10
+ Key Features:
11
+ - Creates front/back flash card format
12
+ - Generates multiple cards per topic
13
+ - Includes mnemonics and memory aids
14
+ - Covers key facts, clinical pearls, and high-yield information
15
+ - Organizes cards by difficulty and subtopic
16
+ """
17
+
18
+ import asyncio
19
+ from typing import Any, Dict, List, Union
20
+ from tools.base import Tool
21
+ from tools.utils import ToolExecutionError, logger
22
+
23
class GenerateFlashCardsTool(Tool):
    """
    Tool for generating educational flash cards for medical topics.

    This tool creates flash card style educational content to help with
    memorization and quick review of key medical concepts. Cards come from
    curated per-topic templates when available, otherwise from generic
    templates filled with the topic name.
    """

    def __init__(self) -> None:
        """Initialize the GenerateFlashCardsTool with its name, description and argument schema."""
        super().__init__()
        self.name = "generate_flash_cards"
        self.description = "Generate educational flash cards for medical topics to aid memorization and quick review."
        # JSON schema advertised to the LLM; run() enforces the same 5-20 bound.
        self.args_schema = {
            "type": "object",
            "properties": {
                "topic": {
                    "type": "string",
                    "description": "The medical topic to create flash cards about (e.g., 'hypertension', 'diabetes medications', 'heart murmurs')"
                },
                "number_of_cards": {
                    "type": "integer",
                    "description": "Number of flash cards to generate (default: 10)",
                    "default": 10,
                    "minimum": 5,
                    "maximum": 20
                },
                "card_type": {
                    "type": "string",
                    "description": "Type of flash cards to generate",
                    "enum": ["basic_facts", "clinical_pearls", "mnemonics", "differential_diagnosis", "medications", "mixed"],
                    "default": "mixed"
                },
                "difficulty_level": {
                    "type": "string",
                    "description": "Difficulty level for the cards",
                    "enum": ["medical_student", "resident", "board_review", "advanced"],
                    "default": "medical_student"
                }
            },
            "required": ["topic"]
        }

    def openai_spec(self, legacy=False):
        """Return OpenAI function specification."""
        return {
            "name": self.name,
            "description": self.description,
            "parameters": self.args_schema
        }

    async def run(
        self,
        topic: str,
        number_of_cards: int = 10,
        card_type: str = "mixed",
        difficulty_level: str = "medical_student"
    ) -> Dict[str, Any]:
        """
        Generate educational flash cards for a medical topic.

        Args:
            topic (str): The medical topic to create flash cards about
            number_of_cards (int): Number of flash cards to generate; clamped
                to the 5-20 range declared in args_schema
            card_type (str): Type of flash cards (basic_facts, clinical_pearls, mnemonics, etc.)
            difficulty_level (str): Difficulty level for the cards

        Returns:
            Dict[str, Any]: Complete flash card set with organized cards

        Raises:
            ToolExecutionError: If card generation fails for any reason.
        """
        try:
            # FIX: enforce the bounds advertised in args_schema so direct
            # Python callers cannot request a pathological card count.
            number_of_cards = max(5, min(20, number_of_cards))

            logger.info(f"Generating {number_of_cards} flash cards for topic: {topic}")

            # Generate flash cards
            flash_cards = self._generate_flash_cards(topic, number_of_cards, card_type, difficulty_level)

            # Organize cards by subtopic
            organized_cards = self._organize_cards_by_subtopic(flash_cards, topic)

            # Generate study tips and usage instructions
            study_tips = self._generate_study_tips(topic, card_type)

            # FIX: stamp the set with the actual generation date instead of a
            # hard-coded constant ("2025-07-18").
            from datetime import date

            # Create the complete flash card set
            flash_card_set = {
                "topic": topic,
                "card_type": card_type,
                "difficulty_level": difficulty_level,
                "total_cards": len(flash_cards),
                "flash_cards": flash_cards,
                "organized_by_subtopic": organized_cards,
                "study_tips": study_tips,
                "review_schedule": self._generate_review_schedule(),
                "created_date": date.today().isoformat()
            }

            logger.info(f"Successfully generated {len(flash_cards)} flash cards for {topic}")
            return flash_card_set

        except Exception as e:
            logger.error(f"GenerateFlashCardsTool failed: {e}", exc_info=True)
            raise ToolExecutionError(f"Failed to generate flash cards: {e}") from e

    def _generate_flash_cards(self, topic: str, number_of_cards: int, card_type: str, difficulty_level: str) -> List[Dict[str, Any]]:
        """Generate the requested number of individual flash cards.

        Uses curated/generic templates first, then synthesizes extra cards
        when more are requested than templates exist.
        """
        flash_cards = []

        # Define card templates based on topic and type
        card_templates = self._get_card_templates(topic, card_type, difficulty_level)

        # Consume templates in order; synthesize additional cards past the end.
        for i in range(number_of_cards):
            if i < len(card_templates):
                card = card_templates[i]
            else:
                card = self._generate_additional_card(topic, card_type, difficulty_level, i)
            flash_cards.append(card)

        return flash_cards

    def _get_card_templates(self, topic: str, card_type: str, difficulty_level: str) -> List[Dict[str, Any]]:
        """Return curated card templates for known topics, else generic ones.

        Note: card_type and difficulty_level are currently only forwarded to
        the generic-template generator; curated sets ignore them.
        """
        templates = {
            "hypertension": [
                {
                    "card_id": 1,
                    "subtopic": "Definition",
                    "front": "What is the definition of hypertension?",
                    "back": "Blood pressure ≥140/90 mmHg on two separate occasions, or ≥130/80 mmHg for patients with diabetes or chronic kidney disease.",
                    "memory_aid": "Remember: 140/90 for general population, 130/80 for high-risk groups",
                    "difficulty": "basic"
                },
                {
                    "card_id": 2,
                    "subtopic": "Classification",
                    "front": "What are the stages of hypertension according to AHA/ACC guidelines?",
                    "back": "Normal: <120/80\nElevated: 120-129/<80\nStage 1: 130-139/80-89\nStage 2: ≥140/90\nCrisis: >180/120",
                    "memory_aid": "Mnemonic: Never Ever Stop Managing Crises (Normal, Elevated, Stage 1, Stage 2, Crisis)",
                    "difficulty": "basic"
                },
                {
                    "card_id": 3,
                    "subtopic": "First-line medications",
                    "front": "What are the first-line medications for hypertension?",
                    "back": "ACE inhibitors, ARBs, Calcium channel blockers, Thiazide diuretics",
                    "memory_aid": "Mnemonic: ACCT (ACE, ARB, CCB, Thiazide) - ACCounT for first-line HTN meds",
                    "difficulty": "basic"
                },
                {
                    "card_id": 4,
                    "subtopic": "Complications",
                    "front": "What are the major complications of untreated hypertension?",
                    "back": "Stroke, Heart attack, Heart failure, Kidney disease, Vision loss, Peripheral artery disease",
                    "memory_aid": "Mnemonic: SHOCKS (Stroke, Heart attack, Heart failure, Kidney disease, Vision loss, PAD)",
                    "difficulty": "intermediate"
                }
            ],
            "diabetes": [
                {
                    "card_id": 1,
                    "subtopic": "Diagnosis",
                    "front": "What are the diagnostic criteria for diabetes mellitus?",
                    "back": "Fasting glucose ≥126 mg/dL OR Random glucose ≥200 mg/dL + symptoms OR HbA1c ≥6.5% OR 2-hour OGTT ≥200 mg/dL",
                    "memory_aid": "Remember: 126 fasting, 200 random, 6.5% A1c, 200 OGTT",
                    "difficulty": "basic"
                },
                {
                    "card_id": 2,
                    "subtopic": "HbA1c targets",
                    "front": "What is the HbA1c target for most adults with diabetes?",
                    "back": "<7% for most adults. <6.5% for selected patients if achievable without hypoglycemia. <8% for patients with limited life expectancy or high risk of hypoglycemia.",
                    "memory_aid": "Lucky 7: <7% for most, adjust based on individual factors",
                    "difficulty": "basic"
                }
            ]
        }

        # Return templates for the topic or generate generic ones
        if topic.lower() in templates:
            return templates[topic.lower()]
        else:
            return self._generate_generic_templates(topic, card_type, difficulty_level)

    def _generate_generic_templates(self, topic: str, card_type: str, difficulty_level: str) -> List[Dict[str, Any]]:
        """Generate generic card templates (definition, presentation, diagnosis,
        treatment, complications) for any topic."""
        generic_templates = [
            {
                "card_id": 1,
                "subtopic": "Definition",
                "front": f"What is {topic}?",
                "back": f"[Definition and key characteristics of {topic}]",
                "memory_aid": f"Key concept: {topic}",
                "difficulty": "basic"
            },
            {
                "card_id": 2,
                "subtopic": "Clinical Presentation",
                "front": f"What are the main signs and symptoms of {topic}?",
                "back": f"[List of key clinical features of {topic}]",
                "memory_aid": f"Clinical pearl for {topic}",
                "difficulty": "basic"
            },
            {
                "card_id": 3,
                "subtopic": "Diagnosis",
                "front": f"How is {topic} diagnosed?",
                "back": f"[Diagnostic criteria and tests for {topic}]",
                "memory_aid": f"Diagnostic approach for {topic}",
                "difficulty": "intermediate"
            },
            {
                "card_id": 4,
                "subtopic": "Treatment",
                "front": f"What is the treatment for {topic}?",
                "back": f"[Treatment options and management for {topic}]",
                "memory_aid": f"Treatment strategy for {topic}",
                "difficulty": "intermediate"
            },
            {
                "card_id": 5,
                "subtopic": "Complications",
                "front": f"What are the complications of {topic}?",
                "back": f"[Potential complications and monitoring for {topic}]",
                "memory_aid": f"Watch for complications of {topic}",
                "difficulty": "advanced"
            }
        ]

        return generic_templates

    def _generate_additional_card(self, topic: str, card_type: str, difficulty_level: str, card_number: int) -> Dict[str, Any]:
        """Generate one extra card when more cards are requested than templates exist."""
        additional_subtopics = [
            "Pathophysiology", "Epidemiology", "Risk factors", "Differential diagnosis",
            "Prognosis", "Prevention", "Monitoring", "Patient education", "Guidelines"
        ]

        # NOTE(review): the -5 offset assumes five template cards precede the
        # extras (true for generic templates); curated topics with fewer
        # templates produce a negative offset, which Python's modulo still
        # wraps to a valid index — confirm the intended subtopic ordering.
        subtopic_index = (card_number - 5) % len(additional_subtopics)
        subtopic = additional_subtopics[subtopic_index]

        return {
            "card_id": card_number + 1,
            "subtopic": subtopic,
            "front": f"What should you know about {subtopic.lower()} of {topic}?",
            "back": f"[Key information about {subtopic.lower()} related to {topic}]",
            "memory_aid": f"Clinical pearl: {subtopic} in {topic}",
            "difficulty": "intermediate"
        }

    def _organize_cards_by_subtopic(self, flash_cards: List[Dict], topic: str) -> Dict[str, List[Dict]]:
        """Group cards by their "subtopic" field for structured learning.

        The topic argument is accepted for interface stability but unused.
        """
        organized = {}

        for card in flash_cards:
            subtopic = card.get("subtopic", "General")
            if subtopic not in organized:
                organized[subtopic] = []
            organized[subtopic].append(card)

        return organized

    def _generate_study_tips(self, topic: str, card_type: str) -> List[str]:
        """Generate study tips for using the flash cards effectively,
        with one extra card-type-specific tip where applicable."""
        tips = [
            "Review cards daily for optimal retention",
            "Use spaced repetition - review difficult cards more frequently",
            "Study the memory aids and mnemonics separately",
            "Test yourself by covering the back of the card first",
            "Create your own examples for each concept",
            "Review in both directions (front-to-back and back-to-front)",
            "Focus on understanding, not just memorization",
            "Connect concepts to real patient cases when possible"
        ]

        if card_type == "mnemonics":
            tips.append("Practice writing out the mnemonics from memory")
        elif card_type == "clinical_pearls":
            tips.append("Think of patient scenarios where each pearl would apply")

        return tips

    def _generate_review_schedule(self) -> Dict[str, str]:
        """Return a fixed spaced-repetition review schedule (day label -> task)."""
        schedule = {
            "Day 1": "Initial learning - review all cards",
            "Day 2": "Review all cards again",
            "Day 4": "Review cards you found difficult",
            "Day 7": "Review entire set",
            "Day 14": "Review difficult cards only",
            "Day 30": "Complete review of all cards",
            "Day 60": "Final review and assessment"
        }

        return schedule