GabrielSalem committed
Commit c24d711 · verified · 1 Parent(s): e5a4541

Update app.py
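This commit reworks the Space into a tabbed UI and exposes the scraping/analysis pipeline as plain functions (mcp_scrape, mcp_multi_scrape, mcp_analyze) that return JSON-friendly dicts. As a rough sketch (not part of the commit), assuming app.py is importable and the Space secrets such as OPENAI_API_KEY are configured, the new helpers could be smoke-tested like this:

```python
# Illustrative smoke test (not part of the commit). Assumes app.py is on the
# import path and OPENAI_API_KEY / scraper settings are set in the environment.
from app import mcp_scrape, mcp_multi_scrape, mcp_analyze

single = mcp_scrape("SEC insider transactions october 2025")
print(single["query"])           # the query echoed back
print(single["result"][:200])    # scraped text, or an "ERROR: ..." string

multi = mcp_multi_scrape("13F filings Q3 2025\ncompany: ACME corp insider buys")
print(multi["queries"])          # queries parsed one per input line

report = mcp_analyze("SEC insider transactions october 2025")
print(report["analysis"][:200])  # plain-text analysis from the LLM
print(len(report["seed_chat"]))  # seeded chat: user + assistant messages
```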

Files changed (1)
  1. app.py +109 -187
app.py CHANGED
@@ -1,15 +1,8 @@
 """
-AURA Chat — Gradio Space
 Single-file Gradio app that:
-- Accepts newline-separated prompts (data queries) from the user.
-- On "Analyze" scrapes those queries, sends the aggregated text to a locked LLM,
-  and returns a polished analysis with a ranked list of best stocks and an
-  "Investment Duration" (when to enter / when to exit) for each stock.
-- Seeds a chat component with the generated analysis; user can then chat about it.
-
-Notes:
-- Model, max tokens, and delay between scrapes are fixed and cannot be changed via UI.
-- Set OPENAI_API_KEY in environment (Space Secrets).
 """

 import os
@@ -19,12 +12,10 @@ import asyncio
 import requests
 import atexit
 import traceback
-from datetime import datetime
 from typing import List
 import gradio as gr

-
-# Defensive: ensure a fresh event loop early to avoid fd race on shutdown.
 if sys.platform != "win32":
     try:
         loop = asyncio.new_event_loop()
@@ -32,7 +23,6 @@ if sys.platform != "win32":
     except Exception:
         traceback.print_exc()

-
 # =============================================================================
 # CONFIGURATION (fixed)
 # =============================================================================
@@ -42,7 +32,6 @@ SCRAPER_HEADERS = {
     "Content-Type": "application/json"
 }

-# FIXED model & tokens (cannot be changed from UI)
 LLM_MODEL = os.getenv("LLM_MODEL", "openai/gpt-oss-20b:free")
 MAX_TOKENS = int(os.getenv("LLM_MAX_TOKENS", "3000"))
 SCRAPE_DELAY = float(os.getenv("SCRAPE_DELAY", "1.0"))
@@ -50,12 +39,7 @@ SCRAPE_DELAY = float(os.getenv("SCRAPE_DELAY", "1.0"))
 OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
 OPENAI_BASE_URL = os.getenv("OPENAI_BASE_URL", "https://openrouter.ai/api/v1")

-
-# =============================================================================
-# PROMPT ENGINEERING (fixed)
-# =============================================================================
 PROMPT_TEMPLATE = f"""You are AURA, a concise, professional hedge-fund research assistant.
-
 Task:
 - Given scraped data below, produce a clear, readable analysis that:
   1) Lists the top 5 stock picks (or fewer if not enough data).
@@ -68,50 +52,33 @@ Task:
      highest conviction pick).
   5) Output in plain text, clean formatting, easy for humans to read. No JSON.
   6) After the list, include a concise "Assumptions & Risks" section (2-3 bullet points).
-
 Important: Be decisive. If data is insufficient, state that clearly and provide
 the best-available picks with lower confidence.
-
 Max tokens for the LLM response: {MAX_TOKENS}
 Model: {LLM_MODEL}"""

-
 # =============================================================================
 # SCRAPING HELPERS
 # =============================================================================
 def deep_scrape(query: str, retries: int = 3, timeout: int = 40) -> str:
-    """Post a query to SCRAPER_API_URL and return a readable aggregation (or an error string)."""
     payload = {"query": query}
     last_err = None
-
     for attempt in range(1, retries + 1):
         try:
-            resp = requests.post(
-                SCRAPER_API_URL,
-                headers=SCRAPER_HEADERS,
-                json=payload,
-                timeout=timeout
-            )
             resp.raise_for_status()
             data = resp.json()
-
-            # Format into readable text
             if isinstance(data, dict):
                 parts = [f"{k.upper()}:\n{v}\n" for k, v in data.items()]
                 return "\n".join(parts)
-            else:
-                return str(data)
-
         except Exception as e:
             last_err = e
             if attempt < retries:
                 time.sleep(1.0)
-
     return f"ERROR: Scraper failed: {last_err}"

-
 def multi_scrape(queries: List[str], delay: float = SCRAPE_DELAY) -> str:
-    """Scrape multiple queries and join results into one large string."""
     aggregated = []
     for q in queries:
         q = q.strip()
@@ -123,7 +90,6 @@ def multi_scrape(queries: List[str], delay: float = SCRAPE_DELAY) -> str:
         time.sleep(delay)
     return "\n".join(aggregated)

-
 # =============================================================================
 # LLM INTERACTION
 # =============================================================================
@@ -132,44 +98,28 @@ try:
 except Exception:
     OpenAI = None

-
-def run_llm_system_and_user(
-    system_prompt: str,
-    user_text: str,
-    model: str = LLM_MODEL,
-    max_tokens: int = MAX_TOKENS
-) -> str:
-    """Create the OpenAI client lazily, call the chat completions endpoint, then close."""
     if OpenAI is None:
-        return "ERROR: openai package not installed or available. See requirements."
-
     if not OPENAI_API_KEY:
-        return "ERROR: OPENAI_API_KEY not set in environment. Please add it to Space Secrets."
-
     client = None
     try:
         client = OpenAI(base_url=OPENAI_BASE_URL, api_key=OPENAI_API_KEY)
         completion = client.chat.completions.create(
             model=model,
-            messages=[
-                {"role": "system", "content": system_prompt},
-                {"role": "user", "content": user_text},
-            ],
             max_tokens=max_tokens,
         )
-
-        # Extract content robustly
        if hasattr(completion, "choices") and len(completion.choices) > 0:
            try:
                return completion.choices[0].message.content
            except Exception:
                return str(completion.choices[0])
        return str(completion)
-
    except Exception as e:
        return f"ERROR: LLM call failed: {e}"
    finally:
-        # Try to close client transport
        try:
            if client is not None:
                try:
@@ -182,156 +132,137 @@ def run_llm_system_and_user(
            except Exception:
                pass

-
 # =============================================================================
 # MAIN PIPELINE
 # =============================================================================
 def analyze_and_seed_chat(prompts_text: str):
-    """Called when user clicks Analyze. Returns: (analysis_text, initial_chat_messages_list)"""
     if not prompts_text.strip():
-        return "Please enter at least one prompt (query) describing what data to gather.", []
-
     queries = [line.strip() for line in prompts_text.splitlines() if line.strip()]
     scraped = multi_scrape(queries, delay=SCRAPE_DELAY)
-
     if scraped.startswith("ERROR"):
         return scraped, []
-
-    # Compose user payload for LLM
     user_payload = f"SCRAPED DATA:\n\n{scraped}\n\nPlease follow the system instructions and output the analysis."
     analysis = run_llm_system_and_user(PROMPT_TEMPLATE, user_payload)
-
     if analysis.startswith("ERROR"):
         return analysis, []
-
-    # Seed chat with user request and assistant analysis
     initial_chat = [
         {"role": "user", "content": f"Analyze the data I provided (prompts: {', '.join(queries)})"},
         {"role": "assistant", "content": analysis}
     ]
     return analysis, initial_chat

-
 def continue_chat(chat_messages, user_message: str, analysis_text: str):
-    """Handle chat follow-ups. Returns updated list of message dicts."""
     if chat_messages is None:
         chat_messages = []
-    if not user_message or not user_message.strip():
         return chat_messages
-
-    # Append user's new message
     chat_messages.append({"role": "user", "content": user_message})
-
-    # Build LLM input using analysis as reference context
     followup_system = (
-        "You are AURA, a helpful analyst. The conversation context includes a recently "
-        "generated analysis from scraped data. Use that analysis as ground truth context; "
-        "answer follow-up questions, explain rationale, and provide clarifications. "
-        "Be concise and actionable."
     )
-    user_payload = f"REFERENCE ANALYSIS:\n\n{analysis_text}\n\nUSER QUESTION: {user_message}\n\nRespond concisely and reference lines from the analysis where appropriate."
-
     assistant_reply = run_llm_system_and_user(followup_system, user_payload)
-    if assistant_reply.startswith("ERROR"):
-        assistant_reply = assistant_reply
-
-    # Append assistant reply
     chat_messages.append({"role": "assistant", "content": assistant_reply})
     return chat_messages

 # =============================================================================
 # GRADIO UI
 # =============================================================================
-def build_demo():
-    with gr.Blocks(title="AURA Chat — Hedge Fund Picks") as demo:
-        # Custom CSS
-        gr.HTML("""
-        <style>
-        .gradio-container { max-width: 1100px; margin: 18px auto; }
-        .header { text-align: left; margin-bottom: 6px; }
-        .muted { color: #7d8590; font-size: 14px; }
-        .analysis-box { background: #ffffff; border-radius: 8px; padding: 12px; box-shadow: 0 4px 14px rgba(0,0,0,0.06); }
-        </style>
-        """)
-
-        gr.Markdown("# AURA Chat Hedge Fund Picks")
-        gr.Markdown(
-            "**Enter one or more data prompts (one per line)** — e.g. SEC insider transactions october 2025 company XYZ.\n\n"
-            "Only input prompts; model, tokens and timing are fixed. Press **Analyze** to fetch & generate the picks. "
-            "After analysis you can chat with the assistant about the results."
-        )
-
-        with gr.Row():
-            with gr.Column(scale=1):
-                prompts = gr.Textbox(
-                    lines=6,
-                    label="Data Prompts (one per line)",
-                    placeholder="SEC insider transactions october 2025\n13F filings Q3 2025\ncompany: ACME corp insider buys"
                 )
-                analyze_btn = gr.Button("Analyze", variant="primary")
-                error_box = gr.Markdown("", visible=False)
-                gr.Markdown(f"**Fixed settings:** Model = {LLM_MODEL} • Max tokens = {MAX_TOKENS} • Scrape delay = {SCRAPE_DELAY}s")
-                gr.Markdown("**Important:** Add your OPENAI_API_KEY to Space Secrets before running.")
-
-            with gr.Column(scale=1):
-                analysis_out = gr.Textbox(
-                    label="Generated Analysis (Top picks with Investment Duration)",
-                    lines=18,
-                    interactive=False
                 )
-                gr.Markdown("**Chat with AURA about this analysis**")
-                chatbot = gr.Chatbot(label="AURA Chat", height=420)
-                user_input = gr.Textbox(
-                    placeholder="Ask a follow-up question about the analysis...",
-                    label="Your question"
                 )
-                send_btn = gr.Button("Send")
-
-        # States
-        analysis_state = gr.State("")
-        chat_state = gr.State([])
-
-        # Handler functions
-        def on_analyze(prompts_text):
-            analysis_text, initial_chat = analyze_and_seed_chat(prompts_text)
-            if analysis_text.startswith("ERROR"):
-                return "", f"**Error:** {analysis_text}", "", []
-            return analysis_text, "", analysis_text, initial_chat
-
-        def on_send(chat_state_list, user_msg, analysis_text):
-            if not user_msg or not user_msg.strip():
-                return chat_state_list or [], ""
-            updated_history = continue_chat(chat_state_list or [], user_msg, analysis_text)
-            return updated_history, ""
-
-        def render_chat(chat_messages):
-            return chat_messages or []
-
-        # Wire handlers
-        analyze_btn.click(
-            fn=on_analyze,
-            inputs=[prompts],
-            outputs=[analysis_out, error_box, analysis_state, chat_state]
-        )
-        send_btn.click(
-            fn=on_send,
-            inputs=[chat_state, user_input, analysis_state],
-            outputs=[chat_state, user_input]
-        )
-        user_input.submit(
-            fn=on_send,
-            inputs=[chat_state, user_input, analysis_state],
-            outputs=[chat_state, user_input]
-        )
-        chat_state.change(
-            fn=render_chat,
-            inputs=[chat_state],
-            outputs=[chatbot]
-        )
-
     return demo

 # =============================================================================
 # CLEAN SHUTDOWN
@@ -340,26 +271,17 @@ def _cleanup_on_exit():
    try:
        loop = asyncio.get_event_loop()
        if loop and not loop.is_closed():
-            try:
-                loop.stop()
-            except Exception:
-                pass
-            try:
-                loop.close()
-            except Exception:
-                pass
-    except Exception:
-        pass

 atexit.register(_cleanup_on_exit)

-
 # =============================================================================
 # RUN
 # =============================================================================
 if __name__ == "__main__":
-    demo = build_demo()
-    demo.launch(
-        server_name="0.0.0.0",
-        server_port=int(os.environ.get("PORT", 7860))
-    )
 
 """
+AURA Chat — Gradio Space + MCP Server Tab
 Single-file Gradio app that:
+- Chat / Analysis Tab: Enter prompts, analyze, and chat with the assistant.
+- MCP Server Tab: Call scraping and analysis functions directly with JSON output.
 """

 import os
 
 import requests
 import atexit
 import traceback
 from typing import List
 import gradio as gr

+# Defensive: fresh event loop early to avoid fd race on shutdown
 if sys.platform != "win32":
     try:
         loop = asyncio.new_event_loop()

     except Exception:
         traceback.print_exc()

 # =============================================================================
 # CONFIGURATION (fixed)
 # =============================================================================

     "Content-Type": "application/json"
 }

 LLM_MODEL = os.getenv("LLM_MODEL", "openai/gpt-oss-20b:free")
 MAX_TOKENS = int(os.getenv("LLM_MAX_TOKENS", "3000"))
 SCRAPE_DELAY = float(os.getenv("SCRAPE_DELAY", "1.0"))

 OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
 OPENAI_BASE_URL = os.getenv("OPENAI_BASE_URL", "https://openrouter.ai/api/v1")

 PROMPT_TEMPLATE = f"""You are AURA, a concise, professional hedge-fund research assistant.
 Task:
 - Given scraped data below, produce a clear, readable analysis that:
   1) Lists the top 5 stock picks (or fewer if not enough data).

      highest conviction pick).
   5) Output in plain text, clean formatting, easy for humans to read. No JSON.
   6) After the list, include a concise "Assumptions & Risks" section (2-3 bullet points).
 Important: Be decisive. If data is insufficient, state that clearly and provide
 the best-available picks with lower confidence.
 Max tokens for the LLM response: {MAX_TOKENS}
 Model: {LLM_MODEL}"""

 # =============================================================================
 # SCRAPING HELPERS
 # =============================================================================
 def deep_scrape(query: str, retries: int = 3, timeout: int = 40) -> str:
     payload = {"query": query}
     last_err = None
     for attempt in range(1, retries + 1):
         try:
+            resp = requests.post(SCRAPER_API_URL, headers=SCRAPER_HEADERS, json=payload, timeout=timeout)
             resp.raise_for_status()
             data = resp.json()
             if isinstance(data, dict):
                 parts = [f"{k.upper()}:\n{v}\n" for k, v in data.items()]
                 return "\n".join(parts)
+            return str(data)
         except Exception as e:
             last_err = e
             if attempt < retries:
                 time.sleep(1.0)
     return f"ERROR: Scraper failed: {last_err}"

 def multi_scrape(queries: List[str], delay: float = SCRAPE_DELAY) -> str:
     aggregated = []
     for q in queries:
         q = q.strip()

         time.sleep(delay)
     return "\n".join(aggregated)

 # =============================================================================
 # LLM INTERACTION
 # =============================================================================

 except Exception:
     OpenAI = None

+def run_llm_system_and_user(system_prompt: str, user_text: str, model: str = LLM_MODEL, max_tokens: int = MAX_TOKENS) -> str:
     if OpenAI is None:
+        return "ERROR: openai package not installed or available."
     if not OPENAI_API_KEY:
+        return "ERROR: OPENAI_API_KEY not set in environment."
     client = None
     try:
         client = OpenAI(base_url=OPENAI_BASE_URL, api_key=OPENAI_API_KEY)
         completion = client.chat.completions.create(
             model=model,
+            messages=[{"role": "system", "content": system_prompt}, {"role": "user", "content": user_text}],
             max_tokens=max_tokens,
         )
         if hasattr(completion, "choices") and len(completion.choices) > 0:
             try:
                 return completion.choices[0].message.content
             except Exception:
                 return str(completion.choices[0])
         return str(completion)
     except Exception as e:
         return f"ERROR: LLM call failed: {e}"
     finally:
         try:
             if client is not None:
                 try:

             except Exception:
                 pass

 # =============================================================================
 # MAIN PIPELINE
 # =============================================================================
 def analyze_and_seed_chat(prompts_text: str):
     if not prompts_text.strip():
+        return "Please enter at least one prompt (query).", []
     queries = [line.strip() for line in prompts_text.splitlines() if line.strip()]
     scraped = multi_scrape(queries, delay=SCRAPE_DELAY)
     if scraped.startswith("ERROR"):
         return scraped, []
     user_payload = f"SCRAPED DATA:\n\n{scraped}\n\nPlease follow the system instructions and output the analysis."
     analysis = run_llm_system_and_user(PROMPT_TEMPLATE, user_payload)
     if analysis.startswith("ERROR"):
         return analysis, []
     initial_chat = [
         {"role": "user", "content": f"Analyze the data I provided (prompts: {', '.join(queries)})"},
         {"role": "assistant", "content": analysis}
     ]
     return analysis, initial_chat

 def continue_chat(chat_messages, user_message: str, analysis_text: str):
     if chat_messages is None:
         chat_messages = []
+    if not user_message.strip():
         return chat_messages
     chat_messages.append({"role": "user", "content": user_message})
     followup_system = (
+        "You are AURA, a helpful analyst. Use the previous analysis as context; answer follow-ups concisely."
     )
+    user_payload = f"REFERENCE ANALYSIS:\n\n{analysis_text}\n\nUSER QUESTION: {user_message}"
     assistant_reply = run_llm_system_and_user(followup_system, user_payload)
     chat_messages.append({"role": "assistant", "content": assistant_reply})
     return chat_messages

+def convert_to_gradio_chat_format(chat_messages):
+    return chat_messages or []
+
+# =============================================================================
+# MCP SERVER HELPERS
+# =============================================================================
+def mcp_scrape(query: str):
+    return {"query": query, "result": deep_scrape(query)}
+
+def mcp_multi_scrape(queries_text: str):
+    queries = [line.strip() for line in queries_text.splitlines() if line.strip()]
+    return {"queries": queries, "result": multi_scrape(queries)}
+
+def mcp_analyze(prompts_text: str):
+    analysis, seed_chat = analyze_and_seed_chat(prompts_text)
+    return {"prompts": prompts_text, "analysis": analysis, "seed_chat": seed_chat}

 # =============================================================================
 # GRADIO UI
 # =============================================================================
+def build_demo_with_mcp():
+    with gr.Blocks(title="AURA Chat — Hedge Fund Picks + MCP Server") as demo:
+        with gr.Tabs():
+            # Chat / Analysis Tab
+            with gr.TabItem("Chat / Analysis"):
+                with gr.Row():
+                    with gr.Column(scale=1):
+                        prompts = gr.Textbox(lines=6, label="Data Prompts")
+                        analyze_btn = gr.Button("Analyze", variant="primary")
+                        error_box = gr.Markdown("", visible=False)
+                    with gr.Column(scale=1):
+                        analysis_out = gr.Textbox(label="Generated Analysis", lines=18, interactive=False)
+                        gr.Markdown("**Chat with AURA**")
+                        chatbot = gr.Chatbot(height=420)
+                        user_input = gr.Textbox(placeholder="Ask follow-up...", label="Your question")
+                        send_btn = gr.Button("Send")
+
+                analysis_state = gr.State("")
+                chat_state = gr.State([])
+
+                analyze_btn.click(
+                    fn=lambda txt: on_analyze(txt),
+                    inputs=[prompts],
+                    outputs=[analysis_out, error_box, analysis_state, chat_state]
                 )
+                send_btn.click(
+                    fn=lambda chat_list, msg, analysis_txt: on_send(chat_list, msg, analysis_txt),
+                    inputs=[chat_state, user_input, analysis_state],
+                    outputs=[chat_state, user_input]
                 )
+                user_input.submit(
+                    fn=lambda chat_list, msg, analysis_txt: on_send(chat_list, msg, analysis_txt),
+                    inputs=[chat_state, user_input, analysis_state],
+                    outputs=[chat_state, user_input]
                 )
+                chat_state.change(
+                    fn=convert_to_gradio_chat_format,
+                    inputs=[chat_state],
+                    outputs=[chatbot]
+                )
+
+            # MCP Server Tab
+            with gr.TabItem("MCP Server"):
+                gr.Markdown("**Call scraping and analysis functions directly:**")
+                with gr.Row():
+                    with gr.Column(scale=1):
+                        single_query = gr.Textbox(label="Single Scrape Query")
+                        scrape_btn = gr.Button("Scrape Query")
+                        scrape_out = gr.JSON()
+                        multi_queries = gr.Textbox(lines=6, label="Multi Scrape Queries")
+                        multi_scrape_btn = gr.Button("Multi Scrape")
+                        multi_scrape_out = gr.JSON()
+                    with gr.Column(scale=1):
+                        analysis_prompts = gr.Textbox(lines=6, label="Analysis Prompts")
+                        analyze_mcp_btn = gr.Button("Run Full Analysis")
+                        analyze_mcp_out = gr.JSON()
+
+                scrape_btn.click(fn=mcp_scrape, inputs=[single_query], outputs=[scrape_out])
+                multi_scrape_btn.click(fn=mcp_multi_scrape, inputs=[multi_queries], outputs=[multi_scrape_out])
+                analyze_mcp_btn.click(fn=mcp_analyze, inputs=[analysis_prompts], outputs=[analyze_mcp_out])
+
     return demo

+# =============================================================================
+# Handlers for Chat Tab
+# =============================================================================
+def on_analyze(prompts_text):
+    analysis_text, initial_chat = analyze_and_seed_chat(prompts_text)
+    if analysis_text.startswith("ERROR"):
+        return "", f"**Error:** {analysis_text}", "", []
+    return analysis_text, "", analysis_text, initial_chat
+
+def on_send(chat_state_list, user_msg, analysis_text):
+    if not user_msg.strip():
+        return chat_state_list or [], ""
+    updated_history = continue_chat(chat_state_list or [], user_msg, analysis_text)
+    return updated_history, ""

 # =============================================================================
 # CLEAN SHUTDOWN

     try:
         loop = asyncio.get_event_loop()
         if loop and not loop.is_closed():
+            try: loop.stop()
+            except Exception: pass
+            try: loop.close()
+            except Exception: pass
+    except Exception: pass

 atexit.register(_cleanup_on_exit)

 # =============================================================================
 # RUN
 # =============================================================================
 if __name__ == "__main__":
+    demo = build_demo_with_mcp()
+    demo.launch(server_name="0.0.0.0", server_port=int(os.environ.get("PORT", 7860)))