| """ | |
| AURA Chat — Gradio Space | |
| Single-file Gradio app that: | |
| - Accepts newline-separated prompts (data queries) from the user. | |
| - On "Analyze" scrapes those queries, sends the aggregated text to a locked LLM, | |
| and returns a polished analysis with a ranked list of best stocks and an | |
| "Investment Duration" for each stock. | |
| - Seeds a chat component with the generated analysis; user can then chat about it. | |
| Notes: | |
| - Model, max tokens, and delay between scrapes are fixed. | |
| - User only inputs prompts; everything else is predefined. | |
| """ | |
import os
import time
from typing import List

import requests
import gradio as gr
# -----------------------
# Configuration (fixed)
# -----------------------
SCRAPER_API_URL = os.getenv("SCRAPER_API_URL", "https://deep-scraper-96.created.app/api/deep-scrape")
SCRAPER_HEADERS = {"User-Agent": "Mozilla/5.0", "Content-Type": "application/json"}
LLM_MODEL = os.getenv("LLM_MODEL", "openai/gpt-oss-20b:free")
MAX_TOKENS = int(os.getenv("LLM_MAX_TOKENS", "3000"))
SCRAPE_DELAY = float(os.getenv("SCRAPE_DELAY", "1.0"))
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
OPENAI_BASE_URL = os.getenv("OPENAI_BASE_URL", "https://openrouter.ai/api/v1")
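# Every setting above can be overridden through environment variables (Space
# Secrets on Hugging Face). Example local run with hypothetical values:
#   OPENAI_API_KEY=sk-... LLM_MODEL=openai/gpt-oss-20b:free python app.py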
# -----------------------
# Prompt engineering (fixed)
# -----------------------
PROMPT_TEMPLATE = f"""
You are AURA, a concise, professional hedge-fund research assistant.

Task:
- Given the scraped data below, produce a clear, readable analysis that:
  1) Lists the top 5 stock picks (or fewer if there is not enough data).
  2) For each stock provides: Ticker / Company name, a short rationale (2-3 bullets),
     and an explicit **Investment Duration** entry: one-line "When to Invest" and one-line "When to Sell".
  3) Keeps each stock entry short and scannable, as a bullet or numbered list.
  4) Opens with a 2-3 sentence summary conclusion at the top.
  5) Outputs plain text with clean formatting.
  6) Includes a concise "Assumptions & Risks" section (2-3 bullet points).

Max tokens for the LLM response: {MAX_TOKENS}
Model: {LLM_MODEL}
"""
# -----------------------
# Scraping
# -----------------------
def deep_scrape(query: str, retries: int = 3, timeout: int = 40) -> str:
    """POST a single query to the scraper API, retrying on failure."""
    payload = {"query": query}
    last_err = None
    for attempt in range(retries):
        try:
            resp = requests.post(SCRAPER_API_URL, headers=SCRAPER_HEADERS, json=payload, timeout=timeout)
            resp.raise_for_status()
            data = resp.json()
            # Flatten a dict response into "KEY:\nvalue" blocks; pass anything
            # else through as a string.
            if isinstance(data, dict):
                return "\n".join(f"{k.upper()}:\n{v}" for k, v in data.items())
            return str(data)
        except Exception as e:
            last_err = e
            time.sleep(1.0)
    return f"ERROR: Scraper failed: {last_err}"
def multi_scrape(queries: List[str], delay: float = SCRAPE_DELAY) -> str:
    """Scrape each query in turn and aggregate the results under headers."""
    cleaned = [q.strip() for q in queries if q.strip()]
    aggregated = []
    failures = 0
    for q in cleaned:
        result = deep_scrape(q)
        if result.startswith("ERROR"):
            failures += 1
        aggregated.append(f"\n=== QUERY: {q} ===\n{result}")
        time.sleep(delay)
    # Surface a detectable "ERROR:" prefix only when every query failed;
    # partial failures stay embedded in the aggregated text for the LLM to see.
    if cleaned and failures == len(cleaned):
        return "ERROR: Scraper failed for every query."
    return "\n".join(aggregated)
# -----------------------
# LLM call
# -----------------------
try:
    from openai import OpenAI
except ImportError:
    OpenAI = None

def run_llm_system_and_user(system_prompt: str, user_text: str) -> str:
    """Send a system + user message pair to the configured model and return the reply."""
    if OpenAI is None:
        return "ERROR: `openai` package not installed."
    if not OPENAI_API_KEY:
        return "ERROR: OPENAI_API_KEY not set in environment."
    client = OpenAI(base_url=OPENAI_BASE_URL, api_key=OPENAI_API_KEY)
    try:
        completion = client.chat.completions.create(
            model=LLM_MODEL,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_text},
            ],
            max_tokens=MAX_TOKENS,
        )
        return completion.choices[0].message.content
    except Exception as e:
        return f"ERROR: LLM call failed: {e}"
    finally:
        # Release the client's underlying HTTP connection pool.
        try:
            client.close()
        except Exception:
            pass
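# Quick smoke test (assumes a valid OPENAI_API_KEY and a reachable model):
#   print(run_llm_system_and_user("You are terse.", "Reply with OK."))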
# -----------------------
# Analysis pipeline
# -----------------------
def analyze_and_seed_chat(prompts_text: str):
    """Scrape the user's queries, run the LLM, and build the initial chat history."""
    if not prompts_text.strip():
        return "Please enter at least one prompt.", []
    queries = [line.strip() for line in prompts_text.splitlines() if line.strip()]
    scraped = multi_scrape(queries)
    if scraped.startswith("ERROR"):
        return scraped, []
    user_payload = f"SCRAPED DATA:\n\n{scraped}\n\nPlease follow the system instructions."
    analysis = run_llm_system_and_user(PROMPT_TEMPLATE, user_payload)
    if analysis.startswith("ERROR"):
        return analysis, []
    # Seed the chat as a list of role/content dicts.
    initial_chat = [
        {"role": "user", "content": f"Analyze the data I provided (prompts: {', '.join(queries)})"},
        {"role": "assistant", "content": analysis},
    ]
    return analysis, initial_chat
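# The pipeline can also be exercised headlessly (sketch):
#   analysis, history = analyze_and_seed_chat("13F filings Q3 2025")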
def continue_chat(chat_messages, user_message, analysis_text):
    """Append the user's follow-up and the model's reply to the chat history."""
    if not user_message.strip():
        return chat_messages
    # Copy rather than mutate in place, so Gradio sees a fresh state value.
    chat_messages = list(chat_messages)
    chat_messages.append({"role": "user", "content": user_message})
    followup_system = (
        "You are AURA, a helpful analyst. The conversation context includes a recently generated analysis."
    )
    user_payload = f"REFERENCE ANALYSIS:\n\n{analysis_text}\n\nUSER QUESTION: {user_message}\n\nRespond concisely."
    assistant_reply = run_llm_system_and_user(followup_system, user_payload)
    chat_messages.append({"role": "assistant", "content": assistant_reply})
    return chat_messages
# Convert dict chat history to the (user, assistant) tuple pairs gr.Chatbot expects.
def convert_to_gradio_chat_format(chat_messages):
    formatted = []
    i = 0
    while i < len(chat_messages):
        if chat_messages[i]["role"] == "user":
            user_msg = chat_messages[i]["content"]
            assistant_msg = ""
            # Pair the user turn with the assistant turn that follows it, if any.
            if i + 1 < len(chat_messages) and chat_messages[i + 1]["role"] == "assistant":
                assistant_msg = chat_messages[i + 1]["content"]
                i += 1
            formatted.append((user_msg, assistant_msg))
        i += 1
    return formatted
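# Example: [{"role": "user", "content": "hi"}, {"role": "assistant", "content": "hello"}]
# is converted to [("hi", "hello")].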
# -----------------------
# Gradio UI
# -----------------------
def build_ui():
    with gr.Blocks(title="AURA Chat — Hedge Fund Picks") as demo:
        # Custom CSS
        gr.HTML("""
        <style>
        .gradio-container { max-width: 1200px; margin: 20px auto; font-family: 'Arial', sans-serif; }
        .analysis-box { background: #f9f9f9; border-radius: 10px; padding: 12px; box-shadow: 0 4px 12px rgba(0,0,0,0.08); }
        .chat-box { background: #ffffff; border-radius: 10px; padding: 8px; box-shadow: 0 2px 10px rgba(0,0,0,0.05); }
        </style>
        """)
        gr.Markdown("# AURA Chat — Hedge Fund Picks")
        gr.Markdown("Enter data prompts (one per line). Click **Analyze**. Then chat about the generated analysis.")
        with gr.Row():
            # Left column: input
            with gr.Column(scale=1):
                prompts = gr.Textbox(
                    lines=6,
                    label="Data Prompts (one per line)",
                    placeholder="SEC insider transactions october 2025\n13F filings Q3 2025\ncompany: ACME corp insider buys"
                )
                analyze_btn = gr.Button("Analyze", variant="primary")
                error_box = gr.Markdown("", visible=False)
                gr.Markdown(f"**Fixed settings:** Model = `{LLM_MODEL}`, Max tokens = `{MAX_TOKENS}`, Scrape delay = `{SCRAPE_DELAY}s`")
                gr.Markdown("Add your `OPENAI_API_KEY` to Space Secrets.")
            # Right column: analysis + chat
            with gr.Column(scale=2):
                analysis_out = gr.Textbox(label="Generated Analysis", lines=18, interactive=False, elem_classes="analysis-box")
                gr.Markdown("**Chat with AURA about this analysis**")
                chatbot = gr.Chatbot(label="AURA Chat", height=420, elem_classes="chat-box")
                user_input = gr.Textbox(placeholder="Ask follow-up questions...", label="Your question")
                send_btn = gr.Button("Send")

        # States
        analysis_state = gr.State("")
        chat_state = gr.State([])

        # Handlers. Both handlers update the Chatbot directly, since gr.State
        # does not reliably emit .change events across Gradio versions.
        def on_analyze(prompts_text):
            analysis_text, initial_chat = analyze_and_seed_chat(prompts_text)
            if analysis_text.startswith("ERROR"):
                # Reveal the (initially hidden) error box.
                return "", gr.update(value=f"**Error:** {analysis_text}", visible=True), "", [], []
            return (
                analysis_text,
                gr.update(value="", visible=False),
                analysis_text,
                initial_chat,
                convert_to_gradio_chat_format(initial_chat),
            )

        def on_send(chat_state_list, user_msg, analysis_text):
            updated_history = continue_chat(chat_state_list or [], user_msg, analysis_text)
            return updated_history, "", convert_to_gradio_chat_format(updated_history)

        analyze_btn.click(
            fn=on_analyze,
            inputs=[prompts],
            outputs=[analysis_out, error_box, analysis_state, chat_state, chatbot]
        )
        send_btn.click(
            fn=on_send,
            inputs=[chat_state, user_input, analysis_state],
            outputs=[chat_state, user_input, chatbot]
        )
        user_input.submit(
            fn=on_send,
            inputs=[chat_state, user_input, analysis_state],
            outputs=[chat_state, user_input, chatbot]
        )
    return demo
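# Note: newer Gradio releases also support gr.Chatbot(type="messages"), which
# accepts the role/content dicts directly; the tuple conversion above targets
# the older default Chatbot format.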
# -----------------------
# Run
# -----------------------
if __name__ == "__main__":
    demo = build_ui()
    demo.launch(server_name="0.0.0.0", server_port=int(os.environ.get("PORT", 7860)))