Sisko09876 committed on
Commit
bbd04cf
·
verified ·
1 Parent(s): e08f9ab

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -60
app.py CHANGED
@@ -6,7 +6,6 @@ Using distilgpt2 for Ultra-Fast CPU Inference (Debug Mode)
6
  import os
7
  import torch
8
  from transformers import AutoTokenizer, AutoModelForCausalLM
9
- import yfinance as yf
10
  import gradio as gr
11
  import warnings
12
  warnings.filterwarnings('ignore')
@@ -35,7 +34,6 @@ try:
35
  tokenizer = AutoTokenizer.from_pretrained(model_name)
36
  model = AutoModelForCausalLM.from_pretrained(model_name)
37
 
38
- # Move to GPU if available, else CPU
39
  if torch.cuda.is_available():
40
  model = model.cuda()
41
  print("βœ“ Model on GPU")
@@ -47,52 +45,22 @@ except Exception as e:
47
  print(f"⚠ Model loading error: {e}")
48
  raise
49
 
50
- # ===============================
51
- # API LOOKUPS + CONTEXT ENRICHMENT
52
- # ===============================
53
- print("\n[2] Loading Market Data Engine...")
54
-
55
- def fetch_market_context(query):
56
- """
57
- Fetch live market data and enrich prompt with real numbers.
58
- """
59
- context = ""
60
- tickers = ["AAPL", "MSFT", "NVDA", "TSLA", "GOOGL", "AMZN", "BTC-USD", "ETH-USD"]
61
-
62
- for ticker in tickers:
63
- if ticker.lower() in query.lower():
64
- try:
65
- data = yf.Ticker(ticker).info
66
- price = data.get('currentPrice', 'N/A')
67
- pe = data.get('trailingPE', 'N/A')
68
- market_cap = data.get('marketCap', 'N/A')
69
- context += f"\n{ticker}: Price=${price}, P/E={pe}, Market Cap=${market_cap}"
70
- except:
71
- context += f"\n{ticker}: (live data unavailable)"
72
-
73
- return context if context else "\n(No specific tickers mentioned; generic market context mode)"
74
-
75
  # ===============================
76
  # INFERENCE ENGINE
77
  # ===============================
78
- print("\n[3] Loading Inference Engine...")
79
 
80
- def sisko_query(user_query, max_tokens=30):
81
  """
82
- Generate response using distilgpt2 + market context.
83
  """
84
  try:
85
- market_context = fetch_market_context(user_query)
86
-
87
- # Build simple prompt for distilgpt2
88
  prompt = f"Question: {user_query}\nAnswer:"
89
 
90
- # Tokenize and prepare
91
  inputs = tokenizer(prompt, return_tensors="pt")
92
  device = next(model.parameters()).device
93
  inputs = {k: v.to(device) for k, v in inputs.items()}
94
 
95
- # Generate response
96
  with torch.no_grad():
97
  outputs = model.generate(
98
  **inputs,
@@ -102,51 +70,45 @@ def sisko_query(user_query, max_tokens=30):
102
  do_sample=True
103
  )
104
 
105
- # Decode and clean
106
  full_resp = tokenizer.decode(outputs[0], skip_special_tokens=True)
107
-
108
- return full_resp.strip() if full_resp else "Unable to generate response at this time. Please try again."
109
 
110
  except Exception as e:
111
  return f"Error: {str(e)[:80]}"
112
 
113
- print("βœ“ Inference Engine Ready (distilgpt2 + Live Market Data)")
114
 
115
  # ===============================
116
  # GRADIO UI
117
  # ===============================
118
- print("\n[4] Launching Gradio UI...")
119
-
120
- def chat_sisko(message, history):
121
- try:
122
- resp = sisko_query(message)
123
- except Exception as e:
124
- resp = f"Error: {str(e)[:80]}"
125
- history.append((message, resp))
126
- return history, history
127
 
128
  with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue")) as demo:
129
  gr.Markdown(
130
  """
131
  # πŸ€– Sisko AI: FinKing
132
  ### AI-Powered Investing for Superior Returns
133
- **Powered by distilgpt2 + Live Market Data (Debug Mode)**
134
 
135
  Annual Return: **27%** | Sharpe Ratio: **0.82** | Volatility: **12%**
136
-
137
- *Strategy: Small LLM + API Lookups + Prompt Context for fast, accurate responses*
138
  """
139
  )
140
- gr.ChatInterface(
141
- fn=chat_sisko,
142
- title="Sisko AI: FinKing Chat",
143
- description="Stock, Crypto, Portfolio & Macro Analytics",
144
- examples=[
145
- "What is 2+2?",
146
- "Tell me about AAPL",
147
- "Bitcoin outlook?"
148
- ]
 
 
 
 
 
149
  )
 
150
  gr.Markdown(
151
  """
152
  ---
 
6
  import os
7
  import torch
8
  from transformers import AutoTokenizer, AutoModelForCausalLM
 
9
  import gradio as gr
10
  import warnings
11
  warnings.filterwarnings('ignore')
 
34
  tokenizer = AutoTokenizer.from_pretrained(model_name)
35
  model = AutoModelForCausalLM.from_pretrained(model_name)
36
 
 
37
  if torch.cuda.is_available():
38
  model = model.cuda()
39
  print("βœ“ Model on GPU")
 
45
  print(f"⚠ Model loading error: {e}")
46
  raise
47
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
48
  # ===============================
49
  # INFERENCE ENGINE
50
  # ===============================
51
+ print("\n[2] Loading Inference Engine...")
52
 
53
+ def sisko_query(user_query, max_tokens=20):
54
  """
55
+ Generate response using distilgpt2.
56
  """
57
  try:
 
 
 
58
  prompt = f"Question: {user_query}\nAnswer:"
59
 
 
60
  inputs = tokenizer(prompt, return_tensors="pt")
61
  device = next(model.parameters()).device
62
  inputs = {k: v.to(device) for k, v in inputs.items()}
63
 
 
64
  with torch.no_grad():
65
  outputs = model.generate(
66
  **inputs,
 
70
  do_sample=True
71
  )
72
 
 
73
  full_resp = tokenizer.decode(outputs[0], skip_special_tokens=True)
74
+ return full_resp.strip() if full_resp else "Unable to generate response."
 
75
 
76
  except Exception as e:
77
  return f"Error: {str(e)[:80]}"
78
 
79
+ print("βœ“ Inference Engine Ready (distilgpt2)")
80
 
81
  # ===============================
82
  # GRADIO UI
83
  # ===============================
84
+ print("\n[3] Launching Gradio UI...")
 
 
 
 
 
 
 
 
85
 
86
  with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue")) as demo:
87
  gr.Markdown(
88
  """
89
  # πŸ€– Sisko AI: FinKing
90
  ### AI-Powered Investing for Superior Returns
91
+ **Powered by distilgpt2 (Debug Mode)**
92
 
93
  Annual Return: **27%** | Sharpe Ratio: **0.82** | Volatility: **12%**
 
 
94
  """
95
  )
96
+
97
+ with gr.Row():
98
+ prompt_input = gr.Textbox(label="Ask me anything", placeholder="What is 2+2?")
99
+ output = gr.Textbox(label="Response")
100
+
101
+ submit_btn = gr.Button("Submit")
102
+ submit_btn.click(fn=sisko_query, inputs=prompt_input, outputs=output)
103
+
104
+ gr.Examples(
105
+ examples=["What is 2+2?", "Tell me about AAPL", "Bitcoin outlook?"],
106
+ inputs=prompt_input,
107
+ outputs=output,
108
+ fn=sisko_query,
109
+ cache_examples=False
110
  )
111
+
112
  gr.Markdown(
113
  """
114
  ---