Update app.py
app.py
CHANGED
@@ -1,367 +1,26 @@
-import
 import gradio as gr
-import json
-import mimetypes  # Used in MiniMax template for base64 encoding, though not directly in my code for now
 import os
-import
 import time
-import re  # For regex to extract code blocks
-import threading  # For running agent asynchronously
-
-# Import modelscope_studio components
-import modelscope_studio.components.antd as antd
-import modelscope_studio.components.antdx as antdx
-import modelscope_studio.components.base as ms
-import modelscope_studio.components.pro as pro  # pro.Chatbot etc.
-from modelscope_studio.components.pro.chatbot import (
-    ChatbotActionConfig, ChatbotBotConfig, ChatbotMarkdownConfig,
-    ChatbotPromptsConfig, ChatbotUserConfig, ChatbotWelcomeConfig
-)
-
-# Your existing smolagents imports
-from run import create_agent, run_agent_with_streaming
 from dotenv import load_dotenv

 load_dotenv()
 CONFIG_FILE = ".user_config.env"

-# --- Constants and Helper Functions from MiniMaxAI template ---
-# (Adapt paths and values as per your project structure)
-
-# Dummy EXAMPLES and DEFAULT_PROMPTS for the Code Playground (replace with your actual data)
-EXAMPLES = {
-    "UI Components": [
-        {"title": "Simple Button", "description": "Generate a simple HTML button with hover effect."},
-        {"title": "Responsive Nav Bar", "description": "Create a responsive navigation bar using HTML and CSS."},
-    ],
-    "Games & Visualizations": [
-        {"title": "Maze Generator and Pathfinding Visualizer", "description": "Create a maze generator and pathfinding visualizer. Randomly generate a maze and visualize A* algorithm solving it step by step. Use canvas and animations. Make it visually appealing."},
-        {"title": "Particle Explosion Effect", "description": "Implement a particle explosion effect when the user clicks anywhere on the page."},
-    ],
-    "Interactive Apps": [
-        {"title": "Typing Speed Game", "description": "Build a typing speed test web app. Randomly show a sentence, and track the user's typing speed in WPM (words per minute). Provide live feedback with colors and accuracy."},
-        {"title": "Simple Calculator", "description": "Generate a basic four-function calculator with a user-friendly interface."},
-    ],
-}
-
-# The SYSTEM_PROMPT for code generation, now as a constant
-SYSTEM_PROMPT_CODE_GEN = """
-You are an expert web developer. Your task is to write a complete, single HTML file
-(including all necessary CSS and JavaScript within <style> and <script> tags, or as data URIs for images if any)
-that directly solves the user's request.
-
-- Do NOT use external stylesheets or scripts, unless explicitly requested and only if absolutely necessary (e.g., a CDN for a well-known library).
-- Your output MUST be a complete HTML document, enclosed in ```html ... ``` code block.
-- For interactive elements, use pure JavaScript or standard libraries.
-- If the user asks for a simple visualization, use HTML, CSS, and SVG or Canvas.
-- Ensure the HTML is self-contained and ready to be rendered in an iframe.
-- Provide a brief reasoning *before* the code block, explaining your approach.
-"""
-
-# Dummy DEFAULT_PROMPTS for the Chatbot (if your chatbot uses them)
-DEFAULT_PROMPTS = [
-    {"description": "What is the capital of France?"},
-    {"description": "Explain quantum entanglement in simple terms."},
-    {"description": "Write a short story about a brave knight."},
-]
-
-
-# --- Helper Functions from MiniMaxAI Template (adapted for your app) ---
-def remove_code_block(text):
-    """
-    Extracts the content of the first Markdown code block (```html ... ``` or ``` ... ```)
-    from a given text. If no code block is found, it checks if the text is raw HTML.
-    """
-    patterns = [
-        r'```(?:html|HTML)\n([\s\S]+?)\n```',  # Match ```html or ```HTML
-        r'```\n([\s\S]+?)\n```',  # Match code blocks without language markers
-        r'```([\s\S]+?)```'  # Match inline code blocks (less likely for full HTML)
-    ]
-    for pattern in patterns:
-        match = re.search(pattern, text, re.DOTALL)
-        if match:
-            extracted = match.group(1).strip()
-            print("[DEBUG] Successfully extracted code block.")
-            return extracted
-
-    # If no code block is found, check if the entire text looks like HTML
-    if text.strip().startswith(('<!DOCTYPE html>', '<html')):
-        print("[DEBUG] Text appears to be raw HTML, using as is.")
-        return text.strip()
-
-    print("[DEBUG] No code block found in text. Returning original text (may not be valid HTML).")
-    return text.strip()
-
-def send_to_sandbox(code):
-    """
-    Wraps HTML code in a sandbox iframe data URI.
-    Includes basic safety measures like replacing localStorage and onerror.
-    """
-    wrapped_code = f"""
-    <!DOCTYPE html>
-    <html>
-    <head>
-        <meta charset="UTF-8">
-        <meta name="viewport" content="width=device-width, initial-scale=1.0">
-        <script>
-            // Create a safe storage alternative
-            const safeStorage = {{
-                _data: {{}},
-                getItem: function(key) {{
-                    return this._data[key] || null;
-                }},
-                setItem: function(key, value) {{
-                    this._data[key] = value;
-                }},
-                removeItem: function(key) {{
-                    delete this._data[key];
-                }},
-                clear: function() {{
-                    this._data = {{}};
-                }}
-            }};
-            // Replace native localStorage
-            Object.defineProperty(window, 'localStorage', {{
-                value: safeStorage,
-                writable: false
-            }});
-            // Add error handling without using alert
-            window.onerror = function(message, source, lineno, colno, error) {{
-                console.error('Error in sandbox:', message);
-            }};
-        </script>
-        <style>
-            /* Basic default body styling for generated code */
-            body {{ margin: 0; padding: 10px; font-family: sans-serif; }}
-        </style>
-    </head>
-    <body>
-        {code}
-    </body>
-    </html>
-    """
-    encoded_html = base64.b64encode(wrapped_code.encode('utf-8')).decode('utf-8')
-    data_uri = f"data:text/html;charset=utf-8;base64,{encoded_html}"
-    iframe_html = f'<iframe src="{data_uri}" width="100%" height="920px" sandbox="allow-scripts allow-same-origin allow-forms allow-popups allow-modals allow-presentation" allow="display-capture"></iframe>'
-    print("[DEBUG] Generated iframe for sandbox.")
-    return iframe_html
-
-def select_example(example_state):
-    """Function to set the input textbox value from an example card."""
-    # Assuming example_state is a dictionary with a 'description' key
-    return gr.update(value=example_state.get("description", ""))
-
-
-# --- Your existing save_env_vars_to_file (from your original code) ---
 def save_env_vars_to_file(env_vars):
     print("[DEBUG] Saving user config to file")
     with open(CONFIG_FILE, "w") as f:
         for key, value in env_vars.items():
             f.write(f"{key}={value}\n")

-# --- CSS from MiniMaxAI template ---
-CUSTOM_CSS = """
-/* Add styles for the main container */
-.ant-tabs-content {
-    height: calc(100vh - 200px);
-    overflow: hidden;
-}
-.ant-tabs-tabpane {
-    height: 100%;
-    overflow-y: auto;
-}
-/* Modify existing styles */
-.output-empty,.output-loading {
-    display: flex;
-    flex-direction: column;
-    align-items: center;
-    justify-content: center;
-    width: 100%;
-    min-height: 680px;
-    position: relative;
-}
-.output-html {
-    display: flex;
-    flex-direction: column;
-    width: 100%;
-    min-height: 680px;
-}
-.output-html > iframe {
-    flex: 1;
-}
-.right_content {
-    display: flex;
-    flex-direction: column;
-    align-items: center;
-    justify-content: center;
-    width: 100%;
-    height: 100%;
-    min-height: unset;
-    background: #fff;
-    border-radius: 8px;
-    box-shadow: 0 2px 8px rgba(0,0,0,0.1);
-}
-/* Add styles for the code playground container */
-.code-playground-container {
-    height: 100%;
-    overflow-y: auto;
-    padding-right: 8px;
-}
-.code-playground-container::-webkit-scrollbar {
-    width: 6px;
-}
-.code-playground-container::-webkit-scrollbar-track {
-    background: #f1f1f1;
-    border-radius: 3px;
-}
-.code-playground-container::-webkit-scrollbar-thumb {
-    background: #888;
-    border-radius: 3px;
-}
-.code-playground-container::-webkit-scrollbar-thumb:hover {
-    background: #555;
-}
-.render_header {
-    display: flex;
-    align-items: center;
-    padding: 8px 16px;
-    background: #f5f5f5;
-    border-bottom: 1px solid #e8e8e8;
-    border-top-left-radius: 8px;
-    border-top-right-radius: 8px;
-}
-.header_btn {
-    width: 12px;
-    height: 12px;
-    border-radius: 50%;
-    margin-right: 8px;
-    display: inline-block;
-}
-.header_btn:nth-child(1) {
-    background: #ff5f56;
-}
-.header_btn:nth-child(2) {
-    background: #ffbd2e;
-}
-.header_btn:nth-child(3) {
-    background: #27c93f;
-}
-.output-html > iframe {
-    flex: 1;
-    border: none;
-    background: #fff;
-}
-.reasoning-box {
-    max-height: 300px;
-    overflow-y: auto;
-    border-radius: 4px;
-    font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif;
-    font-size: 14px;
-    line-height: 1.6;
-    width: 100%;
-    scroll-behavior: smooth;
-    display: flex;
-    flex-direction: column-reverse;
-}
-.reasoning-box .ms-markdown { /* Targeting markdown within the box for modelscope */
-    padding: 0 12px;
-}
-.reasoning-box::-webkit-scrollbar {
-    width: 6px;
-}
-.reasoning-box::-webkit-scrollbar-track {
-    background: #f1f1f1;
-    border-radius: 3px;
-}
-.reasoning-box::-webkit-scrollbar-thumb {
-    background: #888;
-    border-radius: 3px;
-}
-.reasoning-box::-webkit-scrollbar-thumb:hover {
-    background: #555;
-}
-.markdown-container {
-    max-height: 300px;
-    overflow-y: auto;
-    border-radius: 4px;
-    font-family: -apple-system, BlinkMacMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif;
-    font-size: 14px;
-    line-height: 1.6;
-    width: 100%;
-    scroll-behavior: smooth;
-    display: flex;
-    flex-direction: column-reverse;
-}
-/* Example card styles */
-.example-card {
-    flex: 1 1 calc(50% - 20px);
-    max-width: calc(50% - 20px);
-    margin: 6px;
-    transition: all 0.3s;
-    cursor: pointer;
-    border: 1px solid #e8e8e8;
-    border-radius: 8px;
-    box-shadow: 0 2px 8px rgba(0,0,0,0.05);
-}
-.example-card:hover {
-    transform: translateY(-4px);
-    box-shadow: 0 4px 12px rgba(0,0,0,0.1);
-    border-color: #d9d9d9;
-}
-.example-card .ant-card-meta-title {
-    font-size: 16px;
-    font-weight: 500;
-    margin-bottom: 8px;
-    color: #262626;
-}
-.example-card .ant-card-meta-description {
-    color: #666;
-    font-size: 14px;
-    line-height: 1.5;
-}
-/* Example tabs styles */
-.example-tabs .ant-tabs-nav {
-    margin-bottom: 16px;
-}
-.example-tabs .ant-tabs-tab {
-    padding: 8px 16px;
-    font-size: 15px;
-}
-.example-tabs .ant-tabs-tab-active {
-    font-weight: 500;
-}
-/* Empty state styles */
-/* Corrected to match the target's `.right_content` for empty state */
-.right_content .output-empty {
-    display: flex;
-    flex-direction: column;
-    align-items: center;
-    justify-content: center;
-    width: 100%;
-    min-height: 620px; /* Adjusted to match original */
-    background: #fff;
-    border-radius: 8px;
-    box-shadow: 0 2px 8px rgba(0,0,0,0.1);
-}
-/* Add styles for the example cards container */
-.example-tabs .ant-tabs-content {
-    padding: 0 8px;
-}
-.example-tabs .ant-flex {
-    margin: 0 -8px;
-    width: calc(100% + 16px);
-}
-"""
-
-# --- Main Gradio Interface Launch Function ---
 def launch_interface():
-    # --- Chatbot Tab Logic (Your existing logic, using gr.gr components) ---
     def setup_agent_streaming(question, model_id, hf_token, openai_api_key, serpapi_key, api_endpoint, use_custom_endpoint,
-
         print("[DEBUG] Setting up agent with input question:", question)

         if question.strip() == "":
-            yield "
             return

         endpoint = custom_api_endpoint if use_custom_endpoint else api_endpoint
@@ -388,389 +47,119 @@ def launch_interface():
             custom_search_url=custom_search_url
         )

-
-
         is_complete = False

         def highlight_text(text):
-
-
-            nonlocal final_answer_text
-            final_answer_text = text.split(":", 1)[1].strip()
-            return f"<p><span style='color:#10b981;font-weight:bold;'>[FINAL]</span> <mark>{final_answer_text}</mark></p>"
             elif "[ERROR]" in text:
-                return f"<
             elif "[STARTING]" in text:
-                return f"<
             elif text.strip():
-                # Wrap regular steps in details tag for collapsing
                 return f"<details><summary><span style='color:#f59e0b;'>Step</span></summary>\n<pre>{text.strip()}</pre>\n</details>"
             return ""

         def stream_callback(text):
-
-
-
-

         def run_agent_async():
             nonlocal is_complete
             try:
-                run_agent_with_streaming(agent, question, stream_callback)
             except Exception as e:
-
             finally:
                 is_complete = True

         agent_thread = threading.Thread(target=run_agent_async)
         agent_thread.start()

-
-        # Yield initial message
-        yield "<p><i>Agent started...</i></p>", ""  # Initial message for immediate feedback
-
         while not is_complete or agent_thread.is_alive():
-
-            if len(
-
-
-
-                last_buffer_length = len(current_html_output)
-            time.sleep(0.05)  # Smaller delay for more responsive updates
-
-        # Ensure final state is yielded
-        final_html_output = "".join(output_html_buffer)
-        yield final_html_output, final_answer_text

-
-
-                              custom_api_endpoint, custom_api_key):
-        print(f"[DEBUG] Starting code generation with query: {query}")

-
-        # Reset outputs and show empty state
-        # Yield for reasoning_output (Markdown), code_output_raw (Code), sandbox_output (HTML)
-        # code_output_tabs_container (antd.Tabs, for active_key and visibility)
-        # loading_state_group (gr.Group, for visibility)
-        # loading_tip (gr.State, for value and visibility)
-        yield gr.update(value=""), gr.update(value=""), gr.update(value=""), \
-            gr.update(selected="empty", visible=False), gr.update(visible=True), \
-            gr.update(value="Enter your request to generate code", visible=False)
-        return
-
-        endpoint = custom_api_endpoint if use_custom_endpoint else api_endpoint
-        api_key = custom_api_key if use_custom_endpoint else openai_api_key
-
-        agent = create_agent(
-            model_id=model_id,
-            hf_token=hf_token,
-            openai_api_key=openai_api_key,
-            serpapi_key=serpapi_key,  # May not be needed for pure code gen, but kept for consistency
-            api_endpoint=api_endpoint,
-            custom_api_endpoint=endpoint,
-            custom_api_key=api_key,
-            search_provider="none",  # Explicitly set to none if not used
-            search_api_key=None,
-            custom_search_url=None
-        )
-
-        # Corrected: Set the system prompt using prompt_templates as per the error message.
-        if hasattr(agent, 'prompt_templates'):
-            if "system_prompt" in agent.prompt_templates:
-                agent.prompt_templates["system_prompt"] = SYSTEM_PROMPT_CODE_GEN
-                print("[DEBUG] Set agent.prompt_templates['system_prompt'] for code generation.")
-            elif 'user_agent' in agent.prompt_templates and 'system_message' in agent.prompt_templates['user_agent']:
-                agent.prompt_templates['user_agent']['system_message'] = SYSTEM_PROMPT_CODE_GEN
-                print("[DEBUG] Set agent.prompt_templates['user_agent']['system_message'] for code generation.")
-            else:
-                print("[WARNING] Could not set system prompt for CodeAgent using known patterns. "
-                      "Agent might not follow code generation instructions optimally.")
-                # Fallback: Prepend to the question if no proper system prompt mechanism
-                query = SYSTEM_PROMPT_CODE_GEN + "\n\n" + query
-        else:
-            print("[WARNING] Agent has no 'prompt_templates' attribute. Cannot set system prompt.")
-            query = SYSTEM_PROMPT_CODE_GEN + "\n\n" + query
-
-
-        reasoning_text_buffer = []  # Buffer for the raw text of reasoning/code combined
-        final_generated_code_content = ""  # Store the final extracted code
-        is_agent_run_complete = False  # Flag for the async agent run completion
-
-        # Callback for the run_agent_with_streaming
-        def code_gen_stream_callback(text_chunk):
-            nonlocal reasoning_text_buffer
-            reasoning_text_buffer.append(text_chunk)
-
-        # Function to run the agent asynchronously
-        def run_agent_async_for_codegen():
-            nonlocal is_agent_run_complete, final_generated_code_content
-            try:
-                # The run_agent_with_streaming returns the final answer
-                final_answer_from_agent = run_agent_with_streaming(agent, query, code_gen_stream_callback)
-                # Ensure the final answer from agent.run is captured
-                final_generated_code_content = final_answer_from_agent
-            except Exception as e:
-                reasoning_text_buffer.append(f"[ERROR] {str(e)}\n")
-            finally:
-                is_agent_run_complete = True
-
-        # Start agent in background thread
-        agent_thread = threading.Thread(target=run_agent_async_for_codegen)
-        agent_thread.start()
-
-        # --- Initial yield to show loading state ---
-        # Hide empty, show loading, show reasoning tab initially
-        yield gr.update(value="", visible=True), gr.update(value="", visible=False), gr.update(value="", visible=False), \
-            gr.update(selected="reasoning", visible=True), gr.update(visible=True), \
-            gr.update(value="Thinking and coding...", visible=True)
-
-        # --- Streaming loop for Gradio UI ---
-        last_buffer_len = 0
-        while not is_agent_run_complete or agent_thread.is_alive() or len(reasoning_text_buffer) > last_buffer_len:
-            current_full_output = "".join(reasoning_text_buffer)
-            if len(current_full_output) > last_buffer_len:
-                # Update reasoning output with accumulated text
-                yield gr.update(value=current_full_output, visible=True), \
-                    gr.update(value="", visible=False), \
-                    gr.update(value="", visible=False), \
-                    gr.update(selected="reasoning"), \
-                    gr.update(visible=False), \
-                    gr.update(value="Generating code...", visible=True)  # Update loading status
-                last_buffer_len = len(current_full_output)
-            time.sleep(0.05)  # Small delay for UI updates
-
-        # After the agent run completes and all buffered text is processed:
-        # Use the actual final answer from the agent's run method if available, otherwise buffer.
-        # This is important if the final_answer_from_agent is more concise than the full buffer.
-        final_output_for_parsing = final_generated_code_content if final_generated_code_content else "".join(reasoning_text_buffer)
-
-        generated_code_extracted = remove_code_block(final_output_for_parsing)
-
-        # Try to refine reasoning if code was extracted
-        reasoning_only_display = final_output_for_parsing
-        if generated_code_extracted:
-            # Simple heuristic to remove code block from reasoning for display
-            reasoning_only_display = reasoning_only_display.replace(f"```{generated_code_extracted}```", "").strip()
-            reasoning_only_display = reasoning_only_display.replace(f"```html\n{generated_code_extracted}\n```", "").strip()
-            reasoning_only_display = reasoning_only_display.replace(f"```HTML\n{generated_code_extracted}\n```", "").strip()
-
-        html_to_render = send_to_sandbox(generated_code_extracted) if generated_code_extracted else "<div>No valid HTML code was generated or extracted.</div>"
-
-        # Final yield to show the code and rendered output
-        yield gr.update(value=reasoning_only_display, visible=True), \
-            gr.update(value=generated_code_extracted, visible=True), \
-            gr.update(value=html_to_render, visible=True), \
-            gr.update(selected="render", visible=True), \
-            gr.update(visible=True), \
-            gr.update(value="Done", visible=False)  # Hide loading status
-
-    # --- Gradio UI Layout (Combining your original with MiniMaxAI template) ---
-    # Use gr.Blocks, ms.Application, antdx.XProvider, ms.AutoLoading for modelscope theming
-    with gr.Blocks(css=CUSTOM_CSS) as demo, ms.Application(), antdx.XProvider(), ms.AutoLoading():
         gr.Markdown("# SmolAgent - Intelligent AI with Web Tools")

-        with gr.
-        with gr.
-
-
-
-
-
-
-
-
-
-
-
-                use_custom_endpoint_chatbot = gr.Checkbox(label="Use Custom API Endpoint")
-                custom_api_endpoint_chatbot = gr.Textbox(label="Custom API URL", visible=False, placeholder="URL for your custom API endpoint")
-                custom_api_key_chatbot = gr.Textbox(label="Custom API Key (Optional)", type="password", visible=False, placeholder="API key for the custom endpoint")
-
-                with gr.Accordion("Search Configuration", open=False):
-                    serpapi_key_chatbot = gr.Textbox(label="SerpAPI Key (Optional)", type="password", value=os.getenv("SERPAPI_API_KEY", ""), placeholder="Your SerpAPI key for web searches")
-                    search_provider_chatbot = gr.Dropdown(choices=["serper", "searxng"], value="searxng", label="Search Provider")
-                    search_api_key_chatbot = gr.Textbox(label="Serper API Key", type="password", visible=False, placeholder="API key for Serper.dev if selected")
-                    custom_search_url_chatbot = gr.Textbox(label="Custom SearxNG URL", value="https://search.endorisk.nl/search", visible=True, placeholder="URL for your SearxNG instance")
-
-                submit_btn_chatbot = gr.Button("Run Agent", variant="primary")
-
-            with gr.Column(scale=2):
-                output_chatbot = gr.HTML(label="Live Agent Output")
-                final_chatbot = gr.Textbox(label="Final Answer", interactive=False)
-                copy_btn_chatbot = gr.Button("Copy Final Answer")
-
-            def update_visibility_chatbot(provider):
-                is_searxng = (provider == "searxng")
-                is_serper = (provider == "serper")
-                return {
-                    custom_search_url_chatbot: gr.update(visible=is_searxng),
-                    search_api_key_chatbot: gr.update(visible=is_serper)
-                }
-
-            def update_custom_fields_chatbot(checked):
-                return {
-                    custom_api_endpoint_chatbot: gr.update(visible=checked),
-                    custom_api_key_chatbot: gr.update(visible=checked)
-                }
-
-            search_provider_chatbot.change(fn=update_visibility_chatbot, inputs=search_provider_chatbot, outputs=[custom_search_url_chatbot, search_api_key_chatbot])
-            use_custom_endpoint_chatbot.change(fn=update_custom_fields_chatbot, inputs=use_custom_endpoint_chatbot, outputs=[custom_api_endpoint_chatbot, custom_api_key_chatbot])
-
-            submit_btn_chatbot.click(
-                fn=setup_agent_streaming,
-                inputs=[question, model_id_chatbot, hf_token_chatbot, openai_api_key_chatbot, serpapi_key_chatbot, api_endpoint_chatbot, use_custom_endpoint_chatbot, custom_api_endpoint_chatbot, custom_api_key_chatbot, search_provider_chatbot, search_api_key_chatbot, custom_search_url_chatbot],
-                outputs=[output_chatbot, final_chatbot],
-                show_progress=True
-            )
-
-            copy_btn_chatbot.click(
-                fn=None,
-                inputs=final_chatbot,
-                outputs=None,
-                js="(text) => { if (text) { navigator.clipboard.writeText(text); return 'Copied!'; } return ''; }"
-            )
-
-        with gr.TabItem("Code Playground (WebDev)"):
-            # This section uses modelscope_studio.components.antd/antdx/ms
-            with antd.Row(gutter=[32, 12], elem_classes="code-playground-container"):
-                with antd.Col(span=24, md=12):
-                    with antd.Flex(vertical=True, gap="middle"):
-                        code_query = antd.Input.Textarea(
-                            size="large",
-                            allow_clear=True,
-                            auto_size=dict(minRows=2, maxRows=6),
-                            placeholder="Please enter what kind of application you want or choose an example below and click the button"
-                        )
-                        generate_code_btn = antd.Button("Generate Code", type="primary", size="large")
-
-                        # Output tabs for Reasoning and Generated Code
-                        with antd.Tabs(active_key="reasoning", visible=False) as output_tabs_code_gen:  # Matches target's output_tabs
-                            with antd.Tabs.Item(key="reasoning", label="🤔 Thinking Process"):
-                                reasoning_output = ms.Markdown(elem_classes="reasoning-box")  # Use ms.Markdown
-                            with antd.Tabs.Item(key="code", label="💻 Generated Code"):
-                                # Gradio's gr.Code is suitable here, as modelscope doesn't have a direct equivalent for code display
-                                code_output_raw = gr.Code(label="Generated Code", language="html", interactive=False, lines=20)
-
-                        antd.Divider("Examples")
-                        # Examples with categories
-                        with antd.Tabs(elem_classes="example-tabs") as example_tabs:
-                            for category, examples_list in EXAMPLES.items():  # Renamed 'examples' to 'examples_list' to avoid conflict
-                                with antd.Tabs.Item(key=category, label=category):
-                                    with antd.Flex(gap="small", wrap=True):
-                                        for example in examples_list:
-                                            with antd.Card(
-                                                elem_classes="example-card",
-                                                hoverable=True
-                                            ) as example_card:
-                                                antd.Card.Meta(
-                                                    title=example['title'],
-                                                    description=example['description'])
-                                                # Use gr.State to pass the example data, and then select_example
-                                                example_card.click(
-                                                    fn=select_example,
-                                                    inputs=[gr.State(example)],
-                                                    outputs=[code_query]
-                                                )
-
-                with antd.Col(span=24, md=12):
-                    # This column will contain the output display: empty, loading, or rendered HTML
-                    with antd.Card(title="Output", elem_style=dict(height="100%"), styles=dict(body=dict(height="100%")), elem_id="output-container"):
-                        # This internal Tabs component will control the main right panel's state (empty/loading/render)
-                        with antd.Tabs(active_key="empty", render_tab_bar="() => null") as state_tab:  # Matches target's state_tab
-                            with antd.Tabs.Item(key="empty"):
-                                empty = antd.Empty(
-                                    description="Enter your request to generate code",
-                                    elem_classes="output-empty"  # Matches target's CSS class
-                                )
-                            with antd.Tabs.Item(key="loading"):
-                                # The Spin component from antd
-                                with antd.Spin(True, tip="Thinking and coding...", size="large", elem_classes="output-loading") as loading_spinner:  # Matches target's loading
-                                    ms.Div()  # Placeholder for content inside spin
-                            with antd.Tabs.Item(key="render"):
-                                sandbox_output = gr.HTML(elem_classes="output-html")  # Matches target's sandbox

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            function() {
-                setTimeout(() => {
-                    const reasoningBox = document.querySelector('.reasoning-box');
-                    if (reasoningBox) {
-                        reasoningBox.scrollTop = reasoningBox.scrollHeight;
-                    }
-                }, 100);
-            }
-            """
-        )
-        code_output_raw.change(  # This is gr.Code, might need different selector
-            fn=None,
-            inputs=[],
-            outputs=[],
-            js="""
-            function() {
-                setTimeout(() => {
-                    // Gradio's gr.Code output is often within a <textarea> or <pre> inside a div
-                    const codeBox = document.querySelector('.markdown-container pre, .markdown-container textarea');
-                    if (codeBox) {
-                        codeBox.scrollTop = codeBox.scrollHeight;
-                    }
-                }, 100);
-            }
-            """
-        )
-
-        # Handling tab changes to ensure correct visibility as in MiniMaxAI
-        def on_output_tabs_change(tab_key):
-            # This function is not directly used in the current streaming yield flow
-            # but is provided in the original template for programmatic tab changes.
-            # In our streaming, we set `selected` directly in the yields.
-            return gr.update(active_key=tab_key)

-
-
-
-
-
-
-

     print("[DEBUG] Launching updated Gradio interface")
-    demo.

 if __name__ == "__main__":
     launch_interface()
app.py after the change (new lines 1-26 and 47-165):

+from run import create_agent, run_agent_with_streaming
 import gradio as gr
 import os
+import threading
 import time
 from dotenv import load_dotenv

 load_dotenv()
 CONFIG_FILE = ".user_config.env"

 def save_env_vars_to_file(env_vars):
     print("[DEBUG] Saving user config to file")
     with open(CONFIG_FILE, "w") as f:
         for key, value in env_vars.items():
             f.write(f"{key}={value}\n")

 def launch_interface():
     def setup_agent_streaming(question, model_id, hf_token, openai_api_key, serpapi_key, api_endpoint, use_custom_endpoint,
+                              custom_api_endpoint, custom_api_key, search_provider, search_api_key, custom_search_url):
         print("[DEBUG] Setting up agent with input question:", question)

         if question.strip() == "":
+            yield "Please enter a question.", ""
             return

         endpoint = custom_api_endpoint if use_custom_endpoint else api_endpoint

             custom_search_url=custom_search_url
         )

+        output_buffer = []
+        final_answer = ""
         is_complete = False

         def highlight_text(text):
+            if "[COMPLETED] Final answer:" in text:
+                return f"<span style='color:#10b981;font-weight:bold;'>[FINAL]</span> <mark>{text.split(':', 1)[1].strip()}</mark>"
             elif "[ERROR]" in text:
+                return f"<span style='color:#ef4444;font-weight:bold;'>[ERROR]</span> <pre>{text.strip()}</pre>"
             elif "[STARTING]" in text:
+                return f"<span style='color:#f59e0b;font-weight:bold;'>[STEP]</span> {text.strip()}"
             elif text.strip():
                 return f"<details><summary><span style='color:#f59e0b;'>Step</span></summary>\n<pre>{text.strip()}</pre>\n</details>"
             return ""

         def stream_callback(text):
+            nonlocal final_answer
+            if "[COMPLETED] Final answer:" in text:
+                final_answer = text.split("[COMPLETED] Final answer:", 1)[1].strip()
+            formatted = highlight_text(text)
+            if formatted:
+                output_buffer.append(formatted)

         def run_agent_async():
             nonlocal is_complete
             try:
+                _ = run_agent_with_streaming(agent, question, stream_callback)
             except Exception as e:
+                output_buffer.append(highlight_text(f"[ERROR] {str(e)}"))
             finally:
                 is_complete = True

         agent_thread = threading.Thread(target=run_agent_async)
         agent_thread.start()

+        last_length = 0
         while not is_complete or agent_thread.is_alive():
+            current_output = "\n".join(output_buffer)
+            if len(current_output) > last_length:
+                yield current_output, ""
+                last_length = len(current_output)
+            time.sleep(0.1)

+        final_output = "\n".join(output_buffer)
+        yield final_output, final_answer

+    with gr.Blocks(title="SmolAgent - Streaming AI", theme="CultriX/gradio-theme") as demo:
         gr.Markdown("# SmolAgent - Intelligent AI with Web Tools")

+        with gr.Row():
+            with gr.Column(scale=1):
+                question = gr.Textbox(label="Your Question", lines=3, placeholder="Enter your question or task for the AI agent...")
+                model_id = gr.Textbox(label="Model ID", value="gpt-4.1-nano", placeholder="e.g., gpt-4, claude-3-opus-20240229")
+
+                with gr.Accordion("API Configuration", open=False):
+                    hf_token = gr.Textbox(label="Hugging Face Token (Optional)", type="password", value=os.getenv("HF_TOKEN", ""), placeholder="Your Hugging Face token if using HF models")
+                    openai_api_key = gr.Textbox(label="OpenAI API Key (Optional)", type="password", value=os.getenv("OPENAI_API_KEY", ""), placeholder="Your OpenAI API key")
+                    api_endpoint = gr.Textbox(label="Default API Endpoint", value=os.getenv("API_ENDPOINT", "https://api.openai.com/v1"), placeholder="e.g., https://api.openai.com/v1")
+                    with gr.Group():
+                        use_custom_endpoint = gr.Checkbox(label="Use Custom API Endpoint")
+                        custom_api_endpoint = gr.Textbox(label="Custom API URL", visible=False, placeholder="URL for your custom API endpoint")
+                        custom_api_key = gr.Textbox(label="Custom API Key (Optional)", type="password", visible=False, placeholder="API key for the custom endpoint")

+                with gr.Accordion("Search Configuration", open=False):
+                    serpapi_key = gr.Textbox(label="SerpAPI Key (Optional)", type="password", value=os.getenv("SERPAPI_API_KEY", ""), placeholder="Your SerpAPI key for web searches")
+                    search_provider = gr.Dropdown(choices=["serper", "searxng"], value="searxng", label="Search Provider")
+                    # search_api_key is for Serper, custom_search_url is for SearxNG.
+                    # Default is searxng, so custom_search_url is visible, search_api_key is not.
+                    search_api_key = gr.Textbox(label="Serper API Key", type="password", visible=False, placeholder="API key for Serper.dev if selected")
+                    custom_search_url = gr.Textbox(label="Custom SearxNG URL", value="https://search.endorisk.nl/search", visible=True, placeholder="URL for your SearxNG instance")
+
+                submit_btn = gr.Button("Run Agent", variant="primary")
+
+            with gr.Column(scale=2):
+                output = gr.Markdown(label="Live Agent Output")
+                final = gr.Textbox(label="Final Answer", interactive=False)
+                copy_btn = gr.Button("Copy Final Answer")
+
+        def update_visibility(provider):
+            is_searxng = (provider == "searxng")
+            is_serper = (provider == "serper")
+            return {
+                custom_search_url: gr.update(visible=is_searxng),
+                search_api_key: gr.update(visible=is_serper)
+            }
+
+        def update_custom_fields(checked):
+            return {
+                custom_api_endpoint: gr.update(visible=checked),
+                custom_api_key: gr.update(visible=checked)
+            }
+
+        search_provider.change(fn=update_visibility, inputs=search_provider, outputs=[custom_search_url, search_api_key])
+        use_custom_endpoint.change(fn=update_custom_fields, inputs=use_custom_endpoint, outputs=[custom_api_endpoint, custom_api_key])
+
+        submit_btn.click(
+            fn=setup_agent_streaming,
+            inputs=[question, model_id, hf_token, openai_api_key, serpapi_key, api_endpoint, use_custom_endpoint, custom_api_endpoint, custom_api_key, search_provider, search_api_key, custom_search_url],
+            outputs=[output, final],
+            show_progress=True
+        )

+        # Output actions
+        copy_btn.click(
+            fn=None,
+            inputs=final,
+            outputs=None,
+            js="(text) => { if (text) { navigator.clipboard.writeText(text); return 'Copied!'; } return ''; }"
+        )
+        # Removed the non-existent export_md.click call that was here

     print("[DEBUG] Launching updated Gradio interface")
+    demo.launch()

 if __name__ == "__main__":
     launch_interface()
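For reference, the streaming approach kept in the new `setup_agent_streaming` is: a worker thread runs the agent and appends highlighted chunks to a shared buffer, while the Gradio generator polls that buffer and yields partial output until the thread finishes, then yields the final answer. The sketch below is a minimal, self-contained illustration of that pattern; `fake_agent_run` is a hypothetical stand-in for `run_agent_with_streaming` from `run.py`, so the snippet runs without the rest of the Space.

```python
import threading
import time


def fake_agent_run(question, callback):
    # Hypothetical stand-in for run_agent_with_streaming: emits a few step
    # messages, then a final answer, calling back with each chunk of text.
    for step in range(3):
        callback(f"[STARTING] step {step} for: {question}")
        time.sleep(0.2)
    callback("[COMPLETED] Final answer: 42")


def stream_answer(question):
    buffer = []        # chunks appended by the worker thread
    final_answer = ""
    done = False

    def on_chunk(text):
        nonlocal final_answer
        if "[COMPLETED] Final answer:" in text:
            final_answer = text.split("[COMPLETED] Final answer:", 1)[1].strip()
        buffer.append(text)

    def worker():
        nonlocal done
        try:
            fake_agent_run(question, on_chunk)
        finally:
            done = True

    thread = threading.Thread(target=worker)
    thread.start()

    last_len = 0
    # Poll the shared buffer; yield only when new text has arrived.
    while not done or thread.is_alive():
        current = "\n".join(buffer)
        if len(current) > last_len:
            yield current, ""
            last_len = len(current)
        time.sleep(0.1)

    # Final yield carries the extracted answer alongside the full log.
    yield "\n".join(buffer), final_answer


if __name__ == "__main__":
    for partial_log, answer in stream_answer("demo question"):
        print(answer if answer else partial_log)
```

Wiring such a generator to `submit_btn.click(..., outputs=[output, final])`, as the new file does, is what makes Gradio re-render the output components on every `yield`.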