# LLM-kitchen / app.py
# Source: Hugging Face Space "Keeby-smilyai/LLM-kitchen" (commit 105bb74, verified)
# app.py β€” FINAL VERSION
import gradio as gr
from backend import (
login_user,
signup_user,
get_user_runs,
get_run_logs,
queue_training_run,
publish_run_to_hub,
run_inference
)
from utils import ARCH_ANALOGIES, get_auto_hyperparams
# ------------------------------ STATE ------------------------------
# Module-level session state mutated by the callbacks below.
# NOTE(review): this is one shared dict for the whole process — on a multi-user
# Space, concurrent visitors would overwrite each other's login/arch_config.
# TODO confirm this is acceptable for a demo; gr.State would give per-session isolation.
user_state = {"user_id": None, "username": "", "arch_config": {}}
# ------------------------------ BACKEND WRAPPERS (UI LOGIC) ------------------------------
def login_action(username, password):
    """Authenticate the user; on success hide the login page and show the runs page.

    Returns updates for (login page, processes page, message, runs listing).
    """
    user_id, msg = login_user(username, password)
    if not user_id:
        # Failed login: leave both pages untouched, just surface the message.
        return gr.update(), gr.update(), msg, ""
    # Remember who is logged in for all later callbacks.
    user_state["user_id"] = user_id
    user_state["username"] = username
    return gr.update(visible=False), gr.update(visible=True), msg, page_processes()
def signup_action(username, password):
    """Create an account; on success switch from the signup page to the login page.

    Returns updates for (signup page, login page, message).
    """
    user_id, msg = signup_user(username, password)
    if not user_id:
        # Signup failed — stay on the same page and show the backend message.
        return gr.update(), gr.update(), msg
    return gr.update(visible=False), gr.update(visible=True), msg
def page_processes():
    """Render the logged-in user's training runs as one line of text per run.

    Runs come back from the backend as tuples: (id, arch_type, num_layers, status).
    """
    if not user_state.get("user_id"):
        return "Login required."
    lines = [
        f"🍳 Run #{run[0]} | {run[1].upper()} x{run[2]}L | Status: {run[3]}"
        for run in get_user_runs(user_state["user_id"])
    ]
    return "\n".join(lines) or "No runs yet."
def load_run_logs(run_id_str):
    """Fetch and format the training logs for the run ID typed by the user.

    Args:
        run_id_str: Raw textbox value; must parse as an integer run ID.

    Returns:
        A display string: the run's status + logs, or a human-readable error.
    """
    # Narrow exception handling: the original bare `except:` also swallowed a
    # missing login (KeyError on user_state) and misreported it as a format error.
    try:
        run_id = int(run_id_str)
    except (TypeError, ValueError):
        return "Invalid Run ID format. Please enter a number."
    user_id = user_state.get("user_id")
    if user_id is None:
        return "Login required."
    logs, status = get_run_logs(user_id, run_id)
    if status == "unknown":
        # Backend signals "unknown" both for nonexistent runs and foreign runs.
        return "Error: Run not found or you do not have permission to view it."
    return f"Status: {status}\n\n{logs}"
def page_architecture_next(arch_type, num_layers):
    """Store the chosen architecture and advance to the hyperparameter page.

    Returns updates for (arch page, hyper page, analogy text, suggestion text,
    lr field, epochs field, batch field) — the last three pre-fill the inputs
    with the auto-suggested values.
    """
    auto_config = get_auto_hyperparams(arch_type, num_layers)
    user_state["arch_config"] = {
        "arch_type": arch_type,
        "num_layers": num_layers,
        "auto_config": auto_config,
    }
    analogy = ARCH_ANALOGIES.get(arch_type, "")
    suggestion = (
        "🧠 Auto-Seasoningβ„’ Suggestion:\n"
        f"LR: {auto_config['learning_rate']} | Epochs: {auto_config['epochs']} | Batch: {auto_config['batch_size']}"
    )
    return (
        gr.update(visible=False),
        gr.update(visible=True),
        analogy,
        suggestion,
        auto_config['learning_rate'],
        auto_config['epochs'],
        auto_config['batch_size'],
    )
def page_hyperparams_next(lr, epochs, batch_size):
    """Queue a training run using manual values, falling back to auto-suggestions.

    Empty/zero fields fall back to the auto_config stored by the previous page.
    Returns updates for (hyper page, processes page, runs listing).
    """
    config = user_state["arch_config"]
    auto = config["auto_config"]
    final_config = {
        "arch_type": config["arch_type"],
        "num_layers": config["num_layers"],
        "learning_rate": float(lr) if lr else auto["learning_rate"],
        "epochs": int(epochs) if epochs else auto["epochs"],
        "batch_size": int(batch_size) if batch_size else auto["batch_size"],
    }
    queue_training_run(user_state["user_id"], final_config)
    return gr.update(visible=False), gr.update(visible=True), page_processes()
def get_completed_runs():
    """Return the user's runs whose status column (index 3) is 'completed'."""
    completed = []
    for run in get_user_runs(user_state["user_id"]):
        if run[3] == 'completed':
            completed.append(run)
    return completed
def go_to_page_with_run_list(page_to_show):
    """Navigate from the processes page to *page_to_show*, filling its dropdown.

    *page_to_show* is a pre-built gr.update for the destination page. If there
    are no completed runs, warn and stay put.
    Returns updates for (processes page, destination page, run dropdown).
    """
    completed_runs = get_completed_runs()
    if not completed_runs:
        gr.Warning("You have no completed models! Finish a training run first.")
        return gr.update(), gr.update(), gr.update(choices=[])
    # Dropdown choices are (label, run_id) pairs; preselect the first run.
    choices = []
    for run in completed_runs:
        choices.append((f"Run #{run[0]}: {run[1].upper()} x{run[2]}L", run[0]))
    return gr.update(visible=False), page_to_show, gr.update(choices=choices, value=choices[0][1])
def inference_action(run_id, prompt):
    """Run the selected completed model on *prompt*; return text or an error string."""
    if not run_id:
        return "Error: Please select a model."
    try:
        return run_inference(run_id, prompt)
    except Exception as e:
        # Boundary handler: surface any backend failure as display text.
        return f"Error: {str(e)}"
def publish_action(run_id, hf_token, description):
    """Publish a completed run to the Hugging Face Hub.

    Args:
        run_id: Selected run ID from the dropdown (falsy means nothing selected).
        hf_token: User's HF token with write permission; required.
        description: Free-text model-card description; may be empty or None.

    Returns:
        The backend's status string, or an error message for display.
    """
    if not run_id:
        return "Error: Please select a model to publish."
    if not hf_token:
        return "Error: Hugging Face Token is required."
    try:
        # Derive a descriptive repo name from the run's architecture if we can
        # still find the run; otherwise fall back to a generic name.
        run_info = next((r for r in get_completed_runs() if r[0] == run_id), None)
        repo_name = f"llm-kitchen-{run_info[1]}-{run_info[2]}L-run{run_id}" if run_info else f"llm-kitchen-run-{run_id}"
        # Normalize description: a None textbox payload previously crashed on
        # .strip() and aborted the publish with a confusing NoneType error.
        return publish_run_to_hub(run_id, hf_token, repo_name, (description or "").strip())
    except Exception as e:
        # Boundary handler: report any backend/Hub failure as display text.
        return f"Publish failed: {str(e)}"
# ------------------------------ UI ------------------------------
# Page layout: each gr.Group below is one "page"; navigation works by toggling
# the groups' `visible` flags. Only the login page starts visible.
with gr.Blocks(title="LLM Kitchen 🍳", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🍳 Welcome to LLM Kitchen")
    gr.Markdown("### Cook your own language model β€” from scratch!")
    # --- Page: signup (hidden until the user clicks through from login) ---
    with gr.Group(visible=False) as page_signup_ui:
        gr.Markdown("### πŸ“ Create a New Account")
        signup_user_input = gr.Textbox(label="Username")
        signup_pass_input = gr.Textbox(label="Password", type="password")
        signup_btn = gr.Button("Sign Up", variant="primary")
        signup_msg = gr.Markdown()
        go_to_login_btn = gr.Button("Already have an account? Log In")
    # --- Page: login (the initial page) ---
    with gr.Group() as page_login_ui:
        gr.Markdown("### πŸ” Login to the Kitchen")
        login_user_input = gr.Textbox(label="Username")
        login_pass_input = gr.Textbox(label="Password", type="password")
        login_btn = gr.Button("Login", variant="primary")
        login_msg = gr.Markdown()
        go_to_signup_btn = gr.Button("Don't have an account? Sign Up")
    # --- Page: processes dashboard (run list, logs viewer, navigation hub) ---
    with gr.Group(visible=False) as page_processes_ui:
        gr.Markdown(f"### πŸ§‘β€πŸ³ Your Processes")
        with gr.Row():
            refresh_btn = gr.Button("πŸ”„ Refresh")
            inference_btn = gr.Button("πŸ§ͺ Inference Kitchen")
            publish_btn = gr.Button("πŸš€ Publishing Bay")
        runs_display = gr.Textbox(label="Your Training Runs", lines=8, interactive=False)
        with gr.Accordion("View Raw Logs", open=False):
            run_id_input = gr.Textbox(label="Enter a Run ID")
            view_logs_btn = gr.Button("View Logs")
            logs_display = gr.Textbox(label="Training Logs", lines=10, interactive=False)
        new_run_btn = gr.Button("βž• Start New Process", variant="primary")
    # --- Page: inference on a completed model ---
    with gr.Group(visible=False) as page_inference_ui:
        gr.Markdown("### πŸ§ͺ Inference Kitchen")
        inf_run_id_dropdown = gr.Dropdown(label="Select a Completed Model")
        prompt_input = gr.Textbox(label="Your Prompt", lines=3)
        infer_btn = gr.Button("Generate Answer")
        output_text = gr.Textbox(label="Model's Answer", lines=5, interactive=False)
        back_from_inf = gr.Button("⬅️ Back to Processes")
    # --- Page: publish a completed model to the HF Hub ---
    with gr.Group(visible=False) as page_publish_ui:
        gr.Markdown("### πŸš€ Publishing Bay")
        pub_run_id_dropdown = gr.Dropdown(label="Select a Completed Model to Publish")
        pub_hf_token_input = gr.Textbox(label="Your Hugging Face Token (with write permissions)", type="password")
        pub_description_input = gr.Textbox(label="Model Card Description", lines=4)
        publish_now_btn = gr.Button("Publish to Hugging Face Hub", variant="primary")
        publish_status = gr.Markdown()
        back_from_pub = gr.Button("⬅️ Back to Processes")
    # --- Page: step 2, architecture selection for a new run ---
    with gr.Group(visible=False) as page_arch_ui:
        gr.Markdown("### πŸ—οΈ Step 2: Choose Your Architecture")
        arch_dropdown = gr.Dropdown(["cnn", "rnn", "transformer"], label="Architecture Type")
        layers_slider = gr.Slider(1, 16, value=4, step=1, label="Number of Layers")
        arch_next_btn = gr.Button("Next β†’ Hyperparameters")
        arch_analogy = gr.Markdown()
        auto_suggestion = gr.Markdown()
    # --- Page: step 3, hyperparameters (pre-filled by page_architecture_next) ---
    with gr.Group(visible=False) as page_hyper_ui:
        gr.Markdown("### πŸ§‚ Step 3: Season Your Model")
        lr_input = gr.Number(label="Learning Rate")
        epochs_input = gr.Number(label="Epochs", precision=0)
        batch_input = gr.Number(label="Batch Size", precision=0)
        hyper_next_btn = gr.Button("Start Cooking! 🍲")
    # ------------------------------ EVENTS ------------------------------
    # Simple page toggles between login and signup.
    go_to_signup_btn.click(lambda: (gr.update(visible=False), gr.update(visible=True)), outputs=[page_login_ui, page_signup_ui])
    go_to_login_btn.click(lambda: (gr.update(visible=False), gr.update(visible=True)), outputs=[page_signup_ui, page_login_ui])
    login_btn.click(login_action, inputs=[login_user_input, login_pass_input], outputs=[page_login_ui, page_processes_ui, login_msg, runs_display])
    signup_btn.click(signup_action, inputs=[signup_user_input, signup_pass_input], outputs=[page_signup_ui, page_login_ui, signup_msg])
    refresh_btn.click(page_processes, outputs=runs_display)
    view_logs_btn.click(load_run_logs, inputs=run_id_input, outputs=logs_display)
    new_run_btn.click(lambda: (gr.update(visible=False), gr.update(visible=True)), outputs=[page_processes_ui, page_arch_ui])
    arch_next_btn.click(page_architecture_next, inputs=[arch_dropdown, layers_slider], outputs=[page_arch_ui, page_hyper_ui, arch_analogy, auto_suggestion, lr_input, epochs_input, batch_input])
    hyper_next_btn.click(page_hyperparams_next, inputs=[lr_input, epochs_input, batch_input], outputs=[page_hyper_ui, page_processes_ui, runs_display])
    # The lambda passes a pre-built "show this page" update so the shared helper
    # can route to either the inference or the publish page.
    inference_btn.click(lambda: go_to_page_with_run_list(gr.update(visible=True)), outputs=[page_processes_ui, page_inference_ui, inf_run_id_dropdown])
    infer_btn.click(inference_action, inputs=[inf_run_id_dropdown, prompt_input], outputs=output_text)
    back_from_inf.click(lambda: (gr.update(visible=False), gr.update(visible=True)), outputs=[page_inference_ui, page_processes_ui])
    publish_btn.click(lambda: go_to_page_with_run_list(gr.update(visible=True)), outputs=[page_processes_ui, page_publish_ui, pub_run_id_dropdown])
    publish_now_btn.click(publish_action, inputs=[pub_run_id_dropdown, pub_hf_token_input, pub_description_input], outputs=publish_status)
    back_from_pub.click(lambda: (gr.update(visible=False), gr.update(visible=True)), outputs=[page_publish_ui, page_processes_ui])
# Queue requests so long-running callbacks don't block the UI, then serve.
demo.queue().launch()