tenet committed
Commit 4dcf554 · verified · 1 Parent(s): d576613

Update app.py

Files changed (1): app.py (+56 -16)
app.py CHANGED
@@ -1,8 +1,10 @@
 import gradio as gr
 from transformers import pipeline
 
-# Pre-load multiple models
-models = {
+# ----------------
+# TEXT MODELS
+# ----------------
+text_models = {
     "TinyBERT (Fill Mask)": pipeline("fill-mask", model="prajjwal1/bert-tiny"),
     "DistilBERT (Fill Mask)": pipeline("fill-mask", model="distilbert-base-uncased"),
     "ALBERT (Fill Mask)": pipeline("fill-mask", model="albert-base-v2"),
@@ -10,39 +12,77 @@ models = {
     "GPT-2 (Text Generation)": pipeline("text-generation", model="gpt2")
 }
 
-def run_model(model_name, text):
-    pipe = models[model_name]
+def run_text_model(model_name, text):
+    pipe = text_models[model_name]
 
-    # GPT-2 → freeform text generation
     if "GPT-2" in model_name:
         output = pipe(text, max_length=50, do_sample=True, top_k=50, temperature=0.7)
         return output[0]["generated_text"]
 
-    # Fill-mask models → require [MASK] token
     else:
         if "[MASK]" not in text:
-            # If user didn’t include a mask, append one
            text = text.strip()
            if not text.endswith("."):
                text += "."
            text = text[:-1] + " [MASK]."
 
-        preds = pipe(text, top_k=5)  # top 5 predictions
+        preds = pipe(text, top_k=5)
         formatted = "\n".join(
             [f"{p['token_str']} (prob={p['score']:.4f})" for p in preds]
         )
         return f"Input: {text}\n\nPredictions:\n{formatted}"
 
+
+# ----------------
+# IMAGE SEGMENTATION
+# ----------------
+segmentation_pipeline = pipeline(
+    "image-segmentation", model="nvidia/segformer-b0-finetuned-ade-512-512"
+)
+
+def segment_image(image):
+    results = segmentation_pipeline(image)
+    # Gradio's AnnotatedImage expects (base_image, [(mask, label), ...])
+    ann = [(r["mask"], r["label"]) for r in results]
+    return (image, ann)
+
+
+# ----------------
+# SPEECH RECOGNITION
+# ----------------
+asr_pipeline = pipeline("automatic-speech-recognition", model="openai/whisper-tiny")
+
+def transcribe(audio):
+    return asr_pipeline(audio)["text"]
+
+
+# ----------------
+# GRADIO APP
+# ----------------
 with gr.Blocks() as demo:
-    gr.Markdown("# 🔥 Tiny LLM Playground\nChoose a small model and test it!\n\n"
-                "💡 For BERT-style models, you can add `[MASK]` in your text, "
-                "or just type normally and I'll add one for you.")
+    gr.Markdown("# 🔥 Multi-Modal Playground\n"
+                "Try **Tiny LLMs, Image Segmentation, and Speech Models** all in one app!\n\n")
+
+    # TEXT TAB
+    with gr.Tab("Text Models"):
+        model_choice = gr.Dropdown(list(text_models.keys()), label="Choose Model")
+        text_input = gr.Textbox(label="Enter text or prompt")
+        text_output = gr.Textbox(label="Output", lines=8)
+        run_btn = gr.Button("Run")
+        run_btn.click(fn=run_text_model, inputs=[model_choice, text_input], outputs=text_output)
 
-    model_choice = gr.Dropdown(list(models.keys()), label="Choose Model")
-    text_input = gr.Textbox(label="Enter text or prompt")
-    output = gr.Textbox(label="Output", lines=8)
+    # IMAGE TAB
+    with gr.Tab("Image Segmentation"):
+        img_in = gr.Image(type="pil", label="Upload an Image")
+        img_out = gr.AnnotatedImage(label="Segmented Output")
+        seg_btn = gr.Button("Segment Objects")
+        seg_btn.click(fn=segment_image, inputs=img_in, outputs=img_out)
 
-    run_btn = gr.Button("Run")
-    run_btn.click(fn=run_model, inputs=[model_choice, text_input], outputs=output)
+    # AUDIO TAB
+    with gr.Tab("Speech Recognition"):
+        audio_in = gr.Audio(sources=["microphone", "upload"], type="filepath", label="Upload or record audio")
+        audio_out = gr.Textbox(label="Transcription")
+        asr_btn = gr.Button("Transcribe")
+        asr_btn.click(fn=transcribe, inputs=audio_in, outputs=audio_out)
 
 demo.launch()
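
For reviewers who want to poke at the changed pieces outside the running Space, a few hedged sketches follow. First, the [MASK]-insertion branch of run_text_model is plain string logic and can be sanity-checked without downloading any model; the helper name add_mask below is ours, not something app.py defines:

    # Standalone copy of run_text_model's [MASK] handling (helper name is ours).
    def add_mask(text: str) -> str:
        if "[MASK]" in text:
            return text
        text = text.strip()
        if not text.endswith("."):
            text += "."
        # Drop the trailing period and append " [MASK]."
        return text[:-1] + " [MASK]."

    assert add_mask("The capital of France is") == "The capital of France is [MASK]."
    assert add_mask("The sky is [MASK].") == "The sky is [MASK]."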
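
Second, the annotation format is the easy thing to get wrong in the segmentation tab: gr.AnnotatedImage takes a base image plus a list of (mask, label) pairs, and the transformers image-segmentation pipeline returns dicts with "mask" and "label" keys, so segment_image re-pairs them. A minimal smoke test, assuming app.py is importable as a module and using a blank image as a stand-in input:

    from PIL import Image
    from app import segment_image  # assumes app.py is importable as a module

    # Blank RGB image as a stand-in input; any image works.
    test_img = Image.new("RGB", (512, 512), "white")
    base, annotations = segment_image(test_img)
    assert base is test_img
    for mask, label in annotations:  # each annotation is a (mask, label) pair
        assert isinstance(label, str)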
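
Finally, the Whisper pipeline accepts a filepath, which is why the Audio component is declared with type="filepath". A one-line check under the same importability assumption; "sample.wav" is a placeholder path, not a file in this repo:

    from app import transcribe  # assumes app.py is importable as a module

    # "sample.wav" is a placeholder path, not a file in this repo.
    print(transcribe("sample.wav"))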