guychuk committed on
Commit
9377eb2
·
verified ·
1 Parent(s): 710b591

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +36 -8
app.py CHANGED
@@ -1,10 +1,38 @@
1
  import gradio as gr
 
 
 
2
 
3
- with gr.Blocks(fill_height=True) as demo:
4
- with gr.Sidebar():
5
- gr.Markdown("# Inference Provider")
6
- gr.Markdown("This Space showcases the WinKawaks/vit-small-patch16-224 model, served by the hf-inference API. Sign in with your Hugging Face account to use this API.")
7
- button = gr.LoginButton("Sign in")
8
- gr.load("models/WinKawaks/vit-small-patch16-224", accept_token=button, provider="hf-inference")
9
-
10
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
from PIL import Image
from optimum.onnxruntime import ORTModelForImageClassification, ORTModelForVision2Seq
from transformers import AutoImageProcessor, AutoTokenizer
6
# Load the image processor and the ONNX-exported model.
# NOTE(review): WinKawaks/vit-small-patch16-224 is a plain ViT *image
# classification* checkpoint (ImageNet-1k). It ships no tokenizer and no
# decoder, so the original AutoTokenizer / ORTModelForVision2Seq /
# model.generate() combination fails at load time. Use the ORT
# image-classification class instead; run() still returns a string.
processor = AutoImageProcessor.from_pretrained("WinKawaks/vit-small-patch16-224")
model = ORTModelForImageClassification.from_pretrained(
    "WinKawaks/vit-small-patch16-224",
    export=True,  # export the PyTorch checkpoint to ONNX on the fly
)


def run(image):
    """Classify an image and return the top predicted label as text.

    Args:
        image: a PIL image, or None (Gradio passes None when nothing
            was uploaded).

    Returns:
        str: "label (confidence)" for the top-1 class, or a short
        message when no image was provided.
    """
    if image is None:
        return "No image provided."

    # Preprocess: resize/normalize into a pixel tensor batch of size 1.
    inputs = processor(images=image, return_tensors="pt")

    # Forward pass: logits over the classification head's classes.
    logits = model(**inputs).logits

    # Decode: top-1 class name plus its softmax probability.
    probs = logits.softmax(dim=-1)[0]
    top = int(probs.argmax())
    label = model.config.id2label[top]
    return f"{label} ({probs[top].item():.2%})"
27
+
28
+
29
# --- Gradio UI ---
# One image input wired to run(), one text output.
image_input = gr.Image(type="pil")

demo = gr.Interface(
    fn=run,
    inputs=image_input,
    outputs="text",
    title="ViT Vision2Seq ONNX Demo",
    description="Upload an image → get generated text from WinKawaks/vit-small-patch16-224 (ONNX).",
)

demo.launch()