guychuk committed on
Commit
1d32d95
·
verified ·
1 Parent(s): 085ce57

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +72 -43
app.py CHANGED
@@ -3,66 +3,80 @@ import numpy as np
3
  import onnxruntime as ort
4
  from PIL import Image
5
  from huggingface_hub import hf_hub_download
6
- from transformers import AutoTokenizer
 
 
 
 
 
 
 
 
 
7
 
8
  # ---------------------------------------------------------
9
- # Download ONNX files directly from Hugging Face model hubs
10
  # ---------------------------------------------------------
11
 
12
- # 1) Multilingual sentiment (DistilBERT)
13
- multilingual_onnx = hf_hub_download(
14
  repo_id="lxyuan/distilbert-base-multilingual-cased-sentiments-student",
15
  filename="onnx/model.onnx"
16
  )
17
  tokenizer_multilingual = AutoTokenizer.from_pretrained(
18
  "lxyuan/distilbert-base-multilingual-cased-sentiments-student"
19
  )
20
- session_multilingual = ort.InferenceSession(multilingual_onnx, providers=["CPUExecutionProvider"])
 
 
 
 
21
 
22
- # 2) SDG BERT
23
- sdgbert_onnx = hf_hub_download(
 
24
  repo_id="sadickam/sdgBERT",
25
  filename="onnx/model.onnx"
26
  )
27
  tokenizer_sdg = AutoTokenizer.from_pretrained("sadickam/sdgBERT")
28
- session_sdg = ort.InferenceSession(sdgbert_onnx, providers=["CPUExecutionProvider"])
 
 
 
29
 
30
- # 3) German sentiment BERT
31
- german_onnx = hf_hub_download(
32
  repo_id="oliverguhr/german-sentiment-bert",
33
  filename="onnx/model.onnx"
34
  )
35
  tokenizer_german = AutoTokenizer.from_pretrained("oliverguhr/german-sentiment-bert")
36
- session_german = ort.InferenceSession(german_onnx, providers=["CPUExecutionProvider"])
 
 
 
37
 
38
- # 4) ViT-small image classifier
39
- vit_onnx = hf_hub_download(
40
  repo_id="WinKawaks/vit-small-patch16-224",
41
  filename="onnx/model.onnx"
42
  )
43
- session_vit = ort.InferenceSession(vit_onnx, providers=["CPUExecutionProvider"])
44
 
45
- # Basic preprocessing params (ImageNet)
46
  IMAGE_SIZE = 224
47
  MEAN = [0.485, 0.456, 0.406]
48
- STD = [0.229, 0.224, 0.225]
49
 
50
 
51
  # ---------------------------------------------------------
52
  # Inference functions
53
  # ---------------------------------------------------------
54
- def softmax(x):
55
- e = np.exp(x - np.max(x))
56
- return e / e.sum(axis=-1, keepdims=True)
57
-
58
 
59
  def run_multilingual(text):
60
  inputs = tokenizer_multilingual(text, return_tensors="np", truncation=True, padding=True)
61
  inputs = {k: v.astype(np.int64) for k, v in inputs.items()}
62
  logits = session_multilingual.run(None, inputs)[0][0]
63
  probs = softmax(logits)
64
- labels = tokenizer_multilingual.model.config.id2label
65
- return {labels[i]: float(probs[i]) for i in range(len(probs))}
66
 
67
 
68
  def run_sdg(text):
@@ -70,8 +84,7 @@ def run_sdg(text):
70
  inputs = {k: v.astype(np.int64) for k, v in inputs.items()}
71
  logits = session_sdg.run(None, inputs)[0][0]
72
  probs = softmax(logits)
73
- labels = tokenizer_sdg.model.config.id2label
74
- return {labels[i]: float(probs[i]) for i in range(len(probs))}
75
 
76
 
77
  def run_german(text):
@@ -79,8 +92,7 @@ def run_german(text):
79
  inputs = {k: v.astype(np.int64) for k, v in inputs.items()}
80
  logits = session_german.run(None, inputs)[0][0]
81
  probs = softmax(logits)
82
- labels = tokenizer_german.model.config.id2label
83
- return {labels[i]: float(probs[i]) for i in range(len(probs))}
84
 
85
 
86
  def preprocess_vit(image):
@@ -101,35 +113,52 @@ def run_vit(image):
101
 
102
 
103
  # ---------------------------------------------------------
104
- # Gradio UI
105
  # ---------------------------------------------------------
106
- def inference(model, text, image):
107
- if model == "Multilingual Sentiment":
 
108
  return run_multilingual(text)
109
- if model == "SDG Classification":
110
  return run_sdg(text)
111
- if model == "German Sentiment":
112
  return run_german(text)
113
- if model == "ViT Image Classification":
114
  if image is None:
115
- return {"error": "Upload an image"}
116
  return run_vit(image)
117
- return {"error": "Invalid selection"}
 
118
 
119
 
120
- with gr.Blocks() as demo:
121
- gr.Markdown("# πŸ” Multi-Model ONNX Inference (HF-loaded)")
 
122
 
123
- model_choice = gr.Dropdown(
124
- ["Multilingual Sentiment", "SDG Classification", "German Sentiment", "ViT Image Classification"],
 
 
 
 
 
 
 
 
 
125
  label="Choose a model"
126
  )
127
 
128
- text_in = gr.Textbox(label="Text Input", lines=3)
129
- img_in = gr.Image(label="Image Input", type="pil")
130
- output = gr.JSON(label="Output")
 
 
131
 
132
- run_btn = gr.Button("Run Inference")
133
- run_btn.click(fn=inference, inputs=[model_choice, text_in, img_in], outputs=[output])
 
 
 
134
 
135
  demo.launch()
 
3
  import onnxruntime as ort
4
  from PIL import Image
5
  from huggingface_hub import hf_hub_download
6
+ from transformers import AutoTokenizer, AutoConfig
7
+
8
+
9
+ # ---------------------------------------------------------
10
+ # Helper
11
+ # ---------------------------------------------------------
12
def softmax(x):
    """Numerically stable softmax over the last axis.

    Subtracting the per-row maximum before exponentiating prevents
    overflow for large logits. The original handled only a 1-D logits
    vector (``e.sum()`` over all elements); using ``axis=-1`` with
    ``keepdims=True`` additionally supports batched inputs of shape
    ``(..., num_classes)`` while returning identical results for the
    1-D case used by the run_* functions.

    Parameters
    ----------
    x : array-like
        Logits, 1-D or batched along leading axes.

    Returns
    -------
    np.ndarray
        Probabilities of the same shape as ``x``; each last-axis slice sums to 1.
    """
    x = np.asarray(x)
    e = np.exp(x - np.max(x, axis=-1, keepdims=True))
    return e / e.sum(axis=-1, keepdims=True)
15
+
16
 
17
  # ---------------------------------------------------------
18
+ # Load ONNX models + tokenizers + configs (for labels)
19
  # ---------------------------------------------------------
20
 
21
# ---------------------------------------------------------
# Load ONNX models + tokenizers + configs (for labels)
# ---------------------------------------------------------

def _load_text_model(repo_id):
    """Fetch one text model from the Hub and return its runtime pieces.

    Downloads the ONNX weights, loads the matching tokenizer, and reads
    ``id2label`` from the model config so outputs can be keyed by label
    name. Returns ``(tokenizer, id2label, session)`` with an
    onnxruntime CPU session.
    """
    onnx_path = hf_hub_download(repo_id=repo_id, filename="onnx/model.onnx")
    tokenizer = AutoTokenizer.from_pretrained(repo_id)
    id2label = AutoConfig.from_pretrained(repo_id).id2label
    session = ort.InferenceSession(onnx_path, providers=["CPUExecutionProvider"])
    return tokenizer, id2label, session


# --- Model 1: Multilingual DistilBERT Sentiment ---
tokenizer_multilingual, labels_multilingual, session_multilingual = _load_text_model(
    "lxyuan/distilbert-base-multilingual-cased-sentiments-student"
)

# --- Model 2: SDG-BERT ---
tokenizer_sdg, labels_sdg, session_sdg = _load_text_model("sadickam/sdgBERT")

# --- Model 3: German Sentiment BERT ---
tokenizer_german, labels_german, session_german = _load_text_model(
    "oliverguhr/german-sentiment-bert"
)

# --- Model 4: ViT Image Classifier (image model: no tokenizer/config labels) ---
vit_onnx_path = hf_hub_download(
    repo_id="WinKawaks/vit-small-patch16-224",
    filename="onnx/model.onnx"
)
session_vit = ort.InferenceSession(vit_onnx_path, providers=["CPUExecutionProvider"])

# ImageNet normalization constants used by the ViT preprocessing.
IMAGE_SIZE = 224
MEAN = [0.485, 0.456, 0.406]
STD = [0.229, 0.224, 0.225]
68
 
69
 
70
  # ---------------------------------------------------------
71
  # Inference functions
72
  # ---------------------------------------------------------
 
 
 
 
73
 
74
def run_multilingual(text):
    """Score *text* with the multilingual DistilBERT sentiment ONNX model.

    Tokenizes the input, runs the CPU ONNX session, and softmaxes the
    logits. Returns a dict mapping each configured label name to its
    probability as a plain float.
    """
    encoded = tokenizer_multilingual(text, return_tensors="np", truncation=True, padding=True)
    # ONNX graph expects int64 input ids / attention mask.
    feed = {name: arr.astype(np.int64) for name, arr in encoded.items()}
    logits = session_multilingual.run(None, feed)[0][0]
    probs = softmax(logits)
    return {labels_multilingual[idx]: float(p) for idx, p in enumerate(probs)}
 
80
 
81
 
82
  def run_sdg(text):
 
84
  inputs = {k: v.astype(np.int64) for k, v in inputs.items()}
85
  logits = session_sdg.run(None, inputs)[0][0]
86
  probs = softmax(logits)
87
+ return {labels_sdg[i]: float(probs[i]) for i in range(len(probs))}
 
88
 
89
 
90
  def run_german(text):
 
92
  inputs = {k: v.astype(np.int64) for k, v in inputs.items()}
93
  logits = session_german.run(None, inputs)[0][0]
94
  probs = softmax(logits)
95
+ return {labels_german[i]: float(probs[i]) for i in range(len(probs))}
 
96
 
97
 
98
  def preprocess_vit(image):
 
113
 
114
 
115
  # ---------------------------------------------------------
116
+ # Unified inference router
117
  # ---------------------------------------------------------
118
+
119
def inference(model_name, text, image):
    """Route a UI request to the model selected in the dropdown.

    ``model_name`` is the dropdown string; ``text`` feeds the text
    models and ``image`` (PIL or None) feeds the ViT classifier.
    Returns a label->probability dict on success, or an ``{"error": ...}``
    dict when the image is missing or the selection is unknown.
    """
    # Image model first: it is the only branch that needs `image`.
    if model_name == "ViT Image Classification":
        if image is None:
            return {"error": "Please upload an image."}
        return run_vit(image)
    if model_name == "Multilingual Sentiment":
        return run_multilingual(text)
    if model_name == "SDG Classification":
        return run_sdg(text)
    if model_name == "German Sentiment":
        return run_german(text)
    return {"error": "Invalid model selected."}
132
 
133
 
134
# ---------------------------------------------------------
# Gradio UI
# ---------------------------------------------------------

with gr.Blocks() as demo:
    gr.Markdown("# πŸ” Multi-Model ONNX Demo (Loaded from Hugging Face Hub)")
    gr.Markdown("Text + Image models, running entirely with **ONNX Runtime CPU**")

    # Choices must match the exact strings checked in inference().
    chooser = gr.Dropdown(
        ["Multilingual Sentiment", "SDG Classification", "German Sentiment", "ViT Image Classification"],
        label="Choose a model"
    )

    txt_box = gr.Textbox(lines=3, label="Text Input")
    img_box = gr.Image(type="pil", label="Image Input")
    result_json = gr.JSON(label="Model Output")

    go = gr.Button("Run")
    go.click(inference, inputs=[chooser, txt_box, img_box], outputs=result_json)

demo.launch()