anisgtboi commited on
Commit
09d6a1f
·
verified ·
1 Parent(s): 3da4e6c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +86 -150
app.py CHANGED
@@ -2,185 +2,121 @@
2
  import os
3
  import traceback
4
  import gradio as gr
5
- from typing import Tuple
6
 
7
- # Try to import transformers; if not available, the app will error and tell you to add requirements.
8
  try:
9
- from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
10
  except Exception as e:
11
  pipeline = None
 
12
 
13
- # Optional: Hugging Face hosted-inference fallback
14
- try:
15
- from huggingface_hub import InferenceApi
16
- except Exception:
17
- InferenceApi = None
18
-
19
- # ---------- CONFIG ----------
20
- # Lightweight models that work well on CPU / Spaces:
21
- MODEL_EN_TO_BN = "shhossain/opus-mt-en-to-bn" # small finetuned en -> bn (β‰ˆ75M params)
22
- MODEL_BN_TO_EN = "Helsinki-NLP/opus-mt-bn-en" # bn -> en
23
- # If you prefer other model ids, change the strings above.
24
-
25
- # Language labels for UI
26
- DIRECTION_CHOICES = ["English β†’ Bengali", "Bengali β†’ English"]
27
-
28
- # ---------- GLOBALS ----------
29
- local_pipeline = None
30
- local_model_name = None
31
- use_api_fallback = False
32
- inference_client = None
33
-
34
- # ---------- HELPERS ----------
35
- def try_load_local(model_name: str) -> Tuple[bool, str]:
36
- """Try to load a local transformers pipeline for translation.
37
- Returns (success, message)."""
38
- global local_pipeline, local_model_name, use_api_fallback
39
  if pipeline is None:
40
- return False, "transformers not installed (add to requirements.txt)"
41
- try:
42
- # Use the 'translation' pipeline (Marian / MarianMT based models)
43
- local_pipeline = pipeline("translation", model=model_name, device=-1, max_length=512)
44
- local_model_name = model_name
45
- use_api_fallback = False
46
- return True, f"Loaded local model: {model_name}"
47
- except Exception as e:
48
- use_api_fallback = True
49
- return False, f"Local load failed: {str(e)}"
50
-
51
- def try_init_inference_api(token_env="HF_API_TOKEN", model_name_fallback=None):
52
- """Initialize huggingface_hub Inference API client if token present."""
53
- global inference_client, use_api_fallback
54
- token = os.environ.get(token_env)
55
- if not token:
56
- return False, "No HF_API_TOKEN found in env (set Space secret HF_API_TOKEN)"
57
- if InferenceApi is None:
58
- return False, "huggingface_hub not installed (add to requirements.txt)"
59
  try:
60
- inference_client = InferenceApi(repo_id=model_name_fallback or "facebook/nllb-200-distilled-600M", token=token)
61
- use_api_fallback = True
62
- return True, "Inference API client ready"
 
63
  except Exception as e:
64
- return False, f"Inference API init failed: {str(e)}"
65
-
66
- def translate_with_local(text: str):
67
- global local_pipeline
68
- if local_pipeline is None:
69
- raise RuntimeError("Local pipeline not loaded")
70
- out = local_pipeline(text, max_length=512)
71
- if isinstance(out, list) and len(out) > 0:
72
- # many Marian models use 'translation_text' or 'generated_text'
73
- res = out[0].get("translation_text") if isinstance(out[0], dict) else None
74
- if not res:
75
- # fallback to first value in dict
76
- if isinstance(out[0], dict):
77
- res = list(out[0].values())[0]
78
- return res or str(out)
79
- return str(out)
80
-
81
- def translate_with_api(text: str, model_name: str):
82
- global inference_client
83
- if inference_client is None:
84
- raise RuntimeError("Inference client not ready")
85
- # Note: the Inference API will run the model hosted on HF; for Marian models, you just pass the text.
86
- res = inference_client(inputs=text, parameters={})
87
- # API returns either list or dict; try to extract text
88
- if isinstance(res, list) and len(res) > 0:
89
- first = res[0]
90
- if isinstance(first, dict):
91
- return first.get("translation_text") or first.get("generated_text") or str(first)
92
- return str(first)
93
- if isinstance(res, dict):
94
- return res.get("translation_text") or res.get("generated_text") or str(res)
95
- return str(res)
96
-
97
- # ---------- ON START: try local load (best-effort) ----------
98
- # We'll pre-load both directions lazily on first use; try EN->BN by default
99
- _success, _msg = try_load_local(MODEL_EN_TO_BN)
100
- print("Model load attempt:", _success, _msg)
101
-
102
- # If local load failed, but user supplied HF_API_TOKEN in Secrets, init inference client as fallback
103
- if use_api_fallback:
104
- ok, msg = try_init_inference_api(model_name_fallback=MODEL_EN_TO_BN)
105
- print("Inference API init:", ok, msg)
106
-
107
- # ---------- TRANSLATION FUNCTION FOR UI ----------
108
  def translate_text(text: str, direction: str):
109
- """Main translate function: returns (translation, status, analysis)"""
 
 
 
110
  if not text or not text.strip():
111
- return "", "Please type text to translate", ""
112
  try:
113
- model_name = MODEL_EN_TO_BN if direction == DIRECTION_CHOICES[0] else MODEL_BN_TO_EN
114
-
115
- # If local model not loaded or different than needed, try loading it
116
- global local_model_name
117
- if local_pipeline is None or local_model_name != model_name:
118
- ok, msg = try_load_local(model_name)
119
- print("Reload attempt:", ok, msg)
120
- # if local load failed, try to init API if token present
121
- if not ok and inference_client is None:
122
- ok2, msg2 = try_init_inference_api(model_name_fallback=model_name)
123
- print("Fallback init:", ok2, msg2)
124
-
125
- # If local available, use it
126
- if local_pipeline is not None and local_model_name == model_name:
127
- translated = translate_with_local(text)
128
- status = f"Local model used: {local_model_name}"
 
 
 
129
  else:
130
- # fallback to hosted inference
131
- if inference_client is None:
132
- return "", "No model available locally and no HF_API_TOKEN set for API fallback. Set HF_API_TOKEN in Space secrets.", ""
133
- translated = translate_with_api(text, model_name)
134
- status = f"Hosted Inference API used: {model_name}"
135
-
136
- # small "analysis" block: length, word count, suggestions
137
- words = len(text.split())
138
- analysis = f"Input words: {words}. Output length: {len(translated.split())} words."
139
  return translated, status, analysis
140
 
141
  except Exception as e:
 
142
  tb = traceback.format_exc()
143
- return "", f"Error: {str(e)}", tb
144
-
145
- # ---------- GRADIO APP UI ----------
146
- with gr.Blocks(title="English ↔ Bengali β€” Fast Translator") as demo:
147
- gr.Markdown("# English ↔ Bengali β€” Fast Translator")
148
- gr.Markdown(
149
- "Small, fast models (OPUS-MT) used for speed. If local loading fails the app will use the Hugging Face Inference API (requires HF_API_TOKEN set in Space secrets)."
150
- )
151
-
 
 
 
 
 
 
152
  with gr.Row():
153
- direction = gr.Radio(label="Direction", choices=DIRECTION_CHOICES, value=DIRECTION_CHOICES[0])
154
- swap = gr.Button("Swap")
155
 
156
- input_text = gr.Textbox(label="Input text", lines=4, placeholder="Type in English or Bengali...")
157
  translate_btn = gr.Button("Translate", variant="primary")
158
-
159
  with gr.Row():
160
  out_translation = gr.Textbox(label="Translation", lines=4)
161
- out_status = gr.Textbox(label="Status / Tips", lines=2)
162
- out_analysis = gr.Textbox(label="Analysis / Notes", lines=3)
163
-
164
- # examples
165
  with gr.Row():
166
- ex1 = gr.Button("Hello, how are you?")
167
- ex2 = gr.Button("Ami bhalo achi")
168
- ex3 = gr.Button("Where is the market?")
169
 
170
- # wiring
171
  def do_swap(cur):
172
- return DIRECTION_CHOICES[1] if cur == DIRECTION_CHOICES[0] else DIRECTION_CHOICES[0]
173
- swap.click(do_swap, inputs=direction, outputs=direction)
174
 
 
175
  translate_btn.click(translate_text, inputs=[input_text, direction], outputs=[out_translation, out_status, out_analysis])
176
 
 
 
 
 
 
 
177
  ex1.click(lambda: "Hello, how are you?", outputs=input_text)
178
  ex2.click(lambda: "Ami bhalo achi", outputs=input_text)
179
  ex3.click(lambda: "Where is the market?", outputs=input_text)
180
 
181
- gr.Markdown("---")
182
- gr.Markdown("If the app shows `No model available` error: go to Space Settings β†’ Secrets and add `HF_API_TOKEN` (your Hugging Face token).")
183
-
184
- # Launch if run directly
185
  if __name__ == "__main__":
186
- demo.launch(debug=True)
 
 
2
  import os
3
  import traceback
4
  import gradio as gr
 
5
 
6
+ # Try import transformers; if missing the app will show a clear error message in logs.
7
  try:
8
+ from transformers import pipeline
9
  except Exception as e:
10
  pipeline = None
11
+ print("transformers import error:", e)
12
 
13
+ # --- CONFIG: use lightweight OPUS-MT models for CPU-friendly translation ---
14
+ MODEL_EN_TO_BN = "shhossain/opus-mt-en-to-bn"
15
+ MODEL_BN_TO_EN = "Helsinki-NLP/opus-mt-bn-en"
16
+
17
+ # cache of loaded pipelines
18
+ _loaded = {}
19
+
20
+ def safe_load_pipeline(model_name):
21
+ """Load a translation pipeline lazily and return a tuple (success, message)."""
22
+ global _loaded
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
  if pipeline is None:
24
+ return False, "transformers not available - check requirements.txt"
25
+ if model_name in _loaded:
26
+ return True, f"model already loaded: {model_name}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
  try:
28
+ # device=-1 ensures CPU usage; set max_length moderately
29
+ p = pipeline("translation", model=model_name, device=-1, max_length=512)
30
+ _loaded[model_name] = p
31
+ return True, f"Loaded {model_name}"
32
  except Exception as e:
33
+ # log the full stack to Space logs so you can copy it
34
+ print("Exception while loading model:", model_name)
35
+ traceback.print_exc()
36
+ return False, f"Failed to load {model_name}: {str(e)}"
37
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
def translate_text(text: str, direction: str):
    """
    Translate *text* in the chosen *direction* for the Gradio UI.

    Returns a 3-tuple ``(translation, status message, debug/analysis info)``.
    Never raises: a model-load failure falls back to echoing the input, and
    any unexpected exception is reported through the status/debug outputs so
    the UI stays responsive.
    """
    if not text or not text.strip():
        return "", "⚠️ Enter text to translate", ""

    def _extract(raw):
        # Translation pipelines usually return a list of dicts; pull the
        # text out of whichever shape we actually received.
        if isinstance(raw, list) and len(raw) > 0:
            head = raw[0]
            if isinstance(head, dict):
                return head.get("translation_text") or head.get("generated_text") or str(head)
            return str(head)
        return str(raw)

    try:
        target = MODEL_EN_TO_BN if direction == "English → Bengali" else MODEL_BN_TO_EN

        # Lazy-load the model first; on failure echo the input so the
        # buttons still respond instead of hanging or crashing.
        loaded_ok, detail = safe_load_pipeline(target)
        if not loaded_ok:
            print("Local model load failed:", detail)
            return (
                text,
                f"⚠️ Local model load failed: {detail}. Showing fallback (identity) translation.",
                "Fallback used: returning input as output.",
            )

        translated = _extract(_loaded.get(target)(text, max_length=512))
        word_info = f"Input words: {len(text.split())}; Output words: {len(translated.split())}"
        return translated, f"✅ Translated using {target}", word_info
    except Exception as e:
        # Keep the UI responsive: report the failure instead of propagating it.
        trace = traceback.format_exc()
        print("Translation exception:", trace)
        return "", f"❌ Error during translation: {str(e)}", trace
76
+
77
# --- Small, responsive CSS for mobile: keep layout simple ---
# Injected into the page via gr.Blocks(css=...) below.
custom_css = """
/* Make UI mobile-friendly and readable */
.gradio-container { padding: 12px !important; max-width: 900px; margin: auto; font-family: 'Times New Roman', serif; }
.gradio-row { gap: 8px !important; }
textarea, input[type="text"] { font-size: 18px !important; }
.gr-button { font-size: 18px !important; padding: 12px 18px !important; }
"""

# --- Build Gradio UI with Blocks for responsive layout ---
with gr.Blocks(css=custom_css, title="English ↔ Bengali — Fast Translator") as demo:
    gr.Markdown("## English ↔ Bengali — Fast Translator\nUsing small OPUS-MT models (CPU friendly). The app lazy-loads models so Space won't crash. If a model fails to load the app will return a fallback so buttons still work.")
    # Direction selector and swap button share one row.
    with gr.Row():
        direction = gr.Radio(label="Direction", choices=["English → Bengali", "Bengali → English"], value="English → Bengali")
        swap_btn = gr.Button("Swap")

    input_text = gr.Textbox(label="Input text", placeholder="Type a sentence here (English or Bengali)...", lines=4)
    translate_btn = gr.Button("Translate", variant="primary")
    with gr.Row():
        out_translation = gr.Textbox(label="Translation", lines=4)
    # Status and debug boxes receive the 2nd and 3rd return values of translate_text.
    with gr.Row():
        out_status = gr.Textbox(label="Status / Tips", lines=1)
        out_analysis = gr.Textbox(label="Analysis / Debug", lines=3)

    # swap behavior: toggle the Radio between the two direction labels
    def do_swap(cur):
        return "Bengali → English" if cur == "English → Bengali" else "English → Bengali"
    swap_btn.click(do_swap, inputs=direction, outputs=direction)

    # main click hook: translate_text returns (translation, status, debug)
    translate_btn.click(translate_text, inputs=[input_text, direction], outputs=[out_translation, out_status, out_analysis])

    # example quick buttons: clicking one fills the input box with sample text
    with gr.Row():
        ex1 = gr.Button("Hello, how are you?")
        ex2 = gr.Button("Ami bhalo achi")
        ex3 = gr.Button("Where is the market?")

    ex1.click(lambda: "Hello, how are you?", outputs=input_text)
    ex2.click(lambda: "Ami bhalo achi", outputs=input_text)
    ex3.click(lambda: "Where is the market?", outputs=input_text)

# Launch
if __name__ == "__main__":
    # debug=True prints logs to the container console;
    # PORT env var (default 7860) matches the Hugging Face Spaces convention.
    demo.launch(server_name="0.0.0.0", server_port=int(os.environ.get("PORT", 7860)), debug=True)