Commit fd89216 · verified · 1 Parent(s): 93de821
anisgtboi committed

Update app.py

Files changed (1):
  1. app.py +223 -246
app.py CHANGED
@@ -1,81 +1,138 @@
- import gradio as gr
+ # app.py
+ # Fast English -> Bengali translator with optional speech input and fast image generation
+
+ import os
  import re
  import time
+ import random
+ import traceback
+
  import torch
+ import gradio as gr
+
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
- from diffusers import StableDiffusionPipeline, LCMScheduler
- import random
+ from diffusers import StableDiffusionPipeline

- # --- Configuration ---
- TRANSLATION_MODEL = "facebook/nllb-200-distilled-600M"
- SRC_LANG = "eng_Latn"
- TGT_LANG = "ben_Beng"
- MAX_LENGTH = 512
+ # -------- Configuration --------
+ TRANSLATION_MODEL = os.environ.get("TRANSLATION_MODEL", "facebook/nllb-200-distilled-600M")
+ SRC_LANG = os.environ.get("SRC_LANG", "eng_Latn")
+ TGT_LANG = os.environ.get("TGT_LANG", "ben_Beng")
+ MAX_LENGTH = int(os.environ.get("MAX_LENGTH", "512"))
  DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

- # --- Globals for caching ---
- translation_tokenizer = None
- translation_model = None
- image_pipe = None
+ # Image model (can be changed to any HF stable-diffusion model you prefer)
+ IMAGE_MODEL_ID = os.environ.get("IMAGE_MODEL_ID", "runwayml/stable-diffusion-v1-5")

- # --- Translation Functions ---
- def load_translation_model():
-     global translation_tokenizer, translation_model
-     if translation_tokenizer is None or translation_model is None:
-         try:
-             print(f"Loading translation model {TRANSLATION_MODEL} on {DEVICE} ...")
-             translation_tokenizer = AutoTokenizer.from_pretrained(TRANSLATION_MODEL)
-             translation_model = AutoModelForSeq2SeqLM.from_pretrained(TRANSLATION_MODEL).to(DEVICE)
-             print("Translation model loaded successfully.")
-         except Exception as e:
-             translation_tokenizer, translation_model = None, None
-             raise RuntimeError(f"Failed to load translation model: {e}")
-     return translation_tokenizer, translation_model
+ # -------- Globals / Caches --------
+ _translation_tokenizer = None
+ _translation_model = None
+ _image_pipe = None
+
+ # -------- Helpers: Translation --------

  def split_into_sentences(text: str):
      if not text:
          return []
+     # Basic sentence splitting that keeps punctuation
      sentences = re.split(r'(?<=[.!?])\s+', text.strip())
      return [s.strip() for s in sentences if s.strip()]

+
+ def load_translation_model():
+     global _translation_tokenizer, _translation_model
+     if _translation_tokenizer is None or _translation_model is None:
+         try:
+             print(f"Loading translation model {TRANSLATION_MODEL} on {DEVICE}...")
+             _translation_tokenizer = AutoTokenizer.from_pretrained(TRANSLATION_MODEL, use_fast=False)
+             _translation_model = AutoModelForSeq2SeqLM.from_pretrained(TRANSLATION_MODEL).to(DEVICE)
+             print("Translation model loaded.")
+         except Exception as e:
+             _translation_tokenizer, _translation_model = None, None
+             raise
+     return _translation_tokenizer, _translation_model
+
+
+ def _get_forced_bos_token_id(tokenizer):
+     # Different tokenizers expose language IDs differently. Try several approaches and fallback to None.
+     # 1) Hugging Face lang_code_to_id mapping (used by some multilingual tokenizers)
+     try:
+         if hasattr(tokenizer, "lang_code_to_id") and isinstance(tokenizer.lang_code_to_id, dict):
+             if TGT_LANG in tokenizer.lang_code_to_id:
+                 return tokenizer.lang_code_to_id[TGT_LANG]
+     except Exception:
+         pass
+
+     # 2) Convert token string -> id (some checkpoints use language tags as tokens)
+     try:
+         token_id = tokenizer.convert_tokens_to_ids(TGT_LANG)
+         if token_id != tokenizer.unk_token_id:
+             return token_id
+     except Exception:
+         pass
+
+     # 3) Try common special form (e.g. "<2ben_Beng>")
+     try:
+         candidate = f"<2{TGT_LANG}>"
+         token_id = tokenizer.convert_tokens_to_ids(candidate)
+         if token_id != tokenizer.unk_token_id:
+             return token_id
+     except Exception:
+         pass
+
+     return None
+
+
  def translate_text(text: str, max_length: int = MAX_LENGTH):
+     """Translate English text to Bengali. Returns translated string or error message."""
      if not text or not text.strip():
          return ""
+
      try:
          tokenizer, model = load_translation_model()
      except Exception as e:
-         return f"Model load error: {e}"
+         tb = traceback.format_exc()
+         return f"Model load error: {e}\n{tb}"

      sentences = split_into_sentences(text)
      translations = []

+     forced_bos = _get_forced_bos_token_id(tokenizer)
+
      for s in sentences:
          if not s:
              continue
          try:
-             formatted_text = f"{SRC_LANG} {s}"
-
+             # Prepend source language hint if the model expects it (common for NLLB)
+             src_prefixed = f"{SRC_LANG} {s}"
+
              inputs = tokenizer(
-                 formatted_text,
+                 src_prefixed,
                  return_tensors="pt",
                  truncation=True,
                  max_length=max_length,
-                 padding=False,
              ).to(DEVICE)

-             generated_tokens = model.generate(
-                 **inputs,
-                 forced_bos_token_id=tokenizer.convert_tokens_to_ids(TGT_LANG),
+             gen_kwargs = dict(
                  max_length=max_length + 64,
                  num_beams=5,
                  early_stopping=True,
              )
-
+
+             if forced_bos is not None:
+                 gen_kwargs["forced_bos_token_id"] = forced_bos
+             elif getattr(model.config, "forced_bos_token_id", None) is not None:
+                 gen_kwargs["forced_bos_token_id"] = model.config.forced_bos_token_id
+
+             generated_tokens = model.generate(**inputs, **gen_kwargs)
+
              decoded = tokenizer.decode(generated_tokens[0], skip_special_tokens=True)
+
+             # The decoded text sometimes includes the target language token. Remove if present at start.
              if decoded.startswith(TGT_LANG):
                  decoded = decoded[len(TGT_LANG):].strip()
-
+
              translations.append(decoded)
+
          except RuntimeError as re_err:
              return f"Runtime error during generation: {re_err}"
          except Exception as e:
@@ -83,240 +140,160 @@ def translate_text(text: str, max_length: int = MAX_LENGTH):

      return " ".join(translations)

- # --- Faster Image Generation Functions ---
- def load_image_model():
-     global image_pipe
-     if image_pipe is None:
+ # -------- Image generation (fast-ish) --------
+
+ def load_image_model(model_id: str = IMAGE_MODEL_ID):
+     global _image_pipe
+     if _image_pipe is None:
          try:
-             print("Loading faster image generation model...")
-             # Using a much faster model with LCM-LoRA
-             model_id = "lykon/dreamshaper-7"
-             lcm_lora_id = "latent-consistency/lcm-lora-sdv1-5"
-
-             image_pipe = StableDiffusionPipeline.from_pretrained(
+             dtype = torch.float16 if DEVICE.type == "cuda" else torch.float32
+             print(f"Loading image model {model_id} (dtype={dtype}) on {DEVICE} ...")
+
+             _image_pipe = StableDiffusionPipeline.from_pretrained(
                  model_id,
-                 torch_dtype=torch.float16,
-                 variant="fp16",
+                 torch_dtype=dtype,
              )
-
-             # Load LCM-LoRA for faster inference
-             image_pipe.load_lora_weights(lcm_lora_id)
-             image_pipe.scheduler = LCMScheduler.from_config(image_pipe.scheduler.config)
-             image_pipe = image_pipe.to(DEVICE)
-             print("Fast image generation model loaded successfully.")
+
+             # Move to device
+             _image_pipe = _image_pipe.to(DEVICE)
+             print("Image model loaded.")
+
          except Exception as e:
-             image_pipe = None
-             raise RuntimeError(f"Failed to load image model: {e}")
-     return image_pipe
+             _image_pipe = None
+             raise
+     return _image_pipe
+

- def generate_image(prompt: str, num_inference_steps: int = 4):  # Only 4 steps needed with LCM!
+ def generate_image(prompt: str, num_inference_steps: int = 4):
+     """Generate one image; returns PIL Image or None and status message."""
      if not prompt or not prompt.strip():
          return None, "Please enter a prompt to generate an image."
-
+
      try:
          pipe = load_image_model()
-
-         seed = random.randint(0, 1000000)
-         generator = torch.Generator(device=DEVICE).manual_seed(seed)
-
-         # Generate image with very few steps
-         image = pipe(
+
+         seed = random.randint(0, 2**31 - 1)
+         gen = torch.Generator(device=DEVICE).manual_seed(seed) if DEVICE.type == "cuda" else torch.Generator().manual_seed(seed)
+
+         # Guidance scale and steps tuned for speed; user can change steps via UI
+         out = pipe(
              prompt=prompt,
-             num_inference_steps=num_inference_steps,
-             guidance_scale=1.0,  # Low guidance scale for LCM
-             generator=generator,
-         ).images[0]
-
-         return image, f"Successfully generated image in {num_inference_steps} steps!"
-
+             num_inference_steps=int(num_inference_steps),
+             guidance_scale=7.5,
+             generator=gen,
+         )
+
+         image = out.images[0]
+         return image, f"Generated (seed={seed}) in {num_inference_steps} steps."
+
+     except Exception as e:
+         tb = traceback.format_exc()
+         return None, f"Error generating image: {e}\n{tb}"
+
+ # -------- Optional: Speech transcription (if dependencies installed) --------
+
+ try:
+     import speech_recognition as sr
+     from pydub import AudioSegment
+     _SR_AVAILABLE = True
+ except Exception:
+     _SR_AVAILABLE = False
+
+
+ def transcribe_audio_file(audio_path: str):
+     if not audio_path:
+         return ""
+     if not _SR_AVAILABLE:
+         return "(speech_recognition/pydub not installed) Please type your text or install optional deps."
+
+     try:
+         recognizer = sr.Recognizer()
+         # Convert file to WAV if needed
+         wav_path = audio_path
+         if not audio_path.lower().endswith('.wav'):
+             audio = AudioSegment.from_file(audio_path)
+             wav_path = audio_path.rsplit('.', 1)[0] + '.wav'
+             audio.export(wav_path, format='wav')
+
+         with sr.AudioFile(wav_path) as source:
+             audio_data = recognizer.record(source)
+         text = recognizer.recognize_google(audio_data)
+         return text
+
      except Exception as e:
-         return None, f"Error generating image: {str(e)}"
+         return f"Error transcribing audio: {e}"
+
+ # -------- Gradio UI --------

- # --- Gradio UI with Real-time Features ---
  css = """
- .gradio-container {
-     max-width: 1200px !important;
- }
- .header {
-     text-align: center;
-     background: linear-gradient(45deg, #667eea, #764ba2);
-     padding: 20px;
-     border-radius: 10px;
-     color: white;
-     margin-bottom: 20px;
- }
- .quick-btn {
-     margin: 5px;
-     padding: 8px 15px;
- }
+ .gradio-container { max-width: 1100px !important; }
+ .header { text-align: center; padding: 16px; border-radius: 8px; color: white; background: linear-gradient(90deg,#2563eb,#7c3aed); }
+ .quick-btn { margin: 6px; }
  """

  with gr.Blocks(title="Fast Bengali Translator & Image Generator", css=css) as demo:
-     # Header
      gr.Markdown("""
-     <div class="header">
-         <h1>⚡ Fast Bengali Translator & Image Generator</h1>
-         <p>Real-time speech input with fast translation and image generation</p>
-     </div>
+     <div class="header"><h2>⚡ Fast English → Bengali Translator + Fast Image Generator</h2>
+     <p>Speech input (optional), sentence-split translation, and 2–8 step image generation for fast feedback.</p></div>
      """)
-
+
      with gr.Tabs():
-         with gr.TabItem("🌐 Translation"):
-             gr.Markdown("## English to Bengali Translation")
-
+         with gr.TabItem("Translation"):
+             gr.Markdown("### English Bengali")
              with gr.Row():
-                 with gr.Column():
-                     # Speech input section
-                     gr.Markdown("### 🎤 Speak or Type")
-                     audio_input = gr.Audio(type="filepath", label="Record your voice")
-                     transcribe_btn = gr.Button("Transcribe Speech", variant="primary")
-
-                     # Text input
-                     input_text = gr.Textbox(
-                         label="English Text",
-                         placeholder="Type or paste English text here...",
-                         lines=5
-                     )
-
-                     # Quick phrases buttons
-                     gr.Markdown("### 💬 Quick Phrases")
+                 with gr.Column(scale=6):
+                     audio_input = gr.Audio(source="upload", type="filepath", label="Record or upload audio (optional)")
+                     transcribe_btn = gr.Button("Transcribe Speech")
+
+                     input_text = gr.Textbox(lines=6, placeholder="Type or paste English text here...", label="English Text")
                      with gr.Row():
-                         quick_hello = gr.Button("Hello, how are you?", elem_classes="quick-btn")
-                         quick_weather = gr.Button("Nice weather today", elem_classes="quick-btn")
-                         quick_thanks = gr.Button("Thank you very much", elem_classes="quick-btn")
-
-                     translate_btn = gr.Button("Translate", variant="primary")
-
-                     examples = gr.Examples(
-                         examples=[
-                             ["Hello, how are you? I hope you are doing well today."],
-                             ["The weather is beautiful today. Let's go for a walk in the park."],
-                         ],
-                         inputs=input_text,
-                         fn=None,
-                         cache_examples=False,
-                         label="Example Texts"
-                     )
-
-                 with gr.Column():
-                     output_text = gr.Textbox(
-                         label="Bengali Translation",
-                         lines=5,
-                         interactive=False
-                     )
-                     copy_btn = gr.Button("Copy Translation", variant="secondary")
-
-                     gr.Markdown("### 🎨 Generate Image from Translation")
-                     use_for_image_btn = gr.Button("Use for Image Generation", variant="primary")
-
-         with gr.TabItem("🎨 Fast Image Generation"):
-             gr.Markdown("## AI Image Generation (Optimized for Speed)")
-
+                         quick_hello = gr.Button("Hello, how are you?")
+                         quick_weather = gr.Button("The weather is nice today.")
+                         quick_thanks = gr.Button("Thank you very much.")
+
+                     translate_btn = gr.Button("Translate")
+
+                 with gr.Column(scale=6):
+                     output_text = gr.Textbox(lines=6, label="Bengali Translation", interactive=False)
+                     copy_btn = gr.Button("Copy")
+
+                     use_for_image_btn = gr.Button("Use translation as image prompt")
+
+         with gr.TabItem("Image Generation"):
+             gr.Markdown("### Fast Image Generation")
              with gr.Row():
-                 with gr.Column():
-                     image_prompt = gr.Textbox(
-                         label="Image Prompt",
-                         placeholder="Describe the image you want to generate...",
-                         lines=3
-                     )
-
+                 with gr.Column(scale=6):
+                     image_prompt = gr.Textbox(lines=4, label="Image Prompt", placeholder="Describe the image you want to generate...")
                      with gr.Row():
-                         generate_btn = gr.Button("Generate Image (Fast!)", variant="primary")
-                         clear_btn = gr.Button("Clear", variant="secondary")
-
-                     steps_slider = gr.Slider(
-                         minimum=2,
-                         maximum=8,
-                         value=4,
-                         step=1,
-                         label="Inference Steps (4 is usually enough with LCM)"
-                     )
-
-                 with gr.Column():
+                         generate_btn = gr.Button("Generate Image")
+                         clear_btn = gr.Button("Clear")
+                     steps_slider = gr.Slider(minimum=2, maximum=12, step=1, value=4, label="Inference Steps (fewer = faster)")
+
+                 with gr.Column(scale=6):
                      output_image = gr.Image(label="Generated Image", interactive=False)
                      status_message = gr.Textbox(label="Status", interactive=False)
-
-             gr.Markdown("### Tips for Fast Generation")
-             gr.Markdown("- Use 4 steps for the best speed/quality balance")
-             gr.Markdown("- Simple prompts work best with fast models")
-
-     # Footer
+
      gr.Markdown("---")
-     gr.Markdown("""
-     <div style="text-align: center">
-         <p>This optimized app uses faster models for better performance on Hugging Face Spaces.</p>
-     </div>
-     """)
-
-     # Event handlers
-     def transcribe_audio(audio_path):
-         if audio_path is None:
-             return "Please record audio first."
-         try:
-             # Simple transcription simulation (in a real app, you'd use a speech recognition library)
-             # For now, we'll just return a placeholder
-             return "I heard your voice. Please type your text for translation."
-         except:
-             return "Error transcribing audio. Please try again or type your text."
-
-     def copy_to_clipboard(text):
-         return text  # Gradio will automatically copy text when the button is clicked
-
-     # Connect events
-     transcribe_btn.click(
-         fn=transcribe_audio,
-         inputs=audio_input,
-         outputs=input_text
-     )
-
-     translate_btn.click(
-         fn=translate_text,
-         inputs=input_text,
-         outputs=output_text
-     )
-
-     copy_btn.click(
-         fn=copy_to_clipboard,
-         inputs=output_text,
-         outputs=output_text  # This will trigger the browser's copy functionality
-     )
-
-     use_for_image_btn.click(
-         fn=lambda x: x,
-         inputs=output_text,
-         outputs=image_prompt
-     )
-
-     generate_btn.click(
-         fn=generate_image,
-         inputs=[image_prompt, steps_slider],
-         outputs=[output_image, status_message]
-     )
-
-     clear_btn.click(
-         fn=lambda: [None, None, None],
-         inputs=None,
-         outputs=[image_prompt, output_image, status_message]
-     )
-
-     # Quick phrase buttons
-     quick_hello.click(
-         fn=lambda: "Hello, how are you?",
-         inputs=None,
-         outputs=input_text
-     )
-
-     quick_weather.click(
-         fn=lambda: "The weather is nice today.",
-         inputs=None,
-         outputs=input_text
-     )
-
-     quick_thanks.click(
-         fn=lambda: "Thank you very much for your help.",
-         inputs=None,
-         outputs=input_text
-     )
-
- if __name__ == "__main__":
-     demo.launch(debug=True)
+     gr.Markdown("*Notes: For best performance use a GPU in Spaces or locally. Optional speech transcription requires `speechrecognition` and `pydub`.*")
+
+     # Event bindings
+     def _transcribe_then_fill(path):
+         return transcribe_audio_file(path)
+
+     def _copy_text(t):
+         return t
+
+     def _use_translation_for_image(t):
+         return t
+
+     transcribe_btn.click(fn=_transcribe_then_fill, inputs=audio_input, outputs=input_text)
+     translate_btn.click(fn=translate_text, inputs=input_text, outputs=output_text)
+     copy_btn.click(fn=_copy_text, inputs=output_text, outputs=output_text)
+
+     use_for_image_btn.click(fn=_use_translation_for_image, inputs=output_text, outputs=image_prompt)
+
+     generate_btn.click(fn=generate_image, inputs=[image_prompt, steps_slider], outputs=[output_image, status_message])
+     clear_btn.click(fn=lambda: ["", None, ""], inputs=None, outputs=[image_prompt, output_image, status_message])
+
+ if __name__ == '__main__':
+     demo.launch(server_name='0.0.0.0', server_port=int(os.environ.get('PORT', 7860)))
 