Update app.py
Browse files
app.py
CHANGED
|
@@ -58,12 +58,29 @@ class Translators:
|
|
| 58 |
translation = pipe(self.input_text)
|
| 59 |
return translation[0]['translation_text']
|
| 60 |
|
| 61 |
-
def
|
| 62 |
from quickmt import Translator
|
| 63 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 64 |
from pathlib import Path
|
| 65 |
-
model_name = f"quickmt-{self.sl}-{self.tl}"
|
| 66 |
model_path = Path("/quickmt/models") / model_name
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 67 |
# from quickmt.hub import hf_list
|
| 68 |
# quickmt_models = [i.split("/quickmt-")[1] for i in hf_list()]
|
| 69 |
# quickmt_models.sort()
|
|
@@ -72,22 +89,26 @@ class Translators:
|
|
| 72 |
'en-fa', 'en-fr', 'en-he', 'en-hi', 'en-hu', 'en-id', 'en-it', 'en-ja', 'en-ko', 'en-lv', 'en-pl', 'en-pt',
|
| 73 |
'en-ro', 'en-ru', 'en-th', 'en-tr', 'en-ur', 'en-vi', 'en-zh', 'es-en', 'fa-en', 'fr-en', 'he-en', 'hi-en',
|
| 74 |
'hu-en', 'id-en', 'it-en', 'ja-en', 'ko-en', 'lv-en', 'pl-en', 'pt-en', 'ro-en', 'ru-en', 'th-en', 'tr-en', 'ur-en', 'vi-en', 'zh-en']
|
| 75 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 76 |
translation = f'Model {model_name} from {iso1_to_name[self.sl]} to {iso1_to_name[self.tl]} not available!'
|
| 77 |
message = f"Available models: {', '.join(quickmt_models)}"
|
| 78 |
-
return translation, message
|
| 79 |
-
if not model_path.exists():
|
| 80 |
-
hf_download(
|
| 81 |
-
model_name = f"quickmt/{model_name}",
|
| 82 |
-
output_dir=Path("/quickmt/models") / model_name,
|
| 83 |
-
)
|
| 84 |
-
# 'auto' auto-detects GPU, set to "cpu" to force CPU inference
|
| 85 |
-
device = 'gpu' if torch.cuda.is_available() else 'cpu'
|
| 86 |
-
translator = Translator(str(model_path), device = device)
|
| 87 |
-
# translation = Translator(f"./quickmt-{self.sl}-{self.tl}/", device="auto", inter_threads=2)
|
| 88 |
-
# set beam size to 1 for faster speed (but lower quality)
|
| 89 |
-
translation = translator({self.input_text}, beam_size=5)[0]
|
| 90 |
-
message = f'Translated from {iso1_to_name[self.sl]} to {iso1_to_name[self.tl]} with {model_name}.'
|
| 91 |
return translation, message
|
| 92 |
|
| 93 |
@classmethod
|
|
|
|
| 58 |
translation = pipe(self.input_text)
|
| 59 |
return translation[0]['translation_text']
|
| 60 |
|
def quickmttranslate(model_path, input_text):
    """Translate *input_text* with the quickmt model stored at *model_path*.

    Args:
        model_path: Filesystem path to a downloaded quickmt model directory
            (e.g. the Path returned by quickmtdownload).
        input_text: Source-language text to translate.

    Returns:
        The translated string — the first (best) hypothesis from beam search.
    """
    from quickmt import Translator

    # quickmt wraps CTranslate2, whose valid devices are "cpu", "cuda" or
    # "auto". The original passed "gpu", which CTranslate2 rejects with a
    # ValueError; select "cuda" explicitly when a GPU is available.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    translator = Translator(str(model_path), device=device)
    # beam_size=5 favors quality; set beam_size=1 for faster, lower-quality output.
    translation = translator(input_text, beam_size=5)[0]
    return translation
|
| 70 |
+
|
| 71 |
+
def quickmtdownload(model_name):
|
| 72 |
from pathlib import Path
|
|
|
|
| 73 |
model_path = Path("/quickmt/models") / model_name
|
| 74 |
+
if not model_path.exists():
|
| 75 |
+
hf_download(
|
| 76 |
+
model_name = f"quickmt/{model_name}",
|
| 77 |
+
output_dir=Path("/quickmt/models") / model_name,
|
| 78 |
+
)
|
| 79 |
+
return model_path
|
| 80 |
+
|
| 81 |
+
def quickmt(self):
|
| 82 |
+
from quickmt.hub import hf_download
|
| 83 |
+
model_name = f"quickmt-{self.sl}-{self.tl}"
|
| 84 |
# from quickmt.hub import hf_list
|
| 85 |
# quickmt_models = [i.split("/quickmt-")[1] for i in hf_list()]
|
| 86 |
# quickmt_models.sort()
|
|
|
|
| 89 |
'en-fa', 'en-fr', 'en-he', 'en-hi', 'en-hu', 'en-id', 'en-it', 'en-ja', 'en-ko', 'en-lv', 'en-pl', 'en-pt',
|
| 90 |
'en-ro', 'en-ru', 'en-th', 'en-tr', 'en-ur', 'en-vi', 'en-zh', 'es-en', 'fa-en', 'fr-en', 'he-en', 'hi-en',
|
| 91 |
'hu-en', 'id-en', 'it-en', 'ja-en', 'ko-en', 'lv-en', 'pl-en', 'pt-en', 'ro-en', 'ru-en', 'th-en', 'tr-en', 'ur-en', 'vi-en', 'zh-en']
|
| 92 |
+
# available_languages = list(set([lang for model in quickmt_models for lang in model.split('-')]))
|
| 93 |
+
# available_languages.sort()
|
| 94 |
+
available_languages = ['ar', 'bn', 'cs', 'da', 'de', 'el', 'en', 'es', 'fa', 'fr', 'he', 'hi', 'hu',
|
| 95 |
+
'id', 'it', 'ja', 'ko', 'lv', 'pl', 'pt', 'ro', 'ru', 'th', 'tr', 'ur', 'vi', 'zh']
|
| 96 |
+
if f"{self.sl}-{self.tl}" in quickmt_models:
|
| 97 |
+
model_path = quickmtdownload(model_name)
|
| 98 |
+
translation = quickmttranslate(model_path, self.input_text)
|
| 99 |
+
message = f'Translated from {iso1_to_name[self.sl]} to {iso1_to_name[self.tl]} with {model_name}.'
|
| 100 |
+
# Pivot language English
|
| 101 |
+
elif self.sl in available_languages and self.tl in available_languages:
|
| 102 |
+
model_name = f"quickmt-{self.sl}-en"
|
| 103 |
+
model_path = quickmtdownload(model_name)
|
| 104 |
+
entranslation = quickmttranslate(model_path, self.input_text)
|
| 105 |
+
model_name = f"quickmt-en-{self.tl}"
|
| 106 |
+
model_path = quickmtdownload(model_name)
|
| 107 |
+
translation = quickmttranslate(model_path, entranslation)
|
| 108 |
+
message = f'Translated from {iso1_to_name[self.sl]} to {iso1_to_name[self.tl]} with pivot language English.'
|
| 109 |
+
else:
|
| 110 |
translation = f'Model {model_name} from {iso1_to_name[self.sl]} to {iso1_to_name[self.tl]} not available!'
|
| 111 |
message = f"Available models: {', '.join(quickmt_models)}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 112 |
return translation, message
|
| 113 |
|
| 114 |
@classmethod
|