try to reduce model loading time
app.py CHANGED
@@ -26,6 +26,9 @@ import argparse
 import langid
 from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer
 
+tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M")
+model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
+
 
 class myTheme(Base):
     def __init__(
@@ -128,8 +131,6 @@ def opus_trans(article, target_language):
 def nllb_trans(article, target_language):
     result_lang = detect_lang(article)
 
-    tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M")
-    model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
     inputs = tokenizer(article, return_tensors="pt")
 
     if target_language == "English":
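The diff hoists the NLLB tokenizer and model to module scope, so the checkpoint is loaded once at startup instead of on every nllb_trans call. A related pattern, not part of this commit, is to defer that cost until the first translation by caching the loaded objects lazily; a minimal sketch follows, where the helper name get_nllb is hypothetical and only the standard transformers from_pretrained calls are assumed.

from functools import lru_cache

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@lru_cache(maxsize=1)
def get_nllb():
    """Hypothetical helper: load the NLLB checkpoint on first use, then reuse the cached pair."""
    tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M")
    model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
    return tokenizer, model


# Inside nllb_trans, the cached objects would be fetched like this:
# tokenizer, model = get_nllb()
# inputs = tokenizer(article, return_tensors="pt")

Either way the weights are loaded a single time per process; the module-level version in the commit pays the cost at import, while the cached version pays it on the first call to the translation path.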