Update app.py
app.py CHANGED

@@ -2,7 +2,6 @@ from transformers import pipeline, set_seed
 from transformers import BioGptTokenizer, BioGptForCausalLM
 from multilingual_translation import translate
 from utils import lang_ids
-
 import gradio as gr
 
 model_list = [
@@ -19,18 +18,16 @@ def translate_to_english(text, base_lang):
     base_lang = lang_ids[base_lang]
     new_text = translate("facebook/m2m100_418M", text, base_lang, "en")
     return new_text
-
 
 def biogpt(
     prompt: str,
     model_id: str,
     max_length: int = 25,
     num_return_sequences: int = 5,
-    base_lang: str = "
+    base_lang: str = "English"
 ):
 
-    en_prompt = translate_to_english(prompt, base_lang)
-
+    en_prompt = translate_to_english(prompt, base_lang)[0]
     model = BioGptForCausalLM.from_pretrained(model_id)
     tokenizer = BioGptTokenizer.from_pretrained(model_id)
     generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
@@ -43,6 +40,7 @@ def biogpt(
         "4": output[3]['generated_text'],
         "5": output[4]['generated_text']
     }
+
     output_text = f'{output_dict["1"]}\n\n{output_dict["2"]}\n\n{output_dict["3"]}\n\n{output_dict["4"]}\n\n{output_dict["5"]}'
     return en_prompt, output_text
 
@@ -64,7 +62,6 @@ examples = [
     ["COVID-19 is", "microsoft/biogpt", 25, 5, "English"],
     ["Kanser", "microsoft/biogpt", 25, 5, "Turkish"]
 ]
-
 title = " BioGPT: Generative Pre-trained Transformer for Biomedical Text Generation and Mining"
 demo_app = gr.Interface(
     biogpt,