flash64 kadirnar committed
Commit e45d82f (0 parents)

Duplicate from kadirnar/BioGpt

Co-authored-by: Kadir Nar <[email protected]>

Files changed (5):
  1. .gitattributes +34 -0
  2. README.md +14 -0
  3. app.py +95 -0
  4. requirements.txt +5 -0
  5. utils.py +106 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: BioGpt
+ emoji: 🌖
+ colorFrom: red
+ colorTo: purple
+ sdk: gradio
+ sdk_version: 3.17.0
+ app_file: app.py
+ pinned: false
+ license: mit
+ duplicated_from: kadirnar/BioGpt
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,95 @@
+ from transformers import pipeline, set_seed
+ from transformers import BioGptTokenizer, BioGptForCausalLM
+ from multilingual_translation import translate
+ from utils import lang_ids
+ import gradio as gr
+ import torch
+
+ biogpt_model_list = [
+     "microsoft/biogpt",
+     "microsoft/BioGPT-Large",
+     "microsoft/BioGPT-Large-PubMedQA"
+ ]
+
+ lang_model_list = [
+     "facebook/m2m100_1.2B",
+     "facebook/m2m100_418M"
+ ]
+
+ lang_list = list(lang_ids.keys())
+
+ def translate_to_english(text, lang_model_id, base_lang):
+     if base_lang == "English":
+         return text
+     else:
+         base_lang = lang_ids[base_lang]
+         new_text = translate(lang_model_id, text, base_lang, "en")
+         return new_text[0]
+
+ def biogpt(
+     prompt: str,
+     biogpt_model_id: str,
+     max_length: int,
+     num_return_sequences: int,
+     base_lang: str,
+     lang_model_id: str
+ ):
+
+     en_prompt = translate_to_english(prompt, lang_model_id, base_lang)
+     generator = pipeline("text-generation", model=biogpt_model_id, device="cuda:0")
+     output = generator(en_prompt, max_length=max_length, num_return_sequences=num_return_sequences, do_sample=True)
+     output_dict = {}
+     for i in range(num_return_sequences):
+         output_dict[str(i+1)] = output[i]['generated_text']
+
+     output_text = ""
+     for i in range(num_return_sequences):
+         output_text += f'{output_dict[str(i+1)]}\n\n'
+
+     if base_lang == "English":
+         base_lang_output = output_text
+
+     else:
+         base_lang_output_ = ""
+         for i in range(num_return_sequences):
+             base_lang_output_ += f'{translate(lang_model_id, output_dict[str(i+1)], "en", lang_ids[base_lang])[0]}\n\n'
+         base_lang_output = base_lang_output_
+
+
+     return en_prompt, output_text, base_lang_output
+
+
+ inputs = [
+     gr.Textbox(lines=5, value="COVID-19 is", label="Prompt"),
+     gr.Dropdown(biogpt_model_list, value="microsoft/biogpt", label="BioGPT Model ID"),
+     gr.Slider(minimum=1, maximum=100, value=25, step=1, label="Max Length"),
+     gr.Slider(minimum=1, maximum=10, value=2, step=1, label="Number of Outputs"),
+     gr.Dropdown(lang_list, value="English", label="Base Language"),
+     gr.Dropdown(lang_model_list, value="facebook/m2m100_418M", label="Language Model ID")
+ ]
+
+ outputs = [
+     gr.outputs.Textbox(label="Prompt"),
+     gr.outputs.Textbox(label="Output"),
+     gr.outputs.Textbox(label="Translated Output")
+ ]
+
+ examples = [
+     ["COVID-19 is", "microsoft/biogpt", 25, 2, "English", "facebook/m2m100_418M"],
+     ["Kanser", "microsoft/biogpt", 25, 2, "Turkish", "facebook/m2m100_1.2B"]
+ ]
+
+ title = "M2M100 + BioGPT: Generative Pre-trained Transformer for Biomedical Text Generation and Mining"
+
+ description = "BioGPT is a domain-specific generative pre-trained Transformer language model for biomedical text generation and mining. BioGPT follows the Transformer language model backbone, and is pre-trained on 15M PubMed abstracts from scratch. Github: github.com/microsoft/BioGPT Paper: https://arxiv.org/abs/2210.10341"
+
+ demo_app = gr.Interface(
+     biogpt,
+     inputs,
+     outputs,
+     title=title,
+     description=description,
+     examples=examples,
+     cache_examples=False,
+ )
+ demo_app.launch(debug=True, enable_queue=True)
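
For context (not part of the commit): the core generation step inside `biogpt()` can be exercised on its own, without the Gradio UI or the M2M100 translation round-trip. A minimal sketch, assuming `transformers` and `sacremoses` are installed and running on CPU:

```python
# Minimal sketch (assumption: transformers + sacremoses installed); mirrors the
# text-generation step inside biogpt() in app.py, without Gradio or translation.
from transformers import pipeline

generator = pipeline("text-generation", model="microsoft/biogpt")  # CPU by default

outputs = generator(
    "COVID-19 is",          # same default prompt as the Space
    max_length=25,
    num_return_sequences=2,
    do_sample=True,
)

for i, out in enumerate(outputs, start=1):
    print(f"{i}. {out['generated_text']}")
```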
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ sacremoses
+ torch
+ beautifulsoup4==4.11.2
+ multilingual_translation==0.0.3
+ requests==2.28.1
utils.py ADDED
@@ -0,0 +1,106 @@
+ from bs4 import BeautifulSoup
+ import requests
+
+
+ lang_ids = {
+     "Afrikaans": "af",
+     "Amharic": "am",
+     "Arabic": "ar",
+     "Asturian": "ast",
+     "Azerbaijani": "az",
+     "Bashkir": "ba",
+     "Belarusian": "be",
+     "Bulgarian": "bg",
+     "Bengali": "bn",
+     "Breton": "br",
+     "Bosnian": "bs",
+     "Catalan": "ca",
+     "Cebuano": "ceb",
+     "Czech": "cs",
+     "Welsh": "cy",
+     "Danish": "da",
+     "German": "de",
+     "Greek": "el",
+     "English": "en",
+     "Spanish": "es",
+     "Estonian": "et",
+     "Persian": "fa",
+     "Fulah": "ff",
+     "Finnish": "fi",
+     "French": "fr",
+     "Western Frisian": "fy",
+     "Irish": "ga",
+     "Gaelic": "gd",
+     "Galician": "gl",
+     "Gujarati": "gu",
+     "Hausa": "ha",
+     "Hebrew": "he",
+     "Hindi": "hi",
+     "Croatian": "hr",
+     "Haitian": "ht",
+     "Hungarian": "hu",
+     "Armenian": "hy",
+     "Indonesian": "id",
+     "Igbo": "ig",
+     "Iloko": "ilo",
+     "Icelandic": "is",
+     "Italian": "it",
+     "Japanese": "ja",
+     "Javanese": "jv",
+     "Georgian": "ka",
+     "Kazakh": "kk",
+     "Central Khmer": "km",
+     "Kannada": "kn",
+     "Korean": "ko",
+     "Luxembourgish": "lb",
+     "Ganda": "lg",
+     "Lingala": "ln",
+     "Lao": "lo",
+     "Lithuanian": "lt",
+     "Latvian": "lv",
+     "Malagasy": "mg",
+     "Macedonian": "mk",
+     "Malayalam": "ml",
+     "Mongolian": "mn",
+     "Marathi": "mr",
+     "Malay": "ms",
+     "Burmese": "my",
+     "Nepali": "ne",
+     "Dutch": "nl",
+     "Norwegian": "no",
+     "Northern Sotho": "ns",
+     "Occitan": "oc",
+     "Oriya": "or",
+     "Panjabi": "pa",
+     "Polish": "pl",
+     "Pushto": "ps",
+     "Portuguese": "pt",
+     "Romanian": "ro",
+     "Russian": "ru",
+     "Sindhi": "sd",
+     "Sinhala": "si",
+     "Slovak": "sk",
+     "Slovenian": "sl",
+     "Somali": "so",
+     "Albanian": "sq",
+     "Serbian": "sr",
+     "Swati": "ss",
+     "Sundanese": "su",
+     "Swedish": "sv",
+     "Swahili": "sw",
+     "Tamil": "ta",
+     "Thai": "th",
+     "Tagalog": "tl",
+     "Tswana": "tn",
+     "Turkish": "tr",
+     "Ukrainian": "uk",
+     "Urdu": "ur",
+     "Uzbek": "uz",
+     "Vietnamese": "vi",
+     "Wolof": "wo",
+     "Xhosa": "xh",
+     "Yiddish": "yi",
+     "Yoruba": "yo",
+     "Chinese": "zh",
+     "Zulu": "zu",
+ }
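
`lang_ids` maps the human-readable language names shown in the Space's dropdown to the ISO codes the M2M100 models expect. Below is a brief, hypothetical usage sketch (not part of the commit); the `translate` call signature and list-style return value are assumed from how `app.py` uses `multilingual_translation`, not from that package's documentation:

```python
# Hypothetical sketch: turn a dropdown label into an M2M100 language code and
# translate a Turkish prompt to English, mirroring translate_to_english() in app.py.
from multilingual_translation import translate  # signature assumed from app.py usage
from utils import lang_ids

base_lang = "Turkish"
src_code = lang_ids[base_lang]  # "tr"

# translate(model_id, text, source_code, target_code) is assumed to return a
# list of translated strings, since app.py indexes the result with [0].
result = translate("facebook/m2m100_418M", "Kanser", src_code, "en")
print(result[0])
```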