vanim committed on
Commit
bb822e2
·
1 Parent(s): 2a7f953

Customize gradio app

Browse files
Files changed (1) hide show
  1. app.py +44 -4
app.py CHANGED
@@ -1,7 +1,47 @@
 
 
 
 
 
1
  import gradio as gr
 
2
 
3
- def greet(name):
4
- return "Hello " + name + "!!"
 
5
 
6
- iface = gr.Interface(fn=greet, inputs="text", outputs="text")
7
- iface.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ M6_NB_MiniProject_1_PartB_Deploy_Medical_Q&A_GPT2.ipynb
3
+ Original file in google drive
4
+ """
5
+ import os
6
  import gradio as gr
7
+ from transformers import AutoModelWithLMHead, AutoTokenizer
8
 
9
def generate_response(model, tokenizer, prompt, max_length=512):
    """Generate a text completion for *prompt* with a causal language model.

    Args:
        model: A Hugging Face causal LM exposing ``generate``.
        tokenizer: The matching tokenizer, used for both encoding and decoding.
        prompt: Input text to complete.
        max_length: Maximum total length (prompt + generation) in tokens.

    Returns:
        The decoded generated text with special tokens stripped.
    """
    # Tokenize via __call__ so we get BOTH input_ids and attention_mask.
    # The original built the mask with torch.ones_like but never imported
    # torch, which raised NameError on the first call.
    encoded = tokenizer(prompt, return_tensors="pt")  # 'pt' -> PyTorch tensors

    output = model.generate(
        encoded["input_ids"],
        attention_mask=encoded["attention_mask"],
        max_length=max_length,
        num_return_sequences=1,
        # GPT-2 has no pad token; reusing EOS silences generate()'s warning.
        pad_token_id=tokenizer.eos_token_id,
    )

    # Single returned sequence; drop special tokens (EOS padding etc.).
    return tokenizer.decode(output[0], skip_special_tokens=True)
26
+
27
def generate_query_response(prompt, max_length=200):
    """Answer a medical question with the fine-tuned GPT-2 model from the hub.

    Args:
        prompt: The user's question as plain text.
        max_length: Maximum total length of the generated answer in tokens.

    Returns:
        The model's decoded answer as a string.
    """
    # Load model/tokenizer once and memoize them on the function object;
    # the original re-downloaded and re-instantiated both on EVERY request,
    # which made each Gradio call pay the full load cost.
    if not hasattr(generate_query_response, "_assets"):
        # NOTE(review): AutoModelWithLMHead is deprecated in transformers
        # (AutoModelForCausalLM is the modern equivalent for GPT-2). Kept
        # as-is here to avoid changing which class gets loaded.
        model = AutoModelWithLMHead.from_pretrained("vanim/chatgpt2-medical-QnA")
        tokenizer = AutoTokenizer.from_pretrained("vanim/chatgpt2-medical-QnA")
        generate_query_response._assets = (model, tokenizer)

    model, tokenizer = generate_query_response._assets
    return generate_response(model, tokenizer, prompt, max_length)
34
+
35
+
36
# UI copy shown on the Gradio page.
title = "Medical QnA chat bot"
description = "ChatGPT2 based Medical Q and A demo"

# Wire a simple text-in / text-out interface to the query handler.
demo = gr.Interface(
    fn=generate_query_response,
    inputs="text",
    outputs="text",
    title=title,
    description=description,
)

# Launch the demo
demo.launch()