xnetba committed on
Commit
cc74a9c
1 Parent(s): cce3d0b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -7
app.py CHANGED
@@ -1,9 +1,30 @@
1
- from transformers import pipeline
 
 
2
 
3
- generator = pipeline("text-generation", model="distilgpt2")
4
- generator(
5
- "In this course, we will teach you how to",
6
- max_length=30,
7
- num_return_sequences=2,
8
 
9
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer
3
+ import torch
4
 
5
# Load the conversational model and its tokenizer (DialoGPT, large variant).
# Both come from the same checkpoint so their vocabularies match.
MODEL_NAME = "microsoft/DialoGPT-large"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
 
 
8
 
9
# Defining a predict function (the Gradio callback for each chat turn)
def predict(input, history=None):
    """Generate the next chatbot reply with DialoGPT.

    Parameters
    ----------
    input : str
        The user's new message. (Name shadows the builtin ``input`` but is
        kept unchanged for interface stability with existing callers.)
    history : list | None
        Token-id history returned by the previous call (the Gradio
        "state" input); ``None`` or empty on the first turn.

    Returns
    -------
    tuple
        ``(pairs, history)`` where ``pairs`` is a list of
        ``(user, bot)`` message tuples for the chatbot widget and
        ``history`` is the updated token-id list for the state output.
    """
    # Fix: the original used a mutable default (`history=[]`); use a None
    # sentinel and create a fresh list per call. Also tolerates the
    # framework passing None as the initial state.
    if history is None:
        history = []

    # tokenize the new input sentence, terminated by the EOS token
    new_user_input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors='pt')

    # append the new user input tokens to the chat history
    # (an empty history yields an empty 1-D LongTensor, which torch.cat accepts)
    bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)

    # generate a response; total length capped at 1000 tokens,
    # padding with EOS as DialoGPT has no dedicated pad token
    history = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id).tolist()

    # convert the tokens to text, and then split the responses into turns
    response = tokenizer.decode(history[0]).split("<|endoftext|>")
    # pair up alternating (user, bot) turns for the chatbot widget
    response = [(response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)]
    return response, history
24
+
25
# creating a gradio interface:
# inputs  = a text box for the user message plus hidden "state" (token history)
# outputs = a chatbot widget for the conversation plus the updated "state"
demo = gr.Interface(fn=predict,
                    inputs=["text", "state"],
                    outputs=["chatbot", "state"])

# start the local web server and serve the UI
demo.launch()