Anandhavalli2 committed on
Commit
99bb313
·
verified ·
1 Parent(s): 8b6a883

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +56 -0
app.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers import AutoTokenizer, AutoModelForCausalLM
2
+ import gradio as gr
3
+ import torch
4
+ #from transformers import GPT2LMHeadModel, GPT2Tokenizer
5
+
6
+ #import pickle
7
+
8
+
9
# UI metadata displayed on the Gradio page.
title = "🤖Deployment GUVI GPT Model using Hugging Face"
description = "Building open-domain chatbots is a challenging area for machine learning research."
examples = [["Guvi Details"]]

# Identifier of the fine-tuned causal-LM checkpoint to load.
# NOTE(review): assumes this repo/path exists on the Hub or on local disk — confirm.
model_name = "fine_tuned_model123"

# Load the tokenizer and model once at import time so every request reuses them.
# (Commented-out GPT2LMHeadModel/GPT2Tokenizer dead code removed; the Auto*
# classes resolve the concrete architecture from the checkpoint config.)
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
24
def predict(input, history=None):
    """Generate the chatbot's next reply for *input* given the token history.

    Parameters
    ----------
    input : str
        The user's new message.
    history : list | None
        Token-id history from the previous turn (the Gradio "state" value).
        Defaults to None instead of a mutable ``[]`` so successive calls do
        not share one list object.

    Returns
    -------
    tuple
        ``(response, history)`` — a list of (user, bot) text pairs for the
        chatbot widget, and the updated token-id history for the next call.
    """
    # Fix: the original used a mutable default argument (history=[]), which
    # is shared across calls; normalize to a fresh list here instead.
    if history is None:
        history = []

    # Tokenize the new user sentence, terminated by the EOS token.
    new_user_input_ids = tokenizer.encode(
        input + tokenizer.eos_token, return_tensors="pt"
    )

    # Append the new user tokens to the chat history. Fix: with an empty
    # history, torch.LongTensor([]) is 1-D and torch.cat with the 2-D input
    # tensor raises on current torch versions, so skip the concatenation.
    if history:
        bot_input_ids = torch.cat(
            [torch.LongTensor(history), new_user_input_ids], dim=-1
        )
    else:
        bot_input_ids = new_user_input_ids

    # Generate a response, capped at 4000 total tokens; pad with EOS.
    history = model.generate(
        bot_input_ids, max_length=4000, pad_token_id=tokenizer.eos_token_id
    ).tolist()

    # Decode the full sequence and split it at the EOS marker so that the
    # segments alternate user / bot turns.
    response = tokenizer.decode(history[0]).split("<|endoftext|>")

    # Pair consecutive segments as (user, bot) tuples for the chatbot widget.
    response = [
        (response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)
    ]
    return response, history
46
+
47
+
48
+ gr.Interface(
49
+ fn=predict,
50
+ title=title,
51
+ description=description,
52
+ examples=examples,
53
+ inputs=["text", "state"],
54
+ outputs=["chatbot", "state"],
55
+ theme="finlaymacklon/boxy_violet",
56
+ ).launch()