removed max_len param from model.gen
app.py CHANGED
@@ -28,7 +28,7 @@ def infer_1(inp, chat_history):
     inp_tok = tokenizer(inp_1, return_tensors="pt")
     X = inp_tok["input_ids"].to(device)
     a = inp_tok["attention_mask"].to(device)
-    output = model.generate(X, attention_mask=a
+    output = model.generate(X, attention_mask=a)
     output = tokenizer.decode(output[0])
     #Remove the user input part seq
     output = output[len(inp_1):]
@@ -91,7 +91,7 @@ training_data = pd.read_csv('ques_ans_v5.csv', index_col = 0)
 #Launch with gradio
 with gr.Blocks() as demo:
     gr.HTML(project_heading)
-
+
     gr.Markdown(value = project_des)

     chatbot = gr.Chatbot(label = "Trained on scripts from \"Rich Dad Poor Dad\"") #value = [['Hey there User', 'Hey there CB']]
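
For context, a minimal self-contained sketch of what the inference path looks like after this commit. The checkpoint name, the simplified signature (chat_history handling is dropped), and the setup lines are illustrative assumptions; only the function body mirrors the diff.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = AutoTokenizer.from_pretrained("gpt2")                 # placeholder checkpoint
model = AutoModelForCausalLM.from_pretrained("gpt2").to(device)

def infer(inp_1):
    inp_tok = tokenizer(inp_1, return_tensors="pt")
    X = inp_tok["input_ids"].to(device)
    a = inp_tok["attention_mask"].to(device)
    # With max_length no longer passed, generate() falls back to the
    # model's generation_config for the output length.
    output = model.generate(X, attention_mask=a)
    output = tokenizer.decode(output[0])
    # Strip the echoed prompt so only the generated continuation remains
    return output[len(inp_1):]

With no max_length or max_new_tokens given, transformers falls back to the model's generation_config defaults, which can cut responses short; passing max_new_tokens explicitly is the usual way to control response length instead.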
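
The second hunk only touches whitespace around the Blocks layout. A rough sketch of that layout, with placeholder values for project_heading and project_des since their definitions are not shown in this diff:

import gradio as gr

project_heading = "<h1>Rich Dad Poor Dad Chatbot</h1>"   # placeholder
project_des = "Ask questions answered from the book."    # placeholder

with gr.Blocks() as demo:
    gr.HTML(project_heading)
    gr.Markdown(value=project_des)
    chatbot = gr.Chatbot(label="Trained on scripts from \"Rich Dad Poor Dad\"")

demo.launch()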