piyushgrover committed on
Commit
d1255fb
·
verified ·
1 Parent(s): 0f7c2dd

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -28,7 +28,7 @@ tokenizer.pad_token = tokenizer.eos_token
28
  tokenizer.padding_side = "right"
29
 
30
  # ✅ Set up text generation pipeline
31
- generator = pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=500)
32
 
33
 
34
  # ✅ Chatbot Function with Conversation History
@@ -44,7 +44,7 @@ def chat(user_input, history=[]):
44
  prompt = f"{formatted_history}\n\n### User:\n{user_input}\n\n### Assistant:\n"
45
 
46
  # Generate response
47
- response = generator(prompt, max_length=128, do_sample=True)
48
 
49
  # Extract only the model's generated response
50
  answer = response[0]["generated_text"].split("### Assistant:\n")[-1].strip()
@@ -52,7 +52,7 @@ def chat(user_input, history=[]):
52
  # Update conversation history
53
  history.append((user_input, answer))
54
 
55
- return "", history # Return empty input and updated history
56
 
57
 
58
  # ✅ Create Gradio Chat Interface
 
28
  tokenizer.padding_side = "right"
29
 
30
  # ✅ Set up text generation pipeline
31
+ generator = pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=500, truncation=True)
32
 
33
 
34
  # ✅ Chatbot Function with Conversation History
 
44
  prompt = f"{formatted_history}\n\n### User:\n{user_input}\n\n### Assistant:\n"
45
 
46
  # Generate response
47
+ response = generator(prompt, max_length=128, do_sample=True, truncation=True)
48
 
49
  # Extract only the model's generated response
50
  answer = response[0]["generated_text"].split("### Assistant:\n")[-1].strip()
 
52
  # Update conversation history
53
  history.append((user_input, answer))
54
 
55
+ return history # Return the updated conversation history
56
 
57
 
58
  # ✅ Create Gradio Chat Interface