SantiagoPG committed
Commit c3d3e17 · 1 Parent(s): 6f5be4c

Add application file

Files changed (1)
  1. app.py +22 -0
app.py ADDED
@@ -0,0 +1,22 @@
+ import streamlit as st
+ from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
+ import torch
+
+
+ # Load the model and tokenizer for inference
+ model = AutoModelForSeq2SeqLM.from_pretrained('SantiagoPG/chatbot_customer_service')
+ tokenizer = AutoTokenizer.from_pretrained("Kaludi/Customer-Support-Assistant-V2")
+
+ def get_chatbot_response(message):
+     inputs = tokenizer.encode(message, return_tensors='pt')
+     reply_ids = model.generate(inputs)
+     return tokenizer.decode(reply_ids[0], skip_special_tokens=True)
+
+ # Streamlit interface
+ st.title("Customer Service Chatbot")
+
+ user_input = st.text_input("Type your question here:")
+
+ if user_input:
+     response = get_chatbot_response(user_input)
+     st.text_area("Response", value=response, height=100, max_chars=None, key=None)
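
Note on the added file: Streamlit reruns the whole script on every interaction, so the two from_pretrained calls above are re-executed each time the user submits a question. A minimal sketch of one way to avoid that, assuming a recent Streamlit release that provides st.cache_resource (this wrapper is not part of the commit):

    import streamlit as st
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

    @st.cache_resource  # keep the loaded model/tokenizer in memory across reruns
    def load_model():
        model = AutoModelForSeq2SeqLM.from_pretrained('SantiagoPG/chatbot_customer_service')
        tokenizer = AutoTokenizer.from_pretrained("Kaludi/Customer-Support-Assistant-V2")
        return model, tokenizer

    model, tokenizer = load_model()

With the file in place, the app can be started locally with: streamlit run app.py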