|
import streamlit as st |
|
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer |
|
import torch |
|
|
|
|
|
|
|
# Checkpoint used for BOTH the model and the tokenizer. They must come from
# the same repo: a tokenizer from a different checkpoint maps text to token
# ids that do not match this model's embedding table, yielding garbage output.
_CHECKPOINT = 'SantiagoPG/chatbot_customer_service'


@st.cache_resource
def _load_model_and_tokenizer(checkpoint: str = _CHECKPOINT):
    """Load and cache the seq2seq model and its matching tokenizer.

    Streamlit re-executes this whole script on every user interaction;
    @st.cache_resource ensures the (large) model is downloaded and
    instantiated only once per server process.

    Returns:
        tuple: (model, tokenizer) loaded from the same checkpoint.
    """
    model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    return model, tokenizer


# Module-level names kept so existing code below continues to work unchanged.
model, tokenizer = _load_model_and_tokenizer()
|
|
|
def get_chatbot_response(message: str) -> str:
    """Generate a chatbot reply for a single user message.

    Args:
        message: The raw user input text.

    Returns:
        The model's decoded reply with special tokens stripped.
    """
    # truncation=True guards against user input longer than the model's
    # maximum sequence length, which would otherwise error at generate time.
    inputs = tokenizer.encode(message, return_tensors='pt', truncation=True)

    # Greedy generation with the model's default length settings.
    reply_ids = model.generate(inputs)

    return tokenizer.decode(reply_ids[0], skip_special_tokens=True)
|
|
|
|
|
# --- Page layout -------------------------------------------------------------
st.title("Customer Service Chatbot")

# Single-line prompt; Streamlit reruns the script each time it changes.
user_input = st.text_input("Type your question here:")

# Only query the model once the user has typed something (empty string is falsy).
if user_input:
    bot_reply = get_chatbot_response(user_input)
    st.text_area("Response", value=bot_reply, height=100)