# Streamlit chatbot app backed by a fine-tuned DialoGPT model served
# through the Hugging Face Inference API.
import streamlit as st
import requests

# Model ID of the fine-tuned DialoGPT model on the Hugging Face Hub.
MODEL_ID = "Mishal23/fine-tuned-dialoGPT-crm-chatbot"

# Hugging Face API token, read from Streamlit secrets so it never
# appears in source control.
HUGGING_FACE_TOKEN = st.secrets["HUGGING_FACE_TOKEN"]
def generate_response(prompt):
    """Send *prompt* to the hosted model via the Hugging Face Inference API.

    Args:
        prompt: The user's message text.

    Returns:
        The model's generated reply, or a user-friendly fallback string
        when the request or response handling fails (the error is also
        surfaced in the UI via ``st.error``).
    """
    headers = {"Authorization": f"Bearer {HUGGING_FACE_TOKEN}"}
    payload = {"inputs": prompt}
    try:
        # Call the hosted-inference endpoint for the fine-tuned model.
        # A timeout is essential here: without one, a stalled request
        # would hang the Streamlit UI indefinitely.
        response = requests.post(
            f"https://api-inference.huggingface.co/models/{MODEL_ID}",
            headers=headers,
            json=payload,
            timeout=30,
        )
        response.raise_for_status()  # Raise an error for bad responses
        return response.json()[0]['generated_text']
    except requests.exceptions.HTTPError as http_err:
        st.error(f"HTTP error occurred: {http_err}")
        return "Sorry, there was an error with the server."
    except requests.exceptions.RequestException as req_err:
        # Covers timeouts, connection errors, etc.
        st.error(f"Request error occurred: {req_err}")
        return "Sorry, there was an issue with your request."
    except Exception as e:
        # Catch-all for unexpected response shapes (e.g. KeyError/IndexError
        # when the JSON payload is not the expected list of generations).
        st.error(f"Error generating response: {e}")
        return "Sorry, I couldn't generate a response."
# Streamlit UI setup
st.title("Chatbot Powered by Hugging Face")
st.subheader("Talk to the Chatbot")

# User input
user_input = st.text_input("You: ", "")

# Button to submit the input. Only call the model when there is a
# non-empty message; otherwise prompt the user.
if st.button("Send"):
    if user_input:
        with st.spinner("Generating response..."):
            bot_response = generate_response(user_input)
        st.text_area("Chatbot:", value=bot_response, height=200)
    else:
        st.warning("Please enter a message before sending.")