from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
import os
import shutil
# Ensure correct model and tokenizer initialization
model_name = "kalyani2599/emotional_support_bot"
# Clear Hugging Face cache (optional but useful if there are issues with cached files)
cache_dir = os.path.join(os.path.expanduser("~"), ".cache", "huggingface")
if os.path.exists(cache_dir):
    for entry in os.listdir(cache_dir):
        entry_path = os.path.join(cache_dir, entry)
        if os.path.isdir(entry_path):
            # Cache entries such as "hub" are directories, so os.remove would fail on them
            shutil.rmtree(entry_path)
        else:
            os.remove(entry_path)
# Load model and tokenizer
try:
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)  # Use the fast tokenizer version
except Exception as e:
    print(f"Error loading model or tokenizer: {e}")
    # If the model/tokenizer fails to load, fall back to a different one
    model_name = "facebook/blenderbot-3B"  # Example fallback model
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
def chatbot_response(input_text):
    try:
        # Tokenize the input, generate a reply, and decode it back to text
        inputs = tokenizer(input_text, return_tensors="pt")
        outputs = model.generate(**inputs, max_length=100)
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        return response
    except Exception as e:
        return f"Error in generating response: {e}"
# Example chatbot interaction
input_text = "Hello, how are you?"
response = chatbot_response(input_text)
print(response)
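# Minimal command-line loop (a sketch, not in the original app): repeatedly
# reads user input and prints the bot's reply until the user types "quit".
if __name__ == "__main__":
    while True:
        user_input = input("You: ")
        if user_input.strip().lower() == "quit":
            break
        print(f"Bot: {chatbot_response(user_input)}")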