"""Minimal Gradio chatbot: a BERT sequence classifier maps each user
message to one of a few canned replies."""

import gradio as gr
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# NOTE(review): this hub id looks suspect — the published KLUE checkpoint is
# "klue/bert-base" (no "-uncased" variant); confirm before deploying.
MODEL_NAME = "klue/bert-base-uncased"

# Load pre-trained model and tokenizer once at startup.
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model.eval()  # inference only — disable dropout etc.

# Canned replies indexed by predicted class id.
RESPONSES = [
    "I'm happy to help you with that!",
    "I'm not sure I understand. Can you please rephrase?",
    "I'm sorry, I'm not trained to respond to that.",
]


def chatbot(input_text: str) -> str:
    """Classify *input_text* and return the canned reply for the predicted class.

    Args:
        input_text: Raw user message from the Gradio text box.

    Returns:
        One of the strings in ``RESPONSES``. If the checkpoint predicts a
        class id outside the table (its label count is not guaranteed to
        be 3), the last "not trained for that" reply is returned instead
        of raising ``IndexError``.
    """
    # Tokenize the input for the model.
    inputs = tokenizer(input_text, return_tensors="pt")

    # Forward pass without gradient tracking — this is pure inference.
    with torch.no_grad():
        outputs = model(**inputs)

    predicted_class = int(outputs.logits.argmax(dim=-1).item())

    # Guard: checkpoints may have more labels than we have canned replies.
    if predicted_class >= len(RESPONSES):
        return RESPONSES[-1]
    return RESPONSES[predicted_class]


# Create Gradio interface.
iface = gr.Interface(
    fn=chatbot,
    inputs="text",
    outputs="text",
    title="Chatbot",
    description="Talk to me!",
)

if __name__ == "__main__":
    # Launch only when run as a script, not when imported.
    iface.launch()