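# Minimal Gradio chatbot demo: classifies the user's message with a BERT
# sequence classifier and replies with one of three canned responses.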
import gradio as gr
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Load a pre-trained model and tokenizer
# (num_labels=3 matches the three canned responses below; the classification
# head is newly initialized, so predictions are arbitrary until fine-tuned)
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=3)
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

def chatbot(input_text):
    # Tokenize input text
    inputs = tokenizer(input_text, return_tensors="pt")
    
    # Get model predictions
    outputs = model(**inputs)
    logits = outputs.logits.detach().numpy()
    predicted_class = logits.argmax(-1)[0]
    
    # Select a canned response based on the predicted class
    responses = [
        "I'm happy to help you with that!",
        "I'm not sure I understand. Can you please rephrase?",
        "I'm sorry, I'm not trained to respond to that."
    ]
    response = responses[predicted_class]
    
    return response

# Create Gradio interface
iface = gr.Interface(
    fn=chatbot,
    inputs="text",
    outputs="text",
    title="Chatbot",
    description="Talk to me!"
)

# Launch Gradio app
iface.launch()