from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import gradio as gr
# Alternative: a larger instruct model. Note that meta-llama checkpoints are
# gated on the Hugging Face Hub, so you must accept the license and be logged
# in (huggingface-cli login) before they will download.
# messages = [
#     {"role": "user", "content": "Who are you?"},
# ]
# pipe = pipeline("text-generation", model="meta-llama/Llama-3.2-3B-Instruct")
# pipe(messages)
# The "conversational" pipeline task was deprecated and has been removed from
# recent transformers releases; "text-generation" works with DialoGPT and
# returns a list of {"generated_text": ...} dicts.
chatbot = pipeline("text-generation", model="microsoft/DialoGPT-medium")
# To drive generation by hand instead of through a pipeline, load the
# tokenizer and model directly:
# tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-3B-Instruct")
# model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-3B-Instruct")
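# A minimal manual-generation sketch (assumes the two Auto* lines above are
# uncommented; max_new_tokens=50 is an arbitrary illustrative cap):
# inputs = tokenizer("Who are you?", return_tensors="pt")
# output_ids = model.generate(**inputs, max_new_tokens=50)
# print(tokenizer.decode(output_ids[0], skip_special_tokens=True))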
def chat_with_bot(user_input):
    # Generate a reply; return_full_text=False strips the prompt from the output,
    # and pad_token_id silences the pad-token warning GPT-2-family models emit.
    response = chatbot(user_input, max_new_tokens=100, return_full_text=False,
                       pad_token_id=chatbot.tokenizer.eos_token_id)
    return response[0]["generated_text"]
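# Quick sanity check outside the UI (uncomment to run; DialoGPT replies are
# short and exact output will vary from run to run):
# print(chat_with_bot("Hello, how are you?"))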
interface = gr.Interface(
    fn=chat_with_bot,                               # Function to call for processing the input
    inputs=gr.Textbox(label="Enter your message"),  # User input (text)
    outputs=gr.Textbox(label="Chatbot Response"),   # Model output (text)
    title="Chat with DialoGPT",                     # Optional: Add a title to your interface
    description="Chat with an AI model powered by DialoGPT!"  # Optional: Add a description
)
interface.launch()
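# launch() also accepts share=True to expose a temporary public URL, which is
# handy when running in a notebook or on a remote machine:
# interface.launch(share=True)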