Athspi's picture
Update app.py
31c0af3 verified
raw
history blame
679 Bytes
import gradio as gr
from transformers import pipeline
# Initialize the model pipeline once at import time (model download/load is slow,
# so we do it here rather than per-request inside the handler).
# NOTE(review): no device/dtype args — presumably runs on CPU by default; confirm
# the hosting hardware before assuming GPU acceleration.
pipe = pipeline("text-generation", model="unsloth/gemma-2b-it")
# Define a function to handle user input and generate model output
def generate_response(user_input):
    """Generate a chat reply for *user_input* with the module-level pipeline.

    Wraps the raw text in the chat-message format the instruction-tuned
    model expects, runs the pipeline, and returns the assistant's reply
    as a plain string (suitable for a Gradio text output).
    """
    messages = [{"role": "user", "content": user_input}]
    result = pipe(messages)
    return extract_assistant_reply(result)


def extract_assistant_reply(result):
    """Return the reply text from a text-generation pipeline result.

    Chat-style input yields ``[{"generated_text": [<message dicts>...]}]``
    where the last message is the assistant's turn; a plain string prompt
    yields ``[{"generated_text": "<text>"}]``. The original code returned
    ``result[0]['generated_text']`` directly, which for chat input is the
    whole conversation list, not a string — Gradio would display its repr.
    """
    generated = result[0]["generated_text"]
    if isinstance(generated, list):
        # Chat format: list of {"role": ..., "content": ...}; the final
        # entry is the model's newly generated assistant turn.
        return generated[-1]["content"]
    return generated
# Create a simple text-in/text-out web UI for the model.
# Guarded so that importing this module (e.g. for testing) does not start
# a web server as a side effect — launch only when run as a script.
if __name__ == "__main__":
    gr.Interface(fn=generate_response, inputs="text", outputs="text").launch()