import gradio as gr

# Build a callable demo for the hosted model once at startup; invoking it sends
# the prompt to the Hugging Face Inference API for this model.
bloom = gr.load("models/CreitinGameplays/bloom-3b-conversational")

def conversation(prompt="", max_new_tokens=128):
    # The Hub-loaded demo only accepts the text prompt, so max_new_tokens is
    # not forwarded here (see the alternative sketch below this function).
    try:
        return bloom(prompt)
    except Exception:
        return "Bloom 3b is currently unavailable. Try again later!"
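
# Alternative sketch (not wired into the app): if max_new_tokens should actually
# reach the model, huggingface_hub's InferenceClient exposes it on
# text_generation(). This assumes the Inference API serves this model and that
# the huggingface_hub package is installed; adapt as needed.
#
# from huggingface_hub import InferenceClient
#
# client = InferenceClient(model="CreitinGameplays/bloom-3b-conversational")
#
# def conversation(prompt="", max_new_tokens=128):
#     try:
#         return client.text_generation(prompt, max_new_tokens=max_new_tokens)
#     except Exception:
#         return "Bloom 3b is currently unavailable. Try again later!"
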
interface = gr.Interface(
    fn=conversation,
    inputs=[
        gr.Textbox(label="Text Prompt", value="<|system|> You are a helpful AI assistant </s> <|prompter|> What is an AI? </s> <|assistant|>"),
        gr.Slider(minimum=1, maximum=1024, label="Max New Tokens", value=128),
    ],
    outputs=gr.Textbox(label="AI Assistant Response"),  # Textbox for the response
    title="Bloom 3b Conversational Assistant",
    description="Talk to Bloom 3b using a text prompt and adjust the maximum number of tokens for response generation.",
)
interface.launch()
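
# Example client-side usage (a sketch, assuming the app is running at the
# default local address and that the separate gradio_client package is
# installed):
#
# from gradio_client import Client
#
# client = Client("http://127.0.0.1:7860")
# print(client.predict("<|prompter|> What is an AI? </s> <|assistant|>", 128,
#                      api_name="/predict"))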