import gradio as gr


def conversation(prompt="", max_tokens=128):
    # Placeholder: integrate the Bloom 3b model
    # ("models/CreitinGameplays/bloom-3b-conversational") here to generate a
    # response from `prompt`, limited to `max_tokens` new tokens; one possible
    # wiring is sketched below.
    response = "Bloom 3b is currently unavailable. Try again later!"
    return response
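
# A minimal sketch of one possible integration, assuming the hosted model is
# reachable through the Hugging Face Inference API and that the interface
# returned by gr.load can be invoked as a function (the `bloom` name is
# illustrative); `max_tokens` is not forwarded because the loaded interface
# only accepts the text prompt:
#
#     bloom = gr.load("models/CreitinGameplays/bloom-3b-conversational")  # load once at startup
#
#     def conversation(prompt="", max_tokens=128):
#         try:
#             return bloom(prompt)  # query the remote model with the raw prompt
#         except Exception:
#             return "Bloom 3b is currently unavailable. Try again later!"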


interface = gr.Interface(
    fn=conversation,
    inputs=[
        gr.Textbox(
            label="Text Prompt",
            value="<|system|> You are a helpful AI assistant </s> <|prompter|> What is an AI? </s> <|assistant|>",
        ),
        gr.Slider(minimum=1, maximum=1024, step=1, label="Max New Tokens", value=128),
    ],
    # The function returns the response string, so it needs a text output component.
    outputs=gr.Textbox(label="Response"),
    title="Bloom 3b Conversational Assistant",
    description="Talk to Bloom 3b using a text prompt and adjust the maximum number of tokens for response generation.",
)

interface.launch()