import gradio as gr


def conversation(prompt="", max_tokens=128):
    # TODO: integrate the Bloom 3b model here and generate a response from
    # `prompt`, limited to `max_tokens` new tokens. Alternatively, an interface
    # can be built directly from the Hub model with
    # gr.load("models/CreitinGameplays/bloom-3b-conversational").
    response = "Bloom 3b is currently unavailable. Try again later!"
    return response


interface = gr.Interface(
    fn=conversation,
    inputs=[
        gr.Textbox(
            label="Text Prompt",
            value="<|system|> You are a helpful AI assistant <|prompter|> What is an AI? <|assistant|>",
        ),
        gr.Slider(minimum=1, maximum=1024, label="Max New Tokens", value=128),
    ],
    outputs=gr.Textbox(label="AI Assistant Response"),  # Textbox for the generated reply
    title="Bloom 3b Conversational Assistant",
    description="Talk to Bloom 3b using a text prompt and adjust the maximum number of tokens for response generation.",
)

interface.launch()
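
# --- Illustrative sketch (not wired into the interface above) ---
# A minimal example of how the TODO in `conversation` could be filled in,
# assuming the Hub repo "CreitinGameplays/bloom-3b-conversational" can be run
# locally with `transformers`. `bloom_conversation` is a hypothetical drop-in
# replacement: pass fn=bloom_conversation to gr.Interface to use it instead of
# the placeholder `conversation` function.
from transformers import pipeline

_generator = None  # created lazily so the 3B model is only loaded on first use


def bloom_conversation(prompt="", max_tokens=128):
    global _generator
    if _generator is None:
        _generator = pipeline(
            "text-generation",
            model="CreitinGameplays/bloom-3b-conversational",
        )
    # Generate up to `max_tokens` new tokens, then strip the echoed prompt so
    # only the assistant's continuation is returned to the Gradio textbox.
    output = _generator(prompt, max_new_tokens=int(max_tokens), do_sample=True)
    generated = output[0]["generated_text"]
    return generated[len(prompt):].strip()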