CreitinGameplays's picture
Update app.py
6b10d1a verified
raw
history blame
1.94 kB
import gradio as gr
from transformers import pipeline
# Hugging Face Hub id of the conversational Bloom-3b checkpoint.
bloom_model_name = "CreitinGameplays/bloom-3b-conversational"
# Text-generation pipeline shared by the request handler below.
# truncation=True trims over-long prompts to the model's max input length.
generator = pipeline("text-generation", model=bloom_model_name, truncation=True)
def conversation(prompt="", max_tokens=128):
    """
    Generate a conversational response from Bloom via the text-generation pipeline.

    Args:
        prompt (str, optional): Text prompt for Bloom. Defaults to "".
        max_tokens (int, optional): Maximum number of NEW tokens to generate
            (the prompt's length does not count against this). Defaults to 128.

    Returns:
        str: Bloom's generated response to the prompt, or a fallback message
        if generation fails.
    """
    try:
        # Use max_new_tokens, not max_length: max_length counts the prompt's
        # tokens too, so a long prompt could leave no room for the reply
        # (the UI slider is labeled "Max New Tokens"). int() guards against
        # gr.Slider delivering a float.
        response = generator(
            prompt,
            max_new_tokens=int(max_tokens),
            num_return_sequences=1,
        )[0]["generated_text"]
        return response.strip()  # Remove potential leading/trailing whitespace
    except Exception as e:
        # UI boundary: log the error and show a friendly fallback instead of
        # surfacing a traceback to the user.
        print(f"Error during Bloom interaction: {e}")
        return "Bloom is currently unavailable. Try again later!"
# Build the Gradio UI: a prompt textbox plus a token-count slider, both wired
# to the conversation() handler, with a textbox for the model's reply.
prompt_input = gr.Textbox(
    label="Text Prompt",
    value="<|system|> You are a helpful AI assistant </s> <|prompter|> What is an AI? </s> <|assistant|>",
)
token_slider = gr.Slider(minimum=1, maximum=1024, label="Max New Tokens", value=128)
response_output = gr.Textbox(label="AI Assistant Response")

interface = gr.Interface(
    fn=conversation,
    inputs=[prompt_input, token_slider],
    outputs=response_output,
    title="Bloom 3b Conversational Assistant",
    description="Talk to Bloom 3b using a text prompt and adjust the maximum number tokens for response generation.",
)
interface.launch()
def generate_response_from_bloom3b(prompt, max_tokens):
    """Placeholder for a custom Bloom 3b backend.

    Replace this stub with real model-interaction logic (e.g. via the
    transformers library) that turns *prompt* into generated text capped
    at *max_tokens* tokens.
    """
    # Stub: ignores its arguments and always returns the same fixed string.
    return "This is a placeholder response from generate_response_from_bloom3b"