import gradio as gr
import torch
import bitsandbytes as bnb  # backend required for 4-bit loading; must be installed
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig

# BitsAndBytes 4-bit quantization config
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

# Define the BLOOM model name
model_name = "CreitinGameplays/bloom-3b-conversational"

# Load tokenizer and quantized model (device_map places it on the available GPU)
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    device_map="auto",
)


def generate_text(user_prompt):
    """Generate a response with the BLOOM model and strip the prompt from the output."""
    # Construct the full prompt with system introduction, user prompt, and assistant role
    prompt = f"<|system|> You are a helpful AI assistant. <|prompter|> {user_prompt} <|assistant|>"

    # Encode the prompt and move the tokens to the model's device
    prompt_encoded = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)

    # Generate text; max_length caps prompt plus generated tokens at 256
    output = model.generate(
        input_ids=prompt_encoded,
        max_length=256,
        num_beams=1,
        num_return_sequences=1,   # Generate only 1 sequence
        do_sample=True,           # Enable sampling instead of greedy decoding
        top_k=50,                 # Sample from the 50 most likely tokens at each step
        top_p=0.15,               # Nucleus sampling: keep tokens covering the top 15% of probability mass
        temperature=0.1,          # Lower values make output more deterministic (1.0 is the default)
        repetition_penalty=1.165,
    )

    # Decode the generated token sequence back to text
    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)

    # Extract the assistant's response (everything after the "<|assistant|>" marker)
    assistant_response = generated_text.split("<|assistant|>")[-1]
    assistant_response = assistant_response.replace(user_prompt, "").strip()
    assistant_response = assistant_response.replace("You are a helpful AI assistant.", "").strip()

    return assistant_response


# Define the Gradio interface
interface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(label="Text Prompt", value="What's an AI?"),
    ],
    outputs="text",
    description="Interact with BLOOM-3b-conversational (loaded with Hugging Face Transformers)",
)

# Launch the Gradio interface
interface.launch()