|
import gradio as gr |
|
import torch |
|
from transformers import AutoTokenizer, AutoModelForCausalLM |
|
|
|
|
|
# Hugging Face model id of a BLOOM-3B checkpoint fine-tuned for conversation.
model_name = "CreitinGameplays/bloom-3b-conversational"




# Load the tokenizer and model weights once at import time so every
# request served by the Gradio callback reuses the same instances.
# NOTE(review): this downloads the checkpoint on first run and loads it on
# CPU by default — no .to("cuda") / device_map is specified here.
tokenizer = AutoTokenizer.from_pretrained(model_name)

model = AutoModelForCausalLM.from_pretrained(model_name)
|
|
|
def generate_text(prompt):
    """Generate a completion for *prompt* with the BLOOM model.

    Args:
        prompt: The user's input text.

    Returns:
        The decoded model output (prompt plus continuation), with
        special tokens stripped.
    """
    # Tokenize the prompt into model-ready input ids (PyTorch tensors).
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids

    # Sample a single continuation. Low temperature + low top_p keep the
    # output focused; repetition_penalty discourages loops.
    # BUGFIX: the original call was missing commas after `temperature`
    # and `repetition_penalty`, which made the file a SyntaxError.
    output = model.generate(
        input_ids=input_ids,
        max_length=256,
        num_return_sequences=1,
        do_sample=True,
        top_k=50,
        top_p=0.15,
        temperature=0.1,
        repetition_penalty=1.165,
    )

    # Decode the first (and only) returned sequence back to text.
    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
    return generated_text
|
|
|
|
|
# Wire the generation function into a minimal text-in / text-out Gradio app.
interface = gr.Interface(
    generate_text,
    inputs="text",
    outputs="text",
    description="Interact with BLOOM (Loaded with Hugging Face Transformers)",
)

# Start the local Gradio web server.
interface.launch()
|
|