import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the fine-tuned model and tokenizer from Hugging Face
model_name = "smgriffin/24thankyou-lyrics-generator"  # Replace with your Hugging Face repo name
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Define the generation function
def generate_lyrics():
    prompt = "Lyrics:"
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    output = model.generate(
        input_ids,
        max_length=300,   # Maximum number of tokens
        temperature=0.7,  # Adjust randomness
        top_k=50,         # Top-k sampling
        top_p=0.95,       # Nucleus sampling
        do_sample=True,   # Enable stochastic decoding
    )
    return tokenizer.decode(output[0], skip_special_tokens=True)

# Define the Gradio interface
interface = gr.Interface(
    fn=generate_lyrics,
    inputs=None,  # No input arguments
    outputs=gr.Textbox(label="lyrics"),
    title="",
    description="click 'generate' to make new 24thankyou lyrics",
    theme="compact",
)

# Launch the app
interface.launch()