Spaces: Build error
from transformers import GPT2LMHeadModel, GPT2TokenizerFast, pipeline
import gradio as gr

# Path to the model repository on the Hugging Face Hub
model_dir = "JakeTurner616/Adonalsium-gpt2"

# Load the model weights and tokenizer from the Hub
model = GPT2LMHeadModel.from_pretrained(model_dir, torch_dtype="auto", low_cpu_mem_usage=True)
tokenizer = GPT2TokenizerFast.from_pretrained(model_dir)

# GPT-2 has no pad token by default, so reuse the end-of-sequence token
tokenizer.pad_token = tokenizer.eos_token

# Create the text-generation pipeline
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

def generate_text(prompt):
    # Sample a single continuation; do_sample=True is required for
    # temperature and top_p to actually take effect
    generated_texts = generator(prompt, max_length=150, do_sample=True,
                                temperature=0.7, top_p=0.85,
                                repetition_penalty=1.3,
                                num_return_sequences=1,
                                no_repeat_ngram_size=2)
    return generated_texts[0]["generated_text"]

# Create the Gradio interface
iface = gr.Interface(fn=generate_text, inputs="text", outputs="text",
                     title="Cosmere Text Generator",
                     description="Generate text based on the Cosmere series by Brandon Sanderson.")

# Launch the interface
iface.launch()
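
A Gradio Space also needs a requirements.txt next to app.py so the build step can install the app's Python dependencies. If the build error comes from missing packages (an assumption here, since the build log is not shown), a minimal requirements.txt for this app would be the sketch below; gradio itself is provided by the Space's Gradio SDK and does not need to be listed.

# requirements.txt (a sketch; pin exact versions as needed)
transformers
torch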