# Hugging Face Space page header (extraction residue): "Spaces: Sleeping" status badge.
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the Lumenspark model and its tokenizer from the Hugging Face Hub.
# trust_remote_code=True is required because the repo ships custom model code;
# NOTE(review): this executes code from the repo — acceptable here only because
# the source ("anto18671/lumenspark") is a fixed, known repository.
model = AutoModelForCausalLM.from_pretrained("anto18671/lumenspark", trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained("anto18671/lumenspark", trust_remote_code=True)
def generate_text(input_text: str) -> str:
    """Generate a text continuation of *input_text* with the Lumenspark model.

    Parameters
    ----------
    input_text : str
        Prompt text to continue.

    Returns
    -------
    str
        The decoded model output (prompt plus continuation), with special
        tokens stripped.
    """
    # Tokenize the prompt into PyTorch tensors for the model.
    encoded_input = tokenizer(input_text, return_tensors="pt")
    # Sample a continuation. The length bounds keep responses short; the
    # sampling knobs (temperature/top_k/top_p) trade determinism for variety,
    # and repetition_penalty damps verbatim loops.
    output = model.generate(
        input_ids=encoded_input["input_ids"],
        attention_mask=encoded_input["attention_mask"],
        max_length=100,          # hard cap on total tokens (prompt + generation)
        min_length=20,           # force at least a short continuation
        temperature=0.6,         # < 1.0 sharpens the sampling distribution
        top_k=50,
        top_p=0.9,
        repetition_penalty=1.1,
        do_sample=True,          # enable sampling so the knobs above apply
    )
    # generate() returns a batch; decode the single sequence back to text.
    return tokenizer.decode(output[0], skip_special_tokens=True)
# Build the Gradio UI: a single text box wired to generate_text, with the
# model's reply rendered as plain text.
interface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=2, placeholder="Enter your text here..."),
    outputs="text",
    title="Text Generator",
    description="Generate text using the Lumenspark model.",
)

# Start the web server (blocks until the app is stopped).
interface.launch()