import traceback

import gradio as gr
from transformers import AutoConfig, AutoModelForSeq2SeqLM, DataCollatorForSeq2Seq
# Default checkpoint for the module-level smoke test. The original code
# referenced an undefined `model_name` here, which raised NameError on import;
# run_train() still accepts an arbitrary model name at call time.
DEFAULT_MODEL_NAME = "google/t5-efficient-tiny-nh8"

# Load once at import time to verify the environment can materialize a model
# outside of the @spaces.GPU context.
model = AutoModelForSeq2SeqLM.from_pretrained(DEFAULT_MODEL_NAME)
print(f"Successfully loaded the model without gradio or spaces, model object: {model}")
# NOTE(review): `spaces` is referenced by this decorator but never imported in
# the visible file — confirm `import spaces` exists elsewhere or add it.
@spaces.GPU(duration=120)
def run_train(model_name, dataset_name, hub_id, api_key, num_epochs, batch_size, lr, grad):
    """Smoke-test stub: load *model_name* inside the GPU worker and report success.

    The training-related parameters (dataset_name, hub_id, api_key, num_epochs,
    batch_size, lr, grad) are accepted to match the Gradio inputs but are
    currently unused.
    """
    # Materializing the checkpoint is the only work performed; reaching the
    # return statement means loading succeeded under @spaces.GPU.
    _ = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    return "WORKS"
# Create the Gradio interface; any failure during construction or launch is
# logged with a full traceback instead of crashing the Space silently.
# (Fix: `traceback` was used below but never imported, so the handler itself
# raised NameError and masked the real error.)
try:
    iface = gr.Interface(
        fn=run_train,
        inputs=[
            gr.Textbox(label="Model Name (e.g., 'google/t5-efficient-tiny-nh8')"),
            gr.Textbox(label="Dataset Name (e.g., 'imdb')"),
            gr.Textbox(label="HF hub to push to after training"),
            gr.Textbox(label="HF API token"),
            gr.Slider(minimum=1, maximum=10, value=3, label="Number of Epochs", step=1),
            gr.Slider(minimum=1, maximum=2000, value=1, label="Batch Size", step=1),
            gr.Slider(minimum=1, maximum=1000, value=1, label="Learning Rate (e-5)", step=1),
            gr.Slider(minimum=1, maximum=100, value=1, label="Gradient accumulation", step=1),
        ],
        outputs="text",
        title="Fine-Tune Hugging Face Model",
        description="This interface allows you to fine-tune a Hugging Face model on a specified dataset."
    )
    # Launch the interface (blocks until the server is stopped).
    iface.launch()
except Exception as e:
    print(f"An error occurred: {str(e)}, TB: {traceback.format_exc()}")