# NOTE: this file was scraped from a Hugging Face Spaces page; the page
# header ("Spaces: ... Sleeping") has been converted to this comment so
# the file is valid Python.
import gradio as gr
from transformers import AutoModel, AutoTokenizer

# Load the KF-DeBERTa encoder and its matching tokenizer once at module
# import time so the Gradio callback can reuse them across requests.
# (Downloads the weights from the Hugging Face Hub on first run.)
model = AutoModel.from_pretrained("kakaobank/kf-deberta-base")
tokenizer = AutoTokenizer.from_pretrained("kakaobank/kf-deberta-base")
def process_text(text):
    """Tokenize *text* and run it through the DeBERTa encoder.

    Args:
        text: Raw input string entered in the Gradio textbox.

    Returns:
        A ``(token_output, model_output_str)`` tuple of display strings:
        the tokenizer's subword tokens and the raw model output repr.
    """
    import torch  # local import: only needed to disable autograd here

    # Show the subword tokenization of the input.
    tokens = tokenizer.tokenize(text)
    token_output = f"Tokens: {tokens}"

    # Inference only — no_grad() avoids building an autograd graph,
    # saving memory and time for every request.
    inputs = tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        model_output = model(**inputs)

    # NOTE(review): str() of a model output is a large tensor dump;
    # a shape summary may be friendlier, but the raw repr is kept to
    # preserve existing behavior.
    model_output_str = str(model_output)

    return token_output, model_output_str
# Create the Gradio interface. The `gr.inputs` / `gr.outputs` namespaces
# were removed in Gradio 3.x+; components are used directly from `gr`.
iface = gr.Interface(
    fn=process_text,
    inputs=gr.Textbox(lines=2, placeholder="Enter text here..."),
    outputs=[
        gr.Textbox(label="Tokenized Output"),
        gr.Textbox(label="Model Output"),
    ],
    title="DeBERTa Model Text Processing",
    description="This interface tokenizes the input text and processes it with the DeBERTa model.",
)

# Start the local web server (blocks until shut down).
iface.launch()