import os

# Install dependencies if they are not already available (gradio is needed for the UI).
os.system("pip install transformers torch gradio")

import gradio as gr
import torch
from transformers import AutoModel, AutoTokenizer, trainer_utils

# Load the model and tokenizer, using the GPU when one is available.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").to(device)
tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")

# Define the text generation function.
def generate_text(input_text):
    x_token = tokenizer(input_text, return_tensors="pt")
    trainer_utils.set_seed(30)  # fix the seed so generations are reproducible
    input_ids = x_token.input_ids.to(device)
    gen_token = model.generate(input_ids, max_new_tokens=50)
    return tokenizer.decode(gen_token[0])

# Create the Gradio interface.
iface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="Japanese Text Generation",
    description="Enter a prompt in Japanese to generate text.",
)

# Launch the interface.
iface.launch()
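
# Optional sanity check (a minimal sketch, not part of the Gradio app itself):
# calling generate_text() directly, before or instead of iface.launch(), verifies
# that the model loads and decodes. The Japanese prompt below is an arbitrary example.
# print(generate_text("織田信長は、"))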