# import torch
# from transformers import AutoModelForCausalLM, AutoTokenizer
# import gradio as gr
#
# # Load model and tokenizer (using CPU for broader accessibility)
# model = AutoModelForCausalLM.from_pretrained("microsoft/phi-2", torch_dtype=torch.float32, device_map="cpu", trust_remote_code=True)
# tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2", trust_remote_code=True)
#
# def generate_text(prompt):
#     inputs = tokenizer(prompt, return_tensors="pt", return_attention_mask=False)
#     outputs = model.generate(**inputs, max_length=200)
#     text = tokenizer.batch_decode(outputs)[0]
#     return text
#
# # Create Gradio interface
# iface = gr.Interface(
#     fn=generate_text,
#     inputs=[gr.Textbox(lines=5, label="Enter your prompt")],
#     outputs="text",
#     title="PHI-2 Text Generator",
#     description="Generate text using the PHI-2 generative language model",
# )
#
# # Launch the interface
# iface.launch()

import gradio as gr
from transformers import pipeline

# Load the text2text-generation pipeline for the fine-tuned T5 model
pipe = pipeline("text2text-generation", model="yeye776/t5-OndeviceAI-HomeIoT")

# gr.load("models/yeye776/t5-OndeviceAI-HomeIoT").launch()


def generate(text):
    # The pipeline returns a list of dicts; extract the generated string
    return pipe(text)[0]["generated_text"]


# Create and launch the Gradio interface
iface = gr.Interface(fn=generate, inputs="text", outputs="text")
iface.launch()