import gradio as gr
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer

# Load the model in 8-bit precision on the first GPU to reduce memory usage.
model = AutoModelForCausalLM.from_pretrained(
    'vitaliy-sharandin/wiseai',
    load_in_8bit=True,
    device_map={"": 0}
)
tokenizer = AutoTokenizer.from_pretrained('vitaliy-sharandin/wiseai')

# Wrap the model and tokenizer in a text-generation pipeline.
pipe = pipeline('text-generation', model=model, tokenizer=tokenizer)

def generate_text(prompt):
    # Sample a single completion and return only the newly generated text.
    result = pipe(prompt, max_length=200, do_sample=True, temperature=0.7,
                  num_return_sequences=1, return_full_text=False)
    return result[0]['generated_text']

# Expose the generator through a simple text-in / text-out Gradio interface.
iface = gr.Interface(fn=generate_text, inputs="text", outputs="text")
iface.launch()