|
import gradio as gr |
|
from transformers import pipeline |
|
from transformers import AutoModelForCausalLM, AutoTokenizer |
|
|
|
# Adapter repo fine-tuned to rephrase text in Yoda's speech style.
_REPO_ID = "dvgodoy/phi3-mini-yoda-adapter"

# Built lazily on first use and reused across calls: the original code
# reloaded the model on every request, and additionally loaded the weights
# twice (once via AutoModelForCausalLM, then again inside pipeline(repo_id)).
_generator = None


def _get_generator():
    """Return the shared text-generation pipeline, creating it on first call."""
    global _generator
    if _generator is None:
        tokenizer = AutoTokenizer.from_pretrained(_REPO_ID)
        model = AutoModelForCausalLM.from_pretrained(_REPO_ID)
        # Pass the loaded objects so pipeline() does not re-download/reload them.
        _generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
    return _generator


def generateYodaLanguage(prompt):
    """Rephrase *prompt* in Yoda-speak using the fine-tuned phi3-mini adapter.

    Parameters
    ----------
    prompt : str
        The user's input text.

    Returns
    -------
    str
        The generated completion only (prompt excluded via
        ``return_full_text=False``), capped at 128 new tokens.
    """
    output = _get_generator()(
        [{"role": "user", "content": prompt}],
        max_new_tokens=128,
        return_full_text=False,
    )[0]
    return output["generated_text"]
|
|
|
# Minimal UI: a prompt box, a trigger button, and an output box, laid out
# side by side in a single row.
with gr.Blocks() as demo:
    with gr.Row():
        prompt_box = gr.Textbox(label="prompt")
        say_btn = gr.Button("Say it in Yoda Language!")
        yoda_box = gr.Textbox(label="Yoda language")
    # Wire the button: prompt text in, Yoda translation out.
    say_btn.click(fn=generateYodaLanguage, inputs=prompt_box, outputs=yoda_box)

demo.launch(debug=True, share=True)