|
import gradio as gr |
|
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer |
|
|
|
# Load the fine-tuned causal LM in 8-bit quantization, pinned entirely to GPU 0
# (device_map {"": 0} places every module on cuda:0).
# NOTE(review): `load_in_8bit=True` requires bitsandbytes and is deprecated in
# newer transformers releases in favor of passing a BitsAndBytesConfig via
# `quantization_config` -- confirm against the installed transformers version.
model = AutoModelForCausalLM.from_pretrained(
    'vitaliy-sharandin/wiseai',
    load_in_8bit=True,
    device_map = {"": 0}
)

# Tokenizer from the same checkpoint so special tokens match the model.
tokenizer = AutoTokenizer.from_pretrained('vitaliy-sharandin/wiseai')

# Shared text-generation pipeline; used by generate_text() below.
pipe = pipeline('text-generation', model=model,tokenizer=tokenizer)
|
|
|
def generate_text(instruction, input):
    """Generate a model response for an Alpaca-style prompt.

    Args:
        instruction: The task description; required (must be non-blank).
        input: Optional additional context; included as an "### Input"
            section only when non-blank.

    Returns:
        The generated response text, truncated at the first "###" marker
        (to drop any follow-on sections the model hallucinates), or an
        error message string when the instruction is blank.
    """
    # Guard clause: the instruction field is mandatory.
    if not instruction.strip():
        return 'The instruction field is required.'

    # Shared prompt preamble (Alpaca-style instruction format).
    header = ("Below is an instruction that describes a task. "
              "Write a response that appropriately completes the request.\n\n")

    # instruction is already known non-blank here, so only `input` decides
    # which prompt variant to build (the original re-checked instruction
    # redundantly).
    if input.strip():
        input_prompt = (f"{header}"
                        "### Instruction:\n"
                        f"{instruction}\n\n"
                        "### Input:\n"
                        f"{input}\n\n"
                        "### Response: \n")
    else:
        input_prompt = (f"{header}"
                        "### Instruction:\n"
                        f"{instruction}\n\n"
                        "### Response: \n")

    result = pipe(input_prompt, max_length=200, top_p=0.9, temperature=0.9,
                  num_return_sequences=1, return_full_text=False)[0]['generated_text']

    # Bug fix: str.find() returns -1 when "###" is absent, and the original
    # `result[:result.find("###")]` then evaluated to result[:-1], silently
    # dropping the last character. Only slice when the marker is present.
    cutoff = result.find("###")
    return result if cutoff == -1 else result[:cutoff]
|
|
|
# Gradio UI: instruction + optional context text boxes in, one response box out.
input_components = [
    gr.Textbox(label="Instruction"),
    gr.Textbox(label="Additional Input"),
]

iface = gr.Interface(
    fn=generate_text,
    inputs=input_components,
    outputs=gr.Textbox(label="Response"),
)

iface.launch()