import random
from transformers import AutoTokenizer, AutoModelForCausalLM
import gradio as gr
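
# Load the fine-tuned Docto-Bot checkpoint and its tokenizer from the Hugging Face Hub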
tokenizer = AutoTokenizer.from_pretrained("docto/Docto-Bot")
model = AutoModelForCausalLM.from_pretrained("docto/Docto-Bot")
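
# GPT-2's end-of-text marker; the model emits it to signal the end of an answer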
special_token = '<|endoftext|>'

def get_reply(userinput):
    # Frame the question in the "Question: ... Answer:" format used for fine-tuning
    prompt_text = f'Question: {userinput}\nAnswer:'
    encoded_prompt = tokenizer.encode(prompt_text,
                                      add_special_tokens=False,
                                      return_tensors='pt')
    output_sequences = model.generate(
        input_ids=encoded_prompt,
        max_length=500,
        temperature=0.7,         # soften the token distribution
        top_k=20,                # sample from the 20 most likely tokens...
        top_p=0.9,               # ...restricted to the 0.9 nucleus
        repetition_penalty=1.0,  # 1.0 means no repetition penalty
        do_sample=True,
        num_return_sequences=1,
    )
    try:
        # Decode one generated sequence and slice out the text between
        # "Answer: " and the end-of-text marker
        result = tokenizer.decode(random.choice(output_sequences))
        result = result[result.index('Answer: '):result.index(special_token)]
        return result[len('Answer: '):]
    except ValueError:
        # .index() raises ValueError when no answer or end-of-text marker was generated
        return "Sorry! I don't know."
iface = gr.Interface(fn=get_reply, inputs=["text"], outputs=["textbox"])
iface.launch()