from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
import torch
import gradio as gr
import os
import time

model_id = "alibidaran/Gemma2_Virtual_doctor"

# 4-bit NF4 quantization settings (only take effect if passed to from_pretrained).
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
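# Note (assumption): bnb_config above is defined but never passed to from_pretrained,
# so the model loads without 4-bit quantization. On a GPU with bitsandbytes installed,
# quantized loading would look like the commented call below.
# model = AutoModelForCausalLM.from_pretrained(
#     model_id, device_map="auto", quantization_config=bnb_config
# )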
def print_like_dislike(x: gr.LikeData):
    # Log which message was liked or disliked.
    print(x.index, x.value, x.liked)

def add_text(history, text):
    # Append the user's message and lock the textbox until the bot has replied.
    history = history + [(text, None)]
    return history, gr.Textbox(value="", interactive=False)

def add_file(history, file):
    # Remember the uploaded file path and show the file in the chat.
    global image_file
    image_file = file.name
    history = history + [((file.name,), None)]
    return history
def bot(history):
    prompt = history[-1][0]
    # Prompt template kept verbatim (including the "Asistant" spelling) to match the
    # format the model was fine-tuned on.
    text = f"<s> ###Human: {prompt} ###Asistant: "
    inputs = tokenizer(text, return_tensors='pt').to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs, max_new_tokens=200, do_sample=True,
            top_p=0.92, top_k=10, temperature=0.7,
        )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    print(response)
    # Replay the finished response character by character to simulate streaming in the UI.
    history[-1][1] = ""
    for character in response[1:-1]:
        history[-1][1] += character
        time.sleep(0.01)
        yield history
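# Alternative (a minimal sketch, not wired into the UI below): stream real tokens with
# transformers' TextIteratorStreamer instead of replaying the decoded string. The name
# `bot_stream` is hypothetical; swap it for `bot` in the .then() calls to try it.
def bot_stream(history):
    from threading import Thread
    from transformers import TextIteratorStreamer

    prompt = history[-1][0]
    text = f"<s> ###Human: {prompt} ###Asistant: "
    inputs = tokenizer(text, return_tensors='pt').to(model.device)
    # skip_prompt drops the echoed prompt; skip_special_tokens cleans the output.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = dict(
        **inputs, streamer=streamer, max_new_tokens=200,
        do_sample=True, top_p=0.92, top_k=10, temperature=0.7,
    )
    # generate() blocks, so run it in a background thread and consume the streamer here.
    Thread(target=model.generate, kwargs=generation_kwargs).start()
    history[-1][1] = ""
    for token_text in streamer:
        history[-1][1] += token_text
        yield history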
with gr.Blocks() as demo:
    chatbot = gr.Chatbot(
        [],
        elem_id="chatbot",
        bubble_full_width=False,
        # avatar_images=(None, os.path.join(os.path.dirname(__file__), "avatar.png")),
    )

    with gr.Row():
        txt = gr.Textbox(
            scale=4,
            show_label=False,
            placeholder="Ask the virtual doctor about your symptoms!",
            container=False,
        )
        btn = gr.UploadButton("📁", file_types=["image", "video", "audio"])

    # Chain: add the user message, generate the bot reply, then re-enable the textbox.
    txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
        bot, chatbot, chatbot, api_name="bot_response"
    )
    txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)

    file_msg = btn.upload(add_file, [chatbot, btn], [chatbot], queue=False).then(
        bot, chatbot, chatbot
    )

    chatbot.like(print_like_dislike, None, None)

if __name__ == "__main__":
    demo.launch(share=True, debug=True)
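# Usage sketch (assumption): once the Space is running, the text endpoint exposed as
# api_name="bot_response" can be called with gradio_client. The Space id below reuses
# the model repo name and may differ from the actual Space; the history argument is a
# list of [user, bot] pairs, and predict() returns the final yielded chat history.
#
# from gradio_client import Client
# client = Client("alibidaran/Gemma2_Virtual_doctor")
# history = client.predict([["I have a sore throat and a mild fever.", None]],
#                          api_name="/bot_response")
# print(history[-1][1])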