File size: 663 Bytes
007b3da
4312459
 
 
8569557
 
 
4312459
8569557
 
4312459
e4c0c54
8569557
4312459
8569557
4312459
8569557
821b5fa
7509ca1
8569557
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Load Microsoft's BioGPT tokenizer and causal-LM weights.
# NOTE: from_pretrained downloads the checkpoint on first run (network required)
# and this happens at import time, before the UI starts.
tokenizer = AutoTokenizer.from_pretrained("microsoft/biogpt")
model = AutoModelForCausalLM.from_pretrained("microsoft/biogpt")

def chat(prompt: str) -> str:
    """Generate a free-text continuation of *prompt* with BioGPT.

    Args:
        prompt: User-supplied medical question or symptom description.

    Returns:
        The decoded generation; includes the prompt as a prefix because
        causal LMs echo their input tokens.
    """
    # Move token tensors to the same device as the model weights —
    # without this the call crashes whenever the model lives on GPU.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=256,
            do_sample=True,  # sampling: output is intentionally non-deterministic
            # Explicit pad token silences the "pad_token_id not set" warning
            # for open-ended generation.
            pad_token_id=tokenizer.eos_token_id,
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Build the web UI: a single text box in, a single text box out.
demo = gr.Interface(
    fn=chat,
    inputs=gr.Textbox(label="输入医学问题或症状描述"),
    outputs=gr.Textbox(),
    title="医疗语言模型BioGPT",
)

# Start the local Gradio server (blocks until the process is stopped).
demo.launch()