import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the local model and tokenizer
model_name = "ganchengguang/OIELLM-8B-Instruction"  # replace with your own model name
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,
    low_cpu_mem_usage=True
)
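# Note (assumption, not in the original script): the model is loaded on CPU by default here;
# on a GPU-backed Space you would typically also call model.to("cuda") and move the input
# tensors to the same device before calling generate().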
# Mapping from UI language to the task tags the model expects
options = {
    'English': {'NER': '/NER/', 'Sentimentrw': '/Sentiment related word/', 'Sentimentadjn': '/Sentiment Adj and N/', 'Sentimentadj': '/Sentiment Adj/', 'Sentimentn': '/Sentiment N/', 'Relation': '/relation extraction/', 'Event': '/event extraction/'},
    '中文': {'NER': '/实体命名识别/', 'Sentimentrw': '/感情分析关联单词/', 'Sentimentadjn': '/感情分析形容词名词/', 'Sentimentadj': '/感情分析形容词/', 'Sentimentn': '/感情分析名词/', 'Relation': '/关系抽取/', 'Event': '/事件抽取/'},
    '日本語': {'NER': '/固有表現抽出/', 'Sentimentrw': '/感情分析関連単語/', 'Sentimentadjn': '/感情分析形容詞名詞/', 'Sentimentadj': '/感情分析形容詞/', 'Sentimentn': '/感情分析名詞/', 'Relation': '/関係抽出/', 'Event': '/事件抽出/'}
}
# Chat function invoked by the Gradio UI
def respond(message, language, task, max_tokens):
    # Build the prompt: the user text followed by the task tag the model expects
    user_message = message + " " + options[language][task]
    # Encode the input
    inputs = tokenizer(user_message, return_tensors="pt", padding=True, truncation=True)
    # Generate a reply; max_new_tokens matches the "Max new tokens" slider
    outputs = model.generate(
        inputs["input_ids"],
        max_new_tokens=max_tokens,
        do_sample=True
    )
    # Decode only the newly generated tokens so the prompt is not echoed back
    generated_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    response = tokenizer.decode(generated_tokens, skip_special_tokens=True).strip()
    return response
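
# Optional alternative (a sketch, not part of the original app): if the tokenizer for
# "ganchengguang/OIELLM-8B-Instruction" ships a chat template, the prompt could be built
# from system/user messages instead of plain string concatenation. This assumes
# tokenizer.apply_chat_template is available (transformers >= 4.34) and that the model
# was fine-tuned on that chat format.
def build_chat_prompt(message, language, task):
    messages = [
        {"role": "system", "content": "You are a friendly Chatbot."},
        {"role": "user", "content": message + " " + options[language][task]},
    ]
    # Returns a single prompt string in the tokenizer's native chat format
    return tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)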
# Refresh the task choices when the selected language changes
def update_tasks(language):
    return gr.update(choices=list(options[language].keys()))
# Build the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Open-domain Information Extraction Large Language Models Demo")
    language = gr.Dropdown(label="Language", choices=list(options.keys()), value="English")
    task = gr.Dropdown(label="Task", choices=list(options['English'].keys()))
    message = gr.Textbox(label="Input Text")
    max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
    output = gr.Textbox(label="Output")
    send_button = gr.Button("Send")
    language.change(update_tasks, inputs=language, outputs=task)
    send_button.click(respond, inputs=[message, language, task, max_tokens], outputs=output)
if __name__ == "__main__":
    demo.launch()
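
# Quick smoke test (hypothetical usage example; demo.launch() above blocks, so run this in a
# separate session or temporarily comment out the launch call):
# print(respond("Barack Obama was born in Hawaii.", "English", "NER", 128))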