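# Gradio demo app that serves Qwen/Qwen2.5-7B-Instruct-AWQ as a simple chat interface.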
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
from huggingface_hub import login
login(token="你的Token")  # Authenticate with the Hugging Face Hub before loading the model
model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-7B-Instruct-AWQ",
    device_map="auto",
    use_auth_token=True,    # use the Hub credentials from login()
    resume_download=True,   # resume interrupted downloads
    local_files_only=False
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-7B-Instruct-AWQ")
# Generation function used by the chat interface
def generate_response(message, history):
    # Build the chat prompt from the current message (previous turns are not reused here)
    messages = [{"role": "user", "content": message}]
    text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer(text, return_tensors="pt").to(model.device)
    # Generate the reply
    outputs = model.generate(
        **inputs,
        max_new_tokens=512,
        do_sample=True,     # sampling must be enabled for temperature to take effect
        temperature=0.7
    )
    # Decode only the newly generated tokens, skipping the prompt
    response = tokenizer.decode(outputs[0][inputs.input_ids.shape[1]:], skip_special_tokens=True)
    return response
# Launch the Gradio chat interface
gr.ChatInterface(
    fn=generate_response,
    title="Qwen2.5-7B Large Model Online Demo",
    description="Type a question and press Enter to start the conversation"
).launch()