import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the model (a quantized build to save GPU memory)
model_name = "Qwen/Qwen2.5-7B-Instruct-GPTQ-Int8"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",   # automatically place layers across GPU/CPU
    torch_dtype="auto"
)

# Define the generation function
def generate_response(message, history):
    # Rebuild the full conversation: prior history plus the new user message
    messages = history + [{"role": "user", "content": message}]
    text = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    inputs = tokenizer(text, return_tensors="pt").to(model.device)

    # Generate a reply (do_sample=True so temperature actually takes effect)
    outputs = model.generate(
        **inputs,
        max_new_tokens=512,
        do_sample=True,
        temperature=0.7
    )
    # Decode only the newly generated tokens, skipping the prompt
    response = tokenizer.decode(
        outputs[0][inputs.input_ids.shape[1]:], skip_special_tokens=True
    )
    return response

# Launch the Gradio interface
gr.ChatInterface(
    fn=generate_response,
    type="messages",  # history is passed as a list of {"role", "content"} dicts
    title="Qwen2.5-7B Large Model Online Demo",
    description="Type a question and press Enter to start chatting"
).launch()
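The synchronous generate() call above blocks until the entire reply is finished, so the user sees nothing until then. If you want tokens to appear incrementally in the chat window, here is a minimal sketch using transformers' TextIteratorStreamer: generation runs in a background thread while the function yields partial text, which gr.ChatInterface renders as streaming output. The function name generate_response_stream is our own illustration, and it reuses the tokenizer and model objects defined above.

from threading import Thread
from transformers import TextIteratorStreamer

# Streaming variant: yields growing partial text so the UI updates token by token
def generate_response_stream(message, history):
    messages = history + [{"role": "user", "content": message}]
    text = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    inputs = tokenizer(text, return_tensors="pt").to(model.device)

    # skip_prompt=True makes the streamer emit only newly generated tokens
    streamer = TextIteratorStreamer(
        tokenizer, skip_prompt=True, skip_special_tokens=True
    )
    generation_kwargs = dict(
        **inputs,
        streamer=streamer,
        max_new_tokens=512,
        do_sample=True,
        temperature=0.7,
    )
    # generate() blocks, so run it in a background thread while we consume text
    Thread(target=model.generate, kwargs=generation_kwargs).start()

    partial = ""
    for new_text in streamer:
        partial += new_text
        yield partial  # ChatInterface streams output from generator functions

To use it, pass fn=generate_response_stream to gr.ChatInterface instead of generate_response; everything else stays the same.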