from flask import Flask, request, jsonify, render_template, send_from_directory
from flask_cors import CORS
import sys
import os

sys.path.append(os.path.dirname(__file__))  # Make sure the current directory is on the module search path
from llama3 import LlaMa3  # Import your LlaMa3 class

app = Flask(__name__, static_folder='../frontend/dist', template_folder='../frontend/dist')
CORS(app)

# Instantiate the LlaMa3 model
llama3_model = LlaMa3()

# Health check to verify the service responds
# (disabled because it would conflict with the '/' route below)
#@app.route("/")
#def health_check():
#    return "Service is running!", 200


@app.route('/')
def index():
    # Serve the HTML page
    return render_template('index_s.html')


@app.route('/chat', methods=['POST'])
def chat():
    # Read the user message sent by the frontend; get_json(silent=True)
    # avoids raising on a missing or malformed JSON body
    data = request.get_json(silent=True) or {}
    user_message = data.get('message', '')
    if not isinstance(user_message, str) or not user_message.strip():
        return jsonify({"response": "Please enter a valid message!"}), 400
    try:
        # Build the chat context
        messages = [{"role": "user", "content": user_message}]
        # Call LlaMa3's chat method to generate a reply
        ai_response = llama3_model.chat(messages)
        # Return the AI's reply
        return jsonify({"response": ai_response})
    except Exception as e:
        print(f"Error during llama3 call: {e}")
        return jsonify({"response": "An error occurred, please try again later!"}), 500


@app.route('/favicon.ico')
def favicon():
    return send_from_directory(os.path.join(app.root_path, 'static'),
                               'favicon.ico', mimetype='image/vnd.microsoft.icon')


if __name__ == '__main__':
    app.run(debug=True, host='127.0.0.1', port=7860)
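
# Quick manual test (a sketch, not part of the app): with the server running
# on 127.0.0.1:7860 as configured above, the /chat endpoint can be exercised
# from another process like so:
#
#   import requests  # assumes the `requests` package is installed
#   resp = requests.post('http://127.0.0.1:7860/chat', json={'message': 'Hello!'})
#   print(resp.json()['response'])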