import gradio as gr
from ultralytics import YOLO
from fastapi import FastAPI, File, UploadFile
from PIL import Image
import io
import torch
import spaces  # Hugging Face Spaces helpers (provides the @spaces.GPU decorator)

# Initialize FastAPI and the model
app = FastAPI()

# Pick the GPU if one is available, otherwise fall back to CPU
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = YOLO('NailongKiller.yolo11n.pt').to(device)


@spaces.GPU  # mark this function as requiring a GPU (needed on ZeroGPU Spaces)
def predict(img):
    # Ultralytics accepts PIL images directly and handles preprocessing and
    # device placement itself, so no manual tensor conversion is needed
    results = model.predict(img)
    # plot() returns a BGR array; flip the channels to RGB for Gradio
    return results[0].plot()[:, :, ::-1]


# Gradio interface
demo = gr.Interface(
    predict,
    inputs=[
        gr.Image(label="输入图片 | Input image", type="pil")
    ],
    outputs=[
        gr.Image(label="检测结果 | Detection result", type="numpy")
    ],
    title="🐉 奶龙杀手 (NailongKiller)",
    description="上传图片来检测奶龙 | Upload an image to detect Nailong",
    examples=[
        ["example1.jpg"]
    ],
    cache_examples=True
)


# API endpoint
@app.post("/detect/")
async def detect_api(file: UploadFile = File(...)):
    contents = await file.read()
    # Normalize to RGB so grayscale/RGBA uploads are handled as well
    image = Image.open(io.BytesIO(contents)).convert("RGB")
    results = model.predict(image)
    result = results[0]

    detections = []
    for box in result.boxes:
        detection = {
            "bbox": box.xyxy[0].tolist(),
            "confidence": float(box.conf[0]),
            "class": int(box.cls[0])
        }
        detections.append(detection)
    return {"detections": detections}


# Mount the Gradio app onto FastAPI
app = gr.mount_gradio_app(app, demo, path="/")

# Start the app
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)
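
# --- Example client call (sketch) ---
# A minimal usage sketch for the /detect/ endpoint above. It assumes the server
# is already running locally on port 7860 and that `example1.jpg` exists next to
# this script; run it from a separate process.
#
#   import requests
#
#   with open("example1.jpg", "rb") as f:
#       resp = requests.post(
#           "http://localhost:7860/detect/",
#           files={"file": ("example1.jpg", f, "image/jpeg")},
#       )
#   print(resp.json())  # {"detections": [{"bbox": [...], "confidence": ..., "class": ...}]}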