This section is a row-oriented dump of a code-repository dataset with the following schema:

| column | type | values |
| -- | -- | -- |
| `repo_id` | string | lengths 19–138 |
| `file_path` | string | lengths 32–200 |
| `content` | string | lengths 1–12.9M |
| `__index_level_0__` | int64 | 0–0 |

Each record below lists the file path followed by the file content.
### apollo_public_repos/apollo-model-yolo3d/src/models/components/base.py
""" KITTI Regressor Model """ import torch from torch import nn import torch.nn.functional as F from torchvision import models class RegressorNet(nn.Module): def __init__( self, backbone: nn.Module, bins: int, ): super().__init__() # init model self.in_features = self._get_in_features(backbone) self.model = nn.Sequential(*(list(backbone.children())[:-2])) self.bins = bins # # orientation head, for orientation estimation # self.orientation = nn.Sequential( # nn.Linear(self.in_features, 256), # nn.ReLU(True), # nn.Dropout(), # nn.Linear(256, 256), # nn.ReLU(True), # nn.Dropout(), # nn.Linear(256, self.bins*2) # 4 bins # ) # # confident head, for orientation estimation # self.confidence = nn.Sequential( # nn.Linear(self.in_features, 256), # nn.ReLU(True), # nn.Dropout(), # nn.Linear(256, 256), # nn.ReLU(True), # nn.Dropout(), # nn.Linear(256, self.bins), # nn.Sigmoid() # ) self.orientation = nn.Sequential( nn.Linear(self.in_features, 1024), nn.ReLU(True), nn.Dropout(), nn.Linear(1024, 1024), nn.ReLU(True), nn.Dropout(), nn.Linear(1024, self.bins*2) # 4 bins ) # confident head, for orientation estimation self.confidence = nn.Sequential( nn.Linear(self.in_features, 512), nn.ReLU(True), nn.Dropout(), nn.Linear(512, 512), nn.ReLU(True), nn.Dropout(), nn.Linear(512, self.bins), nn.Sigmoid() ) # dimension head self.dimension = nn.Sequential( nn.Linear(self.in_features, 512), nn.ReLU(True), nn.Dropout(), nn.Linear(512, 512), nn.ReLU(True), nn.Dropout(), nn.Linear(512, 3) # x, y, z ) def forward(self, x): x = self.model(x) x = x.view(-1, self.in_features) orientation = self.orientation(x) orientation = orientation.view(-1, self.bins, 2) # orientation = F.normalize(orientation, dim=2) # TODO: export model use this orientation = orientation.div(orientation.norm(dim=2, keepdim=True)) confidence = self.confidence(x) dimension = self.dimension(x) return orientation, confidence, dimension def _get_in_features(self, net: nn.Module): # TODO: add more models in_features = { 'resnet': (lambda: net.fc.in_features * 7 * 7), # 512 * 7 * 7 = 25088 'vgg': (lambda: net.classifier[0].in_features), # 512 * 7 * 7 = 25088 # 'mobilenetv3_large': (lambda: (net.classifier[0].in_features) * 7 * 7), # 960 * 7 * 7 = 47040 'mobilenetv3': (lambda: (net.classifier[0].in_features) * 7 * 7), # 576 * 7 * 7 = 28416 } return in_features[(net.__class__.__name__).lower()]() class RegressorNet2(nn.Module): def __init__( self, backbone: nn.Module, bins: int, ): super().__init__() # init model self.in_features = self._get_in_features(backbone) self.model = nn.Sequential(*(list(backbone.children())[:-2])) self.bins = bins # orientation head, for orientation estimation # TODO: inprove 256 to 1024 self.orientation = nn.Sequential( nn.Linear(self.in_features, 256), nn.LeakyReLU(0.1), nn.Dropout(), nn.Linear(256, self.bins*2), # 4 bins nn.LeakyReLU(0.1) ) # confident head, for orientation estimation self.confidence = nn.Sequential( nn.Linear(self.in_features, 256), nn.LeakyReLU(0.1), nn.Dropout(), nn.Linear(256, self.bins), nn.LeakyReLU(0.1) ) # dimension head self.dimension = nn.Sequential( nn.Linear(self.in_features, 512), nn.LeakyReLU(0.1), nn.Dropout(), nn.Linear(512, 3), # x, y, z nn.LeakyReLU(0.1) ) def forward(self, x): x = self.model(x) x = x.view(-1, self.in_features) orientation = self.orientation(x) orientation = orientation.view(-1, self.bins, 2) # TODO: export model use this orientation = orientation.div(orientation.norm(dim=2, keepdim=True)) confidence = self.confidence(x) dimension = self.dimension(x) return orientation, 
confidence, dimension def _get_in_features(self, net: nn.Module): # TODO: add more models in_features = { 'resnet': (lambda: net.fc.in_features * 7 * 7), 'vgg': (lambda: net.classifier[0].in_features) } return in_features[(net.__class__.__name__).lower()]() def OrientationLoss(orient_batch, orientGT_batch, confGT_batch): """ Orientation loss function """ batch_size = orient_batch.size()[0] indexes = torch.max(confGT_batch, dim=1)[1] # extract important bin orientGT_batch = orientGT_batch[torch.arange(batch_size), indexes] orient_batch = orient_batch[torch.arange(batch_size), indexes] theta_diff = torch.atan2(orientGT_batch[:,1], orientGT_batch[:,0]) estimated_theta_diff = torch.atan2(orient_batch[:,1], orient_batch[:,0]) return 2 - 2 * torch.cos(theta_diff - estimated_theta_diff).mean() # return -torch.cos(theta_diff - estimated_theta_diff).mean() def orientation_loss2(y_pred, y_true): """ Orientation loss function input: y_true -- (batch_size, bin, 2) ground truth orientation value in cos and sin form. y_pred -- (batch_size, bin, 2) estimated orientation value from the ConvNet output: loss -- loss values for orientation """ # sin^2 + cons^2 anchors = torch.sum(y_true ** 2, dim=2) # check which bin valid anchors = torch.gt(anchors, 0.5) # add valid bin anchors = torch.sum(anchors.type(torch.float32), dim=1) # cos(true)cos(estimate) + sin(true)sin(estimate) loss = (y_true[:, : ,0] * y_pred[:, :, 0] + y_true[:, :, 1] * y_pred[:, :, 1]) # the mean value in each bin loss = torch.sum(loss, dim=1) / anchors # sum the value at each bin loss = torch.mean(loss) loss = 2 - 2 * loss return loss def get_model(backbone: str): """ Get truncated model and in_features """ # list of support model name # TODO: add more models list_model = ['resnet18', 'vgg11'] # model_name = str(backbone.__class__.__name__).lower() assert backbone in list_model, f"Model not support, please choose {list_model}" # TODO: change if else with attributes in_features = None model = None if backbone == 'resnet18': backbone = models.resnet18(pretrained=True) in_features = backbone.fc.in_features * 7 * 7 model = nn.Sequential(*(list(backbone.children())[:-2])) elif backbone == 'vgg11': backbone = models.vgg11(pretrained=True) in_features = backbone.classifier[0].in_features model = backbone.features return [model, in_features] if __name__ == '__main__': # from torchvision.models import resnet18 # from torchsummary import summary # backbone = resnet18(pretrained=False) # model = RegressorNet(backbone, 2) # input_size = (3, 224, 224) # summary(model, input_size, device='cpu') # test orientation loss y_true = torch.tensor([[[0.0, 0.0], [0.9362, 0.3515]]]) y_pred = torch.tensor([[[0.0, 0.0], [0.9362, 0.3515]]]) print(y_true, "\n", y_pred) print(orientation_loss2(y_pred, y_true))
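The orientation head emits `bins * 2` values that are reshaped to `(batch, bins, 2)` and L2-normalized so each bin yields a unit `(cos, sin)` pair. A minimal usage sketch (not part of the original file) illustrates the expected tensor shapes; the `resnet18` backbone, `bins=2`, and the 224×224 crop size are assumptions for illustration:

```python
# Hedged sketch: wire RegressorNet to a torchvision ResNet-18 and check shapes.
# bins=2 and the 224x224 input size are illustrative assumptions.
import torch
from torchvision import models

backbone = models.resnet18(pretrained=False)
net = RegressorNet(backbone, bins=2)
net.eval()

crops = torch.randn(4, 3, 224, 224)  # a batch of 4 object crops
with torch.no_grad():
    orient, conf, dim = net(crops)

print(orient.shape)  # torch.Size([4, 2, 2]) -- (batch, bins, [cos, sin])
print(conf.shape)    # torch.Size([4, 2])    -- per-bin confidence
print(dim.shape)     # torch.Size([4, 3])    -- dimension regression
```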
### apollo_public_repos/apollo-model-centerpoint/LICENSE
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.

"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:

(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.

You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
### apollo_public_repos/apollo-model-centerpoint/requirements.txt
```
colorlog
easydict
filelock
numba >= 0.56
numpy
nuscenes-devkit
opencv-python <= 4.6.0
pandas
paddledet
paddleseg
pyquaternion
pyyaml
pillow<=8.3.2
rarfile
scikit-image
scikit-learn
visualdl
h5py
```
### apollo_public_repos/apollo-model-centerpoint/.pre-commit-config.yaml
```yaml
repos:
-   repo: local
    hooks:
    -   id: yapf
        name: yapf
        entry: yapf --style .style.yapf -i
        language: system
        files: \.py$
-   repo: https://github.com/pre-commit/pre-commit-hooks
    rev: a11d9314b22d8f8c7556443875b731ef05965464
    hooks:
    -   id: check-merge-conflict
    -   id: check-symlinks
    -   id: end-of-file-fixer
    -   id: trailing-whitespace
    -   id: detect-private-key
    -   id: check-added-large-files
-   repo: local
    hooks:
    -   id: flake8
        name: flake8
        entry: flake8 --count --select=E9,F63,F7,F82 --show-source --statistics
        language: system
        files: \.py$
-   repo: local
    hooks:
    -   id: clang-format-with-version-check
        name: clang-format
        description: Format files with ClangFormat
        entry: bash .clang_format.hook -style=Google -i
        language: system
        files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx|cuh|proto)$
```
### apollo_public_repos/apollo-model-centerpoint/.clang_format.hook
```bash
#!/bin/bash
set -e

readonly VERSIONS=("10.0.0" "10.0.1")

version=$(clang-format -version)

for v in ${VERSIONS[*]}; do
    if [[ "${version}" == *"${v}"* ]]; then
        clang-format $@
        exit 0
    fi
done

echo "clang-format version check failed."
echo "a version in ${VERSIONS[*]} is needed, but get ${version}"
echo "please install the right version via 'pip install clang-format==version'"
exit -1
```
### apollo_public_repos/apollo-model-centerpoint/README.md
# Apollo CenterPoint

This project provides the training and deployment code for CenterPoint, the LiDAR 3D object detection algorithm used in the open-source autonomous driving platform Apollo.

## Introduction

CenterPoint is an anchor-free 3D object detector. It takes a point cloud as input, treats the bird's-eye-view center of each 3D object as a keypoint, and regresses the object's size, orientation, and velocity from the detected keypoints. Compared with anchor-based 3D detectors, CenterPoint requires no hand-tuned anchor sizes, achieves higher accuracy in scenes where object sizes vary widely, and its simple model design also makes it more efficient.

<div align=center>
<img src="images/centerpoint.png" width="1200"/>
</div>

Apollo applied a series of optimizations to CenterPoint that substantially improved both detection quality and generalization, enabling real-time, accurate, and stable 3D object detection in complex urban road scenes.

On the model side:

* **Better detection, stronger generalization.** CenterPoint was trained and optimized on millions of real road-test frames; precision and recall improved by more than 20% over the widely deployed CNNSeg model, significantly strengthening detection and generalization.
* **New detection of urban road markers.** Adds detection of traffic cones, water-filled barriers, crash barrels, signboards, and similar objects, greatly improving the safety of autonomous driving.
* **Lower training and development cost, better usability.** New features in the code include layer-freezing finetuning, fp16 training, and training/evaluation on custom datasets, making it much easier to get started.

On the deployment side:

* **Markedly higher recall for nearby pedestrians and small objects.** Targeted tuning of pre/post-processing, configuration, and model inference, plus a fix for inconsistent inference-side results, raised recall for pedestrians and small objects.
* **Polygon output for more stable tracking.** The obstacle point-cloud extraction logic was improved so that CenterPoint outputs accurate polygon information, further stabilizing tracking.
* **Much lower inference latency and GPU usage.** TensorRT + fp16 and int8 inference (with tutorials) greatly reduce inference time and GPU usage while preserving detection quality, meeting real-time requirements on low-compute platforms.

Visualized detection results:

<div align=center>
<img src="images/centerpoint_result2.png" width="1200"/>
</div>
<div align=center>
<img src="images/centerpoint_result1.png" width="1200"/>
</div>

Model downloads:

| Model file | Download link |
| -- | -- |
| Apollo CenterPoint trained weights | [Link](https://apollo-pkg-beta.bj.bcebos.com/perception_model/centerpoint_core_pretrained_model.zip) |
| Apollo CenterPoint deployable files | [Link](https://apollo-pkg-beta.bj.bcebos.com/perception_model/center_point_paddle.zip) |

## Development

Developers can build on this code to quickly and conveniently do the following:

* **Study and research**: train, evaluate, export, and deploy CenterPoint on the public KITTI and NuScenes datasets.
* **Incremental training**: finetune CenterPoint on a custom dataset to improve detection in user-specific scenes.
* **Apollo perception challenge**: train CenterPoint on the Apolloscape dataset to complete the Apollo perception challenge.

For environment setup and installation, see [installation](./docs/installation.md).

The complete training pipeline and configuration are described in [centerpoint](./configs/centerpoint/); a short walkthrough on the KITTI dataset follows.

First prepare the KITTI dataset; download it from the [official site](http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d). Unpack the data and organize it as follows:

```
kitti_dataset_root
|—— training
|   |—— label_2
|   |   |—— 000001.txt
|   |   |—— ...
|   |—— calib
|   |   |—— 000001.txt
|   |   |—— ...
|   |—— velodyne
|   |   |—— 000001.bin
|   |   |—— ...
|—— ImageSets
|   |—— test.txt
|   |—— train.txt
|   |—— trainval.txt
|   |—— val.txt
```

In the Paddle3D directory, create a symlink `datasets/KITTI` pointing at the dataset directory above:

```
mkdir datasets
ln -s /path/to/kitti_dataset_root ./datasets
mv ./datasets/kitti_dataset_root ./datasets/KITTI
```

Generate the ground-truth database used for data augmentation during training:

```
python tools/create_det_gt_database.py --dataset_name kitti --dataset_root ./datasets/KITTI --save_dir ./datasets/KITTI
```

```
kitti_train_gt_database
|—— anno_info_train.pkl
|—— Car
|   |—— 4371_Car_7.bin
|   |—— ...
|—— Cyclist
```

Train on the KITTI dataset with 8 GPUs:

```
python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py --config configs/centerpoint/centerpoint_pillars_016voxel_kitti.yml --save_dir ./output_kitti --num_workers 4 --save_interval 5
```

Evaluate the model:

```
python tools/evaluate.py --config configs/centerpoint/centerpoint_pillars_016voxel_kitti.yml --model ./output_kitti/epoch_160/model.pdparams --batch_size 1 --num_workers 4
```

Export the inference model, converting the dynamic-graph checkpoint saved during training into the static-graph files that the inference engine can load:

```
python tools/export.py --config configs/centerpoint/centerpoint_pillars_02voxel_nuscenes_10sweep.yml --model /path/to/model.pdparams --save_dir /path/to/output
```

## Other resources

* [Apollo autonomous driving platform](https://github.com/ApolloAuto/apollo)
* [2023 Spark Training perception camp: perception model training and deployment](https://www.bilibili.com/video/BV1RV411c7Xp/)
* [CenterPoint model training and deployment](https://apollo.baidu.com/community/article/1141)

# Paddle3D

## 🌈 Introduction

Paddle3D is PaddlePaddle's official open-source, end-to-end deep-learning 3D perception toolkit. It covers many state-of-the-art and classic 3D perception models, supports multiple modalities and multiple tasks, and helps developers conveniently complete the full train-to-deploy workflow for **autonomous driving** models.

<div align="center">
<p align="center">
<img src="https://user-images.githubusercontent.com/29754889/185546875-b8296cf4-f298-494b-8c15-201a2559d7ea.gif" align="middle" width="980"/>
</p>
</div>

<div align="center">
<p align="center">
<img src="https://user-images.githubusercontent.com/29754889/185551580-828f08d0-d607-4020-9e05-b96110bce7eb.gif" align="middle" width="980"/>
</p>
</div>

## ✨ Key features

### 🧩 Flexible framework design

Core modules such as data processing and backbone networks can be flexibly assembled for all kinds of 3D data formats. 2D perception capabilities can be extended through [PaddleDetection](https://github.com/PaddlePaddle/PaddleDetection) and [PaddleSeg](https://github.com/PaddlePaddle/PaddleSeg), and both an API and scripts are provided for training and evaluation, meeting developers' needs for flexible customization.

### 📱 Rich model zoo

Aggregates mainstream 3D perception algorithms and accuracy-tuning strategies, covering multiple modalities such as monocular and point cloud, and multiple task types such as detection and segmentation.

### 🎗️ End-to-end pipeline

Supports mainstream 3D datasets such as KITTI, nuScenes, and Waymo, and provides the full pipeline from data processing and model building through training, tuning, and deployment. Model performance is heavily optimized, mainstream autonomous-driving chips are supported, compute-graph optimization and acceleration libraries such as TensorRT/OpenVINO are integrated, and ready-to-use deployment tutorials let you deploy a model in five minutes.

### 🏆 Seamless Apollo integration

Connects directly to the Apollo autonomous driving platform, supporting rapid validation on real vehicles and in simulation as well as high-performance multi-modal model fusion, for efficiently building a full-stack autonomous-driving solution.

<div align="center">
<p align="center">
<img src="https://user-images.githubusercontent.com/61035602/209662380-6f67d4df-12a1-43b0-a79e-424eb4f4dc75.png" align="middle" width="980"/>
</p>
</div>

## 📣 Latest news

**💎 Stable release**: on the [`main branch`](https://github.com/PaddlePaddle/Paddle3D); Paddle3D v1.0 has been officially released, see the [release note](https://github.com/PaddlePaddle/Paddle3D/releases/tag/v1.0) for details.

**🧬 Preview**: on the [`develop`](https://github.com/PaddlePaddle/Paddle3D/tree/develop) branch; switch to [that branch](https://github.com/PaddlePaddle/Paddle3D/tree/develop) to try the latest features.

## 👫 Open-source community

- **📑 Project cooperation:** If you are an enterprise developer with a concrete object-detection application need, scan the QR code below to join the group and contact the `group admin AI` to arrange cooperation with the official team, free of charge.
- **🏅️ Community contributions:** Paddle3D warmly welcomes you to contribute to the PaddlePaddle open-source ecosystem; see the [open-source development guide](https://www.paddlepaddle.org.cn/documentation/docs/zh/dev_guides/index_cn.html) for how to participate.
- **💻 Live tutorials:** Paddle3D regularly streams sessions on new features, industrial case studies, and how-to tutorials in the PaddlePaddle live channels ([Bilibili: 飞桨PaddlePaddle](https://space.bilibili.com/476867757), [WeChat: 飞桨PaddlePaddle](https://mp.weixin.qq.com/s/6ji89VKqoXDY6SSGkxS8NQ)).

<div align="center">
<img src="https://user-images.githubusercontent.com/61035602/209660514-4285abea-a855-44c4-9533-f2e90b9ca608.jpeg" width = "150" height = "150",caption='' />
<p>QR code for the official Paddle3D technical discussion group</p>
</div>

- **🎈 Recent community events**
  - **🎗️ Paddle3D v1.0 release deep-dive**
    - `Article`: [Paddle3D official release! BEV, monocular, and LiDAR 3D perception algorithms out of the box, seamlessly integrated with Apollo](https://mp.weixin.qq.com/s/LL0DgKxEVsfhpFO6HedQ7Q)

<div align="center">
<img src="https://user-images.githubusercontent.com/61035602/210311019-bdb15ec8-e8b9-471c-aa1d-d2f953a6939a.png" height = "250" caption='' />
<p></p>
</div>

  - **🚦 Demystifying the autonomous-driving perception system**
    - `Recording & slides`: [Demystifying the autonomous-driving perception system](https://aistudio.baidu.com/aistudio/education/group/info/26961)

<div align="center">
<img src="https://user-images.githubusercontent.com/61035602/210315230-83ace5d1-1851-4d9b-b305-4290edf9dde8.png" height = "300" caption='' />
<p></p>
</div>

### 📱 Model zoo

<table align="center">
<tbody>
<tr align="center" valign="center">
<td><b>Monocular 3D perception</b></td>
<td><b>LiDAR 3D perception</b></td>
<td><b>Multi-camera 3D perception</b></td>
<td><b>Backbone networks</b></td>
</tr>
<tr valign="top">
<td>
<li><b>Detection</b></li>
<ul>
<li><a href="docs/models/caddn">CaDDN</a></li>
<li><a href="docs/models/smoke">SMOKE</a></li>
<li><a href="docs/models/dd3d">DD3D</a></li>
</ul>
</td>
<td>
<li><b>Detection</b></li>
<ul>
<li><a href="docs/models/pointpillars">PointPillars</a></li>
<li><a href="docs/models/centerpoint">CenterPoint</a></li>
<li><a href="docs/models/iassd">IA-SSD</a></li>
<li><a href="docs/models/pv_rcnn">PV-RCNN</a></li>
<li><a href="docs/models/voxel_rcnn">Voxel-RCNN</a></li>
<li><a href="docs/models/paconv">PAConv</a></li>
</ul>
<li><b>Segmentation</b></li>
<ul>
<li><a href="docs/models/squeezesegv3">SqueezeSegV3</a></li>
</ul>
</td>
<td>
<li><b>BEV-Camera</b></li>
<ul>
<li><a href="docs/models/petr">PETR</a></li>
<li><a href="docs/models/petr">PETRv2</a></li>
<li><a href="docs/models/bevformer">BEVFormer</a></li>
</ul>
</td>
<td>
<ul>
<li><a href="paddle3d/models/backbones">DLA</a></li>
<li><a href="paddle3d/models/backbones">HRNet</a></li>
<li><a href="paddle3d/models/backbones">ResNet</a></li>
<li><a href="paddle3d/models/backbones">Transformer</a></li>
</ul>
</td>
</tr>
</tbody>
</table>

## 🔥 Tutorials

* [Installation](./docs/installation.md)
* [Quick start](./docs/quickstart.md)
* [Custom data preparation](./docs/datasets/custom.md)
* [Configuration guide](./docs/configuration.md)
* [API](./docs/api.md)
* Paddle3D & Apollo integration examples
  * [Camera perception algorithm integration example](https://apollo.baidu.com/community/Apollo-Homepage-Document/Apollo_Doc_CN_8_0/camera)
  * [Point-cloud perception algorithm integration example](https://apollo.baidu.com/community/Apollo-Homepage-Document/Apollo_Doc_CN_8_0/lidar)
* [FAQ](./docs/faq.md)
* [Release notes](./docs/release_note.md)

## 💡 Industrial practice examples

Industrial practice examples are end-to-end development walkthroughs that Paddle3D provides for 3D object detection scenarios, helping developers connect the full pipeline of data annotation, model training, model tuning, and deployment. Each example comes with project code and a write-up on [AI-Studio](https://ai.baidu.com/ai-doc/AISTUDIO/Tk39ty6ho) that you can run alongside the text.

- [[Autonomous driving in practice] Point-cloud 3D object detection with Paddle3D & Apollo](https://aistudio.baidu.com/aistudio/projectdetail/5268894)
- [[Autonomous driving in practice] Monocular 3D object detection with Paddle3D & Apollo](https://aistudio.baidu.com/aistudio/projectdetail/5269115)

## 📝 License

This project is released under the [Apache 2.0 license](./LICENSE).
### apollo_public_repos/apollo-model-centerpoint/setup.py
```python
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from functools import partial

from setuptools import find_packages, setup

import paddle3d

with open("requirements.txt") as fin:
    REQUIRED_PACKAGES = fin.read()


def get_all_files(directory: str):
    all_files = []
    for root, _, files in os.walk(directory):
        root = os.path.relpath(root, directory)
        for file in files:
            filepath = os.path.join(root, file)
            all_files.append(filepath)
    return all_files


def get_data_files(directory: str, data: list = None, filetypes: list = None):
    all_files = []
    data = data or []
    filetypes = filetypes or []

    for file in get_all_files(directory):
        filetype = os.path.splitext(file)[1][1:]
        filename = os.path.basename(file)
        if file in data:
            all_files.append(file)
        elif filetype in filetypes:
            all_files.append(file)
    return all_files


get_cpp_files = partial(
    get_data_files, filetypes=['h', 'hpp', 'cpp', 'cc', 'cu'])

setup(
    name='paddle3d',
    version=paddle3d.__version__.replace('-', ''),
    # TODO: add description
    description=(''),
    long_description='',
    url='https://github.com/PaddlePaddle/Paddle3D',
    author='PaddlePaddle Author',
    author_email='',
    install_requires=REQUIRED_PACKAGES,
    packages=find_packages(),
    package_data={
        'paddle3d.ops': get_cpp_files('paddle3d/ops'),
        'paddle3d.thirdparty': get_all_files('paddle3d/thirdparty')
    },
    # PyPI package information.
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Mathematics',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Software Development',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    license='Apache 2.0',
    keywords=(
        'paddle3d paddlepaddle pointcloud detection classification segmentation'
    ))
```
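As a quick illustration of how the `package_data` entries above are assembled, this hedged sketch calls the same helpers directly; the directory arguments are the ones already passed in `setup()`:

```python
# Hedged sketch: get_data_files() keeps files whose extension is in `filetypes`,
# so get_cpp_files() gathers the C++/CUDA sources shipped under paddle3d/ops,
# while get_all_files() takes everything under paddle3d/thirdparty.
cpp_sources = get_cpp_files('paddle3d/ops')
thirdparty_files = get_all_files('paddle3d/thirdparty')
print(len(cpp_sources), len(thirdparty_files))
```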
### apollo_public_repos/apollo-model-centerpoint/.style.yapf
```
[style]
based_on_style = pep8
column_limit = 80
```
### apollo_public_repos/apollo-model-centerpoint/tools/create_waymo_infos.py
```python
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import multiprocessing
import os
import pickle
from collections import defaultdict
from typing import List, Tuple, Union

import numpy as np
from tqdm import tqdm

from paddle3d.datasets.waymo import WaymoPCDataset
from paddle3d.geometries import BBoxes3D
from paddle3d.geometries.bbox import get_mask_of_points_in_bboxes3d
from paddle3d.utils.logger import logger


def create_waymo_gt_database(dataset_root,
                             class_names,
                             save_path=None,
                             sampled_interval=1,
                             use_point_dim=5):
    if save_path is None:
        save_path = dataset_root
    save_path = os.path.join(save_path, "waymo_train_gt_database")

    dataset = WaymoPCDataset(
        dataset_root=dataset_root,
        sampled_interval=sampled_interval,
        mode="train",
        class_names=class_names)

    database = defaultdict(list)
    for data_idx in tqdm(range(0, len(dataset), sampled_interval)):
        sample = dataset[data_idx]
        points = sample.data
        bboxes_3d = sample.bboxes_3d
        labels = sample.labels  # starts from 0
        difficulties = sample.difficulties
        box_names = np.array(class_names)[labels]

        # sampling 1/4 of the "Vehicle" class
        if data_idx % 4 != 0 and len(box_names) > 0:
            mask = (box_names == "Vehicle")
            box_names = box_names[~mask]
            difficulties = difficulties[~mask]
            bboxes_3d = BBoxes3D(
                data=bboxes_3d[~mask],
                coordmode=bboxes_3d.coordmode,
                origin=bboxes_3d.origin)

        # sampling 1/2 of the "Pedestrian" class
        if data_idx % 2 != 0 and len(box_names) > 0:
            mask = (box_names == "Pedestrian")
            box_names = box_names[~mask]
            difficulties = difficulties[~mask]
            bboxes_3d = BBoxes3D(
                data=bboxes_3d[~mask],
                coordmode=bboxes_3d.coordmode,
                origin=bboxes_3d.origin)

        num_bboxes = len(bboxes_3d)
        if num_bboxes == 0:
            continue

        # TODO(liuxiao): get_mask could be accelerated
        masks = get_mask_of_points_in_bboxes3d(points, bboxes_3d)

        for box_idx in range(num_bboxes):
            box_name = box_names[box_idx]
            if box_name not in class_names:
                continue
            mask = masks[:, box_idx]
            selected_points = points[mask]
            selected_points[:, :3] -= bboxes_3d[box_idx, :3]

            if not os.path.exists(os.path.join(save_path, box_name)):
                os.makedirs(os.path.join(save_path, box_name))
            lidar_file = os.path.join(
                os.path.join(save_path, box_name),
                "{}_{}_{}.bin".format(data_idx, box_name, box_idx))
            with open(lidar_file, "w") as f:
                selected_points.tofile(f)

            anno_info = {
                "lidar_file": os.path.join(
                    "waymo_train_gt_database", box_name,
                    "{}_{}_{}.bin".format(data_idx, box_name, box_idx)),
                "cls_name": box_name,
                "bbox_3d": bboxes_3d[box_idx, :],
                "box_idx": box_idx,
                "data_idx": data_idx,
                "num_points_in_box": selected_points.shape[0],
                "lidar_dim": use_point_dim,
                "difficulty": difficulties[box_idx]
            }
            database[box_name].append(anno_info)

    for k, v in database.items():
        logger.info("Database %s: %d" % (k, len(v)))

    db_anno_file = os.path.join(save_path, "waymo_train_gt_database_infos.pkl")
    with open(db_anno_file, 'wb') as f:
        pickle.dump(database, f)


def get_infos(raw_data_path,
              save_path,
              sample_sequence_list,
              num_workers=multiprocessing.cpu_count(),
              sampled_interval=1):
    from functools import partial
    from paddle3d.datasets.waymo import waymo_utils

    logger.info(
        "---------------The waymo sample interval is %d, total sequences is %d-----------------"
        % (sampled_interval, len(sample_sequence_list)))

    process_single_sequence = partial(
        waymo_utils.process_single_sequence,
        save_path=save_path,
        sampled_interval=sampled_interval)
    sample_sequence_file_list = [
        os.path.join(raw_data_path, sequence_file)
        for sequence_file in sample_sequence_list
    ]

    p = multiprocessing.Pool(num_workers)
    sequence_infos = list(
        tqdm(
            p.map(process_single_sequence, sample_sequence_file_list),
            total=len(sample_sequence_file_list)))
    p.close()
    p.join()

    all_sequences_infos = [item for infos in sequence_infos for item in infos]
    return all_sequences_infos


def create_waymo_infos(dataset_root,
                       class_names,
                       save_path,
                       raw_data_tag,
                       processed_data_tag,
                       num_workers=min(16, multiprocessing.cpu_count())):
    os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
    logger.info("---------------Start to generate data infos---------------")

    dataset = WaymoPCDataset(
        dataset_root=dataset_root,
        sampled_interval=1,
        mode="train",
        class_names=class_names,
        processed_data_tag=processed_data_tag)
    waymo_infos_train = get_infos(
        raw_data_path=os.path.join(dataset_root, raw_data_tag),
        save_path=os.path.join(save_path, processed_data_tag),
        sample_sequence_list=dataset.sample_sequence_list,
        num_workers=num_workers,
        sampled_interval=1  # save all infos
    )
    logger.info("----------------Waymo train info is saved-----------------")

    dataset = WaymoPCDataset(
        dataset_root=dataset_root,
        sampled_interval=1,
        mode="val",
        class_names=class_names,
        processed_data_tag=processed_data_tag)
    waymo_infos_val = get_infos(
        raw_data_path=os.path.join(dataset_root, raw_data_tag),
        save_path=os.path.join(save_path, processed_data_tag),
        sample_sequence_list=dataset.sample_sequence_list,
        num_workers=num_workers,
        sampled_interval=1  # save all infos
    )
    logger.info("----------------Waymo val info is saved-----------------")

    logger.info("-------------------Create gt database-------------------")
    create_waymo_gt_database(
        dataset_root=dataset_root,
        class_names=class_names,
        save_path=save_path,
        sampled_interval=1,  # sampling all gt
        use_point_dim=5)
    logger.info("-------------------Create gt database done-------------------")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Create infos and gt database")
    parser.add_argument(
        "--processed_data_tag",
        type=str,
        default="waymo_processed_data_v1_3_2",
        help="")
    args = parser.parse_args()

    create_waymo_infos(
        dataset_root="./datasets/waymo",
        class_names=["Vehicle", "Pedestrian", "Cyclist"],
        save_path="./datasets/waymo",
        raw_data_tag="raw_data",
        processed_data_tag=args.processed_data_tag)
```
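Each object crop is dumped with `ndarray.tofile`, so it can be read back with `numpy.fromfile`. A hedged sketch follows; the file name is hypothetical, and the `float32` dtype is an assumption (only the 5-value point layout, `use_point_dim=5`, is stated above):

```python
# Hedged sketch: re-load one per-object point file written by
# create_waymo_gt_database(). The path is hypothetical and float32 is assumed.
import numpy as np

raw = np.fromfile(
    "datasets/waymo/waymo_train_gt_database/Vehicle/0_Vehicle_0.bin",
    dtype=np.float32)
points = raw.reshape(-1, 5)  # x, y, z plus two extra channels per point
print(points.shape)
```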
### apollo_public_repos/apollo-model-centerpoint/tools/create_det_gt_database.py
```python
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse

from paddle3d.datasets.generate_gt_database import (
    generate_kitti_gt_database, generate_nuscenes_gt_database,
    generate_apollo_gt_database)


def parse_args():
    parser = argparse.ArgumentParser(
        description='Create a ground truth database for a dataset.')
    parser.add_argument(
        '--config',
        dest='config',
        help='path of config file',
        type=str,
        default='')
    parser.add_argument(
        '--dataset_name',
        dest='dataset_name',
        help='Name of the dataset: nuscenes, kitti, or apollo.',
        type=str)
    parser.add_argument(
        '--dataset_root',
        dest='dataset_root',
        help='Path of the dataset.',
        type=str)
    parser.add_argument(
        '--save_dir',
        dest='save_dir',
        help='Path to save the generated database.',
        type=str)

    return parser.parse_args()


def main(args):
    if args.dataset_name.lower() == 'nuscenes':
        generate_nuscenes_gt_database(args.dataset_root, save_dir=args.save_dir)
    elif args.dataset_name.lower() == 'kitti':
        generate_kitti_gt_database(args.dataset_root, save_dir=args.save_dir)
    elif args.dataset_name.lower() == 'apollo':
        generate_apollo_gt_database(args.config)
    else:
        raise ValueError(
            f"Database generation is not supported for the {args.dataset_name} dataset."
        )


if __name__ == '__main__':
    args = parse_args()
    main(args)
```
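Since `main()` simply dispatches on `--dataset_name`, the generators can also be called programmatically. A hedged sketch mirroring the KITTI branch above, with illustrative paths:

```python
# Hedged sketch: call the KITTI branch of main() directly, passing the same
# arguments the CLI would. The dataset paths are illustrative only.
from paddle3d.datasets.generate_gt_database import generate_kitti_gt_database

generate_kitti_gt_database("./datasets/KITTI", save_dir="./datasets/KITTI")
```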
### apollo_public_repos/apollo-model-centerpoint/tools/create_bevformer_nus_infos.py
```python
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# Modify from https://github.com/fundamentalvision/BEVFormer/blob/master/tools/create_data.py
# Copyright (c) OpenMMLab. All rights reserved.
# ------------------------------------------------------------------------
# Modified from mmdetection3d (https://github.com/open-mmlab/mmdetection3d)
# Copyright (c) OpenMMLab. All rights reserved.
# ------------------------------------------------------------------------

import argparse
import os
import pickle
from pathlib import Path

import numpy as np
import tqdm
from nuscenes import NuScenes
from nuscenes.can_bus.can_bus_api import NuScenesCanBus
from nuscenes.utils import splits as nuscenes_split
from nuscenes.utils.data_classes import Box as NuScenesBox
from nuscenes.utils.geometry_utils import transform_matrix
from pyquaternion import Quaternion

from paddle3d.datasets.nuscenes import NuscenesMVDataset
from paddle3d.datasets.nuscenes.nuscenes_det import NuscenesDetDataset
from paddle3d.utils.logger import logger

SENSORS = [
    'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_BACK_RIGHT', 'CAM_BACK',
    'CAM_BACK_LEFT', 'CAM_FRONT_LEFT'
]


def parse_args():
    parser = argparse.ArgumentParser(
        description='Create infos for nuscenes dataset.')
    parser.add_argument(
        '--dataset_root',
        default='data/nuscenes',
        help='Path of the dataset.',
        type=str)
    parser.add_argument(
        '--can_bus_root',
        type=str,
        default='data/nuscenes',
        help='specify the root path of nuScenes canbus')
    parser.add_argument(
        '--save_dir',
        default='data/nuscenes',
        help='Path to save the generated database.',
        type=str)
    parser.add_argument(
        '--mode', default='train', help='mode to generate dataset.', type=str)
    parser.add_argument(
        '--num_sweep',
        default=10,
        help='number of sweep frames between two key frames.',
        type=int)

    return parser.parse_args()


def is_filepath(x):
    return isinstance(x, str) or isinstance(x, Path)


def get_available_scenes(nusc):
    """Get available scenes from the input nuscenes class."""
    available_scenes = []
    logger.info('total scene num: {}'.format(len(nusc.scene)))
    for scene in nusc.scene:
        scene_token = scene['token']
        scene_rec = nusc.get('scene', scene_token)
        sample_rec = nusc.get('sample', scene_rec['first_sample_token'])
        sd_rec = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP'])
        has_more_frames = True
        scene_not_exist = False
        while has_more_frames:
            lidar_path, boxes, _ = nusc.get_sample_data(sd_rec['token'])
            lidar_path = str(lidar_path)
            if os.getcwd() in lidar_path:
                # path from lyftdataset is an absolute path
                lidar_path = lidar_path.split(f'{os.getcwd()}/')[-1]  # relative path
            if not is_filepath(lidar_path):
                scene_not_exist = True
                break
            else:
                break
        if scene_not_exist:
            continue
        available_scenes.append(scene)
    logger.info('exist scene num: {}'.format(len(available_scenes)))
    return available_scenes


def obtain_sensor2top(nusc,
                      sensor_token,
                      l2e_t,
                      l2e_r_mat,
                      e2g_t,
                      e2g_r_mat,
                      sensor_type='lidar'):
    """Obtain the info with the RT matrix from a general sensor to Top LiDAR."""
    sd_rec = nusc.get('sample_data', sensor_token)
    cs_record = nusc.get('calibrated_sensor', sd_rec['calibrated_sensor_token'])
    pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token'])
    data_path = str(nusc.get_sample_data_path(sd_rec['token']))  # absolute path
    if os.getcwd() in data_path:
        # path from lyftdataset is an absolute path
        data_path = data_path.split(f'{os.getcwd()}/')[-1]  # relative path
    sweep = {
        'data_path': nusc.get('sample_data', sd_rec['token'])['filename'],  # relative path
        'type': sensor_type,
        'sample_data_token': sd_rec['token'],
        'sensor2ego_translation': cs_record['translation'],
        'sensor2ego_rotation': cs_record['rotation'],
        'ego2global_translation': pose_record['translation'],
        'ego2global_rotation': pose_record['rotation'],
        'timestamp': sd_rec['timestamp']
    }
    l2e_r_s = sweep['sensor2ego_rotation']
    l2e_t_s = sweep['sensor2ego_translation']
    e2g_r_s = sweep['ego2global_rotation']
    e2g_t_s = sweep['ego2global_translation']

    # obtain the RT from sensor to Top LiDAR
    # sweep->ego->global->ego'->lidar
    l2e_r_s_mat = Quaternion(l2e_r_s).rotation_matrix
    e2g_r_s_mat = Quaternion(e2g_r_s).rotation_matrix
    R = (l2e_r_s_mat.T @ e2g_r_s_mat.T) @ (
        np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T)
    T = (l2e_t_s @ e2g_r_s_mat.T + e2g_t_s) @ (
        np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T)
    T -= e2g_t @ (np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T
                  ) + l2e_t @ np.linalg.inv(l2e_r_mat).T
    sweep['sensor2lidar_rotation'] = R.T  # points @ R.T + T
    sweep['sensor2lidar_translation'] = T
    return sweep


def _get_can_bus_info(nusc, nusc_can_bus, sample):
    scene_name = nusc.get('scene', sample['scene_token'])['name']
    sample_timestamp = sample['timestamp']
    try:
        pose_list = nusc_can_bus.get_messages(scene_name, 'pose')
    except:
        return np.zeros(18)  # server scenes do not have can bus information

    can_bus = []
    # during each scene, the first timestamp of can_bus may be larger than
    # the first sample's timestamp
    last_pose = pose_list[0]
    for i, pose in enumerate(pose_list):
        if pose['utime'] > sample_timestamp:
            break
        last_pose = pose
    _ = last_pose.pop('utime')  # useless
    pos = last_pose.pop('pos')
    rotation = last_pose.pop('orientation')
    can_bus.extend(pos)
    can_bus.extend(rotation)
    for key in last_pose.keys():
        can_bus.extend(pose[key])  # 16 elements
    can_bus.extend([0., 0.])
    return np.array(can_bus)


def fill_trainval_infos(nusc,
                        nusc_can_bus,
                        train_scenes,
                        val_scenes,
                        test=False,
                        max_sweeps=10):
    """Generate the train/val infos from the raw data."""
    train_nusc_infos = []
    val_nusc_infos = []
    frame_idx = 0

    msg = "Begin to generate infos of the nuScenes dataset."
    for sample_idx in logger.range(len(nusc.sample), msg=msg):
        sample = nusc.sample[sample_idx]
        lidar_token = sample['data']['LIDAR_TOP']
        sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP'])
        cs_record = nusc.get('calibrated_sensor',
                             sd_rec['calibrated_sensor_token'])
        pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token'])
        lidar_path, boxes, _ = nusc.get_sample_data(lidar_token)

        assert os.path.exists(lidar_path)  # absolute path

        can_bus = _get_can_bus_info(nusc, nusc_can_bus, sample)

        info = {
            'lidar_token': lidar_token,
            'lidar_path': nusc.get('sample_data', lidar_token)['filename'],  # relative path
            'token': sample['token'],
            'prev': sample['prev'],
            'next': sample['next'],
            'can_bus': can_bus,
            'frame_idx': frame_idx,  # temporal related info
            'sweeps': [],
            'cams': dict(),
            'scene_token': sample['scene_token'],
            'lidar2ego_translation': cs_record['translation'],
            'lidar2ego_rotation': cs_record['rotation'],
            'ego2global_translation': pose_record['translation'],
            'ego2global_rotation': pose_record['rotation'],
            'timestamp': sample['timestamp'],
        }

        if sample['next'] == '':
            frame_idx = 0
        else:
            frame_idx += 1

        l2e_r = info['lidar2ego_rotation']
        l2e_t = info['lidar2ego_translation']
        e2g_r = info['ego2global_rotation']
        e2g_t = info['ego2global_translation']
        l2e_r_mat = Quaternion(l2e_r).rotation_matrix
        e2g_r_mat = Quaternion(e2g_r).rotation_matrix

        # obtain 6 images' information per frame
        camera_types = [
            'CAM_FRONT',
            'CAM_FRONT_RIGHT',
            'CAM_FRONT_LEFT',
            'CAM_BACK',
            'CAM_BACK_LEFT',
            'CAM_BACK_RIGHT',
        ]
        for cam in camera_types:
            cam_token = sample['data'][cam]
            cam_path, _, cam_intrinsic = nusc.get_sample_data(cam_token)
            cam_info = obtain_sensor2top(nusc, cam_token, l2e_t, l2e_r_mat,
                                         e2g_t, e2g_r_mat, cam)
            cam_info.update(cam_intrinsic=cam_intrinsic)
            info['cams'].update({cam: cam_info})

        # obtain sweeps for a single key-frame
        sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP'])
        sweeps = []
        while len(sweeps) < max_sweeps:
            if not sd_rec['prev'] == '':
                sweep = obtain_sensor2top(nusc, sd_rec['prev'], l2e_t,
                                          l2e_r_mat, e2g_t, e2g_r_mat, 'lidar')
                sweeps.append(sweep)
                sd_rec = nusc.get('sample_data', sd_rec['prev'])
            else:
                break
        info['sweeps'] = sweeps

        # obtain annotation
        if not test:
            annotations = [
                nusc.get('sample_annotation', token) for token in sample['anns']
            ]
            locs = np.array([b.center for b in boxes]).reshape(-1, 3)
            dims = np.array([b.wlh for b in boxes]).reshape(-1, 3)
            rots = np.array([b.orientation.yaw_pitch_roll[0]
                             for b in boxes]).reshape(-1, 1)
            velocity = np.array(
                [nusc.box_velocity(token)[:2] for token in sample['anns']])
            valid_flag = np.array(
                [(anno['num_lidar_pts'] + anno['num_radar_pts']) > 0
                 for anno in annotations],
                dtype=bool).reshape(-1)

            # convert velo from global to lidar
            for i in range(len(boxes)):
                velo = np.array([*velocity[i], 0.0])
                velo = velo @ np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(
                    l2e_r_mat).T
                velocity[i] = velo[:2]

            names = [b.name for b in boxes]
            for i in range(len(names)):
                # NuscenesDetDataset.LABEL_MAP
                if names[i] in NuscenesDetDataset.LABEL_MAP:
                    names[i] = NuscenesDetDataset.LABEL_MAP[names[i]]
            names = np.array(names)

            # we need to convert box size to
            # the format of our lidar coordinate system
            # which is x_size, y_size, z_size (corresponding to l, w, h)
            gt_boxes = np.concatenate([locs, dims, -rots - np.pi / 2], axis=1)
            assert len(gt_boxes) == len(
                annotations), f'{len(gt_boxes)}, {len(annotations)}'
            info['gt_boxes'] = gt_boxes
            info['gt_names'] = names
            info['gt_velocity'] = velocity.reshape(-1, 2)
            info['num_lidar_pts'] = np.array(
                [a['num_lidar_pts'] for a in annotations])
            info['num_radar_pts'] = np.array(
                [a['num_radar_pts'] for a in annotations])
            info['valid_flag'] = valid_flag

        if sample['scene_token'] in train_scenes:
            train_nusc_infos.append(info)
        else:
            val_nusc_infos.append(info)

    return train_nusc_infos, val_nusc_infos


def build_bevformer_nuscenes_data(dataset_root,
                                  is_test,
                                  nusc,
                                  nusc_can_bus,
                                  version,
                                  max_sweeps=10):
    if version == 'v1.0-trainval':
        train_scenes = nuscenes_split.train
        val_scenes = nuscenes_split.val
    elif version == 'v1.0-test':
        train_scenes = nuscenes_split.test
        val_scenes = []
    elif version == 'v1.0-mini':
        train_scenes = nuscenes_split.mini_train
        val_scenes = nuscenes_split.mini_val
    else:
        raise ValueError('unknown nuscenes dataset version')

    available_scenes = get_available_scenes(nusc)
    available_scene_names = [s['name'] for s in available_scenes]
    train_scenes = list(
        filter(lambda x: x in available_scene_names, train_scenes))
    val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes))
    train_scenes = set([
        available_scenes[available_scene_names.index(s)]['token']
        for s in train_scenes
    ])
    val_scenes = set([
        available_scenes[available_scene_names.index(s)]['token']
        for s in val_scenes
    ])

    if is_test:
        print('test scene: {}'.format(len(train_scenes)))
    else:
        print('train scene: {}, val scene: {}'.format(
            len(train_scenes), len(val_scenes)))

    train_nusc_infos, val_nusc_infos = fill_trainval_infos(
        nusc,
        nusc_can_bus,
        train_scenes,
        val_scenes,
        is_test,
        max_sweeps=max_sweeps)

    metadata = dict(version=version)
    if is_test:
        print('test sample: {}'.format(len(train_nusc_infos)))
        data = dict(infos=train_nusc_infos, metadata=metadata)
        return [data]
    else:
        print('train sample: {}, val sample: {}'.format(
            len(train_nusc_infos), len(val_nusc_infos)))
        train_data = dict(infos=train_nusc_infos, metadata=metadata)
        val_data = dict(infos=val_nusc_infos, metadata=metadata)
        return train_data, val_data


def main(args):
    dataset_root = args.dataset_root
    can_bus_root = args.can_bus_root
    save_dir = args.save_dir
    num_sweep = args.num_sweep
    version = NuscenesDetDataset.VERSION_MAP[args.mode]

    nuscenes = NuScenes(version=version, dataroot=dataset_root, verbose=False)
    nusc_can_bus = NuScenesCanBus(dataroot=can_bus_root)

    is_test = 'test' in args.mode
    if is_test:
        test_ann_cache_file = os.path.join(
            save_dir, 'bevformer_nuscenes_annotation_test.pkl')
        if os.path.exists(test_ann_cache_file):
            raise OSError(
                "{} annotation file already exists!".format(test_ann_cache_file))
    else:
        train_ann_cache_file = os.path.join(
            save_dir, 'bevformer_nuscenes_annotation_train.pkl')
        val_ann_cache_file = os.path.join(
            save_dir, 'bevformer_nuscenes_annotation_val.pkl')
        if os.path.exists(train_ann_cache_file):
            raise OSError(
                "{} annotation file already exists!".format(train_ann_cache_file))
        if os.path.exists(val_ann_cache_file):
            raise OSError(
                "{} annotation file already exists!".format(val_ann_cache_file))

    infos = build_bevformer_nuscenes_data(dataset_root, is_test, nuscenes,
                                          nusc_can_bus, version, num_sweep)

    if is_test:
        infos_dict = {test_ann_cache_file: infos[0]}
    else:
        infos_dict = {
            train_ann_cache_file: infos[0],
            val_ann_cache_file: infos[1]
        }

    msg = "Adding sweep frame annotations"
    for ann_cache_file, key_infos in infos_dict.items():
        pickle.dump(key_infos, open(ann_cache_file, 'wb'))

    logger.info("---------------Data preparation Done---------------")


if __name__ == '__main__':
    args = parse_args()
    main(args)
```
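`obtain_sensor2top` composes these transforms from `[w, x, y, z]` quaternions and applies them in a row-vector convention (`points @ R.T + T`). A hedged sketch of just that convention, with made-up values:

```python
# Hedged sketch: nuScenes stores rotations as [w, x, y, z] quaternions;
# pyquaternion converts them to 3x3 rotation matrices. Values are illustrative.
import numpy as np
from pyquaternion import Quaternion

e2g_r = [0.9659258, 0.0, 0.0, 0.2588190]  # ~30 degrees about the z axis
e2g_t = np.array([10.0, 5.0, 0.0])

R = Quaternion(e2g_r).rotation_matrix
p_ego = np.array([[1.0, 0.0, 0.0]])       # one point in ego coordinates
p_global = p_ego @ R.T + e2g_t            # same convention as the script above
print(p_global)
```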
### apollo_public_repos/apollo-model-centerpoint/tools/export.py
```python
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import datetime
import os

import yaml

from paddle3d.apis.config import Config
from paddle3d.models.base import BaseDetectionModel
from paddle3d.slim import get_qat_config
from paddle3d.utils.checkpoint import load_pretrained_model
from paddle3d.utils.logger import logger

parser = argparse.ArgumentParser(description='Model Export')


def parse_normal_args():
    normal_args = parser.add_argument_group('model args')

    # params of export
    normal_args.add_argument(
        "--config",
        dest="cfg",
        help="The config file.",
        required=True,
        type=str)
    normal_args.add_argument(
        "--export_for_apollo",
        dest="export_for_apollo",
        help="Whether to export to the deployment format supported by Apollo.",
        action='store_true')
    normal_args.add_argument(
        '--model',
        dest='model',
        help='pretrained parameters of the model',
        type=str,
        default=None)
    normal_args.add_argument(
        '--save_dir',
        dest='save_dir',
        help='The directory saving inference params.',
        type=str,
        default="./exported_model")
    normal_args.add_argument(
        '--save_name',
        dest='save_name',
        help='The name of inference params file.',
        type=str,
        default=None)
    parser.add_argument(
        '--quant_config',
        dest='quant_config',
        help='Config for quant model.',
        default=None,
        type=str)

    return parser.parse_known_args()


def parse_model_args(arg_dict: dict):
    model_args = parser.add_argument_group('model args')
    for key, value in arg_dict.items():
        model_args.add_argument(key, **value)
    return parser.parse_args()


def generate_apollo_deploy_file(cfg, save_dir: str):
    yml_file = os.path.join(args.save_dir, 'apollo_deploy.yaml')
    model = cfg.model

    with open(yml_file, 'w') as file:
        # Save the content one by one to ensure the content order of the output file
        file.write('# base information\n')
        yaml.dump({'name': model.apollo_deploy_name}, file)
        yaml.dump({'date': datetime.date.today()}, file)
        yaml.dump({'task_type': '3d_detection'}, file)
        yaml.dump({'sensor_type': model.sensor}, file)
        yaml.dump({'framework': 'PaddlePaddle'}, file)

        file.write('\n# dataset information\n')
        yaml.dump({
            'dataset': {
                'name': cfg.train_dataset.name,
                'labels': cfg.train_dataset.labels
            }
        }, file)

        file.write('\n# model information\n')
        transforms = cfg.export_config.get('transforms', [])
        save_name = args.save_name or cfg.model.save_name
        model_file = '{}.pdmodel'.format(save_name)
        params_file = '{}.pdiparams'.format(save_name)
        data = {
            'model': {
                'inputs': model.inputs,
                'outputs': model.outputs,
                'preprocess': transforms,
                'model_files': [{
                    'name': model_file,
                    'type': 'model',
                    'size': os.path.getsize(os.path.join(args.save_dir, model_file))
                }, {
                    'name': params_file,
                    'type': 'params',
                    'size': os.path.getsize(os.path.join(args.save_dir, params_file))
                }]
            }
        }
        yaml.dump(data, file)


def main(args, rest_args):
    cfg = Config(path=args.cfg)
    model = cfg.model
    model.eval()

    if args.quant_config:
        quant_config = get_qat_config(args.quant_config)
        cfg.model.build_slim_model(quant_config['quant_config'])

    if args.model is not None:
        load_pretrained_model(model, args.model)

    arg_dict = {} if not hasattr(model.export, 'arg_dict') else model.export.arg_dict
    args = parse_model_args(arg_dict)
    kwargs = {key[2:]: getattr(args, key[2:]) for key in arg_dict}

    model.export(args.save_dir, name=args.save_name, **kwargs)

    if args.export_for_apollo:
        if not isinstance(model, BaseDetectionModel):
            logger.error('Model {} does not support Apollo yet!'.format(
                model.__class__.__name__))
        else:
            generate_apollo_deploy_file(cfg, args.save_dir)


if __name__ == '__main__':
    args, rest_args = parse_normal_args()
    main(args, rest_args)
```
### apollo_public_repos/apollo-model-centerpoint/tools/graphvis.py
```python
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import contextlib
import multiprocessing

import paddle
import visualdl
import visualdl.server.app as vdlapp

from paddle3d.apis.config import Config
from paddle3d.slim import get_qat_config
from paddle3d.utils.common import generate_tempdir


def parse_args():
    parser = argparse.ArgumentParser(description='Model Visualization')
    # params of evaluate
    parser.add_argument(
        "--config",
        dest="cfg",
        help="The config file.",
        required=True,
        type=str)
    parser.add_argument(
        "--host",
        dest="host",
        help="VisualDL server host",
        type=str,
        default=None)
    parser.add_argument(
        "--port",
        dest="port",
        help="VisualDL server port",
        type=str,
        default=None)
    parser.add_argument(
        "--save_dir",
        dest="save_dir",
        help="VisualDL graph save dir. Saves to a temporary directory by default",
        type=str,
        default=None)
    parser.add_argument(
        '--quant_config',
        dest='quant_config',
        help='Config for quant model.',
        default=None,
        type=str)

    return parser.parse_args()


@contextlib.contextmanager
def generate_dir(dir: str = None):
    if dir is not None:
        yield dir
    else:
        with generate_tempdir() as dir:
            yield dir


def main(args):
    cfg = Config(path=args.cfg)
    model = cfg.model
    model.eval()

    if args.quant_config:
        quant_config = get_qat_config(args.quant_config)
        cfg.model.build_slim_model(quant_config['quant_config'])

    with generate_dir(args.save_dir) as _dir:
        with visualdl.LogWriter(logdir=_dir) as writer:
            with model.export_guard():
                writer.add_graph(model, model.input_spec)

            pid = vdlapp.run(
                logdir=writer._logdir, host=args.host, port=args.port)

            for child in multiprocessing.process._children:
                if child.pid == pid:
                    child.join()


if __name__ == '__main__':
    args = parse_args()
    main(args)
```
0
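The generate_dir helper above is a pass-through-or-temporary-directory context manager; here is a generic stdlib version of the same pattern, with tempfile.TemporaryDirectory standing in for paddle3d's generate_tempdir (assumed to behave the same way).

import contextlib
import tempfile

@contextlib.contextmanager
def dir_or_tempdir(path: str = None):
    # Use the caller's directory when given, otherwise create (and clean up) a temp one.
    if path is not None:
        yield path
    else:
        with tempfile.TemporaryDirectory() as tmp:
            yield tmp

with dir_or_tempdir() as d:
    print('logs go to', d)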
apollo_public_repos/apollo-model-centerpoint
apollo_public_repos/apollo-model-centerpoint/tools/creat_caddn_kitti_infos.py
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import pickle from paddle3d.datasets.kitti.kitti_depth_det import KittiDepthDataset from paddle3d.utils.logger import logger def parse_args(): parser = argparse.ArgumentParser( description='Create infos for kitti dataset.') parser.add_argument( '--dataset_root', default='data/kitti', help='Path of the dataset.', type=str) parser.add_argument( '--save_dir', default='data/kitti', help='Path to save the generated database.', type=str) return parser.parse_args() def create_caddn_kitti_infos(dataset, save_path, workers=4): train_split, val_split = 'train', 'val' train_filename = os.path.join(save_path, 'kitti_infos_train.pkl') val_filename = os.path.join(save_path, 'kitti_infos_val.pkl') trainval_filename = os.path.join(save_path, 'kitti_infos_trainval.pkl') test_filename = os.path.join(save_path, 'kitti_infos_test.pkl') logger.info("---------------Start to generate data infos---------------") dataset.set_split(train_split) kitti_infos_train = dataset.get_infos( num_workers=workers, has_label=True, count_inside_pts=True, mode='train') with open(train_filename, 'wb') as f: pickle.dump(kitti_infos_train, f) logger.info("Kitti info train file is saved to %s" % train_filename) dataset.set_split(val_split) kitti_infos_val = dataset.get_infos( num_workers=workers, has_label=True, count_inside_pts=True, mode='train') with open(val_filename, 'wb') as f: pickle.dump(kitti_infos_val, f) logger.info("Kitti info val file is saved to %s" % val_filename) with open(trainval_filename, 'wb') as f: pickle.dump(kitti_infos_train + kitti_infos_val, f) logger.info("Kitti info trainval file is saved to %s" % trainval_filename) dataset.set_split('test') kitti_infos_test = dataset.get_infos( num_workers=workers, has_label=False, count_inside_pts=False, mode='test') with open(test_filename, 'wb') as f: pickle.dump(kitti_infos_test, f) logger.info("Kitti info test file is saved to %s" % test_filename) logger.info("---------------Data preparation Done---------------") def main(args): dataset_root = args.dataset_root save_dir = args.save_dir dataset = KittiDepthDataset( dataset_root=dataset_root, mode='val', point_cloud_range=[2, -30.08, -3.0, 46.8, 30.08, 1.0], depth_downsample_factor=4, voxel_size=[0.16, 0.16, 0.16], class_names=['Car', 'Pedestrian', 'Cyclist']) create_caddn_kitti_infos(dataset=dataset, save_path=save_dir) if __name__ == '__main__': args = parse_args() main(args)
0
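A quick way to sanity-check the pickles the script above produces; the path follows the script's defaults, and the record layout (a list of per-frame dicts) is an assumption based on how the get_infos results are dumped.

import pickle

with open('data/kitti/kitti_infos_train.pkl', 'rb') as f:
    infos = pickle.load(f)
print(type(infos))                              # expected: list
print(infos[0].keys() if infos else 'empty')    # inspect one frame's fields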
apollo_public_repos/apollo-model-centerpoint
apollo_public_repos/apollo-model-centerpoint/tools/train.py
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import random import shutil import numpy as np import paddle import paddle3d.env as paddle3d_env from paddle3d.apis.config import Config from paddle3d.apis.trainer import Trainer from paddle3d.slim import update_dic, get_qat_config from paddle3d.utils.checkpoint import load_pretrained_model from paddle3d.utils.logger import logger def parse_args(): """ """ parser = argparse.ArgumentParser(description='Model training') # params of training parser.add_argument( '--batch_size', dest='batch_size', help='Mini batch size of one gpu or cpu', type=int, default=None) parser.add_argument( "--config", dest="cfg", help="The config file.", default=None, type=str) parser.add_argument( '--do_eval', dest='do_eval', help='Eval while training', action='store_true') parser.add_argument( '--iters', dest='iters', help='iters for training', type=int, default=None) parser.add_argument( '--epochs', dest='epochs', help='epochs for training', type=int, default=None) parser.add_argument( '--keep_checkpoint_max', dest='keep_checkpoint_max', help='Maximum number of checkpoints to save', type=int, default=5) parser.add_argument( '--learning_rate', dest='learning_rate', help='Learning rate', type=float, default=None) parser.add_argument( '--log_interval', dest='log_interval', help='Display logging information at every log_interval', default=10, type=int) parser.add_argument( '--num_workers', dest='num_workers', help='Num workers for data loader', type=int, default=2) parser.add_argument( '--resume', dest='resume', help='Whether to resume training from checkpoint', action='store_true') parser.add_argument( '--model', dest='model', help='pretrained parameters of the model', type=str, default=None) parser.add_argument( '--save_dir', dest='save_dir', help='The directory for saving the model snapshot', type=str, default='./output') parser.add_argument( '--save_interval', dest='save_interval', help= 'How many iters/epochs to save a model snapshot once during training.' 
\ 'Default None means 1000 if using iters or 5 for epochs', type=int, default=None) parser.add_argument( '--seed', dest='seed', help='Set the random seed of paddle during training.', default=None, type=int) parser.add_argument( '--quant_config', dest='quant_config', help='Config for quant model.', default=None, type=str) return parser.parse_args() def main(args): """ """ if not os.path.exists(args.save_dir): os.mkdir(args.save_dir) shutil.copyfile(args.cfg, args.save_dir + '/' + str(args.cfg.split('/')[-1])) place = 'gpu' if paddle.is_compiled_with_cuda() else 'cpu' paddle.set_device(place) if args.seed is not None: logger.info("use random seed {}".format(args.seed)) paddle.seed(args.seed) np.random.seed(args.seed) random.seed(args.seed) if args.cfg is None: raise RuntimeError("No configuration file specified!") if not os.path.exists(args.cfg): raise RuntimeError("Config file `{}` does not exist!".format(args.cfg)) cfg = Config(path=args.cfg) if args.model is not None: load_pretrained_model(cfg.model, args.model) if args.quant_config: quant_config = get_qat_config(args.quant_config) cfg.model.build_slim_model(quant_config['quant_config']) update_dic(cfg.dic, quant_config['finetune_config']) cfg.update( learning_rate=args.learning_rate, batch_size=args.batch_size, iters=args.iters, epochs=args.epochs) if cfg.train_dataset is None: raise RuntimeError( 'The training dataset is not specified in the configuration file!') elif len(cfg.train_dataset) == 0: raise ValueError( 'The length of training dataset is 0. Please check if your dataset is valid!' ) logger.info('\n{}'.format(paddle3d_env.get_env_info())) logger.info('\n{}'.format(cfg)) dic = cfg.to_dict() batch_size = dic.pop('batch_size') save_interval = args.save_interval if save_interval is None: if cfg.iters: save_interval = 1000 if cfg.epochs: save_interval = 5 dic.update({ 'resume': args.resume, 'checkpoint': { 'keep_checkpoint_max': args.keep_checkpoint_max, 'save_dir': args.save_dir }, 'scheduler': { 'save_interval': save_interval, 'log_interval': args.log_interval, 'do_eval': args.do_eval }, 'dataloader_fn': { 'batch_size': batch_size, 'num_workers': args.num_workers, } }) trainer = Trainer(**dic) trainer.train() if __name__ == '__main__': args = parse_args() main(args)
0
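The save_interval defaulting rule buried in main() above, isolated as a pure function for clarity: an explicit value wins, otherwise 1000 for iteration-based schedules and 5 for epoch-based ones.

def default_save_interval(iters=None, epochs=None, save_interval=None):
    # Mirrors the branch in train.py's main(); a sketch, not the repo's API.
    if save_interval is not None:
        return save_interval
    if iters:
        return 1000
    if epochs:
        return 5
    return None

assert default_save_interval(iters=80000) == 1000
assert default_save_interval(epochs=160) == 5
assert default_save_interval(epochs=160, save_interval=2) == 2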
apollo_public_repos/apollo-model-centerpoint
apollo_public_repos/apollo-model-centerpoint/tools/evaluate.py
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import random import numpy as np import paddle from paddle3d.apis.config import Config from paddle3d.apis.trainer import Trainer from paddle3d.slim import get_qat_config from paddle3d.utils.checkpoint import load_pretrained_model from paddle3d.utils.logger import logger def parse_args(): """ """ parser = argparse.ArgumentParser(description='Model evaluation') # params of training parser.add_argument( "--config", dest="cfg", help="The config file.", default=None, type=str) parser.add_argument( '--batch_size', dest='batch_size', help='Mini batch size of one gpu or cpu', type=int, default=None) parser.add_argument( '--model', dest='model', help='pretrained parameters of the model', type=str, default=None) parser.add_argument( '--num_workers', dest='num_workers', help='Num workers for data loader', type=int, default=2) parser.add_argument( '--quant_config', dest='quant_config', help='Config for quant model.', default=None, type=str) return parser.parse_args() def worker_init_fn(worker_id): np.random.seed(1024) def main(args): """ """ if args.cfg is None: raise RuntimeError("No configuration file specified!") if not os.path.exists(args.cfg): raise RuntimeError("Config file `{}` does not exist!".format(args.cfg)) cfg = Config(path=args.cfg, batch_size=args.batch_size) if cfg.val_dataset is None: raise RuntimeError( 'The validation dataset is not specified in the configuration file!' ) elif len(cfg.val_dataset) == 0: raise ValueError( 'The length of validation dataset is 0. Please check if your dataset is valid!' ) dic = cfg.to_dict() batch_size = dic.pop('batch_size') dic.update({ 'dataloader_fn': { 'batch_size': batch_size, 'num_workers': args.num_workers, 'worker_init_fn': worker_init_fn } }) if args.quant_config: quant_config = get_qat_config(args.quant_config) cfg.model.build_slim_model(quant_config['quant_config']) if args.model is not None: load_pretrained_model(cfg.model, args.model) dic['checkpoint'] = None dic['resume'] = False else: dic['resume'] = True trainer = Trainer(**dic) trainer.evaluate() if __name__ == '__main__': args = parse_args() main(args)
0
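Why evaluate.py pins worker_init_fn: every DataLoader worker reseeds numpy to the same constant, so any random ops during data loading replay identically between runs. A self-contained illustration of that behavior:

import numpy as np

def worker_init_fn(worker_id):
    np.random.seed(1024)

worker_init_fn(0)
first = np.random.rand(3)
worker_init_fn(1)
# A different worker id still yields the same stream, by design here.
assert np.allclose(first, np.random.rand(3))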
apollo_public_repos/apollo-model-centerpoint
apollo_public_repos/apollo-model-centerpoint/tools/create_petr_nus_infos.py
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------ # Modify from https://github.com/megvii-research/PETR/blob/main/tools/generate_sweep_pkl.py # Copyright (c) 2022 megvii-model. All Rights Reserved. # ------------------------------------------------------------------------ # ------------------------------------------------------------------------ # Modified from mmdetection3d (https://github.com/open-mmlab/mmdetection3d) # Copyright (c) OpenMMLab. All rights reserved. # ------------------------------------------------------------------------ import argparse import os import pickle from pathlib import Path import numpy as np import tqdm from nuscenes import NuScenes from nuscenes.utils import splits as nuscenes_split from nuscenes.utils.data_classes import Box as NuScenesBox from nuscenes.utils.geometry_utils import transform_matrix from pyquaternion import Quaternion from paddle3d.datasets.nuscenes import NuscenesMVDataset from paddle3d.datasets.nuscenes.nuscenes_det import NuscenesDetDataset from paddle3d.utils.logger import logger SENSORS = [ 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_BACK_RIGHT', 'CAM_BACK', 'CAM_BACK_LEFT', 'CAM_FRONT_LEFT' ] def parse_args(): parser = argparse.ArgumentParser( description='Create infos for the nuScenes dataset.') parser.add_argument( '--dataset_root', default='data/nuscenes', help='Path of the dataset.', type=str) parser.add_argument( '--save_dir', default='data/nuscenes', help='Path to save the generated database.', type=str) parser.add_argument( '--mode', default='train', help='mode to generate dataset.', type=str) parser.add_argument( '--num_prev', default=5, help='number of previous key frames.', type=int) parser.add_argument( '--num_sweep', default=5, help='number of sweep frames between two key frames.', type=int) return parser.parse_args() def is_filepath(x): return isinstance(x, (str, Path)) def add_frame(sample_data, e2g_t, l2e_t, l2e_r_mat, e2g_r_mat, data_root, nuscenes): sweep_cam = dict() sweep_cam['is_key_frame'] = sample_data['is_key_frame'] sweep_cam['data_path'] = sample_data['filename'] sweep_cam['type'] = 'camera' sweep_cam['timestamp'] = sample_data['timestamp'] sweep_cam['sample_data_token'] = sample_data['sample_token'] pose_record = nuscenes.get('ego_pose', sample_data['ego_pose_token']) calibrated_sensor_record = nuscenes.get( 'calibrated_sensor', sample_data['calibrated_sensor_token']) sweep_cam['ego2global_translation'] = pose_record['translation'] sweep_cam['ego2global_rotation'] = pose_record['rotation'] sweep_cam['sensor2ego_translation'] = calibrated_sensor_record[ 'translation'] sweep_cam['sensor2ego_rotation'] = calibrated_sensor_record['rotation'] sweep_cam['cam_intrinsic'] = calibrated_sensor_record['camera_intrinsic'] l2e_r_s = sweep_cam['sensor2ego_rotation'] l2e_t_s = sweep_cam['sensor2ego_translation'] e2g_r_s = sweep_cam['ego2global_rotation'] e2g_t_s = sweep_cam['ego2global_translation']
l2e_r_s_mat = Quaternion(l2e_r_s).rotation_matrix e2g_r_s_mat = Quaternion(e2g_r_s).rotation_matrix R = (l2e_r_s_mat.T @ e2g_r_s_mat.T) @ ( np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) T = (l2e_t_s @ e2g_r_s_mat.T + e2g_t_s) @ ( np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) T -= e2g_t @ (np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T ) + l2e_t @ np.linalg.inv(l2e_r_mat).T sweep_cam['sensor2lidar_rotation'] = R.T # points @ R.T + T sweep_cam['sensor2lidar_translation'] = T lidar2cam_r = np.linalg.inv(sweep_cam['sensor2lidar_rotation']) lidar2cam_t = sweep_cam['sensor2lidar_translation'] @ lidar2cam_r.T lidar2cam_rt = np.eye(4) lidar2cam_rt[:3, :3] = lidar2cam_r.T lidar2cam_rt[3, :3] = -lidar2cam_t intrinsic = np.array(sweep_cam['cam_intrinsic']) viewpad = np.eye(4) viewpad[:intrinsic.shape[0], :intrinsic.shape[1]] = intrinsic lidar2img_rt = (viewpad @ lidar2cam_rt.T) sweep_cam['intrinsics'] = viewpad.astype(np.float32) sweep_cam['extrinsics'] = lidar2cam_rt.astype(np.float32) sweep_cam['lidar2img'] = lidar2img_rt.astype(np.float32) pop_keys = [ 'ego2global_translation', 'ego2global_rotation', 'sensor2ego_translation', 'sensor2ego_rotation', 'cam_intrinsic' ] for k in pop_keys: sweep_cam.pop(k) return sweep_cam def get_available_scenes(nusc): """Get available scenes from the input nuscenes class. """ available_scenes = [] logger.info('total scene num: {}'.format(len(nusc.scene))) for scene in nusc.scene: scene_token = scene['token'] scene_rec = nusc.get('scene', scene_token) sample_rec = nusc.get('sample', scene_rec['first_sample_token']) sd_rec = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP']) has_more_frames = True scene_not_exist = False while has_more_frames: lidar_path, boxes, _ = nusc.get_sample_data(sd_rec['token']) lidar_path = str(lidar_path) if os.getcwd() in lidar_path: # path from lyftdataset is absolute path lidar_path = lidar_path.split(f'{os.getcwd()}/')[-1] # relative path if not is_filepath(lidar_path): scene_not_exist = True break else: break if scene_not_exist: continue available_scenes.append(scene) logger.info('existing scene num: {}'.format(len(available_scenes))) return available_scenes def obtain_sensor2top(nusc, sensor_token, l2e_t, l2e_r_mat, e2g_t, e2g_r_mat, sensor_type='lidar'): """Obtain the info with the RT matrix from a general sensor to the top LiDAR.
""" sd_rec = nusc.get('sample_data', sensor_token) cs_record = nusc.get('calibrated_sensor', sd_rec['calibrated_sensor_token']) pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token']) data_path = str(nusc.get_sample_data_path(sd_rec['token'])) # absolute path if os.getcwd() in data_path: # path from lyftdataset is absolute path data_path = data_path.split(f'{os.getcwd()}/')[-1] # relative path sweep = { 'data_path': nusc.get('sample_data', sd_rec['token'])['filename'], # relative path 'type': sensor_type, 'sample_data_token': sd_rec['token'], 'sensor2ego_translation': cs_record['translation'], 'sensor2ego_rotation': cs_record['rotation'], 'ego2global_translation': pose_record['translation'], 'ego2global_rotation': pose_record['rotation'], 'timestamp': sd_rec['timestamp'] } l2e_r_s = sweep['sensor2ego_rotation'] l2e_t_s = sweep['sensor2ego_translation'] e2g_r_s = sweep['ego2global_rotation'] e2g_t_s = sweep['ego2global_translation'] # obtain the RT from sensor to Top LiDAR # sweep->ego->global->ego'->lidar l2e_r_s_mat = Quaternion(l2e_r_s).rotation_matrix e2g_r_s_mat = Quaternion(e2g_r_s).rotation_matrix R = (l2e_r_s_mat.T @ e2g_r_s_mat.T) @ ( np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) T = (l2e_t_s @ e2g_r_s_mat.T + e2g_t_s) @ ( np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) T -= e2g_t @ (np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T ) + l2e_t @ np.linalg.inv(l2e_r_mat).T sweep['sensor2lidar_rotation'] = R.T # points @ R.T + T sweep['sensor2lidar_translation'] = T return sweep def fill_trainval_infos(nusc, train_scenes, val_scenes, test=False, max_sweeps=10): """Generate the train/val infos from the raw data. """ train_nusc_infos = [] val_nusc_infos = [] msg = "Begin to generate a info of nuScenes dataset." for sample_idx in logger.range(len(nusc.sample), msg=msg): sample = nusc.sample[sample_idx] lidar_token = sample['data']['LIDAR_TOP'] sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP']) cs_record = nusc.get('calibrated_sensor', sd_rec['calibrated_sensor_token']) pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token']) lidar_path, boxes, _ = nusc.get_sample_data(lidar_token) assert os.path.exists(lidar_path) info = { 'lidar_token': lidar_token, 'lidar_path': nusc.get('sample_data', lidar_token)['filename'], # relative path 'token': sample['token'], 'sweeps': [], 'cams': dict(), 'lidar2ego_translation': cs_record['translation'], 'lidar2ego_rotation': cs_record['rotation'], 'ego2global_translation': pose_record['translation'], 'ego2global_rotation': pose_record['rotation'], 'timestamp': sample['timestamp'], } l2e_r = info['lidar2ego_rotation'] l2e_t = info['lidar2ego_translation'] e2g_r = info['ego2global_rotation'] e2g_t = info['ego2global_translation'] l2e_r_mat = Quaternion(l2e_r).rotation_matrix e2g_r_mat = Quaternion(e2g_r).rotation_matrix # obtain 6 image's information per frame camera_types = [ 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_FRONT_LEFT', 'CAM_BACK', 'CAM_BACK_LEFT', 'CAM_BACK_RIGHT', ] for cam in camera_types: cam_token = sample['data'][cam] cam_path, _, cam_intrinsic = nusc.get_sample_data(cam_token) cam_info = obtain_sensor2top(nusc, cam_token, l2e_t, l2e_r_mat, e2g_t, e2g_r_mat, cam) cam_info.update(cam_intrinsic=cam_intrinsic) info['cams'].update({cam: cam_info}) # obtain sweeps for a single key-frame sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP']) sweeps = [] while len(sweeps) < max_sweeps: if not sd_rec['prev'] == '': sweep = obtain_sensor2top(nusc, sd_rec['prev'], l2e_t, l2e_r_mat, e2g_t, e2g_r_mat, 
'lidar') sweeps.append(sweep) sd_rec = nusc.get('sample_data', sd_rec['prev']) else: break info['sweeps'] = sweeps # obtain annotation if not test: annotations = [ nusc.get('sample_annotation', token) for token in sample['anns'] ] locs = np.array([b.center for b in boxes]).reshape(-1, 3) dims = np.array([b.wlh for b in boxes]).reshape(-1, 3) rots = np.array([b.orientation.yaw_pitch_roll[0] for b in boxes]).reshape(-1, 1) velocity = np.array( [nusc.box_velocity(token)[:2] for token in sample['anns']]) valid_flag = np.array( [(anno['num_lidar_pts'] + anno['num_radar_pts']) > 0 for anno in annotations], dtype=bool).reshape(-1) # convert velo from global to lidar for i in range(len(boxes)): velo = np.array([*velocity[i], 0.0]) velo = velo @ np.linalg.inv(e2g_r_mat).T @ np.linalg.inv( l2e_r_mat).T velocity[i] = velo[:2] names = [b.name for b in boxes] for i in range(len(names)): # NuscenesDetDataset.LABEL_MAP if names[i] in NuscenesDetDataset.LABEL_MAP: names[i] = NuscenesDetDataset.LABEL_MAP[names[i]] names = np.array(names) # we need to convert box size to # the format of our lidar coordinate system # which is x_size, y_size, z_size (corresponding to l, w, h) gt_boxes = np.concatenate([locs, dims, -rots - np.pi / 2], axis=1) assert len(gt_boxes) == len( annotations), f'{len(gt_boxes)}, {len(annotations)}' info['gt_boxes'] = gt_boxes info['gt_names'] = names info['gt_velocity'] = velocity.reshape(-1, 2) info['num_lidar_pts'] = np.array( [a['num_lidar_pts'] for a in annotations]) info['num_radar_pts'] = np.array( [a['num_radar_pts'] for a in annotations]) info['valid_flag'] = valid_flag if sample['scene_token'] in train_scenes: train_nusc_infos.append(info) else: val_nusc_infos.append(info) return train_nusc_infos, val_nusc_infos def build_petr_nuscenes_data(dataset_root, is_test, nusc, version, max_sweeps=10): if version == 'v1.0-trainval': train_scenes = nuscenes_split.train val_scenes = nuscenes_split.val elif version == 'v1.0-test': train_scenes = nuscenes_split.test val_scenes = [] elif version == 'v1.0-mini': train_scenes = nuscenes_split.mini_train val_scenes = nuscenes_split.mini_val else: raise ValueError('unknown nuscenes dataset version') available_scenes = get_available_scenes(nusc) available_scene_names = [s['name'] for s in available_scenes] train_scenes = list( filter(lambda x: x in available_scene_names, train_scenes)) val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes)) train_scenes = set([ available_scenes[available_scene_names.index(s)]['token'] for s in train_scenes ]) val_scenes = set([ available_scenes[available_scene_names.index(s)]['token'] for s in val_scenes ]) if is_test: print('test scene: {}'.format(len(train_scenes))) else: print('train scene: {}, val scene: {}'.format( len(train_scenes), len(val_scenes))) train_nusc_infos, val_nusc_infos = fill_trainval_infos( nusc, train_scenes, val_scenes, is_test, max_sweeps=max_sweeps) metadata = dict(version=version) if is_test: print('test sample: {}'.format(len(train_nusc_infos))) data = dict(infos=train_nusc_infos, metadata=metadata) return [data] else: print('train sample: {}, val sample: {}'.format( len(train_nusc_infos), len(val_nusc_infos))) train_data = dict(infos=train_nusc_infos, metadata=metadata) val_data = dict(infos=val_nusc_infos, metadata=metadata) return train_data, val_data def main(args): dataset_root = args.dataset_root save_dir = args.save_dir num_prev = args.num_prev num_sweep = args.num_sweep version = NuscenesDetDataset.VERSION_MAP[args.mode] nuscenes = NuScenes(version=version, 
dataroot=dataset_root, verbose=False) is_test = 'test' in args.mode if is_test: test_ann_cache_file = os.path.join(save_dir, 'petr_nuscenes_annotation_test.pkl') if os.path.exists(test_ann_cache_file): raise OSError( "{} annotation file is exist!".format(test_ann_cache_file)) else: train_ann_cache_file = os.path.join( save_dir, 'petr_nuscenes_annotation_train.pkl') val_ann_cache_file = os.path.join(save_dir, 'petr_nuscenes_annotation_val.pkl') if os.path.exists(train_ann_cache_file): raise OSError( "{} annotation file is exist!".format(train_ann_cache_file)) if os.path.exists(val_ann_cache_file): raise OSError( "{} annotation file is exist!".format(val_ann_cache_file)) infos = build_petr_nuscenes_data(dataset_root, is_test, nuscenes, version, num_sweep) if is_test: infos_dict = {test_ann_cache_file: infos[0]} else: infos_dict = { train_ann_cache_file: infos[0], val_ann_cache_file: infos[1] } msg = "Adding sweep frame annotations" for ann_cache_file, key_infos in infos_dict.items(): for current_id in logger.range(len(key_infos['infos']), msg=msg): ###parameters of current key frame e2g_t = key_infos['infos'][current_id]['ego2global_translation'] e2g_r = key_infos['infos'][current_id]['ego2global_rotation'] l2e_t = key_infos['infos'][current_id]['lidar2ego_translation'] l2e_r = key_infos['infos'][current_id]['lidar2ego_rotation'] l2e_r_mat = Quaternion(l2e_r).rotation_matrix e2g_r_mat = Quaternion(e2g_r).rotation_matrix # {'token': 'c0be823ae8f040e2b3306002c571ae57', 'timestamp': 1533153861447131, # 'prev': 'e866142822bb421d87d8f9bd1b91fbc3', 'next': 'f32d3a2842004926b41985152fa1bfad', # 'scene_token': 'bc6a757d637f4832be68986833ec17ac', 'data': {'RADAR_FRONT': # '85962dfd390843bab8cbedc9003a5d81', 'RADAR_FRONT_LEFT': '35e35910a6f8428ea1e3f71db59f0ed7', # 'RADAR_FRONT_RIGHT': 'a557a223830d4f7db59a9bf03425c52d', 'RADAR_BACK_LEFT': # '46b86e2060e341dabb14396a8edc1653', 'RADAR_BACK_RIGHT': '7e7b5ad41eff4f949d69b3ef6d65f991', # 'LIDAR_TOP': '5a0aa6326b004322bf009388f4df33df', 'CAM_FRONT': 'a5c43d3424bd406ba1a0a3d1d1493277', # 'CAM_FRONT_RIGHT': '38ee6078f2594c5cb3bea00956d3afeb', 'CAM_BACK_RIGHT': '082193ef4dff4dca9ff7af18493107f5', # 'CAM_BACK': 'aec2027af4e243b591cf22459735644e', 'CAM_BACK_LEFT': 'd6c479b792674d8db1a5de86af2b9183', # 'CAM_FRONT_LEFT': '451c4acac4534a0da20e652ba49a14a2'}, 'anns': []} sample = nuscenes.get('sample', key_infos['infos'][current_id]['token']) current_cams = dict() ###cam of current key frame for cam in SENSORS: # {'token': '8e25cfcd8f724bb7bbce69bff042a56f', 'sample_token': '02fd302178dd44568ae305320ea24054', # 'ego_pose_token': '8e25cfcd8f724bb7bbce69bff042a56f', 'calibrated_sensor_token': # '2fde3d3376ea42a8a561df595e001cc7', 'timestamp': 1533153859904816, 'fileformat': 'jpg', # 'is_key_frame': True, 'height': 900, 'width': 1600, 'filename': # 'samples/CAM_FRONT_LEFT/n008-2018-08-01-16-03-27-0400__CAM_FRONT_LEFT__1533153859904816.jpg', # 'prev': '5d82f148ba8947579a6d7647ac73a9d6', 'next': 'cb0a1671873647faba28916a88b14574', # 'sensor_modality': 'camera', 'channel': 'CAM_FRONT_LEFT'} current_cams[cam] = nuscenes.get('sample_data', sample['data'][cam]) sweep_lists = [] # previous sweep frame for i in range(num_prev): ### justify the first frame of a scene if sample['prev'] == '': break ###add sweep frame between two key frame for j in range(num_sweep): sweep_cams = dict() for cam in SENSORS: if current_cams[cam]['prev'] == '': sweep_cams = sweep_lists[-1] break # {'token': '8e25cfcd8f724bb7bbce69bff042a56f', 'sample_token': '02fd302178dd44568ae305320ea24054', # 
'ego_pose_token': '8e25cfcd8f724bb7bbce69bff042a56f', 'calibrated_sensor_token': # '2fde3d3376ea42a8a561df595e001cc7', 'timestamp': 1533153859904816, 'fileformat': 'jpg', # 'is_key_frame': True, 'height': 900, 'width': 1600, 'filename': \ # 'samples/CAM_FRONT_LEFT/n008-2018-08-01-16-03-27-0400__CAM_FRONT_LEFT__1533153859904816.jpg', # 'prev': '5d82f148ba8947579a6d7647ac73a9d6', 'next': 'cb0a1671873647faba28916a88b14574', # 'sensor_modality': 'camera', 'channel': 'CAM_FRONT_LEFT'} sample_data = nuscenes.get('sample_data', current_cams[cam]['prev']) sweep_cam = add_frame(sample_data, e2g_t, l2e_t, l2e_r_mat, e2g_r_mat, dataset_root, nuscenes) current_cams[cam] = sample_data sweep_cams[cam] = sweep_cam sweep_lists.append(sweep_cams) ###add previous key frame sample = nuscenes.get('sample', sample['prev']) sweep_cams = dict() for cam in SENSORS: sample_data = nuscenes.get('sample_data', sample['data'][cam]) sweep_cam = add_frame(sample_data, e2g_t, l2e_t, l2e_r_mat, e2g_r_mat, dataset_root, nuscenes) current_cams[cam] = sample_data sweep_cams[cam] = sweep_cam sweep_lists.append(sweep_cams) key_infos['infos'][current_id]['sweeps'] = sweep_lists pickle.dump(key_infos, open(ann_cache_file, 'wb')) logger.info("---------------Data preparation Done---------------") if __name__ == '__main__': args = parse_args() main(args)
0
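A compact numeric check of the sweep-to-lidar chain (sweep -> ego -> global -> ego' -> lidar) used by obtain_sensor2top and add_frame above: with identity poses, a point mapped by p @ R.T + T must come back unchanged. Requires numpy and pyquaternion, as in the script itself; the pose values are placeholders.

import numpy as np
from pyquaternion import Quaternion

q = Quaternion(axis=[0, 0, 1], angle=0.0)  # identity rotation for the check
l2e_r_mat = e2g_r_mat = q.rotation_matrix
l2e_r_s_mat = e2g_r_s_mat = q.rotation_matrix
l2e_t = e2g_t = l2e_t_s = e2g_t_s = np.zeros(3)

R = (l2e_r_s_mat.T @ e2g_r_s_mat.T) @ (
    np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T)
T = (l2e_t_s @ e2g_r_s_mat.T + e2g_t_s) @ (
    np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T)
T -= e2g_t @ (np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T
              ) + l2e_t @ np.linalg.inv(l2e_r_mat).T

p = np.array([1.0, 2.0, 3.0])
assert np.allclose(p @ R.T + T, p)  # identity chain maps p to itself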
apollo_public_repos/apollo-model-centerpoint
apollo_public_repos/apollo-model-centerpoint/tools/convert_apolloscape_to_kitti.py
#!/usr/bin/env python3 """ Convert apolloscape training data to kitti format """ import os import argparse from pypcd import pypcd import numpy as np names_ = ["", "00000", "0000", "000", "00", "0"] type_mapper = { "1": "smallVehicle", "2": "bigVehicle", "3": "pedestrian", "4": "cyclist", "5": "trafficCone", "6": "other" } def save_pcd_file(pcd_file, output_folder, count): """save pcd file to dir """ # bin file bin_name = str(count) if len(bin_name) < len(names_) and len(bin_name) > 0: bin_name = names_[len(bin_name)] + bin_name bin_file = os.path.join(output_folder, bin_name + '.bin') # read pointcloud data pcd = pypcd.PointCloud.from_path(pcd_file) data = list() for line in pcd.pc_data: d = list() for e in line: d.append(float(e)) data.append(d) data = np.asarray(data, dtype=np.float32) data = data.reshape(-1) data.tofile(bin_file) # save to binary file def save_label_file(label_file, output_folder, count): """save label file to dir """ # output label file txt_name = str(count) if len(txt_name) < len(names_) and len(txt_name) > 0: txt_name = names_[len(txt_name)] + txt_name txt_name = os.path.join(output_folder, txt_name + '.txt') f = open(label_file, 'r') lines = f.readlines() f.close() with open(txt_name, 'w') as f: for line in lines: bbox = line.strip().split(' ') obj_type = bbox[0] # w,l,h and theta width = bbox[5] length = bbox[4] height = bbox[6] theta = bbox[7] # center x = bbox[1] y = bbox[2] z = bbox[3] if obj_type in type_mapper: f.write(type_mapper[obj_type]); f.write(' ') else: print('type not in type_mapper') f.write(type_mapper['6']); f.write(' ') for _ in range(7): f.write('0'); f.write(' ') f.write(height); f.write(' ') f.write(width); f.write(' ') f.write(length); f.write(' ') f.write(x); f.write(' ') f.write(y); f.write(' ') f.write(z); f.write(' ') f.write(theta); f.write('\n') def convert_to_kitti(pcd_path, label_path, output_path): """convert apolloscape dataset to kitti format """ record_dirs = os.listdir(pcd_path) # create output dir pcd_folder = os.path.join(output_path, 'training/velodyne') label_folder = os.path.join(output_path, 'training/label') if not os.path.exists(pcd_folder): os.makedirs(pcd_folder) if not os.path.exists(label_folder): os.makedirs(label_folder) # convert count = 0 for dir in record_dirs: # pcd dir pcd_dir = os.path.join(pcd_path, dir) # label dir label_dir = dir[7:-6] label_dir = os.path.join(label_path, label_dir) pcds = os.listdir(pcd_dir) for pcd in pcds: # pcd file pcd_file = os.path.join(pcd_dir, pcd) label_file = os.path.join(label_dir, pcd[:-4] + '.txt') if os.path.isfile(pcd_file) and os.path.isfile(label_file): save_pcd_file(pcd_file, pcd_folder, count) save_label_file(label_file, label_folder, count) count += 1 def main(pcd_path, label_path, output_path): """main """ convert_to_kitti(pcd_path, label_path, output_path) if __name__ == "__main__": """ pcd_path: |- result_9048_1_frame |- result_9048_3_frame |- ... label_path: |- 9048_1 |- 9048_3 |- ... """ parser = argparse.ArgumentParser(description='Convert to kitti format') parser.add_argument('--pcd_path', type=str, default=None, help='Specify the pcd path') parser.add_argument('--label_path', type=str, default=None, help='Specify the label path') parser.add_argument('--output_path', type=str, default=None, help='Specify the output_path') args = parser.parse_args() main(args.pcd_path, args.label_path, args.output_path)
0
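The names_ padding table in the converter above is just fixed-width zero padding; str.zfill(6) is the equivalent one-liner, shown here as a sketch:

def pad_index(count: int) -> str:
    # Same result as the names_ lookup: pad to 6 digits, leave longer names alone.
    return str(count).zfill(6)

assert pad_index(7) == '000007'
assert pad_index(12345) == '012345'
assert pad_index(1234567) == '1234567'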
apollo_public_repos/apollo-model-centerpoint/deploy/smoke
apollo_public_repos/apollo-model-centerpoint/deploy/smoke/python/vis.py
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import cv2 import numpy as np from infer import get_img, get_ratio, init_predictor, parse_args, run from paddle3d.datasets.kitti.kitti_utils import camera_record_to_object from paddle3d.transforms.target_generator import encode_label def total_pred_by_conf_to_kitti_records( total_pred, conf, class_names=["Car", "Cyclist", "Pedestrian"]): """convert total_pred to kitti_records""" kitti_records_list = [] for p in total_pred: if p[-1] > conf: p = list(p) p[0] = class_names[int(p[0])] # defaults, to match the kitti_records format p.insert(1, 0.0) p.insert(2, 0) kitti_records_list.append(p) kitti_records = np.array(kitti_records_list) return kitti_records def make_imgpts_list(bboxes_3d, K): """project 3D boxes to 8 corner points on the image""" # extrinsic parameters are identity: no additional transform rvec = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]) tvec = np.array([[0.0], [0.0], [0.0]]) imgpts_list = [] for box3d in bboxes_3d: locs = np.array(box3d[0:3]) rot_y = np.array(box3d[6]) height, width, length = box3d[3:6] _, box2d, box3d = encode_label(K, rot_y, np.array([length, height, width]), locs) if np.all(box2d == 0): continue imgpts, _ = cv2.projectPoints(box3d.T, rvec, tvec, K, 0) imgpts_list.append(imgpts) return imgpts_list def draw_smoke_3d(img, imgpts_list): """draw SMOKE 3D detection results on the image""" connect_line_id = [ [1, 0], [2, 7], [3, 6], [4, 5], [1, 2], [2, 3], [3, 4], [4, 1], [0, 7], [7, 6], [6, 5], [5, 0], ] img_draw = img.copy() for imgpts in imgpts_list: for p in imgpts: p_x, p_y = int(p[0][0]), int(p[0][1]) cv2.circle(img_draw, (p_x, p_y), 1, (0, 255, 0), -1) for i, line_id in enumerate(connect_line_id): p1 = (int(imgpts[line_id[0]][0][0]), int(imgpts[line_id[0]][0][1])) p2 = (int(imgpts[line_id[1]][0][0]), int(imgpts[line_id[1]][0][1])) if i <= 3: # body color = (255, 0, 0) elif i <= 7: # head color = (0, 0, 255) else: # tail color = (255, 255, 0) cv2.line(img_draw, p1, p2, color, 1) return img_draw if __name__ == "__main__": args = parse_args() pred = init_predictor(args) # Listed below are the camera intrinsic parameters of the KITTI dataset # If the model is trained on other datasets, please replace the relevant data K = np.array( [[ [721.53771973, 0.0, 609.55932617], [0.0, 721.53771973, 172.85400391], [0, 0, 1], ]], np.float32, ) img, ori_img_size, output_size = get_img(args.image) ratio = get_ratio(ori_img_size, output_size) results = run(pred, img, K, ratio) total_pred = results[0] # convert pred to bboxes_2d, bboxes_3d kitti_records = total_pred_by_conf_to_kitti_records(total_pred, conf=0.5) bboxes_2d, bboxes_3d, labels = camera_record_to_object(kitti_records) # read the original image img_origin = cv2.imread(args.image) # project 3D boxes to 8 corner points on the image imgpts_list = make_imgpts_list(bboxes_3d, K[0]) # draw SMOKE results on the image img_draw = draw_smoke_3d(img_origin, imgpts_list) cv2.imwrite("output.bmp", img_draw)
0
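A minimal cv2.projectPoints check mirroring make_imgpts_list above: identity extrinsics and the same KITTI-style intrinsics, projecting one point on the optical axis, which must land on the principal point. The point value is arbitrary.

import cv2
import numpy as np

K = np.array([[721.53771973, 0.0, 609.55932617],
              [0.0, 721.53771973, 172.85400391],
              [0.0, 0.0, 1.0]], np.float32)
rvec = np.zeros(3, np.float32)  # identity rotation (as a Rodrigues vector)
tvec = np.zeros(3, np.float32)  # no translation
pts3d = np.array([[0.0, 0.0, 10.0]], np.float32)  # 10 m straight ahead
imgpts, _ = cv2.projectPoints(pts3d, rvec, tvec, K, None)
print(imgpts)  # ~[[609.56, 172.85]]: the principal point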
apollo_public_repos/apollo-model-centerpoint/deploy/smoke
apollo_public_repos/apollo-model-centerpoint/deploy/smoke/python/infer.py
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import cv2 import numpy as np import paddle from paddle.inference import Config, PrecisionType, create_predictor def parse_args(): parser = argparse.ArgumentParser() parser.add_argument( "--model_file", type=str, default="./inference.pdmodel", help="Model filename, Specify this when your model is a combined model." ) parser.add_argument( "--params_file", type=str, default="./inference.pdiparams", help= "Parameter filename, Specify this when your model is a combined model.") parser.add_argument( '--image', dest='image', help='The image path', type=str, required=True) parser.add_argument( "--use_gpu", action='store_true', help="Whether use gpu.") parser.add_argument( "--use_trt", action='store_true', help="Whether use trt.") parser.add_argument( "--collect_dynamic_shape_info", action='store_true', help="Whether to collect dynamic shape before using tensorrt.") parser.add_argument( "--dynamic_shape_file", dest='dynamic_shape_file', help='The image path', type=str, default="dynamic_shape_info.txt") return parser.parse_args() def get_ratio(ori_img_size, output_size, down_ratio=(4, 4)): return np.array([[ down_ratio[1] * ori_img_size[1] / output_size[1], down_ratio[0] * ori_img_size[0] / output_size[0] ]], np.float32) def get_img(img_path): img = cv2.imread(img_path) origin_shape = img.shape img = cv2.resize(img, (1280, 384)) target_shape = img.shape img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img = img / 255.0 img = np.subtract(img, np.array([0.485, 0.456, 0.406])) img = np.true_divide(img, np.array([0.229, 0.224, 0.225])) img = np.array(img, np.float32) img = img.transpose(2, 0, 1) img = img[None, :, :, :] return img, origin_shape, target_shape def init_predictor(args): config = Config(args.model_file, args.params_file) config.enable_memory_optim() if args.use_gpu: config.enable_use_gpu(1000, 0) else: # If not specific mkldnn, you can set the blas thread. # The thread num should not be greater than the number of cores in the CPU. 
config.set_cpu_math_library_num_threads(4) config.enable_mkldnn() if args.collect_dynamic_shape_info: config.collect_shape_range_info(args.dynamic_shape_file) elif args.use_trt: allow_build_at_runtime = True config.enable_tuned_tensorrt_dynamic_shape(args.dynamic_shape_file, allow_build_at_runtime) config.enable_tensorrt_engine( workspace_size=1 << 20, max_batch_size=1, min_subgraph_size=3, precision_mode=PrecisionType.Float32) predictor = create_predictor(config) return predictor def run(predictor, image, K, down_ratio): # copy img data to input tensors input_names = predictor.get_input_names() for i, name in enumerate(input_names): input_tensor = predictor.get_input_handle(name) if name == "images": input_tensor.reshape(image.shape) input_tensor.copy_from_cpu(image.copy()) elif name == "trans_cam_to_img": input_tensor.reshape(K.shape) input_tensor.copy_from_cpu(K.copy()) elif name == "down_ratios": input_tensor.reshape(down_ratio.shape) input_tensor.copy_from_cpu(down_ratio.copy()) # do the inference predictor.run() results = [] # get output data from the output tensors output_names = predictor.get_output_names() for i, name in enumerate(output_names): output_tensor = predictor.get_output_handle(name) output_data = output_tensor.copy_to_cpu() results.append(output_data) return results if __name__ == '__main__': args = parse_args() pred = init_predictor(args) # Listed below are the camera intrinsic parameters of the KITTI dataset # If the model is trained on other datasets, please replace the relevant data K = np.array([[[721.53771973, 0., 609.55932617], [0., 721.53771973, 172.85400391], [0, 0, 1]]], np.float32) img, ori_img_size, output_size = get_img(args.image) ratio = get_ratio(ori_img_size, output_size) results = run(pred, img, K, ratio) total_pred = results[0]
0
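The down-ratio bookkeeping in get_ratio above, restated: the network predicts on a 4x-downsampled grid of its 1280x384 input, so outputs are mapped back to original pixels by down_ratio * original / resized per axis. A standalone sketch with KITTI's usual 1242x375 frame size:

import numpy as np

def get_ratio(ori_img_size, output_size, down_ratio=(4, 4)):
    # (w, h) scale factors from network-output pixels to original-image pixels.
    return np.array([[down_ratio[1] * ori_img_size[1] / output_size[1],
                      down_ratio[0] * ori_img_size[0] / output_size[0]]],
                    np.float32)

print(get_ratio((375, 1242, 3), (384, 1280, 3)))  # ~[[3.88, 3.91]]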
apollo_public_repos/apollo-model-centerpoint/deploy/smoke
apollo_public_repos/apollo-model-centerpoint/deploy/smoke/cpp/CMakeLists.txt
cmake_minimum_required(VERSION 3.0) project(cpp_inference_demo CXX C) option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." ON) option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." OFF) option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static." ON) option(USE_TENSORRT "Compile demo with TensorRT." OFF) option(WITH_ROCM "Compile demo with rocm." OFF) if(NOT WITH_STATIC_LIB) add_definitions("-DPADDLE_WITH_SHARED_LIB") else() # PD_INFER_DECL is mainly used to set the dllimport/dllexport attribute in dynamic library mode. # Set it to empty in static library mode to avoid compilation issues. add_definitions("/DPD_INFER_DECL=") endif() macro(safe_set_static_flag) foreach(flag_var CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO) if(${flag_var} MATCHES "/MD") string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}") endif(${flag_var} MATCHES "/MD") endforeach(flag_var) endmacro() if(NOT DEFINED PADDLE_LIB) message(FATAL_ERROR "please set PADDLE_LIB with -DPADDLE_LIB=/path/paddle/lib") endif() if(NOT DEFINED DEMO_NAME) message(FATAL_ERROR "please set DEMO_NAME with -DDEMO_NAME=demo_name") endif() include_directories("${PADDLE_LIB}/") set(PADDLE_LIB_THIRD_PARTY_PATH "${PADDLE_LIB}/third_party/install/") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}protobuf/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}glog/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}gflags/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}xxhash/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}cryptopp/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}onnxruntime/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}paddle2onnx/include") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}protobuf/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}glog/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}gflags/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}xxhash/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}cryptopp/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}onnxruntime/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}paddle2onnx/lib") link_directories("${PADDLE_LIB}/paddle/lib") if (WIN32) add_definitions("/DGOOGLE_GLOG_DLL_DECL=") option(MSVC_STATIC_CRT "use static C Runtime library by default" ON) if (MSVC_STATIC_CRT) if (WITH_MKL) set(FLAG_OPENMP "/openmp") endif() set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /bigobj /MTd ${FLAG_OPENMP}") set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /bigobj /MT ${FLAG_OPENMP}") set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /bigobj /MTd ${FLAG_OPENMP}") set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /bigobj /MT ${FLAG_OPENMP}") safe_set_static_flag() if (WITH_STATIC_LIB) add_definitions(-DSTATIC_LIB) endif() endif() else() if(WITH_MKL) set(FLAG_OPENMP "-fopenmp") endif() set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 ${FLAG_OPENMP}") endif() if(WITH_GPU) if(NOT WIN32) set(CUDA_LIB "/usr/local/cuda/lib64/" CACHE STRING "CUDA Library") else() if(CUDA_LIB STREQUAL "") set(CUDA_LIB "C:\\Program\ Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v8.0\\lib\\x64") endif() endif(NOT WIN32) endif() if (USE_TENSORRT AND WITH_GPU) set(TENSORRT_ROOT "" CACHE STRING "The root directory of TensorRT library") if("${TENSORRT_ROOT}" STREQUAL "") message(FATAL_ERROR "The TENSORRT_ROOT is empty, you must assign it a value with CMake command. 
Such as: -DTENSORRT_ROOT=TENSORRT_ROOT_PATH ") endif() set(TENSORRT_INCLUDE_DIR ${TENSORRT_ROOT}/include) set(TENSORRT_LIB_DIR ${TENSORRT_ROOT}/lib) file(READ ${TENSORRT_INCLUDE_DIR}/NvInfer.h TENSORRT_VERSION_FILE_CONTENTS) string(REGEX MATCH "define NV_TENSORRT_MAJOR +([0-9]+)" TENSORRT_MAJOR_VERSION "${TENSORRT_VERSION_FILE_CONTENTS}") if("${TENSORRT_MAJOR_VERSION}" STREQUAL "") file(READ ${TENSORRT_INCLUDE_DIR}/NvInferVersion.h TENSORRT_VERSION_FILE_CONTENTS) string(REGEX MATCH "define NV_TENSORRT_MAJOR +([0-9]+)" TENSORRT_MAJOR_VERSION "${TENSORRT_VERSION_FILE_CONTENTS}") endif() if("${TENSORRT_MAJOR_VERSION}" STREQUAL "") message(SEND_ERROR "Failed to detect TensorRT version.") endif() string(REGEX REPLACE "define NV_TENSORRT_MAJOR +([0-9]+)" "\\1" TENSORRT_MAJOR_VERSION "${TENSORRT_MAJOR_VERSION}") message(STATUS "Current TensorRT header is ${TENSORRT_INCLUDE_DIR}/NvInfer.h. " "Current TensorRT version is v${TENSORRT_MAJOR_VERSION}. ") include_directories("${TENSORRT_INCLUDE_DIR}") link_directories("${TENSORRT_LIB_DIR}") endif() if(WITH_MKL) set(MATH_LIB_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}mklml") include_directories("${MATH_LIB_PATH}/include") if(WIN32) set(MATH_LIB ${MATH_LIB_PATH}/lib/mklml${CMAKE_STATIC_LIBRARY_SUFFIX} ${MATH_LIB_PATH}/lib/libiomp5md${CMAKE_STATIC_LIBRARY_SUFFIX}) else() set(MATH_LIB ${MATH_LIB_PATH}/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX} ${MATH_LIB_PATH}/lib/libiomp5${CMAKE_SHARED_LIBRARY_SUFFIX}) endif() set(MKLDNN_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}mkldnn") if(EXISTS ${MKLDNN_PATH}) include_directories("${MKLDNN_PATH}/include") if(WIN32) set(MKLDNN_LIB ${MKLDNN_PATH}/lib/mkldnn.lib) else(WIN32) set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.0) endif(WIN32) endif() else() set(OPENBLAS_LIB_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}openblas") include_directories("${OPENBLAS_LIB_PATH}/include/openblas") if(WIN32) set(MATH_LIB ${OPENBLAS_LIB_PATH}/lib/openblas${CMAKE_STATIC_LIBRARY_SUFFIX}) else() set(MATH_LIB ${OPENBLAS_LIB_PATH}/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX}) endif() endif() if(WITH_STATIC_LIB) set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX}) else() if(WIN32) set(DEPS ${PADDLE_LIB}/paddle/lib/paddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX}) else() set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_SHARED_LIBRARY_SUFFIX}) endif() endif() if (NOT WIN32) set(EXTERNAL_LIB "-lrt -ldl -lpthread") set(DEPS ${DEPS} ${MATH_LIB} ${MKLDNN_LIB} glog gflags protobuf xxhash cryptopp ${EXTERNAL_LIB}) else() set(DEPS ${DEPS} ${MATH_LIB} ${MKLDNN_LIB} glog gflags_static libprotobuf xxhash cryptopp-static ${EXTERNAL_LIB}) set(DEPS ${DEPS} shlwapi.lib) endif(NOT WIN32) if(WITH_GPU) if(NOT WIN32) if (USE_TENSORRT) set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer${CMAKE_SHARED_LIBRARY_SUFFIX}) set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX}) endif() set(DEPS ${DEPS} ${CUDA_LIB}/libcudart${CMAKE_SHARED_LIBRARY_SUFFIX}) else() if(USE_TENSORRT) set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/nvinfer${CMAKE_STATIC_LIBRARY_SUFFIX}) set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_STATIC_LIBRARY_SUFFIX}) if(${TENSORRT_MAJOR_VERSION} GREATER_EQUAL 7) set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/myelin64_1${CMAKE_STATIC_LIBRARY_SUFFIX}) endif() endif() set(DEPS ${DEPS} ${CUDA_LIB}/cudart${CMAKE_STATIC_LIBRARY_SUFFIX} ) set(DEPS ${DEPS} ${CUDA_LIB}/cublas${CMAKE_STATIC_LIBRARY_SUFFIX} ) set(DEPS ${DEPS} ${CUDA_LIB}/cudnn${CMAKE_STATIC_LIBRARY_SUFFIX} ) endif() endif() if(WITH_ROCM) if(NOT WIN32) 
set(DEPS ${DEPS} ${ROCM_LIB}/libamdhip64${CMAKE_SHARED_LIBRARY_SUFFIX}) endif() endif() add_executable(${DEMO_NAME} ${DEMO_NAME}.cpp) find_package(OpenCV REQUIRED) include_directories(${OpenCV_INCLUDE_DIRS}) set(DEPS ${DEPS} ${OpenCV_LIBS}) target_link_libraries(${DEMO_NAME} ${DEPS}) if(WIN32) if(USE_TENSORRT) add_custom_command(TARGET ${DEMO_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer${CMAKE_SHARED_LIBRARY_SUFFIX} ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE} COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX} ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE} ) if(${TENSORRT_MAJOR_VERSION} GREATER_EQUAL 7) add_custom_command(TARGET ${DEMO_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/myelin64_1${CMAKE_SHARED_LIBRARY_SUFFIX} ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}) endif() endif() if(WITH_MKL) add_custom_command(TARGET ${DEMO_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/mklml.dll ${CMAKE_BINARY_DIR}/Release COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/libiomp5md.dll ${CMAKE_BINARY_DIR}/Release COMMAND ${CMAKE_COMMAND} -E copy ${MKLDNN_PATH}/lib/mkldnn.dll ${CMAKE_BINARY_DIR}/Release ) else() add_custom_command(TARGET ${DEMO_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy ${OPENBLAS_LIB_PATH}/lib/openblas.dll ${CMAKE_BINARY_DIR}/Release ) endif() if(NOT WITH_STATIC_LIB) add_custom_command(TARGET ${DEMO_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy "${PADDLE_LIB}/paddle/lib/paddle_inference.dll" ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE} ) endif() endif()
0
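The TensorRT version probe in the CMakeLists above greps NvInfer.h (falling back to NvInferVersion.h) for the NV_TENSORRT_MAJOR define; the same regex in Python, run on an inlined stand-in header line:

import re

header = '#define NV_TENSORRT_MAJOR 8  // stand-in header contents'
match = re.search(r'define NV_TENSORRT_MAJOR +([0-9]+)', header)
print(match.group(1) if match else 'version not detected')  # -> 8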
apollo_public_repos/apollo-model-centerpoint/deploy/smoke
apollo_public_repos/apollo-model-centerpoint/deploy/smoke/cpp/compile.sh
#!/bin/bash set +x set -e work_path=$(dirname $(readlink -f $0)) # 1. check paddle_inference exists if [ ! -d "${work_path}/lib/paddle_inference" ]; then echo "Please download paddle_inference lib and move it in ${work_path}/lib" exit 1 fi # 2. check CMakeLists exists if [ ! -f "${work_path}/CMakeLists.txt" ]; then cp -a "${work_path}/lib/CMakeLists.txt" "${work_path}/" fi # 3. compile mkdir -p build cd build rm -rf * DEMO_NAME=infer WITH_MKL=ON WITH_GPU=ON USE_TENSORRT=OFF LIB_DIR=${work_path}/lib/paddle_inference CUDNN_LIB=/usr/lib/x86_64-linux-gnu/ CUDA_LIB=/usr/local/cuda/lib64 TENSORRT_ROOT=/usr/local/TensorRT-6.0.1.5 WITH_ROCM=OFF ROCM_LIB=/opt/rocm/lib cmake .. -DPADDLE_LIB=${LIB_DIR} \ -DWITH_MKL=${WITH_MKL} \ -DDEMO_NAME=${DEMO_NAME} \ -DWITH_GPU=${WITH_GPU} \ -DWITH_STATIC_LIB=OFF \ -DUSE_TENSORRT=${USE_TENSORRT} \ -DWITH_ROCM=${WITH_ROCM} \ -DROCM_LIB=${ROCM_LIB} \ -DCUDNN_LIB=${CUDNN_LIB} \ -DCUDA_LIB=${CUDA_LIB} \ -DTENSORRT_ROOT=${TENSORRT_ROOT} make -j
0
apollo_public_repos/apollo-model-centerpoint/deploy/smoke
apollo_public_repos/apollo-model-centerpoint/deploy/smoke/cpp/infer.cpp
#include <gflags/gflags.h> #include <glog/logging.h> #include <iostream> #include <numeric> #include "opencv2/core.hpp" #include "opencv2/highgui.hpp" #include "opencv2/imgproc.hpp" #include "paddle/include/paddle_inference_api.h" using paddle_infer::Config; using paddle_infer::CreatePredictor; using paddle_infer::PrecisionType; using paddle_infer::Predictor; DEFINE_string(model_file, "", "Directory of the inference model."); DEFINE_string(params_file, "", "Directory of the inference model."); DEFINE_string(image, "", "Image path to be predicted."); DEFINE_bool(use_gpu, false, "Whether to use gpu."); DEFINE_bool(use_trt, false, "Whether to use trt."); DEFINE_int32(trt_precision, 0, "Precision type of tensorrt, 0: kFloat32, 1: kInt8, 2: kHalf"); DEFINE_bool(collect_dynamic_shape_info, false, "Whether to collect dynamic shape before using tensorrt"); DEFINE_string(dynamic_shape_file, "dynamic_shape_info.txt", "Path of a dynamic shape file for tensorrt"); void get_image(const std::string &image, float *data) { cv::Mat img = cv::imread(image, cv::IMREAD_COLOR); cv::cvtColor(img, img, cv::COLOR_BGR2RGB); cv::resize(img, img, cv::Size(1280, 384)); // Normalize img.convertTo(img, CV_32F, 1.0 / 255, 0); std::vector<float> mean_values{0.485, 0.456, 0.406}; std::vector<float> std_values{0.229, 0.224, 0.225}; std::vector<cv::Mat> rgbChannels(3); cv::split(img, rgbChannels); for (int i = 0; i < 3; ++i) { rgbChannels[i].convertTo(rgbChannels[i], CV_32FC1, 1 / std_values[i], (0.0 - mean_values[i]) / std_values[i]); } cv::merge(rgbChannels, img); // from hwc to chw int rows = img.rows; int cols = img.cols; int chs = img.channels(); for (int i = 0; i < chs; ++i) { cv::extractChannel( img, cv::Mat(rows, cols, CV_32FC1, data + i * rows * cols), i); } } std::shared_ptr<Predictor> InitPredictor() { Config config; config.SetModel(FLAGS_model_file, FLAGS_params_file); if (FLAGS_use_gpu) { config.EnableUseGpu(1000, 0); } else { config.EnableMKLDNN(); } if (FLAGS_collect_dynamic_shape_info) { config.CollectShapeRangeInfo(FLAGS_dynamic_shape_file); } else if (FLAGS_use_trt) { config.EnableTensorRtEngine( 1 << 30, 1, 5, PrecisionType(FLAGS_trt_precision), false, false); config.EnableTunedTensorRtDynamicShape(FLAGS_dynamic_shape_file, true); } config.SwitchIrOptim(true); config.EnableMemoryOptim(); return CreatePredictor(config); } void run(Predictor *predictor, const std::vector<float> &input_im_data, const std::vector<int> &input_im_shape, const std::vector<float> &input_K_data, const std::vector<int> &input_K_shape, const std::vector<float> &input_ratio_data, const std::vector<int> &input_ratio_shape, std::vector<float> &out_data) { int input_num = std::accumulate(input_im_shape.begin(), input_im_shape.end(), 1, std::multiplies<int>()); auto input_names = predictor->GetInputNames(); auto output_names = predictor->GetOutputNames(); auto input_im_handle = predictor->GetInputHandle(input_names[0]); input_im_handle->Reshape(input_im_shape); input_im_handle->CopyFromCpu(input_im_data.data()); auto input_K_handle = predictor->GetInputHandle(input_names[1]); input_K_handle->Reshape(input_K_shape); input_K_handle->CopyFromCpu(input_K_data.data()); auto input_ratio_handle = predictor->GetInputHandle(input_names[2]); input_ratio_handle->Reshape(input_ratio_shape); input_ratio_handle->CopyFromCpu(input_ratio_data.data()); CHECK(predictor->Run()); auto output_t = predictor->GetOutputHandle(output_names[0]); auto outshape = output_t->shape(); int outsize = std::accumulate(outshape.begin(), outshape.end(), 1, 
std::multiplies<int>()); out_data.resize(outsize); output_t->CopyToCpu(out_data.data()); } int main(int argc, char *argv[]) { google::ParseCommandLineFlags(&argc, &argv, true); if (FLAGS_model_file == "" || FLAGS_params_file == "" || FLAGS_image == "") { std::cout << "Missing required parameter" << std::endl; std::cout << "Usage: " << std::string(argv[0]) << " --model_file ${MODEL_FILE}" << " --params_file ${PARAMS_FILE}" << " --image ${TEST_IMAGE}" << std::endl; return -1; } auto predictor = InitPredictor(); std::vector<int> input_im_shape = {1, 3, 384, 1280}; std::vector<float> input_im_data(1 * 3 * 384 * 1280); get_image(FLAGS_image, input_im_data.data()); std::vector<int> input_K_shape = {1, 3, 3}; // Listed below are the camera intrinsic parameters of the KITTI dataset // If the model is trained on other datasets, please replace the relevant data std::vector<float> input_K_data = { 721.53771973, 0., 609.55932617, 0., 721.53771973, 172.85400391, 0, 0, 1}; std::vector<int> input_ratio_shape = {1, 2}; std::vector<float> input_ratio_data(2, 4); // two ratio values (w, h), both 4 std::vector<float> out_data; run(predictor.get(), input_im_data, input_im_shape, input_K_data, input_K_shape, input_ratio_data, input_ratio_shape, out_data); std::vector<std::vector<float>> results; for (size_t i = 0; i < out_data.size(); i += 14) { // item 1 : class // item 2 : observation angle α // item 3 ~ 6 : box2d x1, y1, x2, y2 // item 7 ~ 9 : box3d h, w, l // item 10 ~ 12 : box3d bottom center x, y, z // item 13 : box3d yaw angle // item 14 : score std::vector<float> vec(out_data.begin() + i, out_data.begin() + i + 14); results.push_back(vec); } for (const auto &res : results) { // Filter predictions with low scores if (res[13] <= 0.25) continue; for (const auto &item : res) { std::cout << item << " "; } std::cout << std::endl; } }
0
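The 14-float record that infer.cpp slices out of out_data, decoded as a namedtuple sketch in Python; the field names follow the comments in the C++ above, and the row values are made up for illustration.

from collections import namedtuple

Pred = namedtuple('Pred', [
    'cls', 'alpha', 'x1', 'y1', 'x2', 'y2',  # class, observation angle, 2D box
    'h', 'w', 'l', 'x', 'y', 'z', 'yaw',     # 3D size, bottom center, heading
    'score'])

row = [0, 0.1, 100, 120, 220, 260, 1.5, 1.6, 3.9, 2.0, 1.0, 15.0, 0.2, 0.8]
p = Pred(*row)
if p.score > 0.25:  # same threshold the C++ demo applies
    print(p.cls, p.score)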
apollo_public_repos/apollo-model-centerpoint/deploy/petr
apollo_public_repos/apollo-model-centerpoint/deploy/petr/python/infer.py
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import cv2 import numpy as np import paddle from paddle import inference def parse_args(): parser = argparse.ArgumentParser() parser.add_argument( "--model_file", type=str, help="Model filename, Specify this when your model is a combined model.", required=True) parser.add_argument( "--params_file", type=str, help= "Parameter filename, Specify this when your model is a combined model.", required=True) parser.add_argument( '--img_paths', type=str, nargs='+', help='The image paths.', required=True) parser.add_argument("--gpu_id", type=int, default=0, help="GPU card id.") parser.add_argument( "--use_trt", action='store_true', help="Whether to use tensorrt to accelerate when using gpu.") parser.add_argument( "--trt_precision", type=int, default=0, help="Precision type of tensorrt, 0: kFloat32, 1: kHalf.") parser.add_argument( "--trt_use_static", action='store_true', help="Whether to load the tensorrt graph optimization from a disk path." ) parser.add_argument( "--trt_static_dir", type=str, help="Path of a tensorrt graph optimization directory.") parser.add_argument( "--collect_shape_info", action='store_true', help="Whether to collect dynamic shape before using tensorrt.") parser.add_argument( "--dynamic_shape_file", type=str, default="petr_shape_info.txt", help="Path of a dynamic shape file for tensorrt.") parser.add_argument( "--with_timestamp", action='store_true', help="Whether to timestamp(for petrv2).") return parser.parse_args() def load_predictor(model_file, params_file, gpu_id=0, use_trt=False, trt_precision=0, trt_use_static=False, trt_static_dir=None, collect_shape_info=False, dynamic_shape_file=None): """load_predictor initialize the inference engine """ config = inference.Config(model_file, params_file) config.enable_use_gpu(1000, gpu_id) # enable memory optim config.enable_memory_optim() # config.disable_glog_info() config.switch_use_feed_fetch_ops(False) config.switch_ir_optim(True) # create predictor if use_trt: precision_mode = paddle.inference.PrecisionType.Float32 if trt_precision == 1: precision_mode = paddle.inference.PrecisionType.Half config.enable_tensorrt_engine( workspace_size=1 << 20, max_batch_size=1, min_subgraph_size=30, precision_mode=precision_mode, use_static=trt_use_static, use_calib_mode=False) print('collect_shape_info', collect_shape_info) if collect_shape_info: config.collect_shape_range_info(dynamic_shape_file) else: config.enable_tuned_tensorrt_dynamic_shape(dynamic_shape_file, True) if trt_use_static: config.set_optim_cache_dir(trt_static_dir) predictor = inference.create_predictor(config) return predictor def imnormalize(img, mean, std, to_rgb=True): """normalize an image with mean and std. 
""" # cv2 inplace normalization does not accept uint8 img = img.copy().astype(np.float32) mean = np.float64(mean.reshape(1, -1)) stdinv = 1 / np.float64(std.reshape(1, -1)) if to_rgb: cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img) # inplace cv2.subtract(img, mean, img) # inplace cv2.multiply(img, stdinv, img) # inplace return img def get_resize_crop_shape(img_shape, target_shape): H, W = img_shape fH, fW = target_shape resize = max(fH / H, fW / W) resize_shape = (int(W * resize), int(H * resize)) newW, newH = resize_shape crop_h = int(newH) - fH crop_w = int(max(0, newW - fW) / 2) crop_shape = (crop_h, crop_w, crop_h + fH, crop_w + fW) return resize_shape, crop_shape def get_image(filenames): """ Loads image for a sample Args: idx [int]: Index of the image sample Returns: image [np.ndarray(H, W, 3)]: RGB Image """ img = np.stack([cv2.imread(name) for name in filenames], axis=-1) imgs = [img[..., i] for i in range(img.shape[-1])] new_imgs = [] target_shape = (320, 800) for i in range(len(imgs)): img_shape = imgs[i].shape[:2] resize_shape, crop_shape = get_resize_crop_shape( img_shape, target_shape) img = cv2.resize(imgs[i], resize_shape, cv2.INTER_LINEAR) img = img[crop_shape[0]:crop_shape[2], crop_shape[1]:crop_shape[3], :] new_imgs.append(np.array(img).astype(np.float32)) mean = np.array([103.530, 116.280, 123.675], dtype=np.float32) std = np.array([57.375, 57.120, 58.395], dtype=np.float32) new_imgs = [imnormalize(img, mean, std, False) for img in new_imgs] return np.array(new_imgs).transpose([0, 3, 1, 2])[np.newaxis, ...] def run(predictor, img, with_timestamp): input_names = predictor.get_input_names() input_tensor0 = predictor.get_input_handle(input_names[0]) input_tensor1 = predictor.get_input_handle(input_names[1]) num_cams = 6 if with_timestamp: input_tensor2 = predictor.get_input_handle(input_names[2]) num_cams = 12 img2lidars = [ -1.40307297e-03, 9.07780395e-06, 4.84838307e-01, -5.43047376e-02, -1.40780103e-04, 1.25770375e-05, 1.04126692e+00, 7.67668605e-01, -1.02884378e-05, -1.41007011e-03, 1.02823459e-01, -3.07415128e-01, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, -9.39000631e-04, -7.65239349e-07, 1.14073277e+00, 4.46270645e-01, 1.04998052e-03, 1.91798881e-05, 2.06218868e-01, 7.42717385e-01, 1.48074005e-05, -1.40855671e-03, 7.45946690e-02, -3.16081315e-01, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, -7.0699735e-04, 4.2389297e-07, -5.5183989e-01, -5.3276348e-01, -1.2281288e-03, 2.5626015e-05, 1.0212017e+00, 6.1102939e-01, -2.2421273e-05, -1.4170362e-03, 9.3639769e-02, -3.0863306e-01, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 1.0000000e+00, 2.2227580e-03, 2.5312484e-06, -9.7261822e-01, 9.0684637e-02, 1.9360810e-04, 2.1347081e-05, -1.0779887e+00, -7.9227984e-01, 4.3742721e-06, -2.2310747e-03, 1.0842450e-01, -2.9406491e-01, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 1.0000000e+00, 5.97175560e-04, -5.88774265e-06, -1.15893924e+00, -4.49921310e-01, -1.28312141e-03, 3.58297058e-07, 1.48300052e-01, 1.14334166e-01, -2.80917516e-06, -1.41527120e-03, 8.37693438e-02, -2.36765608e-01, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, 3.6048229e-04, 3.8333174e-06, 7.9871160e-01, 4.3321830e-01, 1.3671946e-03, 6.7484652e-06, -8.4722507e-01, 1.9411178e-01, 7.5027779e-06, -1.4139183e-03, 8.2083985e-02, -2.4505949e-01, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 1.0000000e+00 ] if with_timestamp: img2lidars += img2lidars img2lidars = np.array(img2lidars).reshape([num_cams, 4, 4]).astype('float32') input_tensor0.reshape([1, num_cams, 3, 320, 
800]) input_tensor0.copy_from_cpu(img) input_tensor1.reshape([num_cams, 4, 4]) input_tensor1.copy_from_cpu(img2lidars) if with_timestamp: timestamp = np.zeros([num_cams]).astype('float32') timestamp[num_cams // 2:] = 1.0 input_tensor2.reshape([1, num_cams]) input_tensor2.copy_from_cpu(timestamp) predictor.run() outs = [] output_names = predictor.get_output_names() for name in output_names: out = predictor.get_output_handle(name) out = out.copy_to_cpu() out = paddle.to_tensor(out) outs.append(out) result = {} result['pred_boxes'] = outs[0] result['pred_scores'] = outs[1] result['pred_labels'] = outs[2] return result def main(args): predictor = load_predictor(args.model_file, args.params_file, args.gpu_id, args.use_trt, args.trt_precision, args.trt_use_static, args.trt_static_dir, args.collect_shape_info, args.dynamic_shape_file) image = get_image(args.img_paths) result = run(predictor, image, args.with_timestamp) for k, v in result.items(): print(k, v.shape, v.dtype) print(result['pred_boxes']) if __name__ == '__main__': args = parse_args() main(args)
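The resize/crop geometry above is worth sanity-checking against concrete numbers. A small sketch, assuming it runs in the same module as the script above and that the input is a nuScenes-sized 900x1600 image (the same numbers the C++ demo below hardcodes):

# Sanity check of get_resize_crop_shape for a 900x1600 input.
img_shape = (900, 1600)    # H, W
target_shape = (320, 800)  # fH, fW

resize_shape, crop_shape = get_resize_crop_shape(img_shape, target_shape)
print(resize_shape)  # (800, 450): scale by max(320/900, 800/1600) = 0.5
print(crop_shape)    # (130, 0, 450, 800): keep the bottom 320 rows

This matches the C++ pipeline, which resizes to 800x450 and then crops rows 130 to 450.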
0
apollo_public_repos/apollo-model-centerpoint/deploy/petr
apollo_public_repos/apollo-model-centerpoint/deploy/petr/cpp/main.cc
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <gflags/gflags.h> #include <glog/logging.h> #include <time.h> #include <chrono> #include <iostream> #include <numeric> #include <opencv2/opencv.hpp> #include "paddle/include/paddle_inference_api.h" DEFINE_string(model_file, "", "Path of a inference model"); DEFINE_string(params_file, "", "Path of a inference params"); DEFINE_string( image_files, "", "list Path of a image file to be predicted, which split by comma"); DEFINE_int32(gpu_id, 0, "GPU card id"); DEFINE_bool(use_trt, false, "Whether to use tensorrt to accelerate when using gpu"); DEFINE_int32(trt_precision, 0, "Precision type of tensorrt, 0: kFloat32, 1: kHalf"); DEFINE_bool(trt_use_static, false, "Whether to load the tensorrt graph optimization from a disk path"); DEFINE_string(trt_static_dir, "", "Path of a tensorrt graph optimization directory"); DEFINE_bool(collect_shape_info, false, "Whether to collect dynamic shape before using tensorrt"); DEFINE_string(dynamic_shape_file, "petr_shape_info.txt", "Path of a dynamic shape file for tensorrt"); DEFINE_bool(with_timestamp, false, "Whether model with timestamp input(for petrv2)"); using paddle_infer::Config; using paddle_infer::CreatePredictor; using paddle_infer::Predictor; paddle_infer::PrecisionType GetPrecisionType(const std::string &ptype) { if (ptype == "trt_fp32") return paddle_infer::PrecisionType::kFloat32; if (ptype == "trt_fp16") return paddle_infer::PrecisionType::kHalf; return paddle_infer::PrecisionType::kFloat32; } std::shared_ptr<paddle_infer::Predictor> create_predictor( const std::string &model_path, const std::string &params_path, const int gpu_id, const int use_trt, const int trt_precision, const int trt_use_static, const std::string trt_static_dir, const int collect_shape_info, const std::string dynamic_shape_file) { paddle::AnalysisConfig config; config.EnableUseGpu(1000, gpu_id); config.SetModel(model_path, params_path); config.EnableMemoryOptim(); if (use_trt) { paddle::AnalysisConfig::Precision precision; if (trt_precision == 0) { precision = paddle_infer::PrecisionType::kFloat32; } else if (trt_precision == 1) { precision = paddle_infer::PrecisionType::kHalf; } else { LOG(ERROR) << "Tensorrt type can only support 0 or 1, but recieved is" << trt_precision << "\n"; return nullptr; } config.EnableTensorRtEngine(1 << 30, 1, 12, precision, trt_use_static, false); if (dynamic_shape_file == "") { LOG(ERROR) << "dynamic_shape_file should be set, but recieved is " << dynamic_shape_file << "\n"; return nullptr; } if (collect_shape_info) { config.CollectShapeRangeInfo(dynamic_shape_file); } else { config.EnableTunedTensorRtDynamicShape(dynamic_shape_file, true); } if (trt_use_static) { if (trt_static_dir == "") { LOG(ERROR) << "trt_static_dir should be set, but recieved is " << trt_static_dir << "\n"; return nullptr; } config.SetOptimCacheDir(trt_static_dir); } } config.SwitchIrOptim(true); return paddle_infer::CreatePredictor(config); } void normalize(cv::Mat *im, const 
std::vector<float> &mean, const std::vector<float> &std, float &scale) { if (scale) { (*im).convertTo(*im, CV_32FC3, scale); } for (int h = 0; h < im->rows; h++) { for (int w = 0; w < im->cols; w++) { im->at<cv::Vec3f>(h, w)[0] = (im->at<cv::Vec3f>(h, w)[0] - mean[0]) / std[0]; im->at<cv::Vec3f>(h, w)[1] = (im->at<cv::Vec3f>(h, w)[1] - mean[1]) / std[1]; im->at<cv::Vec3f>(h, w)[2] = (im->at<cv::Vec3f>(h, w)[2] - mean[2]) / std[2]; } } } void mat_to_vec(const cv::Mat *im, float *data) { int rh = im->rows; int rw = im->cols; int rc = im->channels(); for (int i = 0; i < rc; ++i) { cv::extractChannel(*im, cv::Mat(rh, rw, CV_32FC1, data + i * rh * rw), i); } } void run(Predictor *predictor, const std::vector<int> &images_shape, const std::vector<float> &images_data, const std::vector<int> &k_shape, const std::vector<float> &k_data, const std::vector<int> &timestamp_shape, const std::vector<float> &timestamp_data, std::vector<float> *boxes, std::vector<float> *scores, std::vector<int64_t> *labels, const bool with_timestamp) { auto input_names = predictor->GetInputNames(); auto in_tensor0 = predictor->GetInputHandle(input_names[0]); in_tensor0->Reshape(images_shape); in_tensor0->CopyFromCpu(images_data.data()); auto in_tensor1 = predictor->GetInputHandle(input_names[1]); in_tensor1->Reshape(k_shape); in_tensor1->CopyFromCpu(k_data.data()); if (with_timestamp) { auto in_tensor2 = predictor->GetInputHandle(input_names[2]); in_tensor2->Reshape(timestamp_shape); in_tensor2->CopyFromCpu(timestamp_data.data()); } for (int i = 0; i < 1; i++) { auto start_time = std::chrono::steady_clock::now(); CHECK(predictor->Run()); std::cout << "finish run!!!!" << std::endl; auto output_names = predictor->GetOutputNames(); for (size_t i = 0; i != output_names.size(); i++) { auto output = predictor->GetOutputHandle(output_names[i]); std::vector<int> output_shape = output->shape(); int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1, std::multiplies<int>()); if (i == 0) { std::cout << "get bbox out size: " << out_num << std::endl; boxes->resize(out_num); output->CopyToCpu(boxes->data()); } else if (i == 1) { std::cout << "get scores out size: " << out_num << std::endl; scores->resize(out_num); output->CopyToCpu(scores->data()); } else if (i == 2) { std::cout << "get labels out size: " << out_num << std::endl; labels->resize(out_num); output->CopyToCpu(labels->data()); std::cout << "finish get labels out size: " << out_num << std::endl; } } // std::cout << "get out: " << i << std::endl; auto end_time = std::chrono::steady_clock::now(); auto tt = std::chrono::duration_cast<std::chrono::nanoseconds>(end_time - start_time) .count() / 1000000.0; LOG(INFO) << "time per file: " << tt << "(ms).\n"; } } void resize(const cv::Mat &img, cv::Mat &resize_img, int resized_h, int resized_w) { cv::resize(img, resize_img, cv::Size(resized_h, resized_w), 0, 0, cv::INTER_LINEAR); } std::vector<std::string> split_by_comma(std::string image_files) { std::vector<std::string> vecs; std::stringstream image_files_ss(image_files); while (image_files_ss.good()) { std::string substr; getline(image_files_ss, substr, ','); vecs.push_back(substr); } return vecs; } int main(int argc, char *argv[]) { google::ParseCommandLineFlags(&argc, &argv, true); if (FLAGS_model_file == "" || FLAGS_params_file == "" || FLAGS_image_files == "") { LOG(INFO) << "Missing required parameter" << "\n"; LOG(INFO) << "Usage: " << std::string(argv[0]) << " --model_file ${MODEL_FILE} " << "--params_file ${PARAMS_FILE} " << "--image_files 
${IMAGE_FILES}" << "\n"; return -1; } auto predictor = create_predictor( FLAGS_model_file, FLAGS_params_file, FLAGS_gpu_id, FLAGS_use_trt, FLAGS_trt_precision, FLAGS_trt_use_static, FLAGS_trt_static_dir, FLAGS_collect_shape_info, FLAGS_dynamic_shape_file); if (predictor == nullptr) { return 0; } int num_cams = 6; if (FLAGS_with_timestamp) { num_cams = 12; } std::vector<float> input_data(num_cams * 3 * 320 * 800, 0.0f); std::vector<cv::Mat> imgs; auto filenames = split_by_comma(FLAGS_image_files); for (auto filename : filenames) { cv::Mat img = imread(filename, cv::IMREAD_COLOR); imgs.push_back(img); } std::cout << "imgs size: " << imgs.size() << std::endl; std::vector<cv::Mat> cropped_imgs; std::vector<float> mean{103.530, 116.280, 123.675}; std::vector<float> std{57.375, 57.120, 58.395}; float scale = 1.0f; for (auto img : imgs) { cv::Mat img_resized; resize(img, img_resized, 800, 450); auto crop_img = img_resized(cv::Range(130, 450), cv::Range(0, 800)); normalize(&crop_img, mean, std, scale); cropped_imgs.push_back(crop_img); } for (int i = 0; i < num_cams; i++) { mat_to_vec(&cropped_imgs[i], input_data.data() + i * (3 * 320 * 800)); } std::vector<int> images_shape = {1, num_cams, 3, 320, 800}; std::vector<int> k_shape = {1, num_cams, 4, 4}; std::vector<int> timestamp_shape = {1, num_cams}; /* clang-format off */ std::vector<float> k_data{ -1.40307297e-03, 9.07780395e-06, 4.84838307e-01, -5.43047376e-02, -1.40780103e-04, 1.25770375e-05, 1.04126692e+00, 7.67668605e-01, -1.02884378e-05, -1.41007011e-03, 1.02823459e-01, -3.07415128e-01, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, -9.39000631e-04, -7.65239349e-07, 1.14073277e+00, 4.46270645e-01, 1.04998052e-03, 1.91798881e-05, 2.06218868e-01, 7.42717385e-01, 1.48074005e-05, -1.40855671e-03, 7.45946690e-02, -3.16081315e-01, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, -7.0699735e-04, 4.2389297e-07, -5.5183989e-01, -5.3276348e-01, -1.2281288e-03, 2.5626015e-05, 1.0212017e+00, 6.1102939e-01, -2.2421273e-05, -1.4170362e-03, 9.3639769e-02, -3.0863306e-01, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 1.0000000e+00, 2.2227580e-03, 2.5312484e-06, -9.7261822e-01, 9.0684637e-02, 1.9360810e-04, 2.1347081e-05, -1.0779887e+00, -7.9227984e-01, 4.3742721e-06, -2.2310747e-03, 1.0842450e-01, -2.9406491e-01, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 1.0000000e+00, 5.97175560e-04, -5.88774265e-06, -1.15893924e+00, -4.49921310e-01, -1.28312141e-03, 3.58297058e-07, 1.48300052e-01, 1.14334166e-01, -2.80917516e-06, -1.41527120e-03, 8.37693438e-02, -2.36765608e-01, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, 3.6048229e-04, 3.8333174e-06, 7.9871160e-01, 4.3321830e-01, 1.3671946e-03, 6.7484652e-06, -8.4722507e-01, 1.9411178e-01, 7.5027779e-06, -1.4139183e-03, 8.2083985e-02, -2.4505949e-01, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 1.0000000e+00 }; /* clang-format on */ if (FLAGS_with_timestamp) { for (int i = 0; i < num_cams / 2 * 4 * 4; ++i) { k_data.push_back(k_data[i]); } } std::vector<float> timestamp(num_cams, 0.0f); // petrv2 inference, this is a fake input, you need to input real timestamp // timestampe will only affect Velocity predict. 
  if (FLAGS_with_timestamp) {
    for (int i = num_cams / 2; i < num_cams; ++i) {
      timestamp[i] = 1.0f;
    }
  }

  std::vector<float> boxes;
  std::vector<int64_t> labels;
  std::vector<float> scores;
  run(predictor.get(), images_shape, input_data, k_shape, k_data,
      timestamp_shape, timestamp, &boxes, &scores, &labels,
      FLAGS_with_timestamp);

  // boxes: 9 values per prediction
  std::cout << "boxes" << "\n";
  for (auto e : boxes) {
    LOG(INFO) << e;
  }
  // labels: 1 value per prediction
  std::cout << "labels" << "\n";
  for (auto e : labels) {
    LOG(INFO) << e;
  }
  // scores: 1 value per prediction
  std::cout << "scores" << "\n";
  for (auto e : scores) {
    LOG(INFO) << e;
  }
  return 0;
}
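Since the three outputs come back as flat vectors, here is a hedged Python sketch of how the per-box records line up. The variable names are placeholders for arrays holding the copied-out tensors; the exact ordering of the 9 box values follows the PETR head and is not asserted here.

import numpy as np

# Placeholders assumed to hold the three predictor outputs.
boxes = np.asarray(boxes_out, dtype=np.float32).reshape(-1, 9)
labels = np.asarray(labels_out, dtype=np.int64)
scores = np.asarray(scores_out, dtype=np.float32)

# One 9-float box, one label, and one score per prediction.
for box, label, score in zip(boxes, labels, scores):
    print(label, score, box)  # box: 9 values (center, size, yaw, velocity)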
0
apollo_public_repos/apollo-model-centerpoint/deploy/petr
apollo_public_repos/apollo-model-centerpoint/deploy/petr/cpp/CMakeLists.txt
cmake_minimum_required(VERSION 3.0) project(cpp_inference_demo CXX C) option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." ON) option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." ON) option(USE_TENSORRT "Compile demo with TensorRT." ON) option(CUSTOM_OPERATOR_FILES "List of file names for custom operators" "") execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpfullversion -dumpversion OUTPUT_VARIABLE GCC_VERSION) string(REGEX MATCHALL "[0-9]+" GCC_VERSION_COMPONENTS ${GCC_VERSION}) list(GET GCC_VERSION_COMPONENTS 0 GCC_MAJOR) list(GET GCC_VERSION_COMPONENTS 1 GCC_MINOR) set(GCC_VERSION "${GCC_MAJOR}.${GCC_MINOR}") if (GCC_VERSION LESS "8.0") set(CMAKE_CXX_FLAGS "-Wl,--no-as-needed") endif() set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake") include(external/boost) SET(OPENCV_DIR "" CACHE PATH "Location of libraries") find_package(OpenCV REQUIRED PATHS ${OPENCV_DIR} NO_DEFAULT_PATH) # find_package(OpenCV REQUIRED PATHS ${OPENCV_DIR}/share/OpenCV NO_DEFAULT_PATH) MESSAGE(opencvdddir ${OpenCV_INCLUDE_DIRS}) include_directories(${OpenCV_INCLUDE_DIRS}) if(WITH_GPU) find_package(CUDA REQUIRED) add_definitions("-DPADDLE_WITH_CUDA") endif() if(NOT WITH_STATIC_LIB) add_definitions("-DPADDLE_WITH_SHARED_LIB") else() # PD_INFER_DECL is mainly used to set the dllimport/dllexport attribute in dynamic library mode. # Set it to empty in static library mode to avoid compilation issues. add_definitions("/DPD_INFER_DECL=") endif() macro(safe_set_static_flag) foreach(flag_var CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO) if(${flag_var} MATCHES "/MD") string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}") endif(${flag_var} MATCHES "/MD") endforeach(flag_var) endmacro() if(NOT DEFINED PADDLE_LIB) message(FATAL_ERROR "please set PADDLE_LIB with -DPADDLE_LIB=/path/paddle/lib") endif() if(NOT DEFINED DEMO_NAME) message(FATAL_ERROR "please set DEMO_NAME with -DDEMO_NAME=demo_name") endif() include_directories("${PADDLE_LIB}/") set(PADDLE_LIB_THIRD_PARTY_PATH "${PADDLE_LIB}/third_party/install/") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}protobuf/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}glog/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}gflags/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}xxhash/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}onnxruntime/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}paddle2onnx/include") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}protobuf/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}glog/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}gflags/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}xxhash/lib") link_directories("${PADDLE_LIB}/paddle/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}onnxruntime/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}paddle2onnx/lib") if (WIN32) add_definitions("/DGOOGLE_GLOG_DLL_DECL=") option(MSVC_STATIC_CRT "use static C Runtime library by default" ON) if (MSVC_STATIC_CRT) if (WITH_MKL) set(FLAG_OPENMP "/openmp") endif() set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /bigobj /MTd ${FLAG_OPENMP}") set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /bigobj /MT ${FLAG_OPENMP}") set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /bigobj /MTd ${FLAG_OPENMP}") set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /bigobj /MT ${FLAG_OPENMP}") safe_set_static_flag() if (WITH_STATIC_LIB) 
add_definitions(-DSTATIC_LIB) endif() endif() else() if(WITH_MKL) set(FLAG_OPENMP "-fopenmp") endif() set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14 ${FLAG_OPENMP}") endif() if(WITH_GPU) if(NOT WIN32) set(CUDA_LIB "/usr/local/cuda/lib64/" CACHE STRING "CUDA Library") else() if(CUDA_LIB STREQUAL "") set(CUDA_LIB "C:\\Program\ Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v8.0\\lib\\x64") endif() endif(NOT WIN32) endif() if (USE_TENSORRT AND WITH_GPU) set(TENSORRT_ROOT "" CACHE STRING "The root directory of TensorRT library") if("${TENSORRT_ROOT}" STREQUAL "") message(FATAL_ERROR "The TENSORRT_ROOT is empty, you must assign it a value with CMake command. Such as: -DTENSORRT_ROOT=TENSORRT_ROOT_PATH ") endif() set(TENSORRT_INCLUDE_DIR ${TENSORRT_ROOT}/include) set(TENSORRT_LIB_DIR ${TENSORRT_ROOT}/lib) endif() if (NOT WIN32) if (USE_TENSORRT AND WITH_GPU) include_directories("${TENSORRT_INCLUDE_DIR}") link_directories("${TENSORRT_LIB_DIR}") endif() endif(NOT WIN32) if(WITH_MKL) set(MATH_LIB_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}mklml") include_directories("${MATH_LIB_PATH}/include") if(WIN32) set(MATH_LIB ${MATH_LIB_PATH}/lib/mklml${CMAKE_STATIC_LIBRARY_SUFFIX} ${MATH_LIB_PATH}/lib/libiomp5md${CMAKE_STATIC_LIBRARY_SUFFIX}) else() set(MATH_LIB ${MATH_LIB_PATH}/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX} ${MATH_LIB_PATH}/lib/libiomp5${CMAKE_SHARED_LIBRARY_SUFFIX}) endif() set(MKLDNN_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}mkldnn") if(EXISTS ${MKLDNN_PATH}) include_directories("${MKLDNN_PATH}/include") if(WIN32) set(MKLDNN_LIB ${MKLDNN_PATH}/lib/mkldnn.lib) else(WIN32) set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.0) endif(WIN32) endif() else() set(OPENBLAS_LIB_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}openblas") include_directories("${OPENBLAS_LIB_PATH}/include/openblas") if(WIN32) set(MATH_LIB ${OPENBLAS_LIB_PATH}/lib/openblas${CMAKE_STATIC_LIBRARY_SUFFIX}) else() set(MATH_LIB ${OPENBLAS_LIB_PATH}/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX}) endif() endif() if(WITH_STATIC_LIB) set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX}) else() if(WIN32) set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX}) else() set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_SHARED_LIBRARY_SUFFIX}) endif() endif() if (NOT WIN32) if (GCC_VERSION LESS "8.0") set(EXTERNAL_LIB ${EXTERNAL_LIB} "-lssl -lcrypto -lz -lleveldb -lsnappy") endif() set(EXTERNAL_LIB ${EXTERNAL_LIB} "-lrt -ldl -lpthread") set(DEPS ${DEPS} ${MATH_LIB} ${MKLDNN_LIB} glog gflags protobuf xxhash ${EXTERNAL_LIB}) else() set(DEPS ${DEPS} ${MATH_LIB} ${MKLDNN_LIB} glog gflags_static libprotobuf xxhash ${EXTERNAL_LIB}) set(DEPS ${DEPS} shlwapi.lib) endif(NOT WIN32) if(WITH_GPU) if(NOT WIN32) if (USE_TENSORRT) set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer${CMAKE_SHARED_LIBRARY_SUFFIX}) set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX}) endif() set(DEPS ${DEPS} ${CUDA_LIB}/libcudart${CMAKE_SHARED_LIBRARY_SUFFIX}) else() if(USE_TENSORRT) set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/nvinfer${CMAKE_STATIC_LIBRARY_SUFFIX}) set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_STATIC_LIBRARY_SUFFIX}) endif() set(DEPS ${DEPS} ${CUDA_LIB}/cudart${CMAKE_STATIC_LIBRARY_SUFFIX} ) set(DEPS ${DEPS} ${CUDA_LIB}/cublas${CMAKE_STATIC_LIBRARY_SUFFIX} ) set(DEPS ${DEPS} ${CUDA_LIB}/cudnn${CMAKE_STATIC_LIBRARY_SUFFIX} ) endif() endif() # cuda_add_library(pd_infer_custom_op ${CUSTOM_OPERATOR_FILES} SHARED) add_executable(${DEMO_NAME} ${DEMO_NAME}.cc) if (GCC_VERSION 
GREATER_EQUAL "8.0") set(DEPS ${DEPS} libssl.a libcrypto.a libz.a libleveldb.a libsnappy.a) endif() # set(DEPS ${DEPS} boost ${OpenCV_LIBS} pd_infer_custom_op) set(DEPS ${DEPS} boost ${OpenCV_LIBS}) if(WIN32) if(USE_TENSORRT) add_custom_command(TARGET ${DEMO_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer${CMAKE_SHARED_LIBRARY_SUFFIX} ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE} COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX} ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE} ) endif() if(WITH_MKL) add_custom_command(TARGET ${DEMO_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/mklml.dll ${CMAKE_BINARY_DIR}/Release COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/libiomp5md.dll ${CMAKE_BINARY_DIR}/Release COMMAND ${CMAKE_COMMAND} -E copy ${MKLDNN_PATH}/lib/mkldnn.dll ${CMAKE_BINARY_DIR}/Release ) else() add_custom_command(TARGET ${DEMO_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy ${OPENBLAS_LIB_PATH}/lib/openblas.dll ${CMAKE_BINARY_DIR}/Release ) endif() if(NOT WITH_STATIC_LIB) add_custom_command(TARGET ${DEMO_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy "${PADDLE_LIB}/paddle/lib/paddle_fluid.dll" ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE} ) endif() endif() target_link_libraries(${DEMO_NAME} ${DEPS})
0
apollo_public_repos/apollo-model-centerpoint/deploy/petr
apollo_public_repos/apollo-model-centerpoint/deploy/petr/cpp/compile.sh
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

mkdir -p build
cd build
rm -rf *

DEMO_NAME=main
WITH_MKL=ON
WITH_GPU=ON
USE_TENSORRT=ON

# paddle inference dir
LIB_DIR=/workspace/codes/Paddle3D02/paddle_inference
OPENCV_DIR=/workspace/codes/opencv/build/
# OPENCV_DIR=/ssd1/wangna11/caddn_test/opencv-3.4.7/build/
CUDNN_LIB=/usr/local/x86_64-pc-linux-gnu/
CUDA_LIB=/usr/local/cuda/lib64
TENSORRT_ROOT=/workspace/trt/TensorRT-8.2.3.0
CUSTOM_OPERATOR_FILES=""

cmake .. -DPADDLE_LIB=${LIB_DIR} \
  -DOPENCV_DIR=${OPENCV_DIR} \
  -DWITH_MKL=${WITH_MKL} \
  -DDEMO_NAME=${DEMO_NAME} \
  -DWITH_GPU=${WITH_GPU} \
  -DWITH_STATIC_LIB=OFF \
  -DUSE_TENSORRT=${USE_TENSORRT} \
  -DCUDNN_LIB=${CUDNN_LIB} \
  -DCUDA_LIB=${CUDA_LIB} \
  -DTENSORRT_ROOT=${TENSORRT_ROOT} \
  -DCUSTOM_OPERATOR_FILES=${CUSTOM_OPERATOR_FILES}

make -j
0
apollo_public_repos/apollo-model-centerpoint/deploy/petr/cpp/cmake
apollo_public_repos/apollo-model-centerpoint/deploy/petr/cpp/cmake/external/boost.cmake
include(ExternalProject)

set(BOOST_PROJECT "extern_boost")
# To release PaddlePaddle as a pip package, we have to follow the
# manylinux1 standard, which features as old Linux kernels and
# compilers as possible and recommends CentOS 5. Indeed, the earliest
# CentOS version that works with NVIDIA CUDA is CentOS 6. And a new
# version of boost, say, 1.66.0, doesn't build on CentOS 6. We
# checked that the devtools package of CentOS 6 installs boost 1.41.0.
# So we use 1.41.0 here.
set(BOOST_VER "1.41.0")
set(BOOST_TAR "boost_1_41_0" CACHE STRING "" FORCE)
set(BOOST_URL "http://paddlepaddledeps.bj.bcebos.com/${BOOST_TAR}.tar.gz" CACHE STRING "" FORCE)

MESSAGE(STATUS "BOOST_TAR: ${BOOST_TAR}, BOOST_URL: ${BOOST_URL}")

set(BOOST_SOURCES_DIR ${THIRD_PARTY_PATH}/boost)
set(BOOST_DOWNLOAD_DIR "${BOOST_SOURCES_DIR}/src/${BOOST_PROJECT}")

set(BOOST_INCLUDE_DIR "${BOOST_DOWNLOAD_DIR}" CACHE PATH "boost include directory." FORCE)
set_directory_properties(PROPERTIES CLEAN_NO_CUSTOM 1)
include_directories(${BOOST_INCLUDE_DIR})

ExternalProject_Add(
    ${BOOST_PROJECT}
    ${EXTERNAL_PROJECT_LOG_ARGS}
    DOWNLOAD_DIR ${BOOST_DOWNLOAD_DIR}
    URL ${BOOST_URL}
    DOWNLOAD_NO_PROGRESS 1
    PREFIX ${BOOST_SOURCES_DIR}
    CONFIGURE_COMMAND ""
    BUILD_COMMAND ""
    INSTALL_COMMAND ""
    UPDATE_COMMAND ""
)

if (${CMAKE_VERSION} VERSION_LESS "3.3.0" OR NOT WIN32)
    set(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/boost_dummy.c)
    file(WRITE ${dummyfile} "const char *dummy = \"${dummyfile}\";")
    add_library(boost STATIC ${dummyfile})
else()
    add_library(boost INTERFACE)
endif()

add_dependencies(boost ${BOOST_PROJECT})
set(Boost_INCLUDE_DIR ${BOOST_INCLUDE_DIR})
0
apollo_public_repos/apollo-model-centerpoint/deploy/centerpoint
apollo_public_repos/apollo-model-centerpoint/deploy/centerpoint/python/infer.py
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import cv2 import numpy as np import paddle from paddle.inference import Config, create_predictor from paddle3d.ops.centerpoint_postprocess import centerpoint_postprocess from paddle3d.ops.voxelize import hard_voxelize def parse_args(): parser = argparse.ArgumentParser() parser.add_argument( "--model_file", type=str, help="Model filename, Specify this when your model is a combined model.", required=True) parser.add_argument( "--params_file", type=str, help= "Parameter filename, Specify this when your model is a combined model.", required=True) parser.add_argument( '--lidar_file', type=str, help='The lidar path.', required=True) parser.add_argument( "--num_point_dim", type=int, default=4, help="Dimension of a point in the lidar file.") parser.add_argument( "--use_timelag", type=int, default=0, help="Whether to insert timelag to each point.") parser.add_argument("--gpu_id", type=int, default=0, help="GPU card id.") parser.add_argument( "--use_trt", type=int, default=0, help="Whether to use tensorrt to accelerate when using gpu.") parser.add_argument( "--trt_precision", type=int, default=0, help="Precision type of tensorrt, 0: kFloat32, 1: kHalf., 2: Int8") parser.add_argument( "--trt_use_static", type=int, default=0, help="Whether to load the tensorrt graph optimization from a disk path." 
) parser.add_argument( "--trt_static_dir", type=str, help="Path of a tensorrt graph optimization directory.") parser.add_argument( "--collect_shape_info", type=int, default=0, help="Whether to collect dynamic shape before using tensorrt.") parser.add_argument( "--dynamic_shape_file", type=str, default="", help="Path of a dynamic shape file for tensorrt.") return parser.parse_args() def read_point(file_path, num_point_dim): points = np.fromfile(file_path, np.float32).reshape(-1, num_point_dim) points = points[:, :4] return points def insert_time_to_points(points): time_lag = np.zeros((points.shape[0], 1), dtype=points.dtype) points = np.hstack([points, time_lag]) return points def preprocess(file_path, num_point_dim, use_timelag): points = read_point(file_path, num_point_dim) if use_timelag: points = insert_time_to_points(points) return points def init_predictor(model_file, params_file, gpu_id=0, use_trt=False, trt_precision=0, trt_use_static=False, trt_static_dir=None, collect_shape_info=False, dynamic_shape_file=None): config = Config(model_file, params_file) config.enable_memory_optim() config.enable_use_gpu(1000, gpu_id) if use_trt: precision_mode = paddle.inference.PrecisionType.Float32 if trt_precision == 1: precision_mode = paddle.inference.PrecisionType.Half elif trt_precision == 2: precision_mode = paddle.inference.PrecisionType.Int8 config.enable_tensorrt_engine( workspace_size=1 << 30, max_batch_size=1, min_subgraph_size=3, precision_mode=precision_mode, use_static=trt_use_static, use_calib_mode=False) if collect_shape_info: config.collect_shape_range_info(dynamic_shape_file) else: config.enable_tuned_tensorrt_dynamic_shape(dynamic_shape_file, True) if trt_use_static: config.set_optim_cache_dir(trt_static_dir) predictor = create_predictor(config) return predictor def parse_result(box3d_lidar, label_preds, scores): num_bbox3d, bbox3d_dims = box3d_lidar.shape for box_idx in range(num_bbox3d): # filter fake results: score = -1 if scores[box_idx] < 0: continue if bbox3d_dims == 9: print( "Score: {} Label: {} Box(x_c, y_c, z_c, w, l, h, vec_x, vec_y, -rot): {} {} {} {} {} {} {} {} {}" .format(scores[box_idx], label_preds[box_idx], box3d_lidar[box_idx, 0], box3d_lidar[box_idx, 1], box3d_lidar[box_idx, 2], box3d_lidar[box_idx, 3], box3d_lidar[box_idx, 4], box3d_lidar[box_idx, 5], box3d_lidar[box_idx, 6], box3d_lidar[box_idx, 7], box3d_lidar[box_idx, 8])) elif bbox3d_dims == 7: print( "Score: {} Label: {} Box(x_c, y_c, z_c, w, l, h, -rot): {} {} {} {} {} {} {}" .format(scores[box_idx], label_preds[box_idx], box3d_lidar[box_idx, 0], box3d_lidar[box_idx, 1], box3d_lidar[box_idx, 2], box3d_lidar[box_idx, 3], box3d_lidar[box_idx, 4], box3d_lidar[box_idx, 5], box3d_lidar[box_idx, 6])) def run(predictor, points): # copy img data to input tensor input_names = predictor.get_input_names() for i, name in enumerate(input_names): if name == "data": input_tensor = predictor.get_input_handle(name) input_tensor.reshape(points.shape) input_tensor.copy_from_cpu(points.copy()) # do the inference predictor.run() results = [] # get out data from output tensor output_names = predictor.get_output_names() for i, name in enumerate(output_names): output_tensor = predictor.get_output_handle(name) if i == 0: box3d_lidar = output_tensor.copy_to_cpu() elif i == 1: label_preds = output_tensor.copy_to_cpu() elif i == 2: scores = output_tensor.copy_to_cpu() return box3d_lidar, label_preds, scores def main(args): predictor = init_predictor(args.model_file, args.params_file, args.gpu_id, args.use_trt, args.trt_precision, 
args.trt_use_static, args.trt_static_dir, args.collect_shape_info, args.dynamic_shape_file) points = preprocess(args.lidar_file, args.num_point_dim, args.use_timelag) box3d_lidar, label_preds, scores = run(predictor, points) parse_result(box3d_lidar, label_preds, scores) if __name__ == '__main__': args = parse_args() main(args)
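The point-cloud preprocessing above is simple enough to show standalone. A minimal sketch, assuming a KITTI-style float32 .bin file with 4 values (x, y, z, intensity) per point; the file name is a placeholder:

import numpy as np

# Load a KITTI-style point cloud (the path is a placeholder).
points = np.fromfile("example.bin", dtype=np.float32).reshape(-1, 4)

# With --use_timelag, a zero "time lag" column is appended so the model
# sees 5 features per point, matching insert_time_to_points above.
time_lag = np.zeros((points.shape[0], 1), dtype=points.dtype)
points_with_time = np.hstack([points, time_lag])
print(points.shape, points_with_time.shape)  # (N, 4) (N, 5)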
0
apollo_public_repos/apollo-model-centerpoint/deploy/centerpoint
apollo_public_repos/apollo-model-centerpoint/deploy/centerpoint/cpp/main.cc
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <gflags/gflags.h>
#include <glog/logging.h>

#include <chrono>
#include <cmath>
#include <fstream>
#include <iostream>
#include <numeric>
#include <string>

#include "paddle/include/paddle_inference_api.h"

using paddle_infer::Config;
using paddle_infer::CreatePredictor;
using paddle_infer::Predictor;

DEFINE_string(model_file, "", "Path of an inference model");
DEFINE_string(params_file, "", "Path of inference params");
DEFINE_string(lidar_file, "", "Path of a lidar file to be predicted");
DEFINE_int32(num_point_dim, 4, "Dimension of a point in the lidar file");
DEFINE_int32(with_timelag, 0,
             "Whether timelag is the 5-th dimension of each point feature, "
             "like: x, y, z, intensity, timelag");
DEFINE_int32(gpu_id, 0, "GPU card id");
DEFINE_int32(use_trt, 0,
             "Whether to use tensorrt to accelerate when using gpu");
DEFINE_int32(trt_precision, 0,
             "Precision type of tensorrt, 0: kFloat32, 1: kHalf");
DEFINE_int32(
    trt_use_static, 0,
    "Whether to load the tensorrt graph optimization from a disk path");
DEFINE_string(trt_static_dir, "",
              "Path of a tensorrt graph optimization directory");
DEFINE_int32(collect_shape_info, 0,
             "Whether to collect dynamic shape before using tensorrt");
DEFINE_string(dynamic_shape_file, "",
              "Path of a dynamic shape file for tensorrt");

bool read_point(const std::string &file_path, const int num_point_dim,
                void **buffer, int *num_points) {
  std::ifstream file_in(file_path, std::ios::in | std::ios::binary);
  if (num_point_dim < 4) {
    LOG(ERROR) << "Point dimension must not be less than 4, but received "
               << "num_point_dim is " << num_point_dim << ".\n";
    return false;  // bail out on an invalid dimension instead of falling through
  }

  if (!file_in) {
    LOG(ERROR) << "Failed to read file: " << file_path << "\n";
    return false;
  }

  std::streampos file_size;
  file_in.seekg(0, std::ios::end);
  file_size = file_in.tellg();
  file_in.seekg(0, std::ios::beg);

  *buffer = malloc(file_size);
  if (*buffer == nullptr) {
    LOG(ERROR) << "Failed to malloc memory of size: " << file_size << "\n";
    return false;
  }
  file_in.read(reinterpret_cast<char *>(*buffer), file_size);
  file_in.close();

  if (file_size / sizeof(float) % num_point_dim != 0) {
    LOG(ERROR) << "Loaded file size (" << file_size
               << ") is not evenly divisible by num_point_dim ("
               << num_point_dim << ")\n";
    return false;
  }
  *num_points = file_size / sizeof(float) / num_point_dim;
  return true;
}

bool insert_time_to_points(const int num_points, const int num_point_dim,
                           float *points) {
  for (int i = 0; i < num_points; ++i) {
    *(points + i * num_point_dim + 4) = 0.;
  }
  return true;
}

bool preprocess(const std::string &file_path, const int num_point_dim,
                const int with_timelag, std::vector<int> *points_shape,
                std::vector<float> *points_data) {
  void *buffer = nullptr;
  int num_points;
  if (!read_point(file_path, num_point_dim, &buffer, &num_points)) {
    return false;
  }
  float *points = static_cast<float *>(buffer);

  if ((!with_timelag && num_point_dim == 5) || num_point_dim > 5) {
    // the origin points dim is [x, y, z, intensity,
ring_index], // but we need [x, y, z, intensity] and the sweep time index should be // inserted into points // so these two steps will be done in function insert_time_to_points insert_time_to_points(num_points, num_point_dim, points); } points_data->assign(points, points + num_points * num_point_dim); points_shape->push_back(num_points); points_shape->push_back(num_point_dim); free(points); return true; } std::shared_ptr<paddle_infer::Predictor> create_predictor( const std::string &model_path, const std::string &params_path, const int gpu_id, const int use_trt, const int trt_precision, const int trt_use_static, const std::string trt_static_dir, const int collect_shape_info, const std::string dynamic_shape_file) { paddle::AnalysisConfig config; config.EnableUseGpu(1000, gpu_id); config.SetModel(model_path, params_path); if (use_trt) { paddle::AnalysisConfig::Precision precision; if (trt_precision == 0) { precision = paddle_infer::PrecisionType::kFloat32; } else if (trt_precision == 1) { precision = paddle_infer::PrecisionType::kHalf; } else { LOG(ERROR) << "Tensorrt type can only support 0 or 1, but received is" << trt_precision << "\n"; return nullptr; } config.EnableTensorRtEngine(1 << 30, 1, 3, precision, trt_use_static, false); if (dynamic_shape_file == "") { LOG(ERROR) << "dynamic_shape_file should be set, but received is " << dynamic_shape_file << "\n"; return nullptr; } if (collect_shape_info) { config.CollectShapeRangeInfo(dynamic_shape_file); } else { config.EnableTunedTensorRtDynamicShape(dynamic_shape_file, true); } if (trt_use_static) { if (trt_static_dir == "") { LOG(ERROR) << "trt_static_dir should be set, but received is " << trt_static_dir << "\n"; return nullptr; } config.SetOptimCacheDir(trt_static_dir); } } config.SwitchIrOptim(true); return paddle_infer::CreatePredictor(config); } void run(Predictor *predictor, const std::vector<int> &points_shape, const std::vector<float> &points_data, std::vector<float> *box3d_lidar, std::vector<int64_t> *label_preds, std::vector<float> *scores) { auto input_names = predictor->GetInputNames(); for (const auto &tensor_name : input_names) { auto in_tensor = predictor->GetInputHandle(tensor_name); if (tensor_name == "data") { in_tensor->Reshape(points_shape); in_tensor->CopyFromCpu(points_data.data()); } } CHECK(predictor->Run()); auto output_names = predictor->GetOutputNames(); for (size_t i = 0; i != output_names.size(); i++) { auto output = predictor->GetOutputHandle(output_names[i]); std::vector<int> output_shape = output->shape(); int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1, std::multiplies<int>()); if (i == 0) { box3d_lidar->resize(out_num); output->CopyToCpu(box3d_lidar->data()); } else if (i == 1) { label_preds->resize(out_num); output->CopyToCpu(label_preds->data()); } else if (i == 2) { scores->resize(out_num); output->CopyToCpu(scores->data()); } } } bool parse_result(const std::vector<float> &box3d_lidar, const std::vector<int64_t> &label_preds, const std::vector<float> &scores) { int num_bbox3d = scores.size(); int bbox3d_dims = box3d_lidar.size() / num_bbox3d; for (size_t box_idx = 0; box_idx != num_bbox3d; ++box_idx) { // filter fake results: score = -1 if (scores[box_idx] < 0) { continue; } LOG(INFO) << "Score: " << scores[box_idx] << " Label: " << label_preds[box_idx] << " "; if (bbox3d_dims == 9) { LOG(INFO) << "Box (x_c, y_c, z_c, w, l, h, vec_x, vec_y, -rot): " << box3d_lidar[box_idx * 9 + 0] << " " << box3d_lidar[box_idx * 9 + 1] << " " << box3d_lidar[box_idx * 9 + 2] << " " << 
box3d_lidar[box_idx * 9 + 3] << " " << box3d_lidar[box_idx * 9 + 4] << " " << box3d_lidar[box_idx * 9 + 5] << " " << box3d_lidar[box_idx * 9 + 6] << " " << box3d_lidar[box_idx * 9 + 7] << " " << box3d_lidar[box_idx * 9 + 8] << "\n"; } else if (bbox3d_dims == 7) { LOG(INFO) << "Box (x_c, y_c, z_c, w, l, h, -rot): " << box3d_lidar[box_idx * 7 + 0] << " " << box3d_lidar[box_idx * 7 + 1] << " " << box3d_lidar[box_idx * 7 + 2] << " " << box3d_lidar[box_idx * 7 + 3] << " " << box3d_lidar[box_idx * 7 + 4] << " " << box3d_lidar[box_idx * 7 + 5] << " " << box3d_lidar[box_idx * 7 + 6] << "\n"; } } return true; } int main(int argc, char *argv[]) { google::ParseCommandLineFlags(&argc, &argv, true); if (FLAGS_model_file == "" || FLAGS_params_file == "" || FLAGS_lidar_file == "") { LOG(INFO) << "Missing required parameter" << "\n"; LOG(INFO) << "Usage: " << std::string(argv[0]) << " --model_file ${MODEL_FILE} " << "--params_file ${PARAMS_FILE} " << "--lidar_file ${LIDAR_FILE}" << "\n"; return -1; } auto predictor = create_predictor( FLAGS_model_file, FLAGS_params_file, FLAGS_gpu_id, FLAGS_use_trt, FLAGS_trt_precision, FLAGS_trt_use_static, FLAGS_trt_static_dir, FLAGS_collect_shape_info, FLAGS_dynamic_shape_file); if (predictor == nullptr) { return 0; } std::vector<int> points_shape; std::vector<float> points_data; if (!preprocess(FLAGS_lidar_file, FLAGS_num_point_dim, FLAGS_with_timelag, &points_shape, &points_data)) { LOG(ERROR) << "Failed to preprocess!\n"; return 0; } std::vector<float> box3d_lidar; std::vector<int64_t> label_preds; std::vector<float> scores; run(predictor.get(), points_shape, points_data, &box3d_lidar, &label_preds, &scores); parse_result(box3d_lidar, label_preds, scores); return 0; }
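parse_result groups three flat outputs into per-box records and drops the score = -1 padding entries. A hedged Python sketch of the same grouping, assuming the copied-out tensors are held in placeholder arrays:

import numpy as np

# Placeholders assumed to hold the three predictor outputs.
scores = np.asarray(scores_out, dtype=np.float32)
labels = np.asarray(labels_out, dtype=np.int64)
bbox_dims = box3d_out.size // scores.size  # 7 or 9 depending on the head
boxes = np.asarray(box3d_out, dtype=np.float32).reshape(-1, bbox_dims)

keep = scores >= 0  # fake (padding) results carry score -1
for box, label, score in zip(boxes[keep], labels[keep], scores[keep]):
    print(label, score, box)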
0
apollo_public_repos/apollo-model-centerpoint/deploy/centerpoint
apollo_public_repos/apollo-model-centerpoint/deploy/centerpoint/cpp/CMakeLists.txt
cmake_minimum_required(VERSION 3.0) project(cpp_inference_demo CXX C) option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." ON) option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." ON) option(USE_TENSORRT "Compile demo with TensorRT." ON) option(CUSTOM_OPERATOR_FILES "List of file names for custom operators" "") execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpfullversion -dumpversion OUTPUT_VARIABLE GCC_VERSION) string(REGEX MATCHALL "[0-9]+" GCC_VERSION_COMPONENTS ${GCC_VERSION}) list(GET GCC_VERSION_COMPONENTS 0 GCC_MAJOR) list(GET GCC_VERSION_COMPONENTS 1 GCC_MINOR) set(GCC_VERSION "${GCC_MAJOR}.${GCC_MINOR}") if (GCC_VERSION LESS "8.0") set(CMAKE_CXX_FLAGS "-Wl,--no-as-needed") endif() set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake") include(external/boost) if(WITH_GPU) find_package(CUDA REQUIRED) add_definitions("-DPADDLE_WITH_CUDA") endif() if(NOT WITH_STATIC_LIB) add_definitions("-DPADDLE_WITH_SHARED_LIB") else() # PD_INFER_DECL is mainly used to set the dllimport/dllexport attribute in dynamic library mode. # Set it to empty in static library mode to avoid compilation issues. add_definitions("/DPD_INFER_DECL=") endif() macro(safe_set_static_flag) foreach(flag_var CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO) if(${flag_var} MATCHES "/MD") string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}") endif(${flag_var} MATCHES "/MD") endforeach(flag_var) endmacro() if(NOT DEFINED PADDLE_LIB) message(FATAL_ERROR "please set PADDLE_LIB with -DPADDLE_LIB=/path/paddle/lib") endif() if(NOT DEFINED DEMO_NAME) message(FATAL_ERROR "please set DEMO_NAME with -DDEMO_NAME=demo_name") endif() include_directories("${PADDLE_LIB}/") set(PADDLE_LIB_THIRD_PARTY_PATH "${PADDLE_LIB}/third_party/install/") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}protobuf/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}glog/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}gflags/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}xxhash/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}onnxruntime/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}paddle2onnx/include") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}protobuf/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}glog/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}gflags/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}xxhash/lib") link_directories("${PADDLE_LIB}/paddle/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}onnxruntime/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}paddle2onnx/lib") if (WIN32) add_definitions("/DGOOGLE_GLOG_DLL_DECL=") option(MSVC_STATIC_CRT "use static C Runtime library by default" ON) if (MSVC_STATIC_CRT) if (WITH_MKL) set(FLAG_OPENMP "/openmp") endif() set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /bigobj /MTd ${FLAG_OPENMP}") set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /bigobj /MT ${FLAG_OPENMP}") set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /bigobj /MTd ${FLAG_OPENMP}") set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /bigobj /MT ${FLAG_OPENMP}") safe_set_static_flag() if (WITH_STATIC_LIB) add_definitions(-DSTATIC_LIB) endif() endif() else() if(WITH_MKL) set(FLAG_OPENMP "-fopenmp") endif() set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14 ${FLAG_OPENMP}") endif() if(WITH_GPU) if(NOT WIN32) set(CUDA_LIB "/usr/local/cuda/lib64/" CACHE STRING "CUDA Library") else() if(CUDA_LIB 
STREQUAL "") set(CUDA_LIB "C:\\Program\ Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v8.0\\lib\\x64") endif() endif(NOT WIN32) endif() if (USE_TENSORRT AND WITH_GPU) set(TENSORRT_ROOT "" CACHE STRING "The root directory of TensorRT library") if("${TENSORRT_ROOT}" STREQUAL "") message(FATAL_ERROR "The TENSORRT_ROOT is empty, you must assign it a value with CMake command. Such as: -DTENSORRT_ROOT=TENSORRT_ROOT_PATH ") endif() set(TENSORRT_INCLUDE_DIR ${TENSORRT_ROOT}/include) set(TENSORRT_LIB_DIR ${TENSORRT_ROOT}/lib) endif() if (NOT WIN32) if (USE_TENSORRT AND WITH_GPU) include_directories("${TENSORRT_INCLUDE_DIR}") link_directories("${TENSORRT_LIB_DIR}") endif() endif(NOT WIN32) if(WITH_MKL) set(MATH_LIB_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}mklml") include_directories("${MATH_LIB_PATH}/include") if(WIN32) set(MATH_LIB ${MATH_LIB_PATH}/lib/mklml${CMAKE_STATIC_LIBRARY_SUFFIX} ${MATH_LIB_PATH}/lib/libiomp5md${CMAKE_STATIC_LIBRARY_SUFFIX}) else() set(MATH_LIB ${MATH_LIB_PATH}/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX} ${MATH_LIB_PATH}/lib/libiomp5${CMAKE_SHARED_LIBRARY_SUFFIX}) endif() set(MKLDNN_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}mkldnn") if(EXISTS ${MKLDNN_PATH}) include_directories("${MKLDNN_PATH}/include") if(WIN32) set(MKLDNN_LIB ${MKLDNN_PATH}/lib/mkldnn.lib) else(WIN32) set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.0) endif(WIN32) endif() else() set(OPENBLAS_LIB_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}openblas") include_directories("${OPENBLAS_LIB_PATH}/include/openblas") if(WIN32) set(MATH_LIB ${OPENBLAS_LIB_PATH}/lib/openblas${CMAKE_STATIC_LIBRARY_SUFFIX}) else() set(MATH_LIB ${OPENBLAS_LIB_PATH}/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX}) endif() endif() if(WITH_STATIC_LIB) set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX}) else() if(WIN32) set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX}) else() set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_SHARED_LIBRARY_SUFFIX}) endif() endif() if (NOT WIN32) if (GCC_VERSION LESS "8.0") set(EXTERNAL_LIB ${EXTERNAL_LIB} "-lssl -lcrypto -lz -lleveldb -lsnappy") endif() set(EXTERNAL_LIB ${EXTERNAL_LIB} "-lrt -ldl -lpthread") set(DEPS ${DEPS} ${MATH_LIB} ${MKLDNN_LIB} glog gflags protobuf xxhash ${EXTERNAL_LIB}) else() set(DEPS ${DEPS} ${MATH_LIB} ${MKLDNN_LIB} glog gflags_static libprotobuf xxhash ${EXTERNAL_LIB}) set(DEPS ${DEPS} shlwapi.lib) endif(NOT WIN32) if(WITH_GPU) if(NOT WIN32) if (USE_TENSORRT) set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer${CMAKE_SHARED_LIBRARY_SUFFIX}) set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX}) endif() set(DEPS ${DEPS} ${CUDA_LIB}/libcudart${CMAKE_SHARED_LIBRARY_SUFFIX}) else() if(USE_TENSORRT) set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/nvinfer${CMAKE_STATIC_LIBRARY_SUFFIX}) set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_STATIC_LIBRARY_SUFFIX}) endif() set(DEPS ${DEPS} ${CUDA_LIB}/cudart${CMAKE_STATIC_LIBRARY_SUFFIX} ) set(DEPS ${DEPS} ${CUDA_LIB}/cublas${CMAKE_STATIC_LIBRARY_SUFFIX} ) set(DEPS ${DEPS} ${CUDA_LIB}/cudnn${CMAKE_STATIC_LIBRARY_SUFFIX} ) endif() endif() cuda_add_library(pd_infer_custom_op ${CUSTOM_OPERATOR_FILES} SHARED) add_executable(${DEMO_NAME} ${DEMO_NAME}.cc) if (GCC_VERSION GREATER_EQUAL "8.0") set(DEPS ${DEPS} libssl.a libcrypto.a libz.a libleveldb.a libsnappy.a) endif() set(DEPS ${DEPS} boost pd_infer_custom_op)# libssl.a libcrypto.a libz.a libleveldb.a libsnappy.a) if(WIN32) if(USE_TENSORRT) add_custom_command(TARGET ${DEMO_NAME} POST_BUILD COMMAND 
${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer${CMAKE_SHARED_LIBRARY_SUFFIX} ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE} COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX} ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE} ) endif() if(WITH_MKL) add_custom_command(TARGET ${DEMO_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/mklml.dll ${CMAKE_BINARY_DIR}/Release COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/libiomp5md.dll ${CMAKE_BINARY_DIR}/Release COMMAND ${CMAKE_COMMAND} -E copy ${MKLDNN_PATH}/lib/mkldnn.dll ${CMAKE_BINARY_DIR}/Release ) else() add_custom_command(TARGET ${DEMO_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy ${OPENBLAS_LIB_PATH}/lib/openblas.dll ${CMAKE_BINARY_DIR}/Release ) endif() if(NOT WITH_STATIC_LIB) add_custom_command(TARGET ${DEMO_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy "${PADDLE_LIB}/paddle/lib/paddle_fluid.dll" ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE} ) endif() endif() target_link_libraries(${DEMO_NAME} ${DEPS})
0
apollo_public_repos/apollo-model-centerpoint/deploy/centerpoint
apollo_public_repos/apollo-model-centerpoint/deploy/centerpoint/cpp/compile.sh
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

mkdir -p build
cd build
rm -rf *

DEMO_NAME=main
WITH_MKL=ON
WITH_GPU=ON
USE_TENSORRT=ON

LIB_DIR=/centerpoint/two_three/Paddle/build_apollo/paddle_inference_install_dir/
CUDNN_LIB=/usr/lib/x86_64-linux-gnu
CUDA_LIB=/usr/local/cuda/lib64
TENSORRT_ROOT=/centerpoint/two_three/Paddle/TensorRT-8.2.5.1
CUSTOM_OPERATOR_FILES="custom_ops/voxelize_op.cu;custom_ops/voxelize_op.cc;custom_ops/iou3d_nms_kernel.cu;custom_ops/postprocess.cc;custom_ops/postprocess.cu"

cmake .. -DPADDLE_LIB=${LIB_DIR} \
  -DWITH_MKL=${WITH_MKL} \
  -DDEMO_NAME=${DEMO_NAME} \
  -DWITH_GPU=${WITH_GPU} \
  -DWITH_STATIC_LIB=OFF \
  -DUSE_TENSORRT=${USE_TENSORRT} \
  -DCUDNN_LIB=${CUDNN_LIB} \
  -DCUDA_LIB=${CUDA_LIB} \
  -DTENSORRT_ROOT=${TENSORRT_ROOT} \
  -DCUSTOM_OPERATOR_FILES=${CUSTOM_OPERATOR_FILES}

make -j
0
apollo_public_repos/apollo-model-centerpoint/deploy/centerpoint/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/centerpoint/cpp/custom_ops/iou3d_nms_kernel.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* 3D IoU Calculation and Rotated NMS(modified from 2D NMS written by others) Written by Shaoshuai Shi All Rights Reserved 2019-2020. */ #include <stdio.h> #define THREADS_PER_BLOCK 16 #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) // #define DEBUG const int THREADS_PER_BLOCK_NMS = sizeof(int64_t) * 8; const float EPS = 1e-8; struct Point { float x, y; __device__ Point() {} __device__ Point(double _x, double _y) { x = _x, y = _y; } __device__ void set(float _x, float _y) { x = _x; y = _y; } __device__ Point operator+(const Point &b) const { return Point(x + b.x, y + b.y); } __device__ Point operator-(const Point &b) const { return Point(x - b.x, y - b.y); } }; __device__ inline float cross(const Point &a, const Point &b) { return a.x * b.y - a.y * b.x; } __device__ inline float cross(const Point &p1, const Point &p2, const Point &p0) { return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y); } __device__ int check_rect_cross(const Point &p1, const Point &p2, const Point &q1, const Point &q2) { int ret = min(p1.x, p2.x) <= max(q1.x, q2.x) && min(q1.x, q2.x) <= max(p1.x, p2.x) && min(p1.y, p2.y) <= max(q1.y, q2.y) && min(q1.y, q2.y) <= max(p1.y, p2.y); return ret; } __device__ inline int check_in_box2d(const float *box, const Point &p) { // params: (7) [x, y, z, dx, dy, dz, heading] const float MARGIN = 1e-2; float center_x = box[0], center_y = box[1]; // rotate the point in the opposite direction of box float angle_cos = cos(-box[6]), angle_sin = sin(-box[6]); float rot_x = (p.x - center_x) * angle_cos + (p.y - center_y) * (-angle_sin); float rot_y = (p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos; return (fabs(rot_x) < box[3] / 2 + MARGIN && fabs(rot_y) < box[4] / 2 + MARGIN); } __device__ inline int intersection(const Point &p1, const Point &p0, const Point &q1, const Point &q0, Point *ans) { // fast exclusion if (check_rect_cross(p0, p1, q0, q1) == 0) return 0; // check cross standing float s1 = cross(q0, p1, p0); float s2 = cross(p1, q1, p0); float s3 = cross(p0, q1, q0); float s4 = cross(q1, p1, q0); if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0; // calculate intersection of two lines float s5 = cross(q1, p1, p0); if (fabs(s5 - s1) > EPS) { ans->x = (s5 * q0.x - s1 * q1.x) / (s5 - s1); ans->y = (s5 * q0.y - s1 * q1.y) / (s5 - s1); } else { float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y; float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y; float D = a0 * b1 - a1 * b0; ans->x = (b0 * c1 - b1 * c0) / D; ans->y = (a1 * c0 - a0 * c1) / D; } return 1; } __device__ inline void rotate_around_center(const Point &center, const float angle_cos, const float angle_sin, Point *p) { float new_x = (p->x - center.x) * angle_cos + (p->y - center.y) * (-angle_sin) + center.x; float new_y = (p->x - center.x) * angle_sin + (p->y - center.y) * angle_cos + center.y; p->set(new_x, new_y); } __device__ inline int point_cmp(const 
Point &a, const Point &b, const Point &center) { return atan2(a.y - center.y, a.x - center.x) > atan2(b.y - center.y, b.x - center.x); } __device__ inline float box_overlap(const float *box_a, const float *box_b) { // params box_a: [x, y, z, dx, dy, dz, heading] // params box_b: [x, y, z, dx, dy, dz, heading] float a_angle = box_a[6], b_angle = box_b[6]; float a_dx_half = box_a[3] / 2, b_dx_half = box_b[3] / 2, a_dy_half = box_a[4] / 2, b_dy_half = box_b[4] / 2; float a_x1 = box_a[0] - a_dx_half, a_y1 = box_a[1] - a_dy_half; float a_x2 = box_a[0] + a_dx_half, a_y2 = box_a[1] + a_dy_half; float b_x1 = box_b[0] - b_dx_half, b_y1 = box_b[1] - b_dy_half; float b_x2 = box_b[0] + b_dx_half, b_y2 = box_b[1] + b_dy_half; Point center_a(box_a[0], box_a[1]); Point center_b(box_b[0], box_b[1]); #ifdef DEBUG printf( "a: (%.3f, %.3f, %.3f, %.3f, %.3f), b: (%.3f, %.3f, %.3f, %.3f, %.3f)\n", a_x1, a_y1, a_x2, a_y2, a_angle, b_x1, b_y1, b_x2, b_y2, b_angle); printf("center a: (%.3f, %.3f), b: (%.3f, %.3f)\n", center_a.x, center_a.y, center_b.x, center_b.y); #endif Point box_a_corners[5]; box_a_corners[0].set(a_x1, a_y1); box_a_corners[1].set(a_x2, a_y1); box_a_corners[2].set(a_x2, a_y2); box_a_corners[3].set(a_x1, a_y2); Point box_b_corners[5]; box_b_corners[0].set(b_x1, b_y1); box_b_corners[1].set(b_x2, b_y1); box_b_corners[2].set(b_x2, b_y2); box_b_corners[3].set(b_x1, b_y2); // get oriented corners float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle); float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle); for (int k = 0; k < 4; k++) { #ifdef DEBUG printf("before corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y); #endif rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners + k); rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners + k); #ifdef DEBUG printf("corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y); #endif } box_a_corners[4] = box_a_corners[0]; box_b_corners[4] = box_b_corners[0]; // get intersection of lines Point cross_points[16]; Point poly_center; int cnt = 0, flag = 0; poly_center.set(0, 0); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { flag = intersection(box_a_corners[i + 1], box_a_corners[i], box_b_corners[j + 1], box_b_corners[j], cross_points + cnt); if (flag) { poly_center = poly_center + cross_points[cnt]; cnt++; #ifdef DEBUG printf( "Cross points (%.3f, %.3f): a(%.3f, %.3f)->(%.3f, %.3f), " "b(%.3f, %.3f)->(%.3f, %.3f) \n", cross_points[cnt - 1].x, cross_points[cnt - 1].y, box_a_corners[i].x, box_a_corners[i].y, box_a_corners[i + 1].x, box_a_corners[i + 1].y, box_b_corners[i].x, box_b_corners[i].y, box_b_corners[i + 1].x, box_b_corners[i + 1].y); #endif } } } // check corners for (int k = 0; k < 4; k++) { if (check_in_box2d(box_a, box_b_corners[k])) { poly_center = poly_center + box_b_corners[k]; cross_points[cnt] = box_b_corners[k]; cnt++; #ifdef DEBUG printf("b corners in a: corner_b(%.3f, %.3f)", cross_points[cnt - 1].x, cross_points[cnt - 1].y); #endif } if (check_in_box2d(box_b, box_a_corners[k])) { poly_center = poly_center + box_a_corners[k]; cross_points[cnt] = box_a_corners[k]; cnt++; #ifdef DEBUG printf("a corners in b: corner_a(%.3f, %.3f)", cross_points[cnt - 1].x, cross_points[cnt - 1].y); #endif } } poly_center.x /= cnt; poly_center.y /= cnt; // sort the points of polygon Point temp; for (int j = 0; j < cnt - 1; j++) { for (int i = 0; i < cnt - j - 1; 
i++) { if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)) { temp = cross_points[i]; cross_points[i] = cross_points[i + 1]; cross_points[i + 1] = temp; } } } #ifdef DEBUG printf("cnt=%d\n", cnt); for (int i = 0; i < cnt; i++) { printf("All cross point %d: (%.3f, %.3f)\n", i, cross_points[i].x, cross_points[i].y); } #endif // get the overlap areas float area = 0; for (int k = 0; k < cnt - 1; k++) { area += cross(cross_points[k] - cross_points[0], cross_points[k + 1] - cross_points[0]); } return fabs(area) / 2.0; } __device__ inline float iou_bev(const float *box_a, const float *box_b) { // params box_a: [x, y, z, dx, dy, dz, heading] // params box_b: [x, y, z, dx, dy, dz, heading] float sa = box_a[3] * box_a[4]; float sb = box_b[3] * box_b[4]; float s_overlap = box_overlap(box_a, box_b); return s_overlap / fmaxf(sa + sb - s_overlap, EPS); } __global__ void nms_kernel(const int num_bboxes, const int num_bboxes_for_nms, const float nms_overlap_thresh, const int decode_bboxes_dims, const float *bboxes, const int *index, const int64_t *sorted_index, int64_t *mask) { // params: boxes (N, 7) [x, y, z, dx, dy, dz, heading] // params: mask (N, N/THREADS_PER_BLOCK_NMS) const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = fminf(num_bboxes_for_nms - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); const int col_size = fminf(num_bboxes_for_nms - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7]; if (threadIdx.x < col_size) { int box_idx = index[sorted_index[THREADS_PER_BLOCK_NMS * col_start + threadIdx.x]]; block_boxes[threadIdx.x * 7 + 0] = bboxes[box_idx * decode_bboxes_dims]; block_boxes[threadIdx.x * 7 + 1] = bboxes[box_idx * decode_bboxes_dims + 1]; block_boxes[threadIdx.x * 7 + 2] = bboxes[box_idx * decode_bboxes_dims + 2]; block_boxes[threadIdx.x * 7 + 3] = bboxes[box_idx * decode_bboxes_dims + 4]; block_boxes[threadIdx.x * 7 + 4] = bboxes[box_idx * decode_bboxes_dims + 3]; block_boxes[threadIdx.x * 7 + 5] = bboxes[box_idx * decode_bboxes_dims + 5]; block_boxes[threadIdx.x * 7 + 6] = -bboxes[box_idx * decode_bboxes_dims + decode_bboxes_dims - 1] - 3.141592653589793 / 2; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x; const int act_box_idx = index[sorted_index[cur_box_idx]]; float cur_box[7]; cur_box[0] = bboxes[act_box_idx * decode_bboxes_dims]; cur_box[1] = bboxes[act_box_idx * decode_bboxes_dims + 1]; cur_box[2] = bboxes[act_box_idx * decode_bboxes_dims + 2]; cur_box[3] = bboxes[act_box_idx * decode_bboxes_dims + 4]; cur_box[4] = bboxes[act_box_idx * decode_bboxes_dims + 3]; cur_box[5] = bboxes[act_box_idx * decode_bboxes_dims + 5]; cur_box[6] = -bboxes[act_box_idx * decode_bboxes_dims + decode_bboxes_dims - 1] - 3.141592653589793 / 2; int i = 0; int64_t t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (iou_bev(cur_box, block_boxes + i * 7) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(num_bboxes_for_nms, THREADS_PER_BLOCK_NMS); mask[cur_box_idx * col_blocks + col_start] = t; } } void NmsLauncher(const cudaStream_t &stream, const float *bboxes, const int *index, const int64_t *sorted_index, const int num_bboxes, const int num_bboxes_for_nms, const float nms_overlap_thresh, const int decode_bboxes_dims, int64_t *mask) { dim3 blocks(DIVUP(num_bboxes_for_nms, 
THREADS_PER_BLOCK_NMS), DIVUP(num_bboxes_for_nms, THREADS_PER_BLOCK_NMS)); dim3 threads(THREADS_PER_BLOCK_NMS); nms_kernel<<<blocks, threads, 0, stream>>>( num_bboxes, num_bboxes_for_nms, nms_overlap_thresh, decode_bboxes_dims, bboxes, index, sorted_index, mask); }
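Note that nms_kernel only fills a per-box suppression bitmask; the final keep/drop decision happens on the host by OR-ing the mask rows of boxes that survive, exactly as the CenterPoint postprocess file further below does. A minimal host-side sketch of that decoding step (the helper name is illustrative, not part of this file):

#include <cstdint>
#include <vector>

// Decode the [num_boxes, col_blocks] bitmask written by nms_kernel into the
// list of kept box indices (boxes are assumed sorted by descending score).
std::vector<int> decode_nms_mask(const int64_t *mask_cpu, int num_boxes) {
  const int kBits = sizeof(int64_t) * 8;        // 64 boxes per mask word
  const int col_blocks = (num_boxes + kBits - 1) / kBits;
  std::vector<int64_t> remv(col_blocks, 0);     // accumulated suppression bits
  std::vector<int> keep;
  for (int i = 0; i < num_boxes; ++i) {
    const int nblock = i / kBits;
    const int inblock = i % kBits;
    if (!(remv[nblock] & (1ULL << inblock))) {  // box i not yet suppressed
      keep.push_back(i);
      const int64_t *row = mask_cpu + static_cast<int64_t>(i) * col_blocks;
      for (int j = nblock; j < col_blocks; ++j) remv[j] |= row[j];
    }
  }
  return keep;
}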
apollo_public_repos/apollo-model-centerpoint/deploy/centerpoint/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/centerpoint/cpp/custom_ops/postprocess.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/include/experimental/ext_all.h" #define CHECK_INPUT_CUDA(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.") #define CHECK_INPUT_BATCHSIZE(x) \ PD_CHECK(x.shape()[0] == 1, #x " batch size must be 1.") #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) const int THREADS_PER_BLOCK_NMS = sizeof(int64_t) * 8; void NmsLauncher(const cudaStream_t &stream, const float *bboxes, const int *index, const int64_t *sorted_index, const int num_bboxes, const int num_bboxes_for_nms, const float nms_overlap_thresh, const int decode_bboxes_dims, int64_t *mask); __global__ void decode_kernel( const float *score, const float *reg, const float *height, const float *dim, const float *vel, const float *rot, const float score_threshold, const int feat_w, const float down_ratio, const float voxel_size_x, const float voxel_size_y, const float point_cloud_range_x_min, const float point_cloud_range_y_min, const float post_center_range_x_min, const float post_center_range_y_min, const float post_center_range_z_min, const float post_center_range_x_max, const float post_center_range_y_max, const float post_center_range_z_max, const int num_bboxes, const bool with_velocity, const int decode_bboxes_dims, float *bboxes, bool *mask, int *score_idx) { int box_idx = blockIdx.x * blockDim.x + threadIdx.x; if (box_idx == num_bboxes || box_idx > num_bboxes) { return; } const int xs = box_idx % feat_w; const int ys = box_idx / feat_w; float x = reg[box_idx]; float y = reg[box_idx + num_bboxes]; float z = height[box_idx]; bboxes[box_idx * decode_bboxes_dims] = (x + xs) * down_ratio * voxel_size_x + point_cloud_range_x_min; bboxes[box_idx * decode_bboxes_dims + 1] = (y + ys) * down_ratio * voxel_size_y + point_cloud_range_y_min; bboxes[box_idx * decode_bboxes_dims + 2] = z; bboxes[box_idx * decode_bboxes_dims + 3] = dim[box_idx]; bboxes[box_idx * decode_bboxes_dims + 4] = dim[box_idx + num_bboxes]; bboxes[box_idx * decode_bboxes_dims + 5] = dim[box_idx + 2 * num_bboxes]; if (with_velocity) { bboxes[box_idx * decode_bboxes_dims + 6] = vel[box_idx]; bboxes[box_idx * decode_bboxes_dims + 7] = vel[box_idx + num_bboxes]; bboxes[box_idx * decode_bboxes_dims + 8] = atan2f(rot[box_idx], rot[box_idx + num_bboxes]); } else { bboxes[box_idx * decode_bboxes_dims + 6] = atan2f(rot[box_idx], rot[box_idx + num_bboxes]); } if (score[box_idx] > score_threshold && x <= post_center_range_x_max && y <= post_center_range_y_max && z <= post_center_range_z_max && x >= post_center_range_x_min && y >= post_center_range_y_min && z >= post_center_range_z_min) { mask[box_idx] = true; } score_idx[box_idx] = box_idx; } void DecodeLauncher( const cudaStream_t &stream, const float *score, const float *reg, const float *height, const float *dim, const float *vel, const float *rot, const float score_threshold, const int feat_w, const float down_ratio, const float voxel_size_x, const float voxel_size_y, const float point_cloud_range_x_min, 
const float point_cloud_range_y_min, const float post_center_range_x_min, const float post_center_range_y_min, const float post_center_range_z_min, const float post_center_range_x_max, const float post_center_range_y_max, const float post_center_range_z_max, const int num_bboxes, const bool with_velocity, const int decode_bboxes_dims, float *bboxes, bool *mask, int *score_idx) { dim3 blocks(DIVUP(num_bboxes, THREADS_PER_BLOCK_NMS)); dim3 threads(THREADS_PER_BLOCK_NMS); decode_kernel<<<blocks, threads, 0, stream>>>( score, reg, height, dim, vel, rot, score_threshold, feat_w, down_ratio, voxel_size_x, voxel_size_y, point_cloud_range_x_min, point_cloud_range_y_min, post_center_range_x_min, post_center_range_y_min, post_center_range_z_min, post_center_range_x_max, post_center_range_y_max, post_center_range_z_max, num_bboxes, with_velocity, decode_bboxes_dims, bboxes, mask, score_idx); } std::vector<paddle::Tensor> postprocess_gpu( const std::vector<paddle::Tensor> &hm, const std::vector<paddle::Tensor> &reg, const std::vector<paddle::Tensor> &height, const std::vector<paddle::Tensor> &dim, const std::vector<paddle::Tensor> &vel, const std::vector<paddle::Tensor> &rot, const std::vector<float> &voxel_size, const std::vector<float> &point_cloud_range, const std::vector<float> &post_center_range, const std::vector<int> &num_classes, const int down_ratio, const float score_threshold, const float nms_iou_threshold, const int nms_pre_max_size, const int nms_post_max_size, const bool with_velocity) { int num_tasks = hm.size(); int decode_bboxes_dims = 9; if (!with_velocity) { decode_bboxes_dims = 7; } float voxel_size_x = voxel_size[0]; float voxel_size_y = voxel_size[1]; float point_cloud_range_x_min = point_cloud_range[0]; float point_cloud_range_y_min = point_cloud_range[1]; float post_center_range_x_min = post_center_range[0]; float post_center_range_y_min = post_center_range[1]; float post_center_range_z_min = post_center_range[2]; float post_center_range_x_max = post_center_range[3]; float post_center_range_y_max = post_center_range[4]; float post_center_range_z_max = post_center_range[5]; std::vector<paddle::Tensor> scores; std::vector<paddle::Tensor> labels; std::vector<paddle::Tensor> bboxes; for (int task_id = 0; task_id < num_tasks; ++task_id) { CHECK_INPUT_BATCHSIZE(hm[0]); int feat_h = hm[0].shape()[2]; int feat_w = hm[0].shape()[3]; int num_bboxes = feat_h * feat_w; // score and label auto sigmoid_hm_per_task = paddle::experimental::sigmoid(hm[task_id]); auto label_per_task = paddle::experimental::argmax(sigmoid_hm_per_task, 1, true, false, 3); auto score_per_task = paddle::experimental::max(sigmoid_hm_per_task, {1}, true); // dim auto exp_dim_per_task = paddle::experimental::exp(dim[task_id]); // decode bboxed and get mask of bboxes for nms const float *score_ptr = score_per_task.data<float>(); const float *reg_ptr = reg[task_id].data<float>(); const float *height_ptr = height[task_id].data<float>(); // const float* dim_ptr = dim[task_id].data<float>(); const float *exp_dim_per_task_ptr = exp_dim_per_task.data<float>(); const float *vel_ptr = vel[task_id].data<float>(); const float *rot_ptr = rot[task_id].data<float>(); auto decode_bboxes = paddle::empty({num_bboxes, decode_bboxes_dims}, paddle::DataType::FLOAT32, paddle::GPUPlace()); float *decode_bboxes_ptr = decode_bboxes.data<float>(); auto thresh_mask = paddle::full({num_bboxes}, 0, paddle::DataType::BOOL, paddle::GPUPlace()); bool *thresh_mask_ptr = thresh_mask.data<bool>(); auto score_idx = paddle::empty({num_bboxes}, 
paddle::DataType::INT32, paddle::GPUPlace()); int *score_idx_ptr = score_idx.data<int32_t>(); DecodeLauncher(score_per_task.stream(), score_ptr, reg_ptr, height_ptr, exp_dim_per_task_ptr, vel_ptr, rot_ptr, score_threshold, feat_w, down_ratio, voxel_size_x, voxel_size_y, point_cloud_range_x_min, point_cloud_range_y_min, post_center_range_x_min, post_center_range_y_min, post_center_range_z_min, post_center_range_x_max, post_center_range_y_max, post_center_range_z_max, num_bboxes, with_velocity, decode_bboxes_dims, decode_bboxes_ptr, thresh_mask_ptr, score_idx_ptr); // select score by mask auto selected_score_idx = paddle::experimental::masked_select(score_idx, thresh_mask); auto flattened_selected_score = paddle::experimental::reshape(score_per_task, {num_bboxes}); auto selected_score = paddle::experimental::masked_select( flattened_selected_score, thresh_mask); int num_selected = selected_score.numel(); if (num_selected == 0 || num_selected < 0) { auto fake_out_boxes = paddle::full({1, decode_bboxes_dims}, 0., paddle::DataType::FLOAT32, paddle::GPUPlace()); auto fake_out_score = paddle::full({1}, -1., paddle::DataType::FLOAT32, paddle::GPUPlace()); auto fake_out_label = paddle::full({1}, 0, paddle::DataType::INT64, paddle::GPUPlace()); scores.push_back(fake_out_score); labels.push_back(fake_out_label); bboxes.push_back(fake_out_boxes); continue; } // sort score by descending auto sort_out = paddle::experimental::argsort(selected_score, 0, true); auto sorted_index = std::get<1>(sort_out); int num_bboxes_for_nms = num_selected > nms_pre_max_size ? nms_pre_max_size : num_selected; // nms // in NmsLauncher, rot = - theta - pi / 2 const int col_blocks = DIVUP(num_bboxes_for_nms, THREADS_PER_BLOCK_NMS); auto nms_mask = paddle::empty({num_bboxes_for_nms * col_blocks}, paddle::DataType::INT64, paddle::GPUPlace()); int64_t *nms_mask_data = nms_mask.data<int64_t>(); NmsLauncher(score_per_task.stream(), decode_bboxes.data<float>(), selected_score_idx.data<int>(), sorted_index.data<int64_t>(), num_selected, num_bboxes_for_nms, nms_iou_threshold, decode_bboxes_dims, nms_mask_data); const paddle::Tensor nms_mask_cpu_tensor = nms_mask.copy_to(paddle::CPUPlace(), true); const int64_t *nms_mask_cpu = nms_mask_cpu_tensor.data<int64_t>(); auto remv_cpu = paddle::full({col_blocks}, 0, paddle::DataType::INT64, paddle::CPUPlace()); int64_t *remv_cpu_data = remv_cpu.data<int64_t>(); int num_to_keep = 0; auto keep = paddle::empty({num_bboxes_for_nms}, paddle::DataType::INT32, paddle::CPUPlace()); int *keep_data = keep.data<int>(); for (int i = 0; i < num_bboxes_for_nms; i++) { int nblock = i / THREADS_PER_BLOCK_NMS; int inblock = i % THREADS_PER_BLOCK_NMS; if (!(remv_cpu_data[nblock] & (1ULL << inblock))) { keep_data[num_to_keep++] = i; const int64_t *p = &nms_mask_cpu[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv_cpu_data[j] |= p[j]; } } } int num_for_gather = num_to_keep > nms_post_max_size ? 
nms_post_max_size : num_to_keep; auto keep_gpu = paddle::empty({num_for_gather}, paddle::DataType::INT32, paddle::GPUPlace()); int *keep_gpu_ptr = keep_gpu.data<int>(); cudaMemcpy(keep_gpu_ptr, keep_data, num_for_gather * sizeof(int), cudaMemcpyHostToDevice); auto gather_sorted_index = paddle::experimental::gather(sorted_index, keep_gpu, 0); auto gather_index = paddle::experimental::gather(selected_score_idx, gather_sorted_index, 0); auto gather_score = paddle::experimental::gather(selected_score, gather_sorted_index, 0); auto flattened_label = paddle::experimental::reshape(label_per_task, {num_bboxes}); auto gather_label = paddle::experimental::gather(flattened_label, gather_index, 0); auto gather_bbox = paddle::experimental::gather(decode_bboxes, gather_index, 0); auto start_label = paddle::full( {1}, num_classes[task_id], paddle::DataType::INT64, paddle::GPUPlace()); auto added_label = paddle::experimental::add(gather_label, start_label); scores.push_back(gather_score); labels.push_back(added_label); bboxes.push_back(gather_bbox); } auto out_scores = paddle::experimental::concat(scores, 0); auto out_labels = paddle::experimental::concat(labels, 0); auto out_bboxes = paddle::experimental::concat(bboxes, 0); return {out_bboxes, out_scores, out_labels}; }
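For reference, the index arithmetic in decode_kernel reduces to the following scalar mapping from a flattened heatmap cell, plus its regressed sub-cell offset, back to metric coordinates. A sketch under the same conventions (down_ratio, voxel size, point-cloud-range minimum); the helper name is hypothetical:

// Scalar form of the center decoding performed by decode_kernel above.
struct DecodedXY { float x, y; };

DecodedXY decode_center(int box_idx, int feat_w, float reg_x, float reg_y,
                        float down_ratio, float voxel_size_x,
                        float voxel_size_y, float range_x_min,
                        float range_y_min) {
  const int xs = box_idx % feat_w;  // column in the feature map
  const int ys = box_idx / feat_w;  // row in the feature map
  return {(reg_x + xs) * down_ratio * voxel_size_x + range_x_min,
          (reg_y + ys) * down_ratio * voxel_size_y + range_y_min};
}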
apollo_public_repos/apollo-model-centerpoint/deploy/centerpoint/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/centerpoint/cpp/custom_ops/voxelize_op.cc
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <vector> #include "paddle/include/experimental/ext_all.h" template <typename T, typename T_int> bool hard_voxelize_cpu_kernel( const T *points, const float point_cloud_range_x_min, const float point_cloud_range_y_min, const float point_cloud_range_z_min, const float voxel_size_x, const float voxel_size_y, const float voxel_size_z, const int grid_size_x, const int grid_size_y, const int grid_size_z, const int64_t num_points, const int num_point_dim, const int max_num_points_in_voxel, const int max_voxels, T *voxels, T_int *coords, T_int *num_points_per_voxel, T_int *grid_idx_to_voxel_idx, T_int *num_voxels) { std::fill(voxels, voxels + max_voxels * max_num_points_in_voxel * num_point_dim, static_cast<T>(0)); num_voxels[0] = 0; int voxel_idx, grid_idx, curr_num_point; int coord_x, coord_y, coord_z; for (int point_idx = 0; point_idx < num_points; ++point_idx) { coord_x = floor( (points[point_idx * num_point_dim + 0] - point_cloud_range_x_min) / voxel_size_x); coord_y = floor( (points[point_idx * num_point_dim + 1] - point_cloud_range_y_min) / voxel_size_y); coord_z = floor( (points[point_idx * num_point_dim + 2] - point_cloud_range_z_min) / voxel_size_z); if (coord_x < 0 || coord_x > grid_size_x || coord_x == grid_size_x) { continue; } if (coord_y < 0 || coord_y > grid_size_y || coord_y == grid_size_y) { continue; } if (coord_z < 0 || coord_z > grid_size_z || coord_z == grid_size_z) { continue; } grid_idx = coord_z * grid_size_y * grid_size_x + coord_y * grid_size_x + coord_x; voxel_idx = grid_idx_to_voxel_idx[grid_idx]; if (voxel_idx == -1) { voxel_idx = num_voxels[0]; if (num_voxels[0] == max_voxels || num_voxels[0] > max_voxels) { continue; } num_voxels[0]++; grid_idx_to_voxel_idx[grid_idx] = voxel_idx; coords[voxel_idx * 3 + 0] = coord_z; coords[voxel_idx * 3 + 1] = coord_y; coords[voxel_idx * 3 + 2] = coord_x; } curr_num_point = num_points_per_voxel[voxel_idx]; if (curr_num_point < max_num_points_in_voxel) { for (int j = 0; j < num_point_dim; ++j) { voxels[voxel_idx * max_num_points_in_voxel * num_point_dim + curr_num_point * num_point_dim + j] = points[point_idx * num_point_dim + j]; } num_points_per_voxel[voxel_idx] = curr_num_point + 1; } } return true; } std::vector<paddle::Tensor> hard_voxelize_cpu( const paddle::Tensor &points, const std::vector<float> &voxel_size, const std::vector<float> &point_cloud_range, const int max_num_points_in_voxel, const int max_voxels) { auto num_points = points.shape()[0]; auto num_point_dim = points.shape()[1]; const float voxel_size_x = voxel_size[0]; const float voxel_size_y = voxel_size[1]; const float voxel_size_z = voxel_size[2]; const float point_cloud_range_x_min = point_cloud_range[0]; const float point_cloud_range_y_min = point_cloud_range[1]; const float point_cloud_range_z_min = point_cloud_range[2]; int grid_size_x = static_cast<int>( round((point_cloud_range[3] - point_cloud_range[0]) / voxel_size_x)); int grid_size_y = 
static_cast<int>( round((point_cloud_range[4] - point_cloud_range[1]) / voxel_size_y)); int grid_size_z = static_cast<int>( round((point_cloud_range[5] - point_cloud_range[2]) / voxel_size_z)); auto voxels = paddle::empty({max_voxels, max_num_points_in_voxel, num_point_dim}, paddle::DataType::FLOAT32, paddle::CPUPlace()); auto coords = paddle::full({max_voxels, 3}, 0, paddle::DataType::INT32, paddle::CPUPlace()); auto *coords_data = coords.data<int>(); auto num_points_per_voxel = paddle::full( {max_voxels}, 0, paddle::DataType::INT32, paddle::CPUPlace()); auto *num_points_per_voxel_data = num_points_per_voxel.data<int>(); std::fill(num_points_per_voxel_data, num_points_per_voxel_data + num_points_per_voxel.size(), static_cast<int>(0)); auto num_voxels = paddle::full({1}, 0, paddle::DataType::INT32, paddle::CPUPlace()); auto *num_voxels_data = num_voxels.data<int>(); auto grid_idx_to_voxel_idx = paddle::full({grid_size_z, grid_size_y, grid_size_x}, -1, paddle::DataType::INT32, paddle::CPUPlace()); auto *grid_idx_to_voxel_idx_data = grid_idx_to_voxel_idx.data<int>(); PD_DISPATCH_FLOATING_TYPES( points.type(), "hard_voxelize_cpu_kernel", ([&] { hard_voxelize_cpu_kernel<data_t, int>( points.data<data_t>(), point_cloud_range_x_min, point_cloud_range_y_min, point_cloud_range_z_min, voxel_size_x, voxel_size_y, voxel_size_z, grid_size_x, grid_size_y, grid_size_z, num_points, num_point_dim, max_num_points_in_voxel, max_voxels, voxels.data<data_t>(), coords_data, num_points_per_voxel_data, grid_idx_to_voxel_idx_data, num_voxels_data); })); return {voxels, coords, num_points_per_voxel, num_voxels}; } #ifdef PADDLE_WITH_CUDA std::vector<paddle::Tensor> hard_voxelize_cuda( const paddle::Tensor &points, const std::vector<float> &voxel_size, const std::vector<float> &point_cloud_range, int max_num_points_in_voxel, int max_voxels); #endif std::vector<paddle::Tensor> hard_voxelize( const paddle::Tensor &points, const std::vector<float> &voxel_size, const std::vector<float> &point_cloud_range, const int max_num_points_in_voxel, const int max_voxels) { if (points.is_cpu()) { return hard_voxelize_cpu(points, voxel_size, point_cloud_range, max_num_points_in_voxel, max_voxels); #ifdef PADDLE_WITH_CUDA } else if (points.is_gpu() || points.is_gpu_pinned()) { return hard_voxelize_cuda(points, voxel_size, point_cloud_range, max_num_points_in_voxel, max_voxels); #endif } else { PD_THROW( "Unsupported device type for hard_voxelize " "operator."); } } std::vector<std::vector<int64_t>> HardInferShape( std::vector<int64_t> points_shape, const std::vector<float> &voxel_size, const std::vector<float> &point_cloud_range, const int &max_num_points_in_voxel, const int &max_voxels) { return {{max_voxels, max_num_points_in_voxel, points_shape[1]}, {max_voxels, 3}, {max_voxels}, {1}}; } std::vector<paddle::DataType> HardInferDtype(paddle::DataType points_dtype) { return {points_dtype, paddle::DataType::INT32, paddle::DataType::INT32, paddle::DataType::INT32}; } PD_BUILD_OP(hard_voxelize) .Inputs({"POINTS"}) .Outputs({"VOXELS", "COORS", "NUM_POINTS_PER_VOXEL", "num_voxels"}) .SetKernelFn(PD_KERNEL(hard_voxelize)) .Attrs({"voxel_size: std::vector<float>", "point_cloud_range: std::vector<float>", "max_num_points_in_voxel: int", "max_voxels: int"}) .SetInferShapeFn(PD_INFER_SHAPE(HardInferShape)) .SetInferDtypeFn(PD_INFER_DTYPE(HardInferDtype));
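The core of the CPU voxelizer is the point-to-cell mapping: floor-divide each coordinate by the voxel size, reject points outside the grid, and flatten (z, y, x) into a single grid index. A minimal sketch of just that step, with illustrative names:

#include <cmath>

// Map one point to its flattened grid index, or -1 if it falls outside the
// grid. Mirrors the coordinate and flattening logic of
// hard_voxelize_cpu_kernel: grid_idx = z * ny * nx + y * nx + x.
int point_to_grid_idx(const float *pt, const float *range_min,
                      const float *voxel_size, const int *grid_size) {
  int c[3];
  for (int d = 0; d < 3; ++d) {
    c[d] = static_cast<int>(std::floor((pt[d] - range_min[d]) / voxel_size[d]));
    if (c[d] < 0 || c[d] >= grid_size[d]) return -1;  // outside the grid
  }
  return c[2] * grid_size[1] * grid_size[0] + c[1] * grid_size[0] + c[0];
}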
apollo_public_repos/apollo-model-centerpoint/deploy/centerpoint/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/centerpoint/cpp/custom_ops/postprocess.cc
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <cuda.h> #include <cuda_runtime_api.h> #include "paddle/include/experimental/ext_all.h" std::vector<paddle::Tensor> postprocess_gpu( const std::vector<paddle::Tensor> &hm, const std::vector<paddle::Tensor> &reg, const std::vector<paddle::Tensor> &height, const std::vector<paddle::Tensor> &dim, const std::vector<paddle::Tensor> &vel, const std::vector<paddle::Tensor> &rot, const std::vector<float> &voxel_size, const std::vector<float> &point_cloud_range, const std::vector<float> &post_center_range, const std::vector<int> &num_classes, const int down_ratio, const float score_threshold, const float nms_iou_threshold, const int nms_pre_max_size, const int nms_post_max_size, const bool with_velocity); std::vector<paddle::Tensor> centerpoint_postprocess( const std::vector<paddle::Tensor> &hm, const std::vector<paddle::Tensor> &reg, const std::vector<paddle::Tensor> &height, const std::vector<paddle::Tensor> &dim, const std::vector<paddle::Tensor> &vel, const std::vector<paddle::Tensor> &rot, const std::vector<float> &voxel_size, const std::vector<float> &point_cloud_range, const std::vector<float> &post_center_range, const std::vector<int> &num_classes, const int down_ratio, const float score_threshold, const float nms_iou_threshold, const int nms_pre_max_size, const int nms_post_max_size, const bool with_velocity) { if (hm[0].is_gpu()) { return postprocess_gpu(hm, reg, height, dim, vel, rot, voxel_size, point_cloud_range, post_center_range, num_classes, down_ratio, score_threshold, nms_iou_threshold, nms_pre_max_size, nms_post_max_size, with_velocity); } else { PD_THROW( "Unsupported device type for centerpoint postprocess " "operator."); } } std::vector<std::vector<int64_t>> PostProcessInferShape( const std::vector<std::vector<int64_t>> &hm_shape, const std::vector<std::vector<int64_t>> &reg_shape, const std::vector<std::vector<int64_t>> &height_shape, const std::vector<std::vector<int64_t>> &dim_shape, const std::vector<std::vector<int64_t>> &vel_shape, const std::vector<std::vector<int64_t>> &rot_shape, const std::vector<float> &voxel_size, const std::vector<float> &point_cloud_range, const std::vector<float> &post_center_range, const std::vector<int> &num_classes, const int down_ratio, const float score_threshold, const float nms_iou_threshold, const int nms_pre_max_size, const int nms_post_max_size, const bool with_velocity) { if (with_velocity) { return {{-1, 9}, {-1}, {-1}}; } else { return {{-1, 7}, {-1}, {-1}}; } } std::vector<paddle::DataType> PostProcessInferDtype( const std::vector<paddle::DataType> &hm_dtype, const std::vector<paddle::DataType> &reg_dtype, const std::vector<paddle::DataType> &height_dtype, const std::vector<paddle::DataType> &dim_dtype, const std::vector<paddle::DataType> &vel_dtype, const std::vector<paddle::DataType> &rot_dtype) { return {reg_dtype[0], hm_dtype[0], paddle::DataType::INT64}; } PD_BUILD_OP(centerpoint_postprocess) 
.Inputs({paddle::Vec("HM"), paddle::Vec("REG"), paddle::Vec("HEIGHT"), paddle::Vec("DIM"), paddle::Vec("VEL"), paddle::Vec("ROT")}) .Outputs({"BBOXES", "SCORES", "LABELS"}) .SetKernelFn(PD_KERNEL(centerpoint_postprocess)) .Attrs({"voxel_size: std::vector<float>", "point_cloud_range: std::vector<float>", "post_center_range: std::vector<float>", "num_classes: std::vector<int>", "down_ratio: int", "score_threshold: float", "nms_iou_threshold: float", "nms_pre_max_size: int", "nms_post_max_size: int", "with_velocity: bool"}) .SetInferShapeFn(PD_INFER_SHAPE(PostProcessInferShape)) .SetInferDtypeFn(PD_INFER_DTYPE(PostProcessInferDtype));
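As PostProcessInferShape declares, the box tensor is [N, 9] when velocities are regressed (x, y, z, dx, dy, dz, vx, vy, heading, matching the layout written by decode_kernel) and [N, 7] otherwise, with N dynamic. A trivial illustration of that contract (hypothetical helper, not part of the op):

#include <cstdint>
#include <vector>

// Shape of the BBOXES output for one configuration; -1 marks the dynamic
// box count, as in PostProcessInferShape above.
std::vector<int64_t> bbox_shape(bool with_velocity) {
  return {-1, with_velocity ? 9 : 7};
}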
apollo_public_repos/apollo-model-centerpoint/deploy/centerpoint/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/centerpoint/cpp/custom_ops/voxelize_op.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/include/experimental/ext_all.h" #define CHECK_INPUT_CUDA(x) \ PD_CHECK(x.is_gpu() || x.is_gpu_pinned(), #x " must be a GPU Tensor.") #define CUDA_KERNEL_LOOP(i, n) \ for (auto i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) template <typename T, typename T_int> __global__ void map_point_to_grid_kernel( const T *points, const float point_cloud_range_x_min, const float point_cloud_range_y_min, const float point_cloud_range_z_min, const float voxel_size_x, const float voxel_size_y, const float voxel_size_z, const int grid_size_x, const int grid_size_y, const int grid_size_z, const int64_t num_points, const int num_point_dim, const int max_num_points_in_voxel, T_int *points_to_grid_idx, T_int *points_to_num_idx, T_int *num_points_in_grid, int *points_valid) { int64_t point_idx = blockIdx.x * blockDim.x + threadIdx.x; if (point_idx > num_points || point_idx == num_points) { return; } int coord_x = floor((points[point_idx * num_point_dim + 0] - point_cloud_range_x_min) / voxel_size_x); int coord_y = floor((points[point_idx * num_point_dim + 1] - point_cloud_range_y_min) / voxel_size_y); int coord_z = floor((points[point_idx * num_point_dim + 2] - point_cloud_range_z_min) / voxel_size_z); if (coord_x < 0 || coord_x > grid_size_x || coord_x == grid_size_x) { return; } if (coord_y < 0 || coord_y > grid_size_y || coord_y == grid_size_y) { return; } if (coord_z < 0 || coord_z > grid_size_z || coord_z == grid_size_z) { return; } int grid_idx = coord_z * grid_size_y * grid_size_x + coord_y * grid_size_x + coord_x; T_int num = atomicAdd(num_points_in_grid + grid_idx, 1); if (num < max_num_points_in_voxel) { points_to_num_idx[point_idx] = num; points_to_grid_idx[point_idx] = grid_idx; atomicMin(points_valid + grid_idx, static_cast<int>(point_idx)); } } template <typename T_int> __global__ void update_points_flag(const int *points_valid, const T_int *points_to_grid_idx, const int num_points, int *points_flag) { int tid = threadIdx.x + blockIdx.x * blockDim.x; for (int i = tid; i < num_points; i += gridDim.x * blockDim.x) { T_int grid_idx = points_to_grid_idx[i]; if (grid_idx >= 0) { int id = points_valid[grid_idx]; if (id != num_points && id == i) { points_flag[i] = 1; } } } } template <typename T_int> __global__ void get_voxel_idx_kernel(const int *points_flag, const T_int *points_to_grid_idx, const int *points_flag_prefix_sum, const int num_points, const int max_voxels, T_int *num_voxels, T_int *grid_idx_to_voxel_idx) { int tid = threadIdx.x + blockIdx.x * blockDim.x; for (int i = tid; i < num_points; i += gridDim.x * blockDim.x) { if (points_flag[i] == 1) { T_int grid_idx = points_to_grid_idx[i]; int num = points_flag_prefix_sum[i]; if (num < max_voxels) { grid_idx_to_voxel_idx[grid_idx] = num; } } if (i == num_points - 1) { int num = points_flag_prefix_sum[i] + points_flag[i]; if (num < max_voxels) { num_voxels[0] = num; } else { num_voxels[0] 
= max_voxels; } } } } template <typename T> __global__ void init_voxels_kernel(const int64_t num, T *voxels) { int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx > num || idx == num) { return; } voxels[idx] = static_cast<T>(0); } template <typename T, typename T_int> __global__ void assign_voxels_kernel( const T *points, const T_int *points_to_grid_idx, const T_int *points_to_num_idx, const T_int *grid_idx_to_voxel_idx, const int64_t num_points, const int num_point_dim, const int max_num_points_in_voxel, T *voxels) { int64_t point_idx = blockIdx.x * blockDim.x + threadIdx.x; if (point_idx > num_points || point_idx == num_points) { return; } T_int grid_idx = points_to_grid_idx[point_idx]; T_int num_idx = points_to_num_idx[point_idx]; if (grid_idx > -1 && num_idx > -1) { T_int voxel_idx = grid_idx_to_voxel_idx[grid_idx]; if (voxel_idx > -1) { for (int64_t i = 0; i < num_point_dim; ++i) { voxels[voxel_idx * max_num_points_in_voxel * num_point_dim + num_idx * num_point_dim + i] = points[point_idx * num_point_dim + i]; } } } } template <typename T, typename T_int> __global__ void assign_coords_kernel(const T_int *grid_idx_to_voxel_idx, const T_int *num_points_in_grid, const int num_grids, const int grid_size_x, const int grid_size_y, const int grid_size_z, const int max_num_points_in_voxel, T *coords, T *num_points_per_voxel) { int64_t grid_idx = blockIdx.x * blockDim.x + threadIdx.x; if (grid_idx > num_grids || grid_idx == num_grids) { return; } T_int voxel_idx = grid_idx_to_voxel_idx[grid_idx]; if (voxel_idx > -1) { T_int coord_z = grid_idx / grid_size_x / grid_size_y; T_int coord_y = (grid_idx - coord_z * grid_size_x * grid_size_y) / grid_size_x; T_int coord_x = grid_idx - coord_z * grid_size_x * grid_size_y - coord_y * grid_size_x; coords[voxel_idx * 3 + 0] = coord_z; coords[voxel_idx * 3 + 1] = coord_y; coords[voxel_idx * 3 + 2] = coord_x; num_points_per_voxel[voxel_idx] = min(num_points_in_grid[grid_idx], max_num_points_in_voxel); } } std::vector<paddle::Tensor> hard_voxelize_cuda( const paddle::Tensor &points, const std::vector<float> &voxel_size, const std::vector<float> &point_cloud_range, int max_num_points_in_voxel, int max_voxels) { // check device CHECK_INPUT_CUDA(points); int64_t num_points = points.shape()[0]; int64_t num_point_dim = points.shape()[1]; const float voxel_size_x = voxel_size[0]; const float voxel_size_y = voxel_size[1]; const float voxel_size_z = voxel_size[2]; const float point_cloud_range_x_min = point_cloud_range[0]; const float point_cloud_range_y_min = point_cloud_range[1]; const float point_cloud_range_z_min = point_cloud_range[2]; int grid_size_x = static_cast<int>( round((point_cloud_range[3] - point_cloud_range[0]) / voxel_size_x)); int grid_size_y = static_cast<int>( round((point_cloud_range[4] - point_cloud_range[1]) / voxel_size_y)); int grid_size_z = static_cast<int>( round((point_cloud_range[5] - point_cloud_range[2]) / voxel_size_z)); int num_grids = grid_size_x * grid_size_y * grid_size_z; auto voxels = paddle::empty({max_voxels, max_num_points_in_voxel, num_point_dim}, paddle::DataType::FLOAT32, paddle::GPUPlace()); auto coords = paddle::full({max_voxels, 3}, 0, paddle::DataType::INT32, paddle::GPUPlace()); auto *coords_data = coords.data<int>(); auto num_points_per_voxel = paddle::full( {max_voxels}, 0, paddle::DataType::INT32, paddle::GPUPlace()); auto *num_points_per_voxel_data = num_points_per_voxel.data<int>(); auto points_to_grid_idx = paddle::full( {num_points}, -1, paddle::DataType::INT32, paddle::GPUPlace()); auto 
*points_to_grid_idx_data = points_to_grid_idx.data<int>(); auto points_to_num_idx = paddle::full( {num_points}, -1, paddle::DataType::INT32, paddle::GPUPlace()); auto *points_to_num_idx_data = points_to_num_idx.data<int>(); auto num_points_in_grid = paddle::full({grid_size_z, grid_size_y, grid_size_x}, 0, paddle::DataType::INT32, paddle::GPUPlace()); auto *num_points_in_grid_data = num_points_in_grid.data<int>(); auto grid_idx_to_voxel_idx = paddle::full({grid_size_z, grid_size_y, grid_size_x}, -1, paddle::DataType::INT32, paddle::GPUPlace()); auto *grid_idx_to_voxel_idx_data = grid_idx_to_voxel_idx.data<int>(); auto num_voxels = paddle::full({1}, 0, paddle::DataType::INT32, paddle::GPUPlace()); auto *num_voxels_data = num_voxels.data<int>(); auto points_valid = paddle::full({grid_size_z * grid_size_y * grid_size_x}, static_cast<int>(num_points), paddle::DataType::INT32, paddle::GPUPlace()); int *points_valid_data = points_valid.data<int>(); auto points_flag = paddle::full({num_points}, 0, paddle::DataType::INT32, paddle::GPUPlace()); // 1. Find the grid index for each point, compute the // number of points in each grid int64_t threads = 512; int64_t blocks = (num_points + threads - 1) / threads; PD_DISPATCH_FLOATING_TYPES( points.type(), "map_point_to_grid_kernel", ([&] { map_point_to_grid_kernel<data_t, int> <<<blocks, threads, 0, points.stream()>>>( points.data<data_t>(), point_cloud_range_x_min, point_cloud_range_y_min, point_cloud_range_z_min, voxel_size_x, voxel_size_y, voxel_size_z, grid_size_x, grid_size_y, grid_size_z, num_points, num_point_dim, max_num_points_in_voxel, points_to_grid_idx_data, points_to_num_idx_data, num_points_in_grid_data, points_valid_data); })); // 2. Find the number of non-zero voxels int *points_flag_data = points_flag.data<int>(); threads = 512; blocks = (num_points + threads - 1) / threads; update_points_flag<int><<<blocks, threads, 0, points.stream()>>>( points_valid_data, points_to_grid_idx_data, num_points, points_flag_data); auto points_flag_prefix_sum = paddle::experimental::cumsum(points_flag, 0, false, true, false); int *points_flag_prefix_sum_data = points_flag_prefix_sum.data<int>(); get_voxel_idx_kernel<int><<<blocks, threads, 0, points.stream()>>>( points_flag_data, points_to_grid_idx_data, points_flag_prefix_sum_data, num_points, max_voxels, num_voxels_data, grid_idx_to_voxel_idx_data); // 3. Store points to voxels coords and num_points_per_voxel int64_t num = max_voxels * max_num_points_in_voxel * num_point_dim; threads = 512; blocks = (num + threads - 1) / threads; PD_DISPATCH_FLOATING_TYPES(points.type(), "init_voxels_kernel", ([&] { init_voxels_kernel<data_t> <<<blocks, threads, 0, points.stream()>>>( num, voxels.data<data_t>()); })); threads = 512; blocks = (num_points + threads - 1) / threads; PD_DISPATCH_FLOATING_TYPES( points.type(), "assign_voxels_kernel", ([&] { assign_voxels_kernel<data_t, int> <<<blocks, threads, 0, points.stream()>>>( points.data<data_t>(), points_to_grid_idx_data, points_to_num_idx_data, grid_idx_to_voxel_idx_data, num_points, num_point_dim, max_num_points_in_voxel, voxels.data<data_t>()); })); // 4. Store coords, num_points_per_voxel blocks = (num_grids + threads - 1) / threads; assign_coords_kernel<int><<<blocks, threads, 0, points.stream()>>>( grid_idx_to_voxel_idx_data, num_points_in_grid_data, num_grids, grid_size_x, grid_size_y, grid_size_z, max_num_points_in_voxel, coords_data, num_points_per_voxel_data); return {voxels, coords, num_points_per_voxel, num_voxels}; }
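The GPU voxelizer avoids a serial scan by combining a per-point "first point in its grid cell" flag with a prefix sum (the paddle::experimental::cumsum call above): the running sum of flags numbers the occupied cells densely. A CPU sketch of that id-assignment idea, with illustrative names (the real kernel additionally caps the count at max_voxels):

#include <vector>

// Assign dense voxel ids from per-point flags: flagged points are the first
// point seen in their grid cell, and the exclusive running sum of flags
// gives each occupied cell an id in 0..num_voxels-1.
int assign_voxel_ids(const std::vector<int> &first_point_in_grid,
                     std::vector<int> *voxel_id_per_point) {
  int running = 0;  // exclusive prefix sum of the flags
  voxel_id_per_point->assign(first_point_in_grid.size(), -1);
  for (size_t i = 0; i < first_point_in_grid.size(); ++i) {
    if (first_point_in_grid[i]) (*voxel_id_per_point)[i] = running++;
  }
  return running;  // number of non-empty voxels
}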
apollo_public_repos/apollo-model-centerpoint/deploy/centerpoint/cpp/cmake
apollo_public_repos/apollo-model-centerpoint/deploy/centerpoint/cpp/cmake/external/boost.cmake
include(ExternalProject) set(BOOST_PROJECT "extern_boost") # To release PaddlePaddle as a pip package, we have to follow the # manylinux1 standard, which features as old Linux kernels and # compilers as possible and recommends CentOS 5. Indeed, the earliest # CentOS version that works with NVIDIA CUDA is CentOS 6. And a new # version of boost, say, 1.66.0, doesn't build on CentOS 6. We # checked that the devtools package of CentOS 6 installs boost 1.41.0. # So we use 1.41.0 here. set(BOOST_VER "1.41.0") set(BOOST_TAR "boost_1_41_0" CACHE STRING "" FORCE) set(BOOST_URL "http://paddlepaddledeps.bj.bcebos.com/${BOOST_TAR}.tar.gz" CACHE STRING "" FORCE) MESSAGE(STATUS "BOOST_TAR: ${BOOST_TAR}, BOOST_URL: ${BOOST_URL}") set(BOOST_SOURCES_DIR ${THIRD_PARTY_PATH}/boost) set(BOOST_DOWNLOAD_DIR "${BOOST_SOURCES_DIR}/src/${BOOST_PROJECT}") set(BOOST_INCLUDE_DIR "${BOOST_DOWNLOAD_DIR}" CACHE PATH "boost include directory." FORCE) set_directory_properties(PROPERTIES CLEAN_NO_CUSTOM 1) include_directories(${BOOST_INCLUDE_DIR}) ExternalProject_Add( ${BOOST_PROJECT} ${EXTERNAL_PROJECT_LOG_ARGS} DOWNLOAD_DIR ${BOOST_DOWNLOAD_DIR} URL ${BOOST_URL} DOWNLOAD_NO_PROGRESS 1 PREFIX ${BOOST_SOURCES_DIR} CONFIGURE_COMMAND "" BUILD_COMMAND "" INSTALL_COMMAND "" UPDATE_COMMAND "" ) if (${CMAKE_VERSION} VERSION_LESS "3.3.0" OR NOT WIN32) set(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/boost_dummy.c) file(WRITE ${dummyfile} "const char *dummy = \"${dummyfile}\";") add_library(boost STATIC ${dummyfile}) else() add_library(boost INTERFACE) endif() add_dependencies(boost ${BOOST_PROJECT}) set(Boost_INCLUDE_DIR ${BOOST_INCLUDE_DIR})
apollo_public_repos/apollo-model-centerpoint/deploy/iassd
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/python/infer.py
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import numpy as np import paddle from paddle.inference import Config, PrecisionType, create_predictor from paddle3d.ops.iou3d_nms_cuda import nms_gpu from paddle3d.ops.pointnet2_ops import (ball_query, farthest_point_sample, gather_operation, group_operation) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument( "--model_file", type=str, help="Model filename, Specify this when your model is a combined model.", required=True) parser.add_argument( "--params_file", type=str, help= "Parameter filename, Specify this when your model is a combined model.", required=True) parser.add_argument( '--lidar_file', type=str, help='The lidar path.', required=True) parser.add_argument("--gpu_id", type=int, default=0, help="GPU card id.") parser.add_argument( "--run_mode", type=str, default="", help="Run_mode which can be: trt_fp32, trt_fp16, trt_int8 and gpu_fp16." ) parser.add_argument( "--trt_use_static", type=int, default=0, help="Whether to load the tensorrt graph optimization from a disk path." ) parser.add_argument( "--trt_static_dir", type=str, help="Path of a tensorrt graph optimization directory.") return parser.parse_args() def read_point(lidar_file): points = np.fromfile(lidar_file, np.float32).reshape(-1, 4) return points def filter_points_outside_range(points, point_cloud_range): limit_range = np.asarray(point_cloud_range, dtype=np.float32) mask = (points[:, 0] >= limit_range[0]) & (points[:, 0] <= limit_range[3]) \ & (points[:, 1] >= limit_range[1]) & (points[:, 1] <= limit_range[4]) points = points[mask] return points def sample_point(points, num_points): if num_points < len(points): pts_depth = np.linalg.norm(points[:, 0:3], axis=1) pts_near_flag = pts_depth < 40.0 far_idxs_choice = np.where(pts_near_flag == 0)[0] near_idxs = np.where(pts_near_flag == 1)[0] if num_points > len(far_idxs_choice): near_idxs_choice = np.random.choice( near_idxs, num_points - len(far_idxs_choice), replace=False) choice = np.concatenate((near_idxs_choice, far_idxs_choice), axis=0) \ if len(far_idxs_choice) > 0 else near_idxs_choice else: choice = np.arange(0, len(points), dtype=np.int32) choice = np.random.choice(choice, num_points, replace=False) np.random.shuffle(choice) else: choice = np.arange(0, len(points), dtype=np.int32) if num_points > len(points): extra_choice = np.random.choice(choice, num_points - len(points)) choice = np.concatenate((choice, extra_choice), axis=0) np.random.shuffle(choice) points = points[choice] return points def preprocess(lidar_file, num_points, point_cloud_range): points = read_point(lidar_file) points = filter_points_outside_range(points, point_cloud_range) points = sample_point(points, num_points) return points def init_predictor(model_file, params_file, gpu_id=0, run_mode=None, trt_use_static=False, trt_static_dir=None): config = Config(model_file, params_file) config.enable_memory_optim() config.enable_use_gpu(1000, gpu_id) if 
run_mode == "gpu_fp16": config.exp_enable_use_gpu_fp16() elif run_mode == "trt_fp32": config.enable_tensorrt_engine( workspace_size=1 << 30, max_batch_size=1, min_subgraph_size=15, precision_mode=PrecisionType.Float32, use_static=trt_use_static, use_calib_mode=False) elif run_mode == "trt_fp16": config.enable_tensorrt_engine( workspace_size=1 << 30, max_batch_size=1, min_subgraph_size=15, precision_mode=PrecisionType.Half, use_static=trt_use_static, use_calib_mode=False) elif run_mode == "trt_int8": config.enable_tensorrt_engine( workspace_size=1 << 30, max_batch_size=1, min_subgraph_size=15, precision_mode=PrecisionType.Int8, use_static=trt_use_static, use_calib_mode=True) if trt_use_static: config.set_optim_cache_dir(trt_static_dir) predictor = create_predictor(config) return predictor def run(predictor, points): # copy points data into input_tensor input_names = predictor.get_input_names() input_tensor = predictor.get_input_handle(input_names[0]) input_tensor.reshape(points.shape) input_tensor.copy_from_cpu(points.copy()) # do the inference predictor.run() # get out data from output tensor output_names = predictor.get_output_names() return [ predictor.get_output_handle(name).copy_to_cpu() for name in output_names ] def main(args): np.random.seed(1024) predictor = init_predictor(args.model_file, args.params_file, args.gpu_id, args.run_mode, args.trt_use_static, args.trt_static_dir) num_points = 16384 point_cloud_range = [0, -40, -3, 70.4, 40, 1] points = preprocess(args.lidar_file, num_points, point_cloud_range) box3d_lidar, label_preds, scores = run(predictor, points) print({'boxes': box3d_lidar, 'labels': label_preds, 'scores': scores}) if __name__ == '__main__': args = parse_args() main(args)
apollo_public_repos/apollo-model-centerpoint/deploy/iassd
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp/main.cc
#include <gflags/gflags.h> #include <glog/logging.h> #include <algorithm> #include <chrono> #include <cmath> #include <fstream> #include <iostream> #include <numeric> #include <random> #include <string> #include "paddle/include/paddle_inference_api.h" using paddle_infer::Config; using paddle_infer::CreatePredictor; using paddle_infer::PrecisionType; using paddle_infer::Predictor; DEFINE_string(model_file, "", "Path of inference model"); DEFINE_string(params_file, "", "Path of inference params"); DEFINE_string(lidar_file, "", "Path of lidar file"); DEFINE_string(run_mode, "", "Run mode, could be fp32, trt_fp32, trt_fp16"); DEFINE_int32(gpu_id, 0, "GPU Id"); bool read_point(const std::string &file_path, const int &num_point_dim, void **buffer, int *num_point) { std::ifstream file_in(file_path, std::ios::in | std::ios::binary); if (!file_in) { LOG(ERROR) << "Failed to read file: " << file_path << "\n"; return false; } // get file size std::streampos file_size; file_in.seekg(0, std::ios::end); file_size = file_in.tellg(); file_in.seekg(0, std::ios::beg); *buffer = malloc(file_size); if (*buffer == nullptr) { LOG(ERROR) << "Failed to malloc memory of size: " << file_size << "\n"; return false; } file_in.read(reinterpret_cast<char *>(*buffer), file_size); file_in.close(); if (file_size / sizeof(float) % num_point_dim != 0) { LOG(ERROR) << "Loaded file size (" << file_size << ") is not evenly divisible by num_point_dim (" << num_point_dim << ")\n"; return false; } *num_point = file_size / sizeof(float) / num_point_dim; return true; } void mask_points_outside_range(const std::vector<float> &points, const std::vector<float> &point_cloud_range, const int &num_point_dim, std::vector<float> *selected_points) { for (int i = 0; i < points.size(); i += num_point_dim) { float pt_x = points[i]; float pt_y = points[i + 1]; // in [-x, x] and [-y, y] range if ((pt_x >= point_cloud_range[0]) && (pt_x <= point_cloud_range[3]) && (pt_y >= point_cloud_range[1]) && (pt_y <= point_cloud_range[4])) { for (int d = 0; d < num_point_dim; ++d) { selected_points->push_back(points[i + d]); } } } } void sample_points(const std::vector<float> &points, const int &dst_num_points, const int &num_point_dim, std::vector<float> *selected_points) { int src_num_points = points.size() / num_point_dim; std::vector<int> far_idx_choice; std::vector<int> near_idx; std::vector<int> choice; std::random_device rd; std::mt19937 g(rd()); g.seed(1024); if (dst_num_points < src_num_points) { for (int i = 0; i < src_num_points; ++i) { float pt_x = points[i * num_point_dim]; float pt_y = points[i * num_point_dim + 1]; float pt_z = points[i * num_point_dim + 2]; float dist = sqrt(pt_x * pt_x + pt_y * pt_y + pt_z * pt_z); if (dist < 40.0) { near_idx.push_back(i); } else { far_idx_choice.push_back(i); } } if (dst_num_points > far_idx_choice.size()) { // shuffle near_idx std::shuffle(near_idx.begin(), near_idx.end(), g); choice.insert(choice.begin(), near_idx.begin(), near_idx.begin() + dst_num_points - far_idx_choice.size()); if (far_idx_choice.size() > 0) { choice.insert(choice.end(), far_idx_choice.begin(), far_idx_choice.end()); } } else { std::vector<int> src_idx(src_num_points); for (int v = 0; v < src_num_points; ++v) { src_idx[v] = v; } // shuffle src_idx std::shuffle(src_idx.begin(), src_idx.end(), g); choice.insert(choice.begin(), src_idx.begin(), src_idx.begin() + dst_num_points); } } else { std::vector<int> src_idx(src_num_points); for (int v = 0; v < src_num_points; ++v) { src_idx[v] = v; } choice.insert(choice.begin(), src_idx.begin(), 
src_idx.end()); if (dst_num_points > src_num_points) { for (int i = src_num_points; i < dst_num_points; ++i) { std::uniform_int_distribution<int> uniform_dist(0, src_num_points - 1); choice.push_back(uniform_dist(g)); } } } // sample points by selected choice for (int i = 0; i < choice.size(); ++i) { int idx = choice[i]; for (int d = 0; d < num_point_dim; ++d) { selected_points->push_back(points[idx * num_point_dim + d]); } } } std::shared_ptr<Predictor> init_predictor(const std::string &model_file, const std::string &params_file, const std::string &run_mode, const int &gpu_id) { // init config Config config; config.SetModel(model_file, params_file); config.EnableUseGpu(1000, gpu_id); // trt setting if (run_mode == "trt_fp32") { config.EnableTensorRtEngine(1 << 30, 1, 15, PrecisionType::kFloat32, false, false); } else if (run_mode == "trt_fp16") { config.EnableTensorRtEngine(1 << 30, 1, 15, PrecisionType::kHalf, false, false); } // memory optim config.EnableMemoryOptim(); return CreatePredictor(config); } void run(Predictor *predictor, std::vector<float> &points, std::vector<int> &input_shape, std::vector<float> *boxes, std::vector<long> *labels, std::vector<float> *scores) { // setup input points handle auto input_names = predictor->GetInputNames(); auto points_handle = predictor->GetInputHandle(input_names[0]); // just one input: point cloud points_handle->Reshape(input_shape); points_handle->CopyFromCpu(points.data()); // do infer CHECK(predictor->Run()); // fetch predict boxes, labels, scores auto output_names = predictor->GetOutputNames(); for (int i = 0; i < output_names.size(); ++i) { auto output = predictor->GetOutputHandle(output_names[i]); std::vector<int> output_shape = output->shape(); int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1, std::multiplies<int>()); if (i == 0) { boxes->resize(out_num); output->CopyToCpu(boxes->data()); } else if (i == 1) { labels->resize(out_num); output->CopyToCpu(labels->data()); } else if (i == 2) { scores->resize(out_num); output->CopyToCpu(scores->data()); } } } void print_results(const std::vector<float> &boxes, const std::vector<long> &labels, const std::vector<float> &scores) { int num_boxes = scores.size(); int boxes_dim = boxes.size() / num_boxes; for (int box_idx = 0; box_idx != num_boxes; ++box_idx) { // filter fake results: label = -1 if (labels[box_idx] < 0.0) { continue; } LOG(INFO) << "Score: " << scores[box_idx] << " Label: " << labels[box_idx] << " "; if (boxes_dim == 7) { LOG(INFO) << "Box (x_c, y_c, z_c, w, l, h, rot): " << boxes[box_idx * 7 + 0] << " " << boxes[box_idx * 7 + 1] << " " << boxes[box_idx * 7 + 2] << " " << boxes[box_idx * 7 + 3] << " " << boxes[box_idx * 7 + 4] << " " << boxes[box_idx * 7 + 5] << " " << boxes[box_idx * 7 + 6] << "\n"; } } } int main(int argc, char *argv[]) { google::ParseCommandLineFlags(&argc, &argv, true); auto predictor = init_predictor(FLAGS_model_file, FLAGS_params_file, FLAGS_run_mode, FLAGS_gpu_id); // input handle and settings const int num_sampled_point = 16384; const int num_point_dim = 4; // xyz + intensity std::vector<float> point_cloud_range = {0, -40, -3, 70.4, 40, 1}; std::vector<int> input_shape = {num_sampled_point, num_point_dim}; // read points int num_point; void *buffer = nullptr; if (!read_point(FLAGS_lidar_file, num_point_dim, &buffer, &num_point)) { return false; } float *points = static_cast<float *>(buffer); std::vector<float> input_data(points, points + num_point * num_point_dim); // preprocess std::vector<float> masked_points; 
mask_points_outside_range(input_data, point_cloud_range, num_point_dim, &masked_points); std::vector<float> selected_points; sample_points(masked_points, num_sampled_point, num_point_dim, &selected_points); // output handle std::vector<float> boxes; std::vector<long> labels; std::vector<float> scores; // run infer run(predictor.get(), selected_points, input_shape, &boxes, &labels, &scores); // print results print_results(boxes, labels, scores); return 0; }
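print_results emits boxes as (x_c, y_c, z_c, w, l, h, rot). If downstream code needs bird's-eye-view corners, they follow from the same counter-clockwise rotation convention used by rotate_around_center in the bundled IoU sources; a hypothetical helper sketching that conversion (box[3]/box[4] are treated as the extents along the box's local x/y axes):

#include <cmath>

// Expand a 7-dim box into its four BEV corners around (x_c, y_c).
void box_to_bev_corners(const float box[7], float corners[4][2]) {
  const float c = std::cos(box[6]), s = std::sin(box[6]);
  const float dx[4] = {-0.5f, 0.5f, 0.5f, -0.5f};
  const float dy[4] = {-0.5f, -0.5f, 0.5f, 0.5f};
  for (int k = 0; k < 4; ++k) {
    const float lx = dx[k] * box[3];  // offset along the box's local x axis
    const float ly = dy[k] * box[4];  // offset along the box's local y axis
    corners[k][0] = box[0] + lx * c - ly * s;
    corners[k][1] = box[1] + lx * s + ly * c;
  }
}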
apollo_public_repos/apollo-model-centerpoint/deploy/iassd
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp/CMakeLists.txt
cmake_minimum_required(VERSION 3.0) project(cpp_inference_demo CXX C) option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." ON) option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." ON) option(USE_TENSORRT "Compile demo with TensorRT." ON) option(CUSTOM_OPERATOR_FILES "List of file names for custom operators" "") execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpfullversion -dumpversion OUTPUT_VARIABLE GCC_VERSION) string(REGEX MATCHALL "[0-9]+" GCC_VERSION_COMPONENTS ${GCC_VERSION}) list(GET GCC_VERSION_COMPONENTS 0 GCC_MAJOR) list(GET GCC_VERSION_COMPONENTS 1 GCC_MINOR) set(GCC_VERSION "${GCC_MAJOR}.${GCC_MINOR}") if (GCC_VERSION LESS "8.0") set(CMAKE_CXX_FLAGS "-Wl,--no-as-needed") endif() set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake") include(external/boost) if(WITH_GPU) find_package(CUDA REQUIRED) add_definitions("-DPADDLE_WITH_CUDA") endif() if(NOT WITH_STATIC_LIB) add_definitions("-DPADDLE_WITH_SHARED_LIB") else() # PD_INFER_DECL is mainly used to set the dllimport/dllexport attribute in dynamic library mode. # Set it to empty in static library mode to avoid compilation issues. add_definitions("/DPD_INFER_DECL=") endif() macro(safe_set_static_flag) foreach(flag_var CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO) if(${flag_var} MATCHES "/MD") string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}") endif(${flag_var} MATCHES "/MD") endforeach(flag_var) endmacro() if(NOT DEFINED PADDLE_LIB) message(FATAL_ERROR "please set PADDLE_LIB with -DPADDLE_LIB=/path/paddle/lib") endif() if(NOT DEFINED DEMO_NAME) message(FATAL_ERROR "please set DEMO_NAME with -DDEMO_NAME=demo_name") endif() include_directories("${PADDLE_LIB}/") set(PADDLE_LIB_THIRD_PARTY_PATH "${PADDLE_LIB}/third_party/install/") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}protobuf/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}glog/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}gflags/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}xxhash/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}onnxruntime/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}paddle2onnx/include") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}protobuf/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}glog/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}gflags/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}xxhash/lib") link_directories("${PADDLE_LIB}/paddle/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}onnxruntime/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}paddle2onnx/lib") if (WIN32) add_definitions("/DGOOGLE_GLOG_DLL_DECL=") option(MSVC_STATIC_CRT "use static C Runtime library by default" ON) if (MSVC_STATIC_CRT) if (WITH_MKL) set(FLAG_OPENMP "/openmp") endif() set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /bigobj /MTd ${FLAG_OPENMP}") set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /bigobj /MT ${FLAG_OPENMP}") set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /bigobj /MTd ${FLAG_OPENMP}") set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /bigobj /MT ${FLAG_OPENMP}") safe_set_static_flag() if (WITH_STATIC_LIB) add_definitions(-DSTATIC_LIB) endif() endif() else() if(WITH_MKL) set(FLAG_OPENMP "-fopenmp") endif() set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14 ${FLAG_OPENMP}") endif() if(WITH_GPU) if(NOT WIN32) set(CUDA_LIB "/usr/local/cuda/lib64/" CACHE STRING "CUDA Library") else() if(CUDA_LIB 
STREQUAL "") set(CUDA_LIB "C:\\Program\ Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v8.0\\lib\\x64") endif() endif(NOT WIN32) endif() if (USE_TENSORRT AND WITH_GPU) set(TENSORRT_ROOT "" CACHE STRING "The root directory of TensorRT library") if("${TENSORRT_ROOT}" STREQUAL "") message(FATAL_ERROR "The TENSORRT_ROOT is empty, you must assign it a value with CMake command. Such as: -DTENSORRT_ROOT=TENSORRT_ROOT_PATH ") endif() set(TENSORRT_INCLUDE_DIR ${TENSORRT_ROOT}/include) set(TENSORRT_LIB_DIR ${TENSORRT_ROOT}/lib) endif() if (NOT WIN32) if (USE_TENSORRT AND WITH_GPU) include_directories("${TENSORRT_INCLUDE_DIR}") link_directories("${TENSORRT_LIB_DIR}") endif() endif(NOT WIN32) if(WITH_MKL) set(MATH_LIB_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}mklml") include_directories("${MATH_LIB_PATH}/include") if(WIN32) set(MATH_LIB ${MATH_LIB_PATH}/lib/mklml${CMAKE_STATIC_LIBRARY_SUFFIX} ${MATH_LIB_PATH}/lib/libiomp5md${CMAKE_STATIC_LIBRARY_SUFFIX}) else() set(MATH_LIB ${MATH_LIB_PATH}/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX} ${MATH_LIB_PATH}/lib/libiomp5${CMAKE_SHARED_LIBRARY_SUFFIX}) endif() set(MKLDNN_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}mkldnn") if(EXISTS ${MKLDNN_PATH}) include_directories("${MKLDNN_PATH}/include") if(WIN32) set(MKLDNN_LIB ${MKLDNN_PATH}/lib/mkldnn.lib) else(WIN32) set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.0) endif(WIN32) endif() else() set(OPENBLAS_LIB_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}openblas") include_directories("${OPENBLAS_LIB_PATH}/include/openblas") if(WIN32) set(MATH_LIB ${OPENBLAS_LIB_PATH}/lib/openblas${CMAKE_STATIC_LIBRARY_SUFFIX}) else() set(MATH_LIB ${OPENBLAS_LIB_PATH}/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX}) endif() endif() if(WITH_STATIC_LIB) set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX}) else() if(WIN32) set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX}) else() set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_SHARED_LIBRARY_SUFFIX}) endif() endif() if (NOT WIN32) if (GCC_VERSION LESS "8.0") set(EXTERNAL_LIB ${EXTERNAL_LIB} "-lssl -lcrypto -lz -lleveldb -lsnappy") endif() set(EXTERNAL_LIB ${EXTERNAL_LIB} "-lrt -ldl -lpthread") set(DEPS ${DEPS} ${MATH_LIB} ${MKLDNN_LIB} glog gflags protobuf xxhash ${EXTERNAL_LIB}) else() set(DEPS ${DEPS} ${MATH_LIB} ${MKLDNN_LIB} glog gflags_static libprotobuf xxhash ${EXTERNAL_LIB}) set(DEPS ${DEPS} shlwapi.lib) endif(NOT WIN32) if(WITH_GPU) if(NOT WIN32) if (USE_TENSORRT) set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer${CMAKE_SHARED_LIBRARY_SUFFIX}) set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX}) endif() set(DEPS ${DEPS} ${CUDA_LIB}/libcudart${CMAKE_SHARED_LIBRARY_SUFFIX}) else() if(USE_TENSORRT) set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/nvinfer${CMAKE_STATIC_LIBRARY_SUFFIX}) set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_STATIC_LIBRARY_SUFFIX}) endif() set(DEPS ${DEPS} ${CUDA_LIB}/cudart${CMAKE_STATIC_LIBRARY_SUFFIX} ) set(DEPS ${DEPS} ${CUDA_LIB}/cublas${CMAKE_STATIC_LIBRARY_SUFFIX} ) set(DEPS ${DEPS} ${CUDA_LIB}/cudnn${CMAKE_STATIC_LIBRARY_SUFFIX} ) endif() endif() cuda_add_library(pd_infer_custom_op ${CUSTOM_OPERATOR_FILES} SHARED) add_executable(${DEMO_NAME} ${DEMO_NAME}.cc) if (GCC_VERSION GREATER_EQUAL "8.0") set(DEPS ${DEPS} libssl.a libcrypto.a libz.a libleveldb.a libsnappy.a) endif() set(DEPS ${DEPS} boost pd_infer_custom_op)# libssl.a libcrypto.a libz.a libleveldb.a libsnappy.a) if(WIN32) if(USE_TENSORRT) add_custom_command(TARGET ${DEMO_NAME} POST_BUILD COMMAND 
${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer${CMAKE_SHARED_LIBRARY_SUFFIX} ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE} COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX} ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE} ) endif() if(WITH_MKL) add_custom_command(TARGET ${DEMO_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/mklml.dll ${CMAKE_BINARY_DIR}/Release COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/libiomp5md.dll ${CMAKE_BINARY_DIR}/Release COMMAND ${CMAKE_COMMAND} -E copy ${MKLDNN_PATH}/lib/mkldnn.dll ${CMAKE_BINARY_DIR}/Release ) else() add_custom_command(TARGET ${DEMO_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy ${OPENBLAS_LIB_PATH}/lib/openblas.dll ${CMAKE_BINARY_DIR}/Release ) endif() if(NOT WITH_STATIC_LIB) add_custom_command(TARGET ${DEMO_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy "${PADDLE_LIB}/paddle/lib/paddle_fluid.dll" ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE} ) endif() endif() target_link_libraries(${DEMO_NAME} ${DEPS})
0
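The CMake project above builds an executable from ${DEMO_NAME}.cc against the Paddle Inference C++ library; the demo source itself is not part of this record. As a rough orientation only, a minimal Paddle Inference entry point of the kind this CMakeLists links might look like the sketch below. The model/param paths and the input shape are hypothetical placeholders, not values taken from this repo.

// Minimal sketch (assumptions flagged) of a Paddle Inference entry point
// such as the DEMO_NAME target built above.
#include <vector>
#include "paddle/include/paddle_inference_api.h"

int main() {
  paddle_infer::Config config;
  // Placeholder model files; a real deployment would pass these in.
  config.SetModel("model.pdmodel", "model.pdiparams");
  config.EnableUseGpu(/*memory_pool_init_size_mb=*/1000, /*device_id=*/0);
  auto predictor = paddle_infer::CreatePredictor(config);

  // Feed a dummy point-cloud tensor; the (1, 16384, 4) shape is illustrative.
  auto input_names = predictor->GetInputNames();
  auto input = predictor->GetInputHandle(input_names[0]);
  std::vector<float> data(1 * 16384 * 4, 0.f);
  input->Reshape({1, 16384, 4});
  input->CopyFromCpu(data.data());

  predictor->Run();

  auto output_names = predictor->GetOutputNames();
  auto output = predictor->GetOutputHandle(output_names[0]);
  auto out_shape = output->shape();  // inspect predicted boxes/scores here
  return 0;
}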
apollo_public_repos/apollo-model-centerpoint/deploy/iassd
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp/compile.sh
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

mkdir -p build
cd build
rm -rf *

DEMO_NAME=main

WITH_MKL=ON
WITH_GPU=ON
USE_TENSORRT=ON

LIB_DIR=/workspace/Paddle3D/deploy/iassd/cpp/paddle_inference
CUDNN_LIB=/usr/lib/x86_64-linux-gnu
CUDA_LIB=/usr/local/cuda/lib64
TENSORRT_ROOT=/workspace/TensorRT-8.2.5.1

CUSTOM_OPERATOR_FILES="custom_ops/ball_query_gpu.cu;custom_ops/ball_query.cc;\
custom_ops/gather_points_gpu.cu;custom_ops/gather_points.cc;\
custom_ops/group_points_gpu.cu;custom_ops/group_points.cc;\
custom_ops/sampling_gpu.cu;custom_ops/sampling.cc;\
custom_ops/iou3d_cpu.cpp;custom_ops/iou3d_nms_api.cpp;custom_ops/iou3d_nms.cpp;custom_ops/iou3d_nms_kernel.cu"

cmake .. -DPADDLE_LIB=${LIB_DIR} \
    -DWITH_MKL=${WITH_MKL} \
    -DDEMO_NAME=${DEMO_NAME} \
    -DWITH_GPU=${WITH_GPU} \
    -DWITH_STATIC_LIB=OFF \
    -DUSE_TENSORRT=${USE_TENSORRT} \
    -DCUDNN_LIB=${CUDNN_LIB} \
    -DCUDA_LIB=${CUDA_LIB} \
    -DTENSORRT_ROOT=${TENSORRT_ROOT} \
    -DCUSTOM_OPERATOR_FILES=${CUSTOM_OPERATOR_FILES}
make -j
0
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp/custom_ops/iou3d_cpu.cpp
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* 3D Rotated IoU Calculation (CPU) Written by Shaoshuai Shi All Rights Reserved 2020. */ #include "iou3d_cpu.h" #include <cuda.h> #include <cuda_runtime_api.h> #include <math.h> #include <stdio.h> #include <vector> #include "paddle/include/experimental/ext_all.h" inline float min(float a, float b) { return a > b ? b : a; } inline float max(float a, float b) { return a > b ? a : b; } const float EPS = 1e-8; struct Point { float x, y; __device__ Point() {} __device__ Point(double _x, double _y) { x = _x, y = _y; } __device__ void set(float _x, float _y) { x = _x; y = _y; } __device__ Point operator+(const Point &b) const { return Point(x + b.x, y + b.y); } __device__ Point operator-(const Point &b) const { return Point(x - b.x, y - b.y); } }; inline float cross(const Point &a, const Point &b) { return a.x * b.y - a.y * b.x; } inline float cross(const Point &p1, const Point &p2, const Point &p0) { return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y); } inline int check_rect_cross(const Point &p1, const Point &p2, const Point &q1, const Point &q2) { int ret = min(p1.x, p2.x) <= max(q1.x, q2.x) && min(q1.x, q2.x) <= max(p1.x, p2.x) && min(p1.y, p2.y) <= max(q1.y, q2.y) && min(q1.y, q2.y) <= max(p1.y, p2.y); return ret; } inline int check_in_box2d(const float *box, const Point &p) { // params: (7) [x, y, z, dx, dy, dz, heading] const float MARGIN = 1e-2; float center_x = box[0], center_y = box[1]; float angle_cos = cos(-box[6]), angle_sin = sin(-box[6]); // rotate the point in the opposite direction of box float rot_x = (p.x - center_x) * angle_cos + (p.y - center_y) * (-angle_sin); float rot_y = (p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos; return (fabs(rot_x) < box[3] / 2 + MARGIN && fabs(rot_y) < box[4] / 2 + MARGIN); } inline int intersection(const Point &p1, const Point &p0, const Point &q1, const Point &q0, Point &ans) { // fast exclusion if (check_rect_cross(p0, p1, q0, q1) == 0) return 0; // check cross standing float s1 = cross(q0, p1, p0); float s2 = cross(p1, q1, p0); float s3 = cross(p0, q1, q0); float s4 = cross(q1, p1, q0); if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0; // calculate intersection of two lines float s5 = cross(q1, p1, p0); if (fabs(s5 - s1) > EPS) { ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1); ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1); } else { float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y; float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y; float D = a0 * b1 - a1 * b0; ans.x = (b0 * c1 - b1 * c0) / D; ans.y = (a1 * c0 - a0 * c1) / D; } return 1; } inline void rotate_around_center(const Point &center, const float angle_cos, const float angle_sin, Point &p) { float new_x = (p.x - center.x) * angle_cos + (p.y - center.y) * (-angle_sin) + center.x; float new_y = (p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y; p.set(new_x, new_y); } inline int point_cmp(const 
Point &a, const Point &b, const Point &center) { return atan2(a.y - center.y, a.x - center.x) > atan2(b.y - center.y, b.x - center.x); } inline float box_overlap(const float *box_a, const float *box_b) { // params: box_a (7) [x, y, z, dx, dy, dz, heading] // params: box_b (7) [x, y, z, dx, dy, dz, heading] // float a_x1 = box_a[0], a_y1 = box_a[1], a_x2 = box_a[2], a_y2 = // box_a[3], a_angle = box_a[4]; // float b_x1 = box_b[0], b_y1 = box_b[1], b_x2 = box_b[2], b_y2 = // box_b[3], b_angle = box_b[4]; float a_angle = box_a[6], b_angle = box_b[6]; float a_dx_half = box_a[3] / 2, b_dx_half = box_b[3] / 2, a_dy_half = box_a[4] / 2, b_dy_half = box_b[4] / 2; float a_x1 = box_a[0] - a_dx_half, a_y1 = box_a[1] - a_dy_half; float a_x2 = box_a[0] + a_dx_half, a_y2 = box_a[1] + a_dy_half; float b_x1 = box_b[0] - b_dx_half, b_y1 = box_b[1] - b_dy_half; float b_x2 = box_b[0] + b_dx_half, b_y2 = box_b[1] + b_dy_half; Point center_a(box_a[0], box_a[1]); Point center_b(box_b[0], box_b[1]); Point box_a_corners[5]; box_a_corners[0].set(a_x1, a_y1); box_a_corners[1].set(a_x2, a_y1); box_a_corners[2].set(a_x2, a_y2); box_a_corners[3].set(a_x1, a_y2); Point box_b_corners[5]; box_b_corners[0].set(b_x1, b_y1); box_b_corners[1].set(b_x2, b_y1); box_b_corners[2].set(b_x2, b_y2); box_b_corners[3].set(b_x1, b_y2); // get oriented corners float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle); float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle); for (int k = 0; k < 4; k++) { rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]); rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]); } box_a_corners[4] = box_a_corners[0]; box_b_corners[4] = box_b_corners[0]; // get intersection of lines Point cross_points[16]; Point poly_center; int cnt = 0, flag = 0; poly_center.set(0, 0); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { flag = intersection(box_a_corners[i + 1], box_a_corners[i], box_b_corners[j + 1], box_b_corners[j], cross_points[cnt]); if (flag) { poly_center = poly_center + cross_points[cnt]; cnt++; } } } // check corners for (int k = 0; k < 4; k++) { if (check_in_box2d(box_a, box_b_corners[k])) { poly_center = poly_center + box_b_corners[k]; cross_points[cnt] = box_b_corners[k]; cnt++; } if (check_in_box2d(box_b, box_a_corners[k])) { poly_center = poly_center + box_a_corners[k]; cross_points[cnt] = box_a_corners[k]; cnt++; } } poly_center.x /= cnt; poly_center.y /= cnt; // sort the points of polygon Point temp; for (int j = 0; j < cnt - 1; j++) { for (int i = 0; i < cnt - j - 1; i++) { if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)) { temp = cross_points[i]; cross_points[i] = cross_points[i + 1]; cross_points[i + 1] = temp; } } } // get the overlap areas float area = 0; for (int k = 0; k < cnt - 1; k++) { area += cross(cross_points[k] - cross_points[0], cross_points[k + 1] - cross_points[0]); } return fabs(area) / 2.0; } inline float iou_bev(const float *box_a, const float *box_b) { // params: box_a (7) [x, y, z, dx, dy, dz, heading] // params: box_b (7) [x, y, z, dx, dy, dz, heading] float sa = box_a[3] * box_a[4]; float sb = box_b[3] * box_b[4]; float s_overlap = box_overlap(box_a, box_b); return s_overlap / fmaxf(sa + sb - s_overlap, EPS); } std::vector<paddle::Tensor> boxes_iou_bev_cpu( const paddle::Tensor &boxes_a_tensor, const paddle::Tensor &boxes_b_tensor) { // params boxes_a_tensor: (N, 7) [x, y, z, dx, dy, dz, heading] // params boxes_b_tensor: (M, 7) [x, y, z, dx, dy, dz, heading] // params ans_iou_tensor: (N, M) 
int num_boxes_a = boxes_a_tensor.shape()[0]; int num_boxes_b = boxes_b_tensor.shape()[0]; const float *boxes_a = boxes_a_tensor.data<float>(); const float *boxes_b = boxes_b_tensor.data<float>(); auto ans_iou_tensor = paddle::empty({num_boxes_a, num_boxes_b}, paddle::DataType::FLOAT32, paddle::CPUPlace()); float *ans_iou = ans_iou_tensor.data<float>(); for (int i = 0; i < num_boxes_a; i++) { for (int j = 0; j < num_boxes_b; j++) { ans_iou[i * num_boxes_b + j] = iou_bev(boxes_a + i * 7, boxes_b + j * 7); } } return {ans_iou_tensor}; }
0
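The polygon-clipping routine above handles arbitrary headings; when both headings are zero it must agree with plain axis-aligned rectangle IoU. A standalone sanity check (not part of the repo) that mirrors that degenerate case with a hand-computed value:

// Standalone check: for heading == 0, rotated BEV IoU reduces to
// axis-aligned IoU. Boxes follow the same [x, y, z, dx, dy, dz, heading]
// layout used throughout this file.
#include <algorithm>
#include <cassert>
#include <cmath>

static float axis_aligned_iou(const float* a, const float* b) {
  float left = std::max(a[0] - a[3] / 2, b[0] - b[3] / 2);
  float right = std::min(a[0] + a[3] / 2, b[0] + b[3] / 2);
  float top = std::max(a[1] - a[4] / 2, b[1] - b[4] / 2);
  float bottom = std::min(a[1] + a[4] / 2, b[1] + b[4] / 2);
  float inter = std::max(right - left, 0.f) * std::max(bottom - top, 0.f);
  float uni = a[3] * a[4] + b[3] * b[4] - inter;
  return inter / std::max(uni, 1e-8f);
}

int main() {
  float box_a[7] = {0, 0, 0, 2, 2, 2, 0};
  float box_b[7] = {1, 0, 0, 2, 2, 2, 0};
  // Overlap is a 1 x 2 strip = 2; union = 4 + 4 - 2 = 6; IoU = 1/3.
  assert(std::abs(axis_aligned_iou(box_a, box_b) - 1.f / 3.f) < 1e-6f);
  return 0;
}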
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp/custom_ops/iou3d_nms.cpp
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* 3D IoU Calculation and Rotated NMS(modified from 2D NMS written by others) Written by Shaoshuai Shi All Rights Reserved 2019-2020. */ #include "iou3d_nms.h" #include <cuda.h> #include <cuda_runtime_api.h> #include <vector> #include "paddle/include/experimental/ext_all.h" #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) const int THREADS_PER_BLOCK_NMS = sizeof(int64_t) * 8; void BoxesOverlapLauncher(const cudaStream_t &stream, const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap); void BoxesIouBevLauncher(const cudaStream_t &stream, const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou); void NmsLauncher(const cudaStream_t &stream, const float *boxes, int64_t *mask, int boxes_num, float nms_overlap_thresh); void NmsNormalLauncher(const cudaStream_t &stream, const float *boxes, int64_t *mask, int boxes_num, float nms_overlap_thresh); std::vector<paddle::Tensor> boxes_overlap_bev_gpu( const paddle::Tensor &boxes_a, const paddle::Tensor &boxes_b) { // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading] // params ans_overlap: (N, M) int num_a = boxes_a.shape()[0]; int num_b = boxes_b.shape()[0]; const float *boxes_a_data = boxes_a.data<float>(); const float *boxes_b_data = boxes_b.data<float>(); auto ans_overlap = paddle::empty({num_a, num_b}, paddle::DataType::FLOAT32, paddle::GPUPlace()); float *ans_overlap_data = ans_overlap.data<float>(); BoxesOverlapLauncher(boxes_a.stream(), num_a, boxes_a_data, num_b, boxes_b_data, ans_overlap_data); return {ans_overlap}; } std::vector<paddle::Tensor> boxes_iou_bev_gpu( const paddle::Tensor &boxes_a_tensor, const paddle::Tensor &boxes_b_tensor) { // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading] // params ans_overlap: (N, M) int num_a = boxes_a_tensor.shape()[0]; int num_b = boxes_b_tensor.shape()[0]; const float *boxes_a_data = boxes_a_tensor.data<float>(); const float *boxes_b_data = boxes_b_tensor.data<float>(); auto ans_iou_tensor = paddle::empty({num_a, num_b}, paddle::DataType::FLOAT32, paddle::GPUPlace()); float *ans_iou_data = ans_iou_tensor.data<float>(); BoxesIouBevLauncher(boxes_a_tensor.stream(), num_a, boxes_a_data, num_b, boxes_b_data, ans_iou_data); return {ans_iou_tensor}; } std::vector<paddle::Tensor> nms_gpu(const paddle::Tensor &boxes, float nms_overlap_thresh) { // params boxes: (N, 7) [x, y, z, dx, dy, dz, heading] auto keep = paddle::empty({boxes.shape()[0]}, paddle::DataType::INT32, paddle::CPUPlace()); auto num_to_keep_tensor = paddle::empty({1}, paddle::DataType::INT32, paddle::CPUPlace()); int *num_to_keep_data = num_to_keep_tensor.data<int>(); int boxes_num = boxes.shape()[0]; const float *boxes_data = boxes.data<float>(); int *keep_data = keep.data<int>(); const int col_blocks = DIVUP(boxes_num, 
THREADS_PER_BLOCK_NMS); // int64_t *mask_data = NULL; // CHECK_ERROR(cudaMalloc((void**)&mask_data, boxes_num * col_blocks * // sizeof(int64_t))); auto mask = paddle::empty({boxes_num * col_blocks}, paddle::DataType::INT64, paddle::GPUPlace()); int64_t *mask_data = mask.data<int64_t>(); NmsLauncher(boxes.stream(), boxes_data, mask_data, boxes_num, nms_overlap_thresh); // std::vector<int64_t> mask_cpu(boxes_num * col_blocks); // CHECK_ERROR(cudaMemcpy(&mask_cpu[0], mask_data, boxes_num * col_blocks * // sizeof(int64_t), // cudaMemcpyDeviceToHost)); const paddle::Tensor mask_cpu_tensor = mask.copy_to(paddle::CPUPlace(), true); const int64_t *mask_cpu = mask_cpu_tensor.data<int64_t>(); // cudaFree(mask_data); int64_t remv_cpu[col_blocks]; memset(remv_cpu, 0, col_blocks * sizeof(int64_t)); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / THREADS_PER_BLOCK_NMS; int inblock = i % THREADS_PER_BLOCK_NMS; if (!(remv_cpu[nblock] & (1ULL << inblock))) { keep_data[num_to_keep++] = i; const int64_t *p = &mask_cpu[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv_cpu[j] |= p[j]; } } } num_to_keep_data[0] = num_to_keep; if (cudaSuccess != cudaGetLastError()) printf("Error!\n"); return {keep, num_to_keep_tensor}; } std::vector<paddle::Tensor> nms_normal_gpu(const paddle::Tensor &boxes, float nms_overlap_thresh) { // params boxes: (N, 7) [x, y, z, dx, dy, dz, heading] // params keep: (N) auto keep = paddle::empty({boxes.shape()[0]}, paddle::DataType::INT32, paddle::CPUPlace()); auto num_to_keep_tensor = paddle::empty({1}, paddle::DataType::INT32, paddle::CPUPlace()); int *num_to_keep_data = num_to_keep_tensor.data<int>(); int boxes_num = boxes.shape()[0]; const float *boxes_data = boxes.data<float>(); int *keep_data = keep.data<int>(); const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); // int64_t *mask_data = NULL; // CHECK_ERROR(cudaMalloc((void**)&mask_data, boxes_num * col_blocks * // sizeof(int64_t))); auto mask = paddle::empty({boxes_num * col_blocks}, paddle::DataType::INT64, paddle::GPUPlace()); int64_t *mask_data = mask.data<int64_t>(); NmsNormalLauncher(boxes.stream(), boxes_data, mask_data, boxes_num, nms_overlap_thresh); // int64_t mask_cpu[boxes_num * col_blocks]; // int64_t *mask_cpu = new int64_t [boxes_num * col_blocks]; // std::vector<int64_t> mask_cpu(boxes_num * col_blocks); // CHECK_ERROR(cudaMemcpy(&mask_cpu[0], mask_data, boxes_num * col_blocks * // sizeof(int64_t), // cudaMemcpyDeviceToHost)); // cudaFree(mask_data); const paddle::Tensor mask_cpu_tensor = mask.copy_to(paddle::CPUPlace(), true); const int64_t *mask_cpu = mask_cpu_tensor.data<int64_t>(); int64_t remv_cpu[col_blocks]; memset(remv_cpu, 0, col_blocks * sizeof(int64_t)); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / THREADS_PER_BLOCK_NMS; int inblock = i % THREADS_PER_BLOCK_NMS; if (!(remv_cpu[nblock] & (1ULL << inblock))) { keep_data[num_to_keep++] = i; const int64_t *p = &mask_cpu[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv_cpu[j] |= p[j]; } } } num_to_keep_data[0] = num_to_keep; if (cudaSuccess != cudaGetLastError()) { printf("Error!\n"); } return {keep, num_to_keep_tensor}; }
0
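The keep-list construction in nms_gpu and nms_normal_gpu above decodes a bitmask computed on the GPU: word mask[i * col_blocks + j] holds, in bit k, whether box i suppresses box j * 64 + k. A standalone C++ sketch of just that decoding step, assuming (as the callers of these ops do) that boxes are pre-sorted by descending score:

// Mask decoding as used by nms_gpu above, isolated for clarity.
#include <cstdint>
#include <vector>

std::vector<int> decode_nms_mask(const int64_t* mask, int boxes_num) {
  const int kBits = 64;  // bits per mask word (sizeof(int64_t) * 8)
  const int col_blocks = (boxes_num + kBits - 1) / kBits;
  std::vector<int64_t> remv(col_blocks, 0);  // accumulated suppression bits
  std::vector<int> keep;
  for (int i = 0; i < boxes_num; i++) {
    int nblock = i / kBits, inblock = i % kBits;
    if (!(remv[nblock] & (1ULL << inblock))) {  // box i not yet suppressed
      keep.push_back(i);
      // OR in everything that box i suppresses (only blocks >= nblock
      // can contain later boxes).
      const int64_t* p = mask + static_cast<size_t>(i) * col_blocks;
      for (int j = nblock; j < col_blocks; j++) remv[j] |= p[j];
    }
  }
  return keep;
}

Because suppression bits are only ever set for later (lower-scoring) boxes, a single forward pass suffices.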
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp/custom_ops/iou3d_nms_kernel.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* 3D IoU Calculation and Rotated NMS(modified from 2D NMS written by others) Written by Shaoshuai Shi All Rights Reserved 2019-2020. */ #include <stdio.h> #define THREADS_PER_BLOCK 16 #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) // #define DEBUG const int THREADS_PER_BLOCK_NMS = sizeof(int64_t) * 8; const float EPS = 1e-8; struct Point { float x, y; __device__ Point() {} __device__ Point(double _x, double _y) { x = _x, y = _y; } __device__ void set(float _x, float _y) { x = _x; y = _y; } __device__ Point operator+(const Point &b) const { return Point(x + b.x, y + b.y); } __device__ Point operator-(const Point &b) const { return Point(x - b.x, y - b.y); } }; __device__ inline float cross(const Point &a, const Point &b) { return a.x * b.y - a.y * b.x; } __device__ inline float cross(const Point &p1, const Point &p2, const Point &p0) { return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y); } __device__ int check_rect_cross(const Point &p1, const Point &p2, const Point &q1, const Point &q2) { int ret = min(p1.x, p2.x) <= max(q1.x, q2.x) && min(q1.x, q2.x) <= max(p1.x, p2.x) && min(p1.y, p2.y) <= max(q1.y, q2.y) && min(q1.y, q2.y) <= max(p1.y, p2.y); return ret; } __device__ inline int check_in_box2d(const float *box, const Point &p) { // params: (7) [x, y, z, dx, dy, dz, heading] const float MARGIN = 1e-2; float center_x = box[0], center_y = box[1]; float angle_cos = cos(-box[6]), angle_sin = sin(-box[6]); // rotate the point in the opposite direction of box float rot_x = (p.x - center_x) * angle_cos + (p.y - center_y) * (-angle_sin); float rot_y = (p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos; return (fabs(rot_x) < box[3] / 2 + MARGIN && fabs(rot_y) < box[4] / 2 + MARGIN); } __device__ inline int intersection(const Point &p1, const Point &p0, const Point &q1, const Point &q0, Point &ans) { // fast exclusion if (check_rect_cross(p0, p1, q0, q1) == 0) return 0; // check cross standing float s1 = cross(q0, p1, p0); float s2 = cross(p1, q1, p0); float s3 = cross(p0, q1, q0); float s4 = cross(q1, p1, q0); if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0; // calculate intersection of two lines float s5 = cross(q1, p1, p0); if (fabs(s5 - s1) > EPS) { ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1); ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1); } else { float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y; float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y; float D = a0 * b1 - a1 * b0; ans.x = (b0 * c1 - b1 * c0) / D; ans.y = (a1 * c0 - a0 * c1) / D; } return 1; } __device__ inline void rotate_around_center(const Point &center, const float angle_cos, const float angle_sin, Point &p) { float new_x = (p.x - center.x) * angle_cos + (p.y - center.y) * (-angle_sin) + center.x; float new_y = (p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y; p.set(new_x, new_y); } __device__ inline int point_cmp(const Point &a, 
const Point &b, const Point &center) { return atan2(a.y - center.y, a.x - center.x) > atan2(b.y - center.y, b.x - center.x); } __device__ inline float box_overlap(const float *box_a, const float *box_b) { // params box_a: [x, y, z, dx, dy, dz, heading] // params box_b: [x, y, z, dx, dy, dz, heading] float a_angle = box_a[6], b_angle = box_b[6]; float a_dx_half = box_a[3] / 2, b_dx_half = box_b[3] / 2, a_dy_half = box_a[4] / 2, b_dy_half = box_b[4] / 2; float a_x1 = box_a[0] - a_dx_half, a_y1 = box_a[1] - a_dy_half; float a_x2 = box_a[0] + a_dx_half, a_y2 = box_a[1] + a_dy_half; float b_x1 = box_b[0] - b_dx_half, b_y1 = box_b[1] - b_dy_half; float b_x2 = box_b[0] + b_dx_half, b_y2 = box_b[1] + b_dy_half; Point center_a(box_a[0], box_a[1]); Point center_b(box_b[0], box_b[1]); #ifdef DEBUG printf( "a: (%.3f, %.3f, %.3f, %.3f, %.3f), b: (%.3f, %.3f, %.3f, %.3f, %.3f)\n", a_x1, a_y1, a_x2, a_y2, a_angle, b_x1, b_y1, b_x2, b_y2, b_angle); printf("center a: (%.3f, %.3f), b: (%.3f, %.3f)\n", center_a.x, center_a.y, center_b.x, center_b.y); #endif Point box_a_corners[5]; box_a_corners[0].set(a_x1, a_y1); box_a_corners[1].set(a_x2, a_y1); box_a_corners[2].set(a_x2, a_y2); box_a_corners[3].set(a_x1, a_y2); Point box_b_corners[5]; box_b_corners[0].set(b_x1, b_y1); box_b_corners[1].set(b_x2, b_y1); box_b_corners[2].set(b_x2, b_y2); box_b_corners[3].set(b_x1, b_y2); // get oriented corners float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle); float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle); for (int k = 0; k < 4; k++) { #ifdef DEBUG printf("before corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y); #endif rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]); rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]); #ifdef DEBUG printf("corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y); #endif } box_a_corners[4] = box_a_corners[0]; box_b_corners[4] = box_b_corners[0]; // get intersection of lines Point cross_points[16]; Point poly_center; int cnt = 0, flag = 0; poly_center.set(0, 0); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { flag = intersection(box_a_corners[i + 1], box_a_corners[i], box_b_corners[j + 1], box_b_corners[j], cross_points[cnt]); if (flag) { poly_center = poly_center + cross_points[cnt]; cnt++; #ifdef DEBUG printf( "Cross points (%.3f, %.3f): a(%.3f, %.3f)->(%.3f, %.3f), b(%.3f, " "%.3f)->(%.3f, %.3f) \n", cross_points[cnt - 1].x, cross_points[cnt - 1].y, box_a_corners[i].x, box_a_corners[i].y, box_a_corners[i + 1].x, box_a_corners[i + 1].y, box_b_corners[i].x, box_b_corners[i].y, box_b_corners[i + 1].x, box_b_corners[i + 1].y); #endif } } } // check corners for (int k = 0; k < 4; k++) { if (check_in_box2d(box_a, box_b_corners[k])) { poly_center = poly_center + box_b_corners[k]; cross_points[cnt] = box_b_corners[k]; cnt++; #ifdef DEBUG printf("b corners in a: corner_b(%.3f, %.3f)", cross_points[cnt - 1].x, cross_points[cnt - 1].y); #endif } if (check_in_box2d(box_b, box_a_corners[k])) { poly_center = poly_center + box_a_corners[k]; cross_points[cnt] = box_a_corners[k]; cnt++; #ifdef DEBUG printf("a corners in b: corner_a(%.3f, %.3f)", cross_points[cnt - 1].x, cross_points[cnt - 1].y); #endif } } poly_center.x /= cnt; poly_center.y /= cnt; // sort the points of polygon Point temp; for (int j = 0; j < cnt - 1; j++) { for (int i = 0; i < cnt - j - 1; i++) { if 
(point_cmp(cross_points[i], cross_points[i + 1], poly_center)) { temp = cross_points[i]; cross_points[i] = cross_points[i + 1]; cross_points[i + 1] = temp; } } } #ifdef DEBUG printf("cnt=%d\n", cnt); for (int i = 0; i < cnt; i++) { printf("All cross point %d: (%.3f, %.3f)\n", i, cross_points[i].x, cross_points[i].y); } #endif // get the overlap areas float area = 0; for (int k = 0; k < cnt - 1; k++) { area += cross(cross_points[k] - cross_points[0], cross_points[k + 1] - cross_points[0]); } return fabs(area) / 2.0; } __device__ inline float iou_bev(const float *box_a, const float *box_b) { // params box_a: [x, y, z, dx, dy, dz, heading] // params box_b: [x, y, z, dx, dy, dz, heading] float sa = box_a[3] * box_a[4]; float sb = box_b[3] * box_b[4]; float s_overlap = box_overlap(box_a, box_b); return s_overlap / fmaxf(sa + sb - s_overlap, EPS); } __global__ void boxes_overlap_kernel(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap) { // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading] const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y; const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (a_idx >= num_a || b_idx >= num_b) { return; } const float *cur_box_a = boxes_a + a_idx * 7; const float *cur_box_b = boxes_b + b_idx * 7; float s_overlap = box_overlap(cur_box_a, cur_box_b); ans_overlap[a_idx * num_b + b_idx] = s_overlap; } __global__ void boxes_iou_bev_kernel(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou) { // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading] const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y; const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (a_idx >= num_a || b_idx >= num_b) { return; } const float *cur_box_a = boxes_a + a_idx * 7; const float *cur_box_b = boxes_b + b_idx * 7; float cur_iou_bev = iou_bev(cur_box_a, cur_box_b); ans_iou[a_idx * num_b + b_idx] = cur_iou_bev; } __global__ void nms_kernel(const int boxes_num, const float nms_overlap_thresh, const float *boxes, int64_t *mask) { // params: boxes (N, 7) [x, y, z, dx, dy, dz, heading] // params: mask (N, N/THREADS_PER_BLOCK_NMS) const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 7 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 0]; block_boxes[threadIdx.x * 7 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 1]; block_boxes[threadIdx.x * 7 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 2]; block_boxes[threadIdx.x * 7 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 3]; block_boxes[threadIdx.x * 7 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 4]; block_boxes[threadIdx.x * 7 + 5] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 5]; block_boxes[threadIdx.x * 7 + 6] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 6]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x; const float *cur_box = 
boxes + cur_box_idx * 7; int i = 0; int64_t t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (iou_bev(cur_box, block_boxes + i * 7) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); mask[cur_box_idx * col_blocks + col_start] = t; } } __device__ inline float iou_normal(float const *const a, float const *const b) { // params: a: [x, y, z, dx, dy, dz, heading] // params: b: [x, y, z, dx, dy, dz, heading] float left = fmaxf(a[0] - a[3] / 2, b[0] - b[3] / 2), right = fminf(a[0] + a[3] / 2, b[0] + b[3] / 2); float top = fmaxf(a[1] - a[4] / 2, b[1] - b[4] / 2), bottom = fminf(a[1] + a[4] / 2, b[1] + b[4] / 2); float width = fmaxf(right - left, 0.f), height = fmaxf(bottom - top, 0.f); float interS = width * height; float Sa = a[3] * a[4]; float Sb = b[3] * b[4]; return interS / fmaxf(Sa + Sb - interS, EPS); } __global__ void nms_normal_kernel(const int boxes_num, const float nms_overlap_thresh, const float *boxes, int64_t *mask) { // params: boxes (N, 7) [x, y, z, dx, dy, dz, heading] // params: mask (N, N/THREADS_PER_BLOCK_NMS) const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 7 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 0]; block_boxes[threadIdx.x * 7 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 1]; block_boxes[threadIdx.x * 7 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 2]; block_boxes[threadIdx.x * 7 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 3]; block_boxes[threadIdx.x * 7 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 4]; block_boxes[threadIdx.x * 7 + 5] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 5]; block_boxes[threadIdx.x * 7 + 6] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 6]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x; const float *cur_box = boxes + cur_box_idx * 7; int i = 0; int64_t t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (iou_normal(cur_box, block_boxes + i * 7) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); mask[cur_box_idx * col_blocks + col_start] = t; } } void BoxesOverlapLauncher(const cudaStream_t &stream, const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap) { dim3 blocks( DIVUP(num_b, THREADS_PER_BLOCK), DIVUP(num_a, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK); boxes_overlap_kernel<<<blocks, threads, 0, stream>>>(num_a, boxes_a, num_b, boxes_b, ans_overlap); #ifdef DEBUG cudaDeviceSynchronize(); // for using printf in kernel function #endif } void BoxesIouBevLauncher(const cudaStream_t &stream, const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou) { dim3 blocks( DIVUP(num_b, THREADS_PER_BLOCK), DIVUP(num_a, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 
threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK); boxes_iou_bev_kernel<<<blocks, threads, 0, stream>>>(num_a, boxes_a, num_b, boxes_b, ans_iou); #ifdef DEBUG cudaDeviceSynchronize(); // for using printf in kernel function #endif } void NmsLauncher(const cudaStream_t &stream, const float *boxes, int64_t *mask, int boxes_num, float nms_overlap_thresh) { dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS), DIVUP(boxes_num, THREADS_PER_BLOCK_NMS)); dim3 threads(THREADS_PER_BLOCK_NMS); nms_kernel<<<blocks, threads, 0, stream>>>(boxes_num, nms_overlap_thresh, boxes, mask); } void NmsNormalLauncher(const cudaStream_t &stream, const float *boxes, int64_t *mask, int boxes_num, float nms_overlap_thresh) { dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS), DIVUP(boxes_num, THREADS_PER_BLOCK_NMS)); dim3 threads(THREADS_PER_BLOCK_NMS); nms_normal_kernel<<<blocks, threads, 0, stream>>>( boxes_num, nms_overlap_thresh, boxes, mask); }
0
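The kernels above tile the N x N pairwise comparison into 64 x 64 blocks, one thread block per tile, with the column tile staged through shared memory. A small host-side sketch (not from the repo) of how the mask buffer and launch geometry are sized for nms_kernel:

// Sizing sketch for the tiled NMS kernels above (plain CUDA host code).
#include <cuda_runtime.h>
#include <cstddef>

void size_nms_launch(int boxes_num, size_t* mask_words, dim3* grid,
                     dim3* block) {
  const int kThreads = 64;  // THREADS_PER_BLOCK_NMS == sizeof(int64_t) * 8
  const int col_blocks = (boxes_num + kThreads - 1) / kThreads;  // DIVUP
  // Each of the N rows stores col_blocks 64-bit words of suppression bits.
  *mask_words = static_cast<size_t>(boxes_num) * col_blocks;
  *grid = dim3(col_blocks, col_blocks);  // blockIdx.x = col tile, .y = row tile
  *block = dim3(kThreads);
}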
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp/custom_ops/iou3d_nms.h
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef IOU3D_NMS_H
#define IOU3D_NMS_H

#include <cuda.h>
#include <cuda_runtime_api.h>

#include <vector>

#include "paddle/include/experimental/ext_all.h"

std::vector<paddle::Tensor> boxes_overlap_bev_gpu(
    const paddle::Tensor &boxes_a, const paddle::Tensor &boxes_b);
std::vector<paddle::Tensor> boxes_iou_bev_gpu(
    const paddle::Tensor &boxes_a_tensor, const paddle::Tensor &boxes_b_tensor);
std::vector<paddle::Tensor> nms_gpu(const paddle::Tensor &boxes,
                                    float nms_overlap_thresh);
std::vector<paddle::Tensor> nms_normal_gpu(const paddle::Tensor &boxes,
                                           float nms_overlap_thresh);

#endif
0
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp/custom_ops/iou3d_cpu.h
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef IOU3D_CPU_H
#define IOU3D_CPU_H

#include <cuda.h>
#include <cuda_runtime_api.h>

#include <vector>

#include "paddle/include/experimental/ext_all.h"

std::vector<paddle::Tensor> boxes_iou_bev_cpu(
    const paddle::Tensor& boxes_a_tensor, const paddle::Tensor& boxes_b_tensor);

#endif
0
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp/custom_ops/group_points_gpu.cu
#include <stdio.h> #include <stdlib.h> #define THREADS_PER_BLOCK 512 #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) __global__ void group_points_cuda_kernel(const int b, const int c, const int n, const int npoints, const int nsample, const float *__restrict__ points, const int *__restrict__ idx, float *__restrict__ out) { // points: (B, C, N) // idx: (B, npoints, nsample) // output: // out: (B, C, npoints, nsample) int bs_idx = blockIdx.z; int c_idx = blockIdx.y; int index = blockIdx.x * blockDim.x + threadIdx.x; int pt_idx = index / nsample; if (bs_idx >= b || c_idx >= c || pt_idx >= npoints) return; int sample_idx = index % nsample; idx += bs_idx * npoints * nsample + pt_idx * nsample + sample_idx; int in_idx = bs_idx * c * n + c_idx * n + idx[0]; int out_idx = bs_idx * c * npoints * nsample + c_idx * npoints * nsample + pt_idx * nsample + sample_idx; out[out_idx] = points[in_idx]; } void group_points_cuda_launcher(const int b, const int c, const int n, const int npoints, const int nsample, const float *points, const int *idx, float *out) { // points: (B, C, N) // idx: (B, npoints, nsample) // output: // out: (B, C, npoints, nsample) cudaError_t err; dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); group_points_cuda_kernel<<<blocks, threads>>>(b, c, n, npoints, nsample, points, idx, out); // cudaDeviceSynchronize(); // for using printf in kernel function err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); exit(-1); } } __global__ void group_points_grad_cuda_kernel( const int b, const int c, const int n, const int npoints, const int nsample, const float *__restrict__ grad_out, const int *__restrict__ idx, float *__restrict__ grad_points) { // grad_out: (B, C, npoints, nsample) // idx: (B, npoints, nsample) // output: // grad_points: (B, C, N) int bs_idx = blockIdx.z; int c_idx = blockIdx.y; int index = blockIdx.x * blockDim.x + threadIdx.x; int pt_idx = index / nsample; if (bs_idx >= b || c_idx >= c || pt_idx >= npoints) return; int sample_idx = index % nsample; grad_out += bs_idx * c * npoints * nsample + c_idx * npoints * nsample + pt_idx * nsample + sample_idx; idx += bs_idx * npoints * nsample + pt_idx * nsample + sample_idx; atomicAdd(grad_points + bs_idx * c * n + c_idx * n + idx[0], grad_out[0]); } void group_points_grad_cuda_launcher(const int b, const int c, const int n, const int npoints, const int nsample, const float *grad_out, const int *idx, float *grad_points) { // grad_out: (B, C, npoints, nsample) // idx: (B, npoints, nsample) // output: // grad_points: (B, C, N) cudaError_t err; dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); group_points_grad_cuda_kernel<<<blocks, threads>>>( b, c, n, npoints, nsample, grad_out, idx, grad_points); err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); exit(-1); } }
0
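The forward kernel above is a pure index-remapped gather; the flat offsets are easiest to see in a serial form. A CPU reference (illustrative only, mirroring the kernel's arithmetic exactly):

// CPU reference for group_points forward: gather (B, C, N) features into
// (B, C, npoints, nsample) via idx of shape (B, npoints, nsample).
void group_points_cpu(int b, int c, int n, int npoints, int nsample,
                      const float* points, const int* idx, float* out) {
  for (int bi = 0; bi < b; bi++)
    for (int ci = 0; ci < c; ci++)
      for (int p = 0; p < npoints; p++)
        for (int s = 0; s < nsample; s++) {
          int k = idx[(bi * npoints + p) * nsample + s];  // source point id
          out[((bi * c + ci) * npoints + p) * nsample + s] =
              points[(bi * c + ci) * n + k];
        }
}

The backward kernel inverts this map with atomicAdd, since several (p, s) slots may reference the same source point k.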
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp/custom_ops/sampling.cc
#include <vector>

#include "paddle/include/experimental/ext_all.h"

#define CHECK_INPUT(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.")

// cuda launcher declaration
void farthest_point_sampling_kernel_launcher(int b, int n, int m,
                                             const float *dataset, float *temp,
                                             int *idxs);

// op forward wrapper
std::vector<paddle::Tensor> farthest_point_sampling_cuda_forward(
    const paddle::Tensor &points_tensor, const int &npoints) {
  // points_tensor: (B, N, 3)
  // tmp_tensor: (B, N)
  // output:
  //      idx_tensor: (B, npoints)
  const int b = points_tensor.shape()[0];
  const int n = points_tensor.shape()[1];

  auto *points = points_tensor.data<float>();
  auto temp_tensor = paddle::full({b, n}, 1e10, paddle::DataType::FLOAT32,
                                  paddle::GPUPlace());
  auto idx_tensor = paddle::empty({b, npoints}, paddle::DataType::INT32,
                                  paddle::GPUPlace());
  auto *temp = temp_tensor.data<float>();
  auto *idx = idx_tensor.data<int>();

  farthest_point_sampling_kernel_launcher(b, n, npoints, points, temp, idx);

  return {idx_tensor};
}

// shape infer
std::vector<std::vector<int64_t>> FPSInferShape(
    std::vector<int64_t> points_shape, const int &npoints) {
  return {{points_shape[0], npoints}};
}

// dtype infer
std::vector<paddle::DataType> FPSInferDtype(paddle::DataType points_dtype) {
  return {paddle::DataType::INT32};
}

// build op forward
PD_BUILD_OP(farthest_point_sample)
    .Inputs({"points_tensor"})
    .Outputs({"idx_tensor"})
    .Attrs({"npoints: int"})
    .SetKernelFn(PD_KERNEL(farthest_point_sampling_cuda_forward))
    .SetInferShapeFn(PD_INFER_SHAPE(FPSInferShape))
    .SetInferDtypeFn(PD_INFER_DTYPE(FPSInferDtype));
0
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp/custom_ops/ball_query_gpu.cu
#include <stdio.h>
#include <stdlib.h>

#define THREADS_PER_BLOCK 512
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))

__global__ void ball_query_cuda_kernel(const int b, const int n, const int m,
                                       const float radius, const int nsample,
                                       const float *__restrict__ new_xyz,
                                       const float *__restrict__ xyz,
                                       int *__restrict__ idx) {
  // new_xyz: (B, M, 3)
  // xyz: (B, N, 3)
  // output:
  //      idx: (B, M, nsample)
  int bs_idx = blockIdx.y;
  int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (bs_idx >= b || pt_idx >= m) return;

  new_xyz += bs_idx * m * 3 + pt_idx * 3;
  xyz += bs_idx * n * 3;
  idx += bs_idx * m * nsample + pt_idx * nsample;

  float radius2 = radius * radius;
  float new_x = new_xyz[0];
  float new_y = new_xyz[1];
  float new_z = new_xyz[2];

  int cnt = 0;
  for (int k = 0; k < n; ++k) {
    float x = xyz[k * 3 + 0];
    float y = xyz[k * 3 + 1];
    float z = xyz[k * 3 + 2];
    float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) +
               (new_z - z) * (new_z - z);
    if (d2 < radius2) {
      if (cnt == 0) {
        for (int l = 0; l < nsample; ++l) {
          idx[l] = k;
        }
      }
      idx[cnt] = k;
      ++cnt;
      if (cnt >= nsample) break;
    }
  }
}

void ball_query_cuda_launcher(const int b, const int n, const int m,
                              const float radius, const int nsample,
                              const float *new_xyz, const float *xyz,
                              int *idx) {
  // new_xyz: (B, M, 3)
  // xyz: (B, N, 3)
  // output:
  //      idx: (B, M, nsample)
  cudaError_t err;
  dim3 blocks(DIVUP(m, THREADS_PER_BLOCK), b);
  dim3 threads(THREADS_PER_BLOCK);

  ball_query_cuda_kernel<<<blocks, threads>>>(b, n, m, radius, nsample,
                                              new_xyz, xyz, idx);
  err = cudaGetLastError();
  if (cudaSuccess != err) {
    fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
}
0
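One subtlety of the kernel above: when the first in-radius neighbor is found, it pre-fills every output slot, so queries with fewer than nsample neighbors return the first hit repeated rather than garbage (slots stay untouched only when no neighbor is in range). A CPU reference with the same semantics, for illustration:

// CPU reference for ball_query: up to nsample neighbor indices within
// radius for each query point, padded with the first hit.
void ball_query_cpu(int b, int n, int m, float radius, int nsample,
                    const float* new_xyz, const float* xyz, int* idx) {
  float r2 = radius * radius;
  for (int bi = 0; bi < b; bi++)
    for (int q = 0; q < m; q++) {
      const float* p = new_xyz + (bi * m + q) * 3;
      int* out = idx + (bi * m + q) * nsample;
      int cnt = 0;
      for (int k = 0; k < n && cnt < nsample; k++) {
        const float* x = xyz + (bi * n + k) * 3;
        float dx = p[0] - x[0], dy = p[1] - x[1], dz = p[2] - x[2];
        if (dx * dx + dy * dy + dz * dz < r2) {
          if (cnt == 0)  // pad all slots with the first hit
            for (int l = 0; l < nsample; l++) out[l] = k;
          out[cnt++] = k;
        }
      }
    }
}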
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp/custom_ops/sampling_gpu.cu
#include <stdio.h> #include <stdlib.h> #include <cmath> #define TOTAL_THREADS 1024 #define THREADS_PER_BLOCK 512 #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) inline int opt_n_threads(int work_size) { const int pow_2 = std::log(static_cast<double>(work_size)) / std::log(2.0); return max(min(1 << pow_2, TOTAL_THREADS), 1); } __device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i, int idx1, int idx2) { const float v1 = dists[idx1], v2 = dists[idx2]; const int i1 = dists_i[idx1], i2 = dists_i[idx2]; dists[idx1] = max(v1, v2); dists_i[idx1] = v2 > v1 ? i2 : i1; } template <unsigned int block_size> __global__ void farthest_point_sampling_kernel( int b, int n, int m, const float *__restrict__ dataset, float *__restrict__ temp, int *__restrict__ idxs) { // dataset: (B, N, 3) // tmp: (B, N) // output: // idx: (B, M) if (m <= 0) return; __shared__ float dists[block_size]; __shared__ int dists_i[block_size]; int batch_index = blockIdx.x; dataset += batch_index * n * 3; temp += batch_index * n; idxs += batch_index * m; int tid = threadIdx.x; const int stride = block_size; int old = 0; if (threadIdx.x == 0) idxs[0] = old; __syncthreads(); for (int j = 1; j < m; j++) { int besti = 0; float best = -1; float x1 = dataset[old * 3 + 0]; float y1 = dataset[old * 3 + 1]; float z1 = dataset[old * 3 + 2]; for (int k = tid; k < n; k += stride) { float x2, y2, z2; x2 = dataset[k * 3 + 0]; y2 = dataset[k * 3 + 1]; z2 = dataset[k * 3 + 2]; float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1); float d2 = min(d, temp[k]); temp[k] = d2; besti = d2 > best ? k : besti; best = d2 > best ? d2 : best; } dists[tid] = best; dists_i[tid] = besti; __syncthreads(); if (block_size >= 1024) { if (tid < 512) { __update(dists, dists_i, tid, tid + 512); } __syncthreads(); } if (block_size >= 512) { if (tid < 256) { __update(dists, dists_i, tid, tid + 256); } __syncthreads(); } if (block_size >= 256) { if (tid < 128) { __update(dists, dists_i, tid, tid + 128); } __syncthreads(); } if (block_size >= 128) { if (tid < 64) { __update(dists, dists_i, tid, tid + 64); } __syncthreads(); } if (block_size >= 64) { if (tid < 32) { __update(dists, dists_i, tid, tid + 32); } __syncthreads(); } if (block_size >= 32) { if (tid < 16) { __update(dists, dists_i, tid, tid + 16); } __syncthreads(); } if (block_size >= 16) { if (tid < 8) { __update(dists, dists_i, tid, tid + 8); } __syncthreads(); } if (block_size >= 8) { if (tid < 4) { __update(dists, dists_i, tid, tid + 4); } __syncthreads(); } if (block_size >= 4) { if (tid < 2) { __update(dists, dists_i, tid, tid + 2); } __syncthreads(); } if (block_size >= 2) { if (tid < 1) { __update(dists, dists_i, tid, tid + 1); } __syncthreads(); } old = dists_i[0]; if (tid == 0) idxs[j] = old; } } void farthest_point_sampling_kernel_launcher(int b, int n, int m, const float *dataset, float *temp, int *idxs) { // dataset: (B, N, 3) // tmp: (B, N) // output: // idx: (B, M) cudaError_t err; unsigned int n_threads = opt_n_threads(n); switch (n_threads) { case 1024: farthest_point_sampling_kernel<1024> <<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 512: farthest_point_sampling_kernel<512> <<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 256: farthest_point_sampling_kernel<256> <<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 128: farthest_point_sampling_kernel<128> <<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 64: farthest_point_sampling_kernel<64> <<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; 
case 32: farthest_point_sampling_kernel<32> <<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 16: farthest_point_sampling_kernel<16> <<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 8: farthest_point_sampling_kernel<8> <<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 4: farthest_point_sampling_kernel<4> <<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 2: farthest_point_sampling_kernel<2> <<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 1: farthest_point_sampling_kernel<1> <<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; default: farthest_point_sampling_kernel<512> <<<b, n_threads>>>(b, n, m, dataset, temp, idxs); } err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); exit(-1); } }
0
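The templated kernel above parallelizes the inner distance scan and the arg-max reduction; the algorithm itself is the classic greedy farthest point sampling. A serial CPU reference (illustrative only) for a single batch, matching the kernel's choices (temp initialized to 1e10, selection starts at index 0):

// CPU reference for farthest point sampling: greedily pick the point that
// maximizes the minimum squared distance to the already selected set.
#include <vector>

void fps_cpu(int n, int m, const float* xyz /* (N, 3) */, int* idxs) {
  std::vector<float> temp(n, 1e10f);  // min squared dist to selected set
  int old = 0;
  idxs[0] = 0;
  for (int j = 1; j < m; j++) {
    int besti = 0;
    float best = -1.f;
    float x1 = xyz[old * 3], y1 = xyz[old * 3 + 1], z1 = xyz[old * 3 + 2];
    for (int k = 0; k < n; k++) {
      float dx = xyz[k * 3] - x1, dy = xyz[k * 3 + 1] - y1,
            dz = xyz[k * 3 + 2] - z1;
      float d = dx * dx + dy * dy + dz * dz;
      if (d < temp[k]) temp[k] = d;          // tighten min-distance bound
      if (temp[k] > best) { best = temp[k]; besti = k; }
    }
    old = besti;
    idxs[j] = old;
  }
}

The __update tree in the kernel is exactly the arg-max over temp[] done as a shared-memory reduction.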
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp/custom_ops/gather_points.cc
#include <vector>

#include "paddle/include/experimental/ext_all.h"

#define CHECK_INPUT(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.")

// cuda launcher declaration
void gather_points_cuda_launcher(const int b, const int c, const int n,
                                 const int npoints, const float *points,
                                 const int *idx, float *out);
void gather_points_grad_cuda_launcher(const int b, const int c, const int n,
                                      const int npoints, const float *grad_out,
                                      const int *idx, float *grad_points);

// op forward wrapper
std::vector<paddle::Tensor> gather_points_cuda_forward(
    const paddle::Tensor &points_tensor, const paddle::Tensor &idx_tensor) {
  // points: (B, C, N)
  // idx: (B, npoints)
  // output:
  //      out: (B, C, npoints)
  CHECK_INPUT(points_tensor);
  CHECK_INPUT(idx_tensor);

  const int b = points_tensor.shape()[0];
  const int c = points_tensor.shape()[1];
  const int n = points_tensor.shape()[2];
  const int npoints = idx_tensor.shape()[1];

  auto *points = points_tensor.data<float>();
  auto *idx = idx_tensor.data<int>();
  auto out_tensor = paddle::empty({b, c, npoints}, paddle::DataType::FLOAT32,
                                  paddle::GPUPlace());
  auto *out = out_tensor.data<float>();

  gather_points_cuda_launcher(b, c, n, npoints, points, idx, out);

  return {out_tensor};
}

// op backward wrapper
std::vector<paddle::Tensor> gather_points_cuda_backward(
    const paddle::Tensor &grad_out_tensor, const paddle::Tensor &idx_tensor,
    const paddle::Tensor &points_tensor) {
  // grad_out: (B, C, npoints)
  // idx: (B, npoints)
  // output:
  //      grad_points: (B, C, N)
  CHECK_INPUT(grad_out_tensor);
  CHECK_INPUT(idx_tensor);
  CHECK_INPUT(points_tensor);

  const int b = grad_out_tensor.shape()[0];
  const int c = grad_out_tensor.shape()[1];
  const int npoints = grad_out_tensor.shape()[2];
  const int n = points_tensor.shape()[2];

  auto *grad_out = grad_out_tensor.data<float>();
  auto *idx = idx_tensor.data<int>();
  auto grad_points_tensor = paddle::full(
      {b, c, n}, 0.0, paddle::DataType::FLOAT32, paddle::GPUPlace());
  auto *grad_points = grad_points_tensor.data<float>();

  gather_points_grad_cuda_launcher(b, c, n, npoints, grad_out, idx,
                                   grad_points);

  return {grad_points_tensor};
}

// shape infer
std::vector<std::vector<int64_t>> GatherInferShape(
    std::vector<int64_t> points_shape, std::vector<int64_t> idx_shape) {
  const int b = points_shape[0];
  const int c = points_shape[1];
  const int npoints = idx_shape[1];
  return {{b, c, npoints}};
}

// data type infer
std::vector<paddle::DataType> GatherInferDtype(paddle::DataType points_dtype,
                                               paddle::DataType idx_dtype) {
  return {points_dtype};
}

// build op forward
PD_BUILD_OP(gather_operation)
    .Inputs({"points", "idx"})
    .Outputs({"out"})
    .SetKernelFn(PD_KERNEL(gather_points_cuda_forward))
    .SetInferShapeFn(PD_INFER_SHAPE(GatherInferShape))
    .SetInferDtypeFn(PD_INFER_DTYPE(GatherInferDtype));

// build op backward
PD_BUILD_GRAD_OP(gather_operation)
    .Inputs({paddle::Grad("out"), "idx", "points"})
    .Outputs({paddle::Grad("points")})
    .SetKernelFn(PD_KERNEL(gather_points_cuda_backward));
0
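For the gather_operation backward pass registered above, the gradient flows back by scatter-add: every sampled slot adds its incoming gradient to the source point it was gathered from, which is why the CUDA launcher uses atomicAdd (duplicate indices must accumulate). A CPU reference of that gradient rule, for illustration:

// CPU reference for gather_operation backward: scatter-add grad_out
// (B, C, npoints) back into grad_points (B, C, N), assumed zero-initialized.
void gather_points_grad_cpu(int b, int c, int n, int npoints,
                            const float* grad_out, const int* idx,
                            float* grad_points) {
  for (int bi = 0; bi < b; bi++)
    for (int ci = 0; ci < c; ci++)
      for (int p = 0; p < npoints; p++) {
        int k = idx[bi * npoints + p];  // point that produced output slot p
        grad_points[(bi * c + ci) * n + k] +=
            grad_out[(bi * c + ci) * npoints + p];
      }
}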
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp/custom_ops/ball_query.cc
#include <vector>

#include "paddle/include/experimental/ext_all.h"

#define CHECK_INPUT(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.")

// cuda launcher declaration
void ball_query_cuda_launcher(const int b, const int n, const int m,
                              const float radius, const int nsample,
                              const float *new_xyz, const float *xyz, int *idx);

// op forward wrapper
std::vector<paddle::Tensor> ball_query_cuda_forward(
    const paddle::Tensor &new_xyz_tensor, const paddle::Tensor &xyz_tensor,
    const float &radius, const int &nsample) {
  CHECK_INPUT(new_xyz_tensor);
  CHECK_INPUT(xyz_tensor);

  const int b = new_xyz_tensor.shape()[0];
  const int m = new_xyz_tensor.shape()[1];
  const int n = xyz_tensor.shape()[1];

  auto *new_xyz = new_xyz_tensor.data<float>();
  auto *xyz = xyz_tensor.data<float>();
  auto idx_tensor = paddle::empty({b, m, nsample}, paddle::DataType::INT32,
                                  paddle::GPUPlace());
  auto *idx = idx_tensor.data<int>();

  ball_query_cuda_launcher(b, n, m, radius, nsample, new_xyz, xyz, idx);

  return {idx_tensor};
}

// shape infer
std::vector<std::vector<int64_t>> BallQueryInferShape(
    std::vector<int64_t> new_xyz_shape, std::vector<int64_t> xyz_shape,
    const float &radius, const int &nsample) {
  return {{new_xyz_shape[0], new_xyz_shape[1], nsample}};
}

// data type infer
std::vector<paddle::DataType> BallQueryInferDtype(paddle::DataType t1,
                                                  paddle::DataType t2) {
  return {paddle::DataType::INT32};
}

// build forward op
PD_BUILD_OP(ball_query)
    .Inputs({"new_xyz_tensor", "xyz_tensor"})
    .Outputs({"idx"})
    .Attrs({"radius: float", "nsample: int"})
    .SetKernelFn(PD_KERNEL(ball_query_cuda_forward))
    .SetInferShapeFn(PD_INFER_SHAPE(BallQueryInferShape))
    .SetInferDtypeFn(PD_INFER_DTYPE(BallQueryInferDtype));
0
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp/custom_ops/iou3d_nms_api.cpp
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <cuda.h>
#include <cuda_runtime_api.h>

#include <vector>

#include "iou3d_cpu.h"
#include "iou3d_nms.h"
#include "paddle/include/experimental/ext_all.h"

std::vector<paddle::DataType> BoxesIouBevCpuInferDtype(
    paddle::DataType boxes_a_dtype, paddle::DataType boxes_b_dtype) {
  return {boxes_a_dtype};
}

std::vector<std::vector<int64_t>> BoxesIouBevCpuInferShape(
    std::vector<int64_t> boxes_a_shape, std::vector<int64_t> boxes_b_shape) {
  return {{boxes_a_shape[0], boxes_b_shape[0]}};
}

// nms_gpu allocates INT32 keep/num_to_keep tensors (see iou3d_nms.cpp),
// so the declared output dtypes are INT32.
std::vector<paddle::DataType> NmsInferDtype(paddle::DataType boxes_dtype) {
  return {paddle::DataType::INT32, paddle::DataType::INT32};
}

std::vector<std::vector<int64_t>> NmsInferShape(
    std::vector<int64_t> boxes_shape) {
  return {{boxes_shape[0]}, {1}};
}

std::vector<paddle::DataType> NmsNormalInferDtype(
    paddle::DataType boxes_dtype) {
  return {paddle::DataType::INT32, paddle::DataType::INT32};
}

std::vector<std::vector<int64_t>> NmsNormalInferShape(
    std::vector<int64_t> boxes_shape) {
  return {{boxes_shape[0]}, {1}};
}

std::vector<paddle::DataType> BoxesIouBevGpuInferDtype(
    paddle::DataType boxes_a_dtype, paddle::DataType boxes_b_dtype) {
  return {boxes_a_dtype};
}

std::vector<std::vector<int64_t>> BoxesIouBevGpuInferShape(
    std::vector<int64_t> boxes_a_shape, std::vector<int64_t> boxes_b_shape) {
  return {{boxes_a_shape[0], boxes_b_shape[0]}};
}

std::vector<paddle::DataType> BoxesOverlapBevGpuInferDtype(
    paddle::DataType boxes_a_dtype, paddle::DataType boxes_b_dtype) {
  return {boxes_a_dtype};
}

std::vector<std::vector<int64_t>> BoxesOverlapBevGpuInferShape(
    std::vector<int64_t> boxes_a_shape, std::vector<int64_t> boxes_b_shape) {
  return {{boxes_a_shape[0], boxes_b_shape[0]}};
}

PD_BUILD_OP(boxes_iou_bev_cpu)
    .Inputs({"boxes_a_tensor", "boxes_b_tensor"})
    .Outputs({"ans_iou_tensor"})
    .SetKernelFn(PD_KERNEL(boxes_iou_bev_cpu))
    .SetInferDtypeFn(PD_INFER_DTYPE(BoxesIouBevCpuInferDtype))
    .SetInferShapeFn(PD_INFER_SHAPE(BoxesIouBevCpuInferShape));

PD_BUILD_OP(boxes_iou_bev_gpu)
    .Inputs({"boxes_a_tensor", "boxes_b_tensor"})
    .Outputs({"ans_iou_tensor"})
    .SetKernelFn(PD_KERNEL(boxes_iou_bev_gpu))
    .SetInferDtypeFn(PD_INFER_DTYPE(BoxesIouBevGpuInferDtype))
    .SetInferShapeFn(PD_INFER_SHAPE(BoxesIouBevGpuInferShape));

PD_BUILD_OP(boxes_overlap_bev_gpu)
    .Inputs({"boxes_a", "boxes_b"})
    .Outputs({"ans_overlap"})
    .SetKernelFn(PD_KERNEL(boxes_overlap_bev_gpu))
    .SetInferDtypeFn(PD_INFER_DTYPE(BoxesOverlapBevGpuInferDtype))
    .SetInferShapeFn(PD_INFER_SHAPE(BoxesOverlapBevGpuInferShape));

PD_BUILD_OP(nms_gpu)
    .Inputs({"boxes"})
    .Outputs({"keep", "num_to_keep"})
    .Attrs({"nms_overlap_thresh: float"})
    .SetKernelFn(PD_KERNEL(nms_gpu))
    .SetInferDtypeFn(PD_INFER_DTYPE(NmsInferDtype))
    .SetInferShapeFn(PD_INFER_SHAPE(NmsInferShape));

PD_BUILD_OP(nms_normal_gpu)
    .Inputs({"boxes"})
    .Outputs({"keep", "num_to_keep"})
    .Attrs({"nms_overlap_thresh: float"})
    .SetInferShapeFn(PD_INFER_SHAPE(NmsNormalInferShape))
    .SetKernelFn(PD_KERNEL(nms_normal_gpu))
    .SetInferDtypeFn(PD_INFER_DTYPE(NmsNormalInferDtype));
0
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp/custom_ops/gather_points_gpu.cu
#include <stdio.h> #include <stdlib.h> #define THREADS_PER_BLOCK 512 #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) __global__ void gather_points_cuda_kernel(const int b, const int c, const int n, const int m, const float *__restrict__ points, const int *__restrict__ idx, float *__restrict__ out) { // points: (B, C, N) // idx: (B, M) // output: // out: (B, C, M) int bs_idx = blockIdx.z; int c_idx = blockIdx.y; int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; if (bs_idx >= b || c_idx >= c || pt_idx >= m) return; out += bs_idx * c * m + c_idx * m + pt_idx; idx += bs_idx * m + pt_idx; points += bs_idx * c * n + c_idx * n; out[0] = points[idx[0]]; } void gather_points_cuda_launcher(const int b, const int c, const int n, const int npoints, const float *points, const int *idx, float *out) { // points: (B, C, N) // idx: (B, npoints) // output: // out: (B, C, npoints) cudaError_t err; dim3 blocks(DIVUP(npoints, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); gather_points_cuda_kernel<<<blocks, threads>>>(b, c, n, npoints, points, idx, out); err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); exit(-1); } } __global__ void gather_points_grad_cuda_kernel( const int b, const int c, const int n, const int m, const float *__restrict__ grad_out, const int *__restrict__ idx, float *__restrict__ grad_points) { // grad_out: (B, C, M) // idx: (B, M) // output: // grad_points: (B, C, N) int bs_idx = blockIdx.z; int c_idx = blockIdx.y; int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; if (bs_idx >= b || c_idx >= c || pt_idx >= m) return; grad_out += bs_idx * c * m + c_idx * m + pt_idx; idx += bs_idx * m + pt_idx; grad_points += bs_idx * c * n + c_idx * n; atomicAdd(grad_points + idx[0], grad_out[0]); } void gather_points_grad_cuda_launcher(const int b, const int c, const int n, const int npoints, const float *grad_out, const int *idx, float *grad_points) { // grad_out: (B, C, npoints) // idx: (B, npoints) // output: // grad_points: (B, C, N) cudaError_t err; dim3 blocks(DIVUP(npoints, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); gather_points_grad_cuda_kernel<<<blocks, threads>>>( b, c, n, npoints, grad_out, idx, grad_points); err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); exit(-1); } }
0
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp/custom_ops/group_points.cc
#include <vector> #include "paddle/include/experimental/ext_all.h" #define CHECK_INPUT(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.") // cuda launcher declaration void group_points_cuda_launcher(const int b, const int c, const int n, const int npoints, const int nsample, const float *points, const int *idx, float *out); void group_points_grad_cuda_launcher(const int b, const int c, const int n, const int npoints, const int nsample, const float *grad_out, const int *idx, float *grad_points); // op forward wrapper std::vector<paddle::Tensor> group_points_cuda_forward( const paddle::Tensor &points_tensor, const paddle::Tensor &idx_tensor) { CHECK_INPUT(points_tensor); CHECK_INPUT(idx_tensor); const int b = points_tensor.shape()[0]; const int c = points_tensor.shape()[1]; const int n = points_tensor.shape()[2]; const int npoints = idx_tensor.shape()[1]; const int nsample = idx_tensor.shape()[2]; auto *points = points_tensor.data<float>(); auto *idx = idx_tensor.data<int>(); auto out_tensor = paddle::empty( {b, c, npoints, nsample}, paddle::DataType::FLOAT32, paddle::GPUPlace()); auto *out = out_tensor.data<float>(); group_points_cuda_launcher(b, c, n, npoints, nsample, points, idx, out); return {out_tensor}; } // op backward wrapper std::vector<paddle::Tensor> group_points_cuda_backward( const paddle::Tensor &grad_out_tensor, const paddle::Tensor &idx_tensor, const paddle::Tensor &points_tensor) { CHECK_INPUT(grad_out_tensor); CHECK_INPUT(idx_tensor); const int b = grad_out_tensor.shape()[0]; const int c = grad_out_tensor.shape()[1]; const int npoints = grad_out_tensor.shape()[2]; const int nsample = grad_out_tensor.shape()[3]; const int n = points_tensor.shape()[2]; auto *grad_out = grad_out_tensor.data<float>(); auto *idx = idx_tensor.data<int>(); auto grad_points_tensor = paddle::full( {b, c, n}, 0.0, paddle::DataType::FLOAT32, paddle::GPUPlace()); auto *grad_points = grad_points_tensor.data<float>(); group_points_grad_cuda_launcher(b, c, n, npoints, nsample, grad_out, idx, grad_points); return {grad_points_tensor}; } // shape infer std::vector<std::vector<int64_t>> GroupInferShape( std::vector<int64_t> points_shape, std::vector<int64_t> idx_shape) { const int b = points_shape[0]; const int c = points_shape[1]; const int npoints = idx_shape[1]; const int nsample = idx_shape[2]; return {{b, c, npoints, nsample}}; } // data type infer std::vector<paddle::DataType> GroupInferDtype(paddle::DataType points_dtype, paddle::DataType idx_dtype) { return {points_dtype}; } // build forward op PD_BUILD_OP(group_operation) .Inputs({"points_tensor", "idx_tensor"}) .Outputs({"out_tensor"}) .SetKernelFn(PD_KERNEL(group_points_cuda_forward)) .SetInferShapeFn(PD_INFER_SHAPE(GroupInferShape)) .SetInferDtypeFn(PD_INFER_DTYPE(GroupInferDtype)); // build backward op PD_BUILD_GRAD_OP(group_operation) .Inputs({paddle::Grad("out_tensor"), "idx_tensor", "points_tensor"}) .Outputs({paddle::Grad("points_tensor")}) .SetKernelFn(PD_KERNEL(group_points_cuda_backward));
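// Shape walk-through (illustrative sizes): for points (B=2, C=64, N=4096) and
// idx (B=2, npoints=512, nsample=16), the forward op returns a tensor of shape
// (2, 64, 512, 16), matching GroupInferShape above; the backward op scatters
// grad_out back into a zero-initialized (2, 64, 4096) grad_points tensor.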
0
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp/cmake
apollo_public_repos/apollo-model-centerpoint/deploy/iassd/cpp/cmake/external/boost.cmake
include(ExternalProject) set(BOOST_PROJECT "extern_boost") # To release PaddlePaddle as a pip package, we have to follow the # manylinux1 standard, which targets Linux kernels and compilers that are # as old as possible and recommends CentOS 5. In practice, the earliest # CentOS version that works with NVIDIA CUDA is CentOS 6, and newer Boost # releases (e.g. 1.66.0) do not build on CentOS 6. The devtools package of # CentOS 6 installs Boost 1.41.0, so we pin 1.41.0 here. set(BOOST_VER "1.41.0") set(BOOST_TAR "boost_1_41_0" CACHE STRING "" FORCE) set(BOOST_URL "http://paddlepaddledeps.bj.bcebos.com/${BOOST_TAR}.tar.gz" CACHE STRING "" FORCE) MESSAGE(STATUS "BOOST_TAR: ${BOOST_TAR}, BOOST_URL: ${BOOST_URL}") set(BOOST_SOURCES_DIR ${THIRD_PARTY_PATH}/boost) set(BOOST_DOWNLOAD_DIR "${BOOST_SOURCES_DIR}/src/${BOOST_PROJECT}") set(BOOST_INCLUDE_DIR "${BOOST_DOWNLOAD_DIR}" CACHE PATH "boost include directory." FORCE) set_directory_properties(PROPERTIES CLEAN_NO_CUSTOM 1) include_directories(${BOOST_INCLUDE_DIR}) ExternalProject_Add( ${BOOST_PROJECT} ${EXTERNAL_PROJECT_LOG_ARGS} DOWNLOAD_DIR ${BOOST_DOWNLOAD_DIR} URL ${BOOST_URL} DOWNLOAD_NO_PROGRESS 1 PREFIX ${BOOST_SOURCES_DIR} CONFIGURE_COMMAND "" BUILD_COMMAND "" INSTALL_COMMAND "" UPDATE_COMMAND "" ) if (${CMAKE_VERSION} VERSION_LESS "3.3.0" OR NOT WIN32) set(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/boost_dummy.c) file(WRITE ${dummyfile} "const char *dummy = \"${dummyfile}\";") add_library(boost STATIC ${dummyfile}) else() add_library(boost INTERFACE) endif() add_dependencies(boost ${BOOST_PROJECT}) set(Boost_INCLUDE_DIR ${BOOST_INCLUDE_DIR})
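# Note on the dummy source above (explanatory, not part of the original file):
# Boost is consumed header-only here, so the `boost` target has nothing real to
# compile. The generated one-line boost_dummy.c simply gives
# `add_library(boost STATIC ...)` a source to build on CMake < 3.3 and on the
# non-Windows platforms where this script does not use an INTERFACE library.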
0
apollo_public_repos/apollo-model-centerpoint/deploy/caddn
apollo_public_repos/apollo-model-centerpoint/deploy/caddn/python/infer.py
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import sys import numpy as np import paddle import paddle.nn.functional as F from paddle import inference from paddle.static import InputSpec from skimage import io from paddle3d.apis.config import Config # The next two imports register the custom ops with paddle as a side effect. from paddle3d.ops.grid_sample_3d import grid_sample_3d from paddle3d.ops.iou3d_nms_cuda import nms_gpu def parse_args(): parser = argparse.ArgumentParser() parser.add_argument( "--model_file", type=str, help="Model filename. Specify this when your model is a combined model.", required=True) parser.add_argument( "--params_file", type=str, help= "Parameter filename. Specify this when your model is a combined model.", required=True) parser.add_argument( '--img_path', type=str, help='The image path.', required=True) parser.add_argument("--gpu_id", type=int, default=0, help="GPU card id.") parser.add_argument( "--use_trt", type=int, default=0, help="Whether to use tensorrt to accelerate when using gpu.") parser.add_argument( "--trt_precision", type=int, default=0, help="Precision type of tensorrt, 0: kFloat32, 1: kHalf.") parser.add_argument( "--trt_use_static", type=int, default=0, help="Whether to load the tensorrt graph optimization from a disk path."
) parser.add_argument( "--trt_static_dir", type=str, help="Path of a tensorrt graph optimization directory.") parser.add_argument( "--collect_shape_info", type=int, default=0, help="Whether to collect dynamic shape before using tensorrt.") parser.add_argument( "--dynamic_shape_file", type=str, default="", help="Path of a dynamic shape file for tensorrt.") return parser.parse_args() def load_predictor(model_file, params_file, gpu_id=0, use_trt=False, trt_precision=0, trt_use_static=False, trt_static_dir=None, collect_shape_info=False, dynamic_shape_file=None): """load_predictor initialize the inference engine """ config = inference.Config(model_file, params_file) config.enable_use_gpu(1000, gpu_id) # enable memory optim config.enable_memory_optim() config.disable_glog_info() config.switch_use_feed_fetch_ops(False) config.switch_ir_optim(True) # create predictor if use_trt: precision_mode = paddle.inference.PrecisionType.Float32 if trt_precision == 1: precision_mode = paddle.inference.PrecisionType.Half config.enable_tensorrt_engine( workspace_size=1 << 20, max_batch_size=1, min_subgraph_size=30, precision_mode=precision_mode, use_static=trt_use_static, use_calib_mode=False) if collect_shape_info: config.collect_shape_range_info(dynamic_shape_file) else: config.enable_tuned_tensorrt_dynamic_shape(dynamic_shape_file, True) if trt_use_static: config.set_optim_cache_dir(trt_static_dir) predictor = inference.create_predictor(config) return predictor def get_image(img_path): """ Loads an image for a sample. Args: img_path [str]: Path of the image file. Returns: image [np.ndarray(1, 3, H, W)]: normalized RGB image. """ assert os.path.exists(img_path) image = io.imread(img_path) image = image[:, :, :3] # Remove alpha channel image = image.astype(np.float32) image /= 255.0 image = np.expand_dims(image, axis=0) image = image.transpose([0, 3, 1, 2]) return image def run(predictor, img): input_names = predictor.get_input_names() input_tensor1 = predictor.get_input_handle(input_names[0]) input_tensor2 = predictor.get_input_handle(input_names[1]) input_tensor3 = predictor.get_input_handle(input_names[2]) data = {} data["images"] = img data["trans_lidar_to_cam"] = np.asarray( [[[0.0048523, -0.9999298, -0.01081266, -0.00711321], [-0.00302069, 0.01079808, -0.99993706, -0.06176636], [0.99998367, 0.00488465, -0.00296808, -0.26739058], [0.000000e+00, 0.000000e+00, 0.000000e+00, 1.000000e+00]]], dtype='float32') data["trans_cam_to_img"] = np.asarray( [[[7.183351e+02, 0.000000e+00, 6.003891e+02, 4.450382e+01], [0.000000e+00, 7.183351e+02, 1.815122e+02, -5.951107e-01], [0.000000e+00, 0.000000e+00, 1.000000e+00, 2.616315e-03]]], dtype='float32') input_tensor1.copy_from_cpu(data[input_names[0]]) input_tensor2.copy_from_cpu(data[input_names[1]]) input_tensor3.copy_from_cpu(data[input_names[2]]) predictor.run() outs = [] output_names = predictor.get_output_names() for name in output_names: out = predictor.get_output_handle(name) out = out.copy_to_cpu() out = paddle.to_tensor(out) outs.append(out) result = {} result['pred_boxes'] = outs[0] result['pred_labels'] = outs[1] result['pred_scores'] = outs[2] return result def main(args): predictor = load_predictor(args.model_file, args.params_file, args.gpu_id, args.use_trt, args.trt_precision, args.trt_use_static, args.trt_static_dir, args.collect_shape_info, args.dynamic_shape_file) image = get_image(args.img_path) result = run(predictor, image) print(result) if __name__ == '__main__': args = parse_args() main(args)
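# Example invocation (paths are hypothetical; flags match parse_args above):
#   python infer.py \
#       --model_file output/caddn.pdmodel \
#       --params_file output/caddn.pdiparams \
#       --img_path datasets/KITTI/testing/image_2/000001.png \
#       --use_trt 0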
0
apollo_public_repos/apollo-model-centerpoint/deploy/caddn
apollo_public_repos/apollo-model-centerpoint/deploy/caddn/cpp/main.cc
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <gflags/gflags.h> #include <glog/logging.h> #include <time.h> #include <chrono> #include <iostream> #include <numeric> #include <opencv2/opencv.hpp> #include "paddle/include/paddle_inference_api.h" DEFINE_string(model_file, "", "Path of an inference model"); DEFINE_string(params_file, "", "Path of the inference params"); DEFINE_string(image_file, "", "Path of an image file to be predicted"); DEFINE_int32(gpu_id, 0, "GPU card id"); DEFINE_int32(use_trt, 0, "Whether to use tensorrt to accelerate when using gpu"); DEFINE_int32(trt_precision, 0, "Precision type of tensorrt, 0: kFloat32, 1: kHalf"); DEFINE_int32( trt_use_static, 0, "Whether to load the tensorrt graph optimization from a disk path"); DEFINE_string(trt_static_dir, "", "Path of a tensorrt graph optimization directory"); DEFINE_int32(collect_shape_info, 0, "Whether to collect dynamic shape before using tensorrt"); DEFINE_string(dynamic_shape_file, "", "Path of a dynamic shape file for tensorrt"); using paddle_infer::Config; using paddle_infer::CreatePredictor; using paddle_infer::Predictor; const std::string shape_range_info = "deeplab_model/shape_range_info.pbtxt"; paddle_infer::PrecisionType GetPrecisionType(const std::string &ptype) { if (ptype == "trt_fp32") return paddle_infer::PrecisionType::kFloat32; if (ptype == "trt_fp16") return paddle_infer::PrecisionType::kHalf; return paddle_infer::PrecisionType::kFloat32; } std::shared_ptr<paddle_infer::Predictor> create_predictor( const std::string &model_path, const std::string &params_path, const int gpu_id, const int use_trt, const int trt_precision, const int trt_use_static, const std::string trt_static_dir, const int collect_shape_info, const std::string dynamic_shape_file) { paddle::AnalysisConfig config; config.EnableUseGpu(1000, gpu_id); config.SetModel(model_path, params_path); config.EnableMemoryOptim(); if (use_trt) { paddle::AnalysisConfig::Precision precision; if (trt_precision == 0) { precision = paddle_infer::PrecisionType::kFloat32; } else if (trt_precision == 1) { precision = paddle_infer::PrecisionType::kHalf; } else { LOG(ERROR) << "Tensorrt precision can only be 0 or 1, but received " << trt_precision << "\n"; return nullptr; } config.EnableTensorRtEngine(1 << 30, 1, 12, precision, trt_use_static, false); if (dynamic_shape_file == "") { LOG(ERROR) << "dynamic_shape_file should be set, but received is " << dynamic_shape_file << "\n"; return nullptr; } if (collect_shape_info) { config.CollectShapeRangeInfo(dynamic_shape_file); } else { config.EnableTunedTensorRtDynamicShape(dynamic_shape_file, true); } if (trt_use_static) { if (trt_static_dir == "") { LOG(ERROR) << "trt_static_dir should be set, but received is " << trt_static_dir << "\n"; return nullptr; } config.SetOptimCacheDir(trt_static_dir); } } config.SwitchIrOptim(true); return paddle_infer::CreatePredictor(config); } void normalize(cv::Mat *im, const std::vector<float> &mean, const std::vector<float> &std, float &scale) { if
(scale) { (*im).convertTo(*im, CV_32FC3, scale); } for (int h = 0; h < im->rows; h++) { for (int w = 0; w < im->cols; w++) { im->at<cv::Vec3f>(h, w)[0] = (im->at<cv::Vec3f>(h, w)[0] - mean[0]) / std[0]; im->at<cv::Vec3f>(h, w)[1] = (im->at<cv::Vec3f>(h, w)[1] - mean[1]) / std[1]; im->at<cv::Vec3f>(h, w)[2] = (im->at<cv::Vec3f>(h, w)[2] - mean[2]) / std[2]; } } } void mat_to_vec(const cv::Mat *im, float *data) { int rh = im->rows; int rw = im->cols; int rc = im->channels(); for (int i = 0; i < rc; ++i) { cv::extractChannel(*im, cv::Mat(rh, rw, CV_32FC1, data + i * rh * rw), i); } } void run(Predictor *predictor, const std::vector<int> &images_shape, const std::vector<float> &images_data, const std::vector<int> &cam_shape, const std::vector<float> &cam_data, const std::vector<int> &lidar_shape, const std::vector<float> &lidar_data, std::vector<float> *boxes, std::vector<float> *labels, std::vector<float> *scores) { auto input_names = predictor->GetInputNames(); for (const auto &tensor_name : input_names) { auto in_tensor = predictor->GetInputHandle(tensor_name); if (tensor_name == "images") { in_tensor->Reshape(images_shape); in_tensor->CopyFromCpu(images_data.data()); } else if (tensor_name == "trans_cam_to_img") { in_tensor->Reshape(cam_shape); in_tensor->CopyFromCpu(cam_data.data()); } else if (tensor_name == "trans_lidar_to_cam") { in_tensor->Reshape(lidar_shape); in_tensor->CopyFromCpu(lidar_data.data()); } } // Run inference 100 times to measure per-iteration latency. for (int i = 0; i < 100; i++) { auto start_time = std::chrono::steady_clock::now(); CHECK(predictor->Run()); auto output_names = predictor->GetOutputNames(); for (size_t j = 0; j != output_names.size(); j++) { auto output = predictor->GetOutputHandle(output_names[j]); std::vector<int> output_shape = output->shape(); int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1, std::multiplies<int>()); if (j == 0) { boxes->resize(out_num); output->CopyToCpu(boxes->data()); } else if (j == 1) { labels->resize(out_num); output->CopyToCpu(labels->data()); } else if (j == 2) { scores->resize(out_num); output->CopyToCpu(scores->data()); } } auto end_time = std::chrono::steady_clock::now(); auto tt = std::chrono::duration_cast<std::chrono::nanoseconds>(end_time - start_time) .count() / 1000000.0; LOG(INFO) << "time per file: " << tt << "(ms).\n"; } } void resize(const cv::Mat &img, cv::Mat &resize_img, int resized_w, int resized_h) { cv::resize(img, resize_img, cv::Size(resized_w, resized_h), 0, 0, cv::INTER_LINEAR); } int main(int argc, char *argv[]) { google::ParseCommandLineFlags(&argc, &argv, true); if (FLAGS_model_file == "" || FLAGS_params_file == "" || FLAGS_image_file == "") { LOG(INFO) << "Missing required parameter" << "\n"; LOG(INFO) << "Usage: " << std::string(argv[0]) << " --model_file ${MODEL_FILE} " << "--params_file ${PARAMS_FILE} " << "--image_file ${IMAGE_FILE}" << "\n"; return -1; } auto predictor = create_predictor( FLAGS_model_file, FLAGS_params_file, FLAGS_gpu_id, FLAGS_use_trt, FLAGS_trt_precision, FLAGS_trt_use_static, FLAGS_trt_static_dir, FLAGS_collect_shape_info, FLAGS_dynamic_shape_file); if (predictor == nullptr) { return 0; } cv::Mat img_resized; std::vector<float> input_data(1 * 3 * 640 * 960, 0.0f); cv::Mat img = cv::imread(FLAGS_image_file, cv::IMREAD_COLOR); cv::cvtColor(img, img, cv::COLOR_BGR2RGB); resize(img, img_resized, 960, 640); img_resized.convertTo(img_resized, CV_32F, 1.0f / 255.0f); mat_to_vec(&img_resized, input_data.data()); std::vector<int> images_shape = {1, 3, 640, 960}; std::vector<int> cam_shape = {1, 3, 4}; std::vector<float>
cam_data{7.183351e+02, 0.000000e+00, 6.003891e+02, 4.450382e+01, 0.000000e+00, 7.183351e+02, 1.815122e+02, -5.951107e-01, 0.000000e+00, 0.000000e+00, 1.000000e+00, 2.616315e-03}; std::vector<int> lidar_shape = {1, 4, 4}; std::vector<float> lidar_data = { 0.0048523, -0.9999298, -0.01081266, -0.00711321, -0.00302069, 0.01079808, -0.99993706, -0.06176636, 0.99998367, 0.00488465, -0.00296808, -0.26739058, 0., 0., 0., 1.}; std::vector<float> boxes; std::vector<float> labels; std::vector<float> scores; run(predictor.get(), images_shape, input_data, cam_shape, cam_data, lidar_shape, lidar_data, &boxes, &labels, &scores); // boxes: 7 values per detected box std::cout << "boxes" << "\n"; for (auto e : boxes) { LOG(INFO) << e; } // labels: 1 value per detected box std::cout << "labels" << "\n"; for (auto e : labels) { LOG(INFO) << e; } // scores: 1 value per detected box std::cout << "scores" << "\n"; for (auto e : scores) { LOG(INFO) << e; } return 0; }
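// Example run (illustrative; paths are hypothetical, flags match the
// DEFINE_* declarations above, binary name comes from compile.sh's DEMO_NAME):
//   ./build/main --model_file caddn.pdmodel --params_file caddn.pdiparams \
//       --image_file 000001.png --use_trt 0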
0
apollo_public_repos/apollo-model-centerpoint/deploy/caddn
apollo_public_repos/apollo-model-centerpoint/deploy/caddn/cpp/CMakeLists.txt
cmake_minimum_required(VERSION 3.0) project(cpp_inference_demo CXX C) option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." ON) option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." ON) option(USE_TENSORRT "Compile demo with TensorRT." ON) option(CUSTOM_OPERATOR_FILES "List of file names for custom operators" "") execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpfullversion -dumpversion OUTPUT_VARIABLE GCC_VERSION) string(REGEX MATCHALL "[0-9]+" GCC_VERSION_COMPONENTS ${GCC_VERSION}) list(GET GCC_VERSION_COMPONENTS 0 GCC_MAJOR) list(GET GCC_VERSION_COMPONENTS 1 GCC_MINOR) set(GCC_VERSION "${GCC_MAJOR}.${GCC_MINOR}") if (GCC_VERSION LESS "8.0") set(CMAKE_CXX_FLAGS "-Wl,--no-as-needed") endif() set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake") include(external/boost) SET(OPENCV_DIR "" CACHE PATH "Location of libraries") find_package(OpenCV REQUIRED PATHS ${OPENCV_DIR} NO_DEFAULT_PATH) include_directories(${OpenCV_INCLUDE_DIRS}) if(WITH_GPU) find_package(CUDA REQUIRED) add_definitions("-DPADDLE_WITH_CUDA") endif() if(NOT WITH_STATIC_LIB) add_definitions("-DPADDLE_WITH_SHARED_LIB") else() # PD_INFER_DECL is mainly used to set the dllimport/dllexport attribute in dynamic library mode. # Set it to empty in static library mode to avoid compilation issues. add_definitions("/DPD_INFER_DECL=") endif() macro(safe_set_static_flag) foreach(flag_var CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO) if(${flag_var} MATCHES "/MD") string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}") endif(${flag_var} MATCHES "/MD") endforeach(flag_var) endmacro() if(NOT DEFINED PADDLE_LIB) message(FATAL_ERROR "please set PADDLE_LIB with -DPADDLE_LIB=/path/paddle/lib") endif() if(NOT DEFINED DEMO_NAME) message(FATAL_ERROR "please set DEMO_NAME with -DDEMO_NAME=demo_name") endif() include_directories("${PADDLE_LIB}/") set(PADDLE_LIB_THIRD_PARTY_PATH "${PADDLE_LIB}/third_party/install/") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}protobuf/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}glog/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}gflags/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}xxhash/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}onnxruntime/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}paddle2onnx/include") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}protobuf/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}glog/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}gflags/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}xxhash/lib") link_directories("${PADDLE_LIB}/paddle/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}onnxruntime/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}paddle2onnx/lib") if (WIN32) add_definitions("/DGOOGLE_GLOG_DLL_DECL=") option(MSVC_STATIC_CRT "use static C Runtime library by default" ON) if (MSVC_STATIC_CRT) if (WITH_MKL) set(FLAG_OPENMP "/openmp") endif() set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /bigobj /MTd ${FLAG_OPENMP}") set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /bigobj /MT ${FLAG_OPENMP}") set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /bigobj /MTd ${FLAG_OPENMP}") set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /bigobj /MT ${FLAG_OPENMP}") safe_set_static_flag() if (WITH_STATIC_LIB) add_definitions(-DSTATIC_LIB) endif() endif() else() if(WITH_MKL) set(FLAG_OPENMP "-fopenmp") endif() set(CMAKE_CXX_FLAGS 
"${CMAKE_CXX_FLAGS} -std=c++14 ${FLAG_OPENMP}") endif() if(WITH_GPU) if(NOT WIN32) set(CUDA_LIB "/usr/local/cuda/lib64/" CACHE STRING "CUDA Library") else() if(CUDA_LIB STREQUAL "") set(CUDA_LIB "C:\\Program\ Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v8.0\\lib\\x64") endif() endif(NOT WIN32) endif() if (USE_TENSORRT AND WITH_GPU) set(TENSORRT_ROOT "" CACHE STRING "The root directory of TensorRT library") if("${TENSORRT_ROOT}" STREQUAL "") message(FATAL_ERROR "The TENSORRT_ROOT is empty, you must assign it a value with CMake command. Such as: -DTENSORRT_ROOT=TENSORRT_ROOT_PATH ") endif() set(TENSORRT_INCLUDE_DIR ${TENSORRT_ROOT}/include) set(TENSORRT_LIB_DIR ${TENSORRT_ROOT}/lib) endif() if (NOT WIN32) if (USE_TENSORRT AND WITH_GPU) include_directories("${TENSORRT_INCLUDE_DIR}") link_directories("${TENSORRT_LIB_DIR}") endif() endif(NOT WIN32) if(WITH_MKL) set(MATH_LIB_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}mklml") include_directories("${MATH_LIB_PATH}/include") if(WIN32) set(MATH_LIB ${MATH_LIB_PATH}/lib/mklml${CMAKE_STATIC_LIBRARY_SUFFIX} ${MATH_LIB_PATH}/lib/libiomp5md${CMAKE_STATIC_LIBRARY_SUFFIX}) else() set(MATH_LIB ${MATH_LIB_PATH}/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX} ${MATH_LIB_PATH}/lib/libiomp5${CMAKE_SHARED_LIBRARY_SUFFIX}) endif() set(MKLDNN_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}mkldnn") if(EXISTS ${MKLDNN_PATH}) include_directories("${MKLDNN_PATH}/include") if(WIN32) set(MKLDNN_LIB ${MKLDNN_PATH}/lib/mkldnn.lib) else(WIN32) set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.0) endif(WIN32) endif() else() set(OPENBLAS_LIB_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}openblas") include_directories("${OPENBLAS_LIB_PATH}/include/openblas") if(WIN32) set(MATH_LIB ${OPENBLAS_LIB_PATH}/lib/openblas${CMAKE_STATIC_LIBRARY_SUFFIX}) else() set(MATH_LIB ${OPENBLAS_LIB_PATH}/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX}) endif() endif() if(WITH_STATIC_LIB) set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX}) else() if(WIN32) set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX}) else() set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_SHARED_LIBRARY_SUFFIX}) endif() endif() if (NOT WIN32) if (GCC_VERSION LESS "8.0") set(EXTERNAL_LIB ${EXTERNAL_LIB} "-lssl -lcrypto -lz -lleveldb -lsnappy") endif() set(EXTERNAL_LIB ${EXTERNAL_LIB} "-lrt -ldl -lpthread") set(DEPS ${DEPS} ${MATH_LIB} ${MKLDNN_LIB} glog gflags protobuf xxhash ${EXTERNAL_LIB}) else() set(DEPS ${DEPS} ${MATH_LIB} ${MKLDNN_LIB} glog gflags_static libprotobuf xxhash ${EXTERNAL_LIB}) set(DEPS ${DEPS} shlwapi.lib) endif(NOT WIN32) if(WITH_GPU) if(NOT WIN32) if (USE_TENSORRT) set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer${CMAKE_SHARED_LIBRARY_SUFFIX}) set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX}) endif() set(DEPS ${DEPS} ${CUDA_LIB}/libcudart${CMAKE_SHARED_LIBRARY_SUFFIX}) else() if(USE_TENSORRT) set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/nvinfer${CMAKE_STATIC_LIBRARY_SUFFIX}) set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_STATIC_LIBRARY_SUFFIX}) endif() set(DEPS ${DEPS} ${CUDA_LIB}/cudart${CMAKE_STATIC_LIBRARY_SUFFIX} ) set(DEPS ${DEPS} ${CUDA_LIB}/cublas${CMAKE_STATIC_LIBRARY_SUFFIX} ) set(DEPS ${DEPS} ${CUDA_LIB}/cudnn${CMAKE_STATIC_LIBRARY_SUFFIX} ) endif() endif() cuda_add_library(pd_infer_custom_op ${CUSTOM_OPERATOR_FILES} SHARED) add_executable(${DEMO_NAME} ${DEMO_NAME}.cc) if (GCC_VERSION GREATER_EQUAL "8.0") set(DEPS ${DEPS} libssl.a libcrypto.a libz.a libleveldb.a libsnappy.a) endif() set(DEPS ${DEPS} boost 
${OpenCV_LIBS} pd_infer_custom_op) if(WIN32) if(USE_TENSORRT) add_custom_command(TARGET ${DEMO_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer${CMAKE_SHARED_LIBRARY_SUFFIX} ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE} COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX} ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE} ) endif() if(WITH_MKL) add_custom_command(TARGET ${DEMO_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/mklml.dll ${CMAKE_BINARY_DIR}/Release COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/libiomp5md.dll ${CMAKE_BINARY_DIR}/Release COMMAND ${CMAKE_COMMAND} -E copy ${MKLDNN_PATH}/lib/mkldnn.dll ${CMAKE_BINARY_DIR}/Release ) else() add_custom_command(TARGET ${DEMO_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy ${OPENBLAS_LIB_PATH}/lib/openblas.dll ${CMAKE_BINARY_DIR}/Release ) endif() if(NOT WITH_STATIC_LIB) add_custom_command(TARGET ${DEMO_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy "${PADDLE_LIB}/paddle/lib/paddle_fluid.dll" ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE} ) endif() endif() target_link_libraries(${DEMO_NAME} ${DEPS})
0
apollo_public_repos/apollo-model-centerpoint/deploy/caddn
apollo_public_repos/apollo-model-centerpoint/deploy/caddn/cpp/compile.sh
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. mkdir -p build cd build rm -rf * DEMO_NAME=main WITH_MKL=ON WITH_GPU=ON USE_TENSORRT=ON # paddle inference dir LIB_DIR=/workspace/cadnn/paddle_inference OPENCV_DIR=/workspace/cadnn/opencv-3.4.7/build/ CUDNN_LIB=/usr/local/x86_64-pc-linux-gnu/ CUDA_LIB=/usr/local/cuda/lib64 TENSORRT_ROOT=/usr/local/TensorRT-8.2.5.1 CUSTOM_OPERATOR_FILES="custom_ops/grid_sample_3d.cc;custom_ops/grid_sample_3d.cu;custom_ops/iou3d_nms.cpp;custom_ops/iou3d_nms_api.cpp;custom_ops/iou3d_nms_kernel.cu" cmake .. -DPADDLE_LIB=${LIB_DIR} \ -DOPENCV_DIR=${OPENCV_DIR} \ -DWITH_MKL=${WITH_MKL} \ -DDEMO_NAME=${DEMO_NAME} \ -DWITH_GPU=${WITH_GPU} \ -DWITH_STATIC_LIB=OFF \ -DUSE_TENSORRT=${USE_TENSORRT} \ -DCUDNN_LIB=${CUDNN_LIB} \ -DCUDA_LIB=${CUDA_LIB} \ -DTENSORRT_ROOT=${TENSORRT_ROOT} \ -DCUSTOM_OPERATOR_FILES=${CUSTOM_OPERATOR_FILES} make -j
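# After a successful build the demo binary is build/${DEMO_NAME}, i.e.
# build/main here. Note that LIB_DIR, OPENCV_DIR, CUDNN_LIB, CUDA_LIB and
# TENSORRT_ROOT above are environment-specific and must be adapted to the
# local machine before running this script.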
0
apollo_public_repos/apollo-model-centerpoint/deploy/caddn/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/caddn/cpp/custom_ops/iou3d_cpu.cpp
/* 3D Rotated IoU Calculation (CPU) Written by Shaoshuai Shi All Rights Reserved 2020. */ #include "iou3d_cpu.h" #include <cuda.h> #include <cuda_runtime_api.h> #include <math.h> #include <stdio.h> #include <vector> #include "paddle/include/experimental/ext_all.h" // #define CHECK_CUDA(x) do { \ // if (!x.type().is_cuda()) { \ // fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \ // exit(-1); \ // } \ // } while (0) // #define CHECK_CONTIGUOUS(x) do { \ // if (!x.is_contiguous()) { \ // fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \ // exit(-1); \ // } \ // } while (0) // #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) #define CHECK_INPUT(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.") inline float min(float a, float b) { return a > b ? b : a; } inline float max(float a, float b) { return a > b ? a : b; } const float EPS = 1e-8; struct Point { float x, y; __device__ Point() {} __device__ Point(double _x, double _y) { x = _x, y = _y; } __device__ void set(float _x, float _y) { x = _x; y = _y; } __device__ Point operator+(const Point &b) const { return Point(x + b.x, y + b.y); } __device__ Point operator-(const Point &b) const { return Point(x - b.x, y - b.y); } }; inline float cross(const Point &a, const Point &b) { return a.x * b.y - a.y * b.x; } inline float cross(const Point &p1, const Point &p2, const Point &p0) { return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y); } inline int check_rect_cross(const Point &p1, const Point &p2, const Point &q1, const Point &q2) { int ret = min(p1.x, p2.x) <= max(q1.x, q2.x) && min(q1.x, q2.x) <= max(p1.x, p2.x) && min(p1.y, p2.y) <= max(q1.y, q2.y) && min(q1.y, q2.y) <= max(p1.y, p2.y); return ret; } inline int check_in_box2d(const float *box, const Point &p) { // params: (7) [x, y, z, dx, dy, dz, heading] const float MARGIN = 1e-2; float center_x = box[0], center_y = box[1]; float angle_cos = cos(-box[6]), angle_sin = sin(-box[6]); // rotate the point in the opposite direction of box float rot_x = (p.x - center_x) * angle_cos + (p.y - center_y) * (-angle_sin); float rot_y = (p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos; return (fabs(rot_x) < box[3] / 2 + MARGIN && fabs(rot_y) < box[4] / 2 + MARGIN); } inline int intersection(const Point &p1, const Point &p0, const Point &q1, const Point &q0, Point &ans) { // fast exclusion if (check_rect_cross(p0, p1, q0, q1) == 0) return 0; // check cross standing float s1 = cross(q0, p1, p0); float s2 = cross(p1, q1, p0); float s3 = cross(p0, q1, q0); float s4 = cross(q1, p1, q0); if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0; // calculate intersection of two lines float s5 = cross(q1, p1, p0); if (fabs(s5 - s1) > EPS) { ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1); ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1); } else { float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y; float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y; float D = a0 * b1 - a1 * b0; ans.x = (b0 * c1 - b1 * c0) / D; ans.y = (a1 * c0 - a0 * c1) / D; } return 1; } inline void rotate_around_center(const Point &center, const float angle_cos, const float angle_sin, Point &p) { float new_x = (p.x - center.x) * angle_cos + (p.y - center.y) * (-angle_sin) + center.x; float new_y = (p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y; p.set(new_x, new_y); } inline int point_cmp(const Point &a, const Point &b, const Point &center) { return atan2(a.y - center.y, a.x - center.x) > atan2(b.y - 
center.y, b.x - center.x); } inline float box_overlap(const float *box_a, const float *box_b) { // params: box_a (7) [x, y, z, dx, dy, dz, heading] // params: box_b (7) [x, y, z, dx, dy, dz, heading] // float a_x1 = box_a[0], a_y1 = box_a[1], a_x2 = box_a[2], a_y2 = // box_a[3], a_angle = box_a[4]; // float b_x1 = box_b[0], b_y1 = box_b[1], b_x2 = box_b[2], b_y2 = // box_b[3], b_angle = box_b[4]; float a_angle = box_a[6], b_angle = box_b[6]; float a_dx_half = box_a[3] / 2, b_dx_half = box_b[3] / 2, a_dy_half = box_a[4] / 2, b_dy_half = box_b[4] / 2; float a_x1 = box_a[0] - a_dx_half, a_y1 = box_a[1] - a_dy_half; float a_x2 = box_a[0] + a_dx_half, a_y2 = box_a[1] + a_dy_half; float b_x1 = box_b[0] - b_dx_half, b_y1 = box_b[1] - b_dy_half; float b_x2 = box_b[0] + b_dx_half, b_y2 = box_b[1] + b_dy_half; Point center_a(box_a[0], box_a[1]); Point center_b(box_b[0], box_b[1]); Point box_a_corners[5]; box_a_corners[0].set(a_x1, a_y1); box_a_corners[1].set(a_x2, a_y1); box_a_corners[2].set(a_x2, a_y2); box_a_corners[3].set(a_x1, a_y2); Point box_b_corners[5]; box_b_corners[0].set(b_x1, b_y1); box_b_corners[1].set(b_x2, b_y1); box_b_corners[2].set(b_x2, b_y2); box_b_corners[3].set(b_x1, b_y2); // get oriented corners float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle); float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle); for (int k = 0; k < 4; k++) { rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]); rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]); } box_a_corners[4] = box_a_corners[0]; box_b_corners[4] = box_b_corners[0]; // get intersection of lines Point cross_points[16]; Point poly_center; int cnt = 0, flag = 0; poly_center.set(0, 0); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { flag = intersection(box_a_corners[i + 1], box_a_corners[i], box_b_corners[j + 1], box_b_corners[j], cross_points[cnt]); if (flag) { poly_center = poly_center + cross_points[cnt]; cnt++; } } } // check corners for (int k = 0; k < 4; k++) { if (check_in_box2d(box_a, box_b_corners[k])) { poly_center = poly_center + box_b_corners[k]; cross_points[cnt] = box_b_corners[k]; cnt++; } if (check_in_box2d(box_b, box_a_corners[k])) { poly_center = poly_center + box_a_corners[k]; cross_points[cnt] = box_a_corners[k]; cnt++; } } poly_center.x /= cnt; poly_center.y /= cnt; // sort the points of polygon Point temp; for (int j = 0; j < cnt - 1; j++) { for (int i = 0; i < cnt - j - 1; i++) { if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)) { temp = cross_points[i]; cross_points[i] = cross_points[i + 1]; cross_points[i + 1] = temp; } } } // get the overlap areas float area = 0; for (int k = 0; k < cnt - 1; k++) { area += cross(cross_points[k] - cross_points[0], cross_points[k + 1] - cross_points[0]); } return fabs(area) / 2.0; } inline float iou_bev(const float *box_a, const float *box_b) { // params: box_a (7) [x, y, z, dx, dy, dz, heading] // params: box_b (7) [x, y, z, dx, dy, dz, heading] float sa = box_a[3] * box_a[4]; float sb = box_b[3] * box_b[4]; float s_overlap = box_overlap(box_a, box_b); return s_overlap / fmaxf(sa + sb - s_overlap, EPS); } int boxes_iou_bev_cpu(paddle::Tensor boxes_a_tensor, paddle::Tensor boxes_b_tensor, paddle::Tensor ans_iou_tensor) { // params boxes_a_tensor: (N, 7) [x, y, z, dx, dy, dz, heading] // params boxes_b_tensor: (M, 7) [x, y, z, dx, dy, dz, heading] // params ans_iou_tensor: (N, M) // CHECK_CONTIGUOUS(boxes_a_tensor); // CHECK_CONTIGUOUS(boxes_b_tensor); int num_boxes_a = 
boxes_a_tensor.shape()[0]; int num_boxes_b = boxes_b_tensor.shape()[0]; const float *boxes_a = boxes_a_tensor.data<float>(); const float *boxes_b = boxes_b_tensor.data<float>(); float *ans_iou = ans_iou_tensor.data<float>(); for (int i = 0; i < num_boxes_a; i++) { for (int j = 0; j < num_boxes_b; j++) { ans_iou[i * num_boxes_b + j] = iou_bev(boxes_a + i * 7, boxes_b + j * 7); } } return 1; }
0
apollo_public_repos/apollo-model-centerpoint/deploy/caddn/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/caddn/cpp/custom_ops/iou3d_nms.cpp
/* 3D IoU Calculation and Rotated NMS (modified from 2D NMS written by others) Written by Shaoshuai Shi All Rights Reserved 2019-2020. */ #include "iou3d_nms.h" #include <cuda.h> #include <cuda_runtime_api.h> #include <vector> #include "paddle/include/experimental/ext_all.h" // #define CHECK_CUDA(x) do { \ // if (!x.type().is_cuda()) { \ // fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \ // exit(-1); \ // } \ // } while (0) // #define CHECK_CONTIGUOUS(x) do { \ // if (!x.is_contiguous()) { \ // fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \ // exit(-1); \ // } \ // } while (0) // #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) #define CHECK_INPUT(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.") #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) #define CHECK_ERROR(ans) \ { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } #define D(x) \ PD_THROW('\n', x, \ "\n--------------------------------- where is the error ? " \ "---------------------------------------\n"); const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8; void boxesoverlapLauncher(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap); void boxesioubevLauncher(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou); void nmsLauncher(const float *boxes, unsigned long long *mask, int boxes_num, float nms_overlap_thresh); void nmsNormalLauncher(const float *boxes, unsigned long long *mask, int boxes_num, float nms_overlap_thresh); int boxes_overlap_bev_gpu(paddle::Tensor boxes_a, paddle::Tensor boxes_b, paddle::Tensor ans_overlap) { // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading] // params ans_overlap: (N, M) CHECK_INPUT(boxes_a); CHECK_INPUT(boxes_b); CHECK_INPUT(ans_overlap); int num_a = boxes_a.shape()[0]; int num_b = boxes_b.shape()[0]; const float *boxes_a_data = boxes_a.data<float>(); const float *boxes_b_data = boxes_b.data<float>(); float *ans_overlap_data = ans_overlap.data<float>(); boxesoverlapLauncher(num_a, boxes_a_data, num_b, boxes_b_data, ans_overlap_data); return 1; } int boxes_iou_bev_gpu(paddle::Tensor boxes_a, paddle::Tensor boxes_b, paddle::Tensor ans_iou) { // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading] // params ans_iou: (N, M) CHECK_INPUT(boxes_a); CHECK_INPUT(boxes_b); CHECK_INPUT(ans_iou); int num_a = boxes_a.shape()[0]; int num_b = boxes_b.shape()[0]; const float *boxes_a_data = boxes_a.data<float>(); const float *boxes_b_data = boxes_b.data<float>(); float *ans_iou_data = ans_iou.data<float>(); boxesioubevLauncher(num_a, boxes_a_data, num_b, boxes_b_data, ans_iou_data); return 1; } std::vector<paddle::Tensor> nms_gpu(const paddle::Tensor &boxes, float nms_overlap_thresh) { // params boxes: (N, 7) [x, y, z, dx, dy, dz, heading] // params keep: (N) CHECK_INPUT(boxes); // CHECK_CONTIGUOUS(keep); // Allocate both outputs as INT64 so their dtypes match NmsInferDtype and the // data<>() accesses below. auto keep = paddle::empty({boxes.shape()[0]}, paddle::DataType::INT64, paddle::CPUPlace()); auto num_to_keep_tensor = paddle::empty({1}, paddle::DataType::INT64, paddle::CPUPlace()); int64_t *num_to_keep_data = num_to_keep_tensor.data<int64_t>(); int boxes_num = boxes.shape()[0]; const float *boxes_data =
boxes.data<float>(); long *keep_data = keep.data<long>(); const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); unsigned long long *mask_data = NULL; CHECK_ERROR(cudaMalloc((void **)&mask_data, boxes_num * col_blocks * sizeof(unsigned long long))); nmsLauncher(boxes_data, mask_data, boxes_num, nms_overlap_thresh); // unsigned long long mask_cpu[boxes_num * col_blocks]; // unsigned long long *mask_cpu = new unsigned long long [boxes_num * // col_blocks]; std::vector<unsigned long long> mask_cpu(boxes_num * col_blocks); // printf("boxes_num=%d, col_blocks=%d\n", boxes_num, col_blocks); CHECK_ERROR(cudaMemcpy(&mask_cpu[0], mask_data, boxes_num * col_blocks * sizeof(unsigned long long), cudaMemcpyDeviceToHost)); cudaFree(mask_data); unsigned long long remv_cpu[col_blocks]; memset(remv_cpu, 0, col_blocks * sizeof(unsigned long long)); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / THREADS_PER_BLOCK_NMS; int inblock = i % THREADS_PER_BLOCK_NMS; if (!(remv_cpu[nblock] & (1ULL << inblock))) { keep_data[num_to_keep++] = i; unsigned long long *p = &mask_cpu[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv_cpu[j] |= p[j]; } } } num_to_keep_data[0] = num_to_keep; if (cudaSuccess != cudaGetLastError()) printf("Error!\n"); return {keep, num_to_keep_tensor}; } int nms_normal_gpu(paddle::Tensor boxes, paddle::Tensor keep, float nms_overlap_thresh) { // params boxes: (N, 7) [x, y, z, dx, dy, dz, heading] // params keep: (N) CHECK_INPUT(boxes); // CHECK_CONTIGUOUS(keep); int boxes_num = boxes.shape()[0]; const float *boxes_data = boxes.data<float>(); long *keep_data = keep.data<long>(); const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); unsigned long long *mask_data = NULL; CHECK_ERROR(cudaMalloc((void **)&mask_data, boxes_num * col_blocks * sizeof(unsigned long long))); nmsNormalLauncher(boxes_data, mask_data, boxes_num, nms_overlap_thresh); // unsigned long long mask_cpu[boxes_num * col_blocks]; // unsigned long long *mask_cpu = new unsigned long long [boxes_num * // col_blocks]; std::vector<unsigned long long> mask_cpu(boxes_num * col_blocks); // printf("boxes_num=%d, col_blocks=%d\n", boxes_num, col_blocks); CHECK_ERROR(cudaMemcpy(&mask_cpu[0], mask_data, boxes_num * col_blocks * sizeof(unsigned long long), cudaMemcpyDeviceToHost)); cudaFree(mask_data); unsigned long long remv_cpu[col_blocks]; memset(remv_cpu, 0, col_blocks * sizeof(unsigned long long)); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / THREADS_PER_BLOCK_NMS; int inblock = i % THREADS_PER_BLOCK_NMS; if (!(remv_cpu[nblock] & (1ULL << inblock))) { keep_data[num_to_keep++] = i; unsigned long long *p = &mask_cpu[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv_cpu[j] |= p[j]; } } } if (cudaSuccess != cudaGetLastError()) printf("Error!\n"); return num_to_keep; }
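// Bookkeeping sketch for the bitmask NMS above (illustrative numbers, boxes
// assumed pre-sorted by score): THREADS_PER_BLOCK_NMS = sizeof(unsigned long
// long) * 8 = 64, so for boxes_num = 200, col_blocks = DIVUP(200, 64) = 4 and
// `mask` holds 200 * 4 64-bit words; bit j of mask[i * 4 + k] marks that box
// (k * 64 + j) overlaps box i above the threshold. The host loop then walks
// boxes in order, keeping a box only while its bit in remv_cpu is still clear
// and OR-ing in the kept box's mask row to suppress later overlaps.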
0
apollo_public_repos/apollo-model-centerpoint/deploy/caddn/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/caddn/cpp/custom_ops/grid_sample_3d.cc
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "grid_sample_3d.h" #include <vector> #include "paddle/include/experimental/ext_all.h" std::vector<paddle::Tensor> GridSample3DCUDAForward( const paddle::Tensor& x, const paddle::Tensor& grid, const std::string& mode, const std::string& padding_mode, bool align_corners); std::vector<paddle::Tensor> GridSample3DForward(const paddle::Tensor& x, const paddle::Tensor& grid, const std::string& mode, const std::string& padding_mode, bool align_corners) { return GridSample3DCUDAForward(x, grid, mode, padding_mode, align_corners); } std::vector<paddle::Tensor> GridSample3DCUDABackward( const paddle::Tensor& x, const paddle::Tensor& grid, const paddle::Tensor& grad_out, const std::string& mode, const std::string& padding_mode, bool align_corners); std::vector<paddle::Tensor> GridSample3DBackward( const paddle::Tensor& x, const paddle::Tensor& grid, const paddle::Tensor& grad_out, const std::string& mode, const std::string& padding_mode, bool align_corners) { return GridSample3DCUDABackward(x, grid, grad_out, mode, padding_mode, align_corners); } std::vector<std::vector<int64_t>> GridSample3DInferShape( std::vector<int64_t> x_shape, std::vector<int64_t> grid_shape) { return { {x_shape[0], x_shape[1], grid_shape[1], grid_shape[2], grid_shape[3]}}; } std::vector<std::vector<int64_t>> GridSample3DInferBackShape( std::vector<int64_t> x_shape, std::vector<int64_t> grid_shape) { return {x_shape}; } std::vector<paddle::DataType> GridSample3DInferDtype( paddle::DataType x_dtype, paddle::DataType grid_dtype) { return {x_dtype}; } PD_BUILD_OP(grid_sample_3d) .Inputs({"x", "grid"}) .Attrs({"mode: std::string", "padding_mode: std::string", "align_corners: bool"}) .Outputs({"out"}) .SetKernelFn(PD_KERNEL(GridSample3DForward)) .SetInferShapeFn(PD_INFER_SHAPE(GridSample3DInferShape)) .SetInferDtypeFn(PD_INFER_DTYPE(GridSample3DInferDtype)); PD_BUILD_GRAD_OP(grid_sample_3d) .Inputs({"x", "grid", paddle::Grad("out")}) .Attrs({"mode: std::string", "padding_mode: std::string", "align_corners: bool"}) .Outputs({paddle::Grad("x")}) .SetKernelFn(PD_KERNEL(GridSample3DBackward)) .SetInferShapeFn(PD_INFER_SHAPE(GridSample3DInferBackShape));
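// Shape example (illustrative sizes, not from the original file): for x of
// shape (N=1, C=64, D=16, H=94, W=311) and grid of shape
// (1, D_out, H_out, W_out, 3), GridSample3DInferShape above yields an output
// of shape (1, 64, D_out, H_out, W_out).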
0
apollo_public_repos/apollo-model-centerpoint/deploy/caddn/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/caddn/cpp/custom_ops/iou3d_nms_kernel.cu
/* 3D IoU Calculation and Rotated NMS (modified from 2D NMS written by others) Written by Shaoshuai Shi All Rights Reserved 2019-2020. */ #include <stdio.h> #define THREADS_PER_BLOCK 16 #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) // #define DEBUG const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8; const float EPS = 1e-8; struct Point { float x, y; __device__ Point() {} __device__ Point(double _x, double _y) { x = _x, y = _y; } __device__ void set(float _x, float _y) { x = _x; y = _y; } __device__ Point operator+(const Point &b) const { return Point(x + b.x, y + b.y); } __device__ Point operator-(const Point &b) const { return Point(x - b.x, y - b.y); } }; __device__ inline float cross(const Point &a, const Point &b) { return a.x * b.y - a.y * b.x; } __device__ inline float cross(const Point &p1, const Point &p2, const Point &p0) { return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y); } __device__ int check_rect_cross(const Point &p1, const Point &p2, const Point &q1, const Point &q2) { int ret = min(p1.x, p2.x) <= max(q1.x, q2.x) && min(q1.x, q2.x) <= max(p1.x, p2.x) && min(p1.y, p2.y) <= max(q1.y, q2.y) && min(q1.y, q2.y) <= max(p1.y, p2.y); return ret; } __device__ inline int check_in_box2d(const float *box, const Point &p) { // params: (7) [x, y, z, dx, dy, dz, heading] // const float MARGIN = 1e-2; // Align with the setting of mmdet3d const float MARGIN = 1e-5; float center_x = box[0], center_y = box[1]; float angle_cos = cos(-box[6]), angle_sin = sin(-box[6]); // rotate the point in the opposite direction of box // float rot_x = (p.x - center_x) * angle_cos + (p.y - center_y) * // (-angle_sin); // float rot_y = (p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos; // return (fabs(rot_x) < box[3] / 2 + MARGIN && fabs(rot_y) < box[4] / 2 + // MARGIN); // Align with the implementation of mmdet3d float rot_x = (p.x - center_x) * angle_cos + (p.y - center_y) * angle_sin + center_x; float rot_y = -(p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos + center_y; float x1 = center_x - box[3] / 2; float x2 = center_x + box[3] / 2; float y1 = center_y - box[4] / 2; float y2 = center_y + box[4] / 2; return (rot_x > x1 - MARGIN && rot_x < x2 + MARGIN && rot_y > y1 - MARGIN && rot_y < y2 + MARGIN); } __device__ inline int intersection(const Point &p1, const Point &p0, const Point &q1, const Point &q0, Point &ans) { // fast exclusion if (check_rect_cross(p0, p1, q0, q1) == 0) return 0; // check cross standing float s1 = cross(q0, p1, p0); float s2 = cross(p1, q1, p0); float s3 = cross(p0, q1, q0); float s4 = cross(q1, p1, q0); if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0; // calculate intersection of two lines float s5 = cross(q1, p1, p0); if (fabs(s5 - s1) > EPS) { ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1); ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1); } else { float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y; float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y; float D = a0 * b1 - a1 * b0; ans.x = (b0 * c1 - b1 * c0) / D; ans.y = (a1 * c0 - a0 * c1) / D; } return 1; } __device__ inline void rotate_around_center(const Point &center, const float angle_cos, const float angle_sin, Point &p) { // float new_x = (p.x - center.x) * angle_cos + (p.y - center.y) * // (-angle_sin) + center.x; // float new_y = (p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + // center.y; // p.set(new_x, new_y); // Align with the implementation of mmdet3d float new_x = (p.x - center.x) * angle_cos + (p.y - center.y) * angle_sin +
center.x; float new_y = -(p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y; p.set(new_x, new_y); } __device__ inline int point_cmp(const Point &a, const Point &b, const Point &center) { return atan2(a.y - center.y, a.x - center.x) > atan2(b.y - center.y, b.x - center.x); } __device__ inline float box_overlap(const float *box_a, const float *box_b) { // params box_a: [x, y, z, dx, dy, dz, heading] // params box_b: [x, y, z, dx, dy, dz, heading] float a_angle = box_a[6], b_angle = box_b[6]; float a_dx_half = box_a[3] / 2, b_dx_half = box_b[3] / 2, a_dy_half = box_a[4] / 2, b_dy_half = box_b[4] / 2; float a_x1 = box_a[0] - a_dx_half, a_y1 = box_a[1] - a_dy_half; float a_x2 = box_a[0] + a_dx_half, a_y2 = box_a[1] + a_dy_half; float b_x1 = box_b[0] - b_dx_half, b_y1 = box_b[1] - b_dy_half; float b_x2 = box_b[0] + b_dx_half, b_y2 = box_b[1] + b_dy_half; Point center_a(box_a[0], box_a[1]); Point center_b(box_b[0], box_b[1]); #ifdef DEBUG printf( "a: (%.3f, %.3f, %.3f, %.3f, %.3f), b: (%.3f, %.3f, %.3f, %.3f, %.3f)\n", a_x1, a_y1, a_x2, a_y2, a_angle, b_x1, b_y1, b_x2, b_y2, b_angle); printf("center a: (%.3f, %.3f), b: (%.3f, %.3f)\n", center_a.x, center_a.y, center_b.x, center_b.y); #endif Point box_a_corners[5]; box_a_corners[0].set(a_x1, a_y1); box_a_corners[1].set(a_x2, a_y1); box_a_corners[2].set(a_x2, a_y2); box_a_corners[3].set(a_x1, a_y2); Point box_b_corners[5]; box_b_corners[0].set(b_x1, b_y1); box_b_corners[1].set(b_x2, b_y1); box_b_corners[2].set(b_x2, b_y2); box_b_corners[3].set(b_x1, b_y2); // get oriented corners float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle); float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle); for (int k = 0; k < 4; k++) { #ifdef DEBUG printf("before corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y); #endif rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]); rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]); #ifdef DEBUG printf("corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y); #endif } box_a_corners[4] = box_a_corners[0]; box_b_corners[4] = box_b_corners[0]; // get intersection of lines Point cross_points[16]; Point poly_center; int cnt = 0, flag = 0; poly_center.set(0, 0); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { flag = intersection(box_a_corners[i + 1], box_a_corners[i], box_b_corners[j + 1], box_b_corners[j], cross_points[cnt]); if (flag) { poly_center = poly_center + cross_points[cnt]; cnt++; #ifdef DEBUG printf( "Cross points (%.3f, %.3f): a(%.3f, %.3f)->(%.3f, %.3f), b(%.3f, " "%.3f)->(%.3f, %.3f) \n", cross_points[cnt - 1].x, cross_points[cnt - 1].y, box_a_corners[i].x, box_a_corners[i].y, box_a_corners[i + 1].x, box_a_corners[i + 1].y, box_b_corners[i].x, box_b_corners[i].y, box_b_corners[i + 1].x, box_b_corners[i + 1].y); #endif } } } // check corners for (int k = 0; k < 4; k++) { if (check_in_box2d(box_a, box_b_corners[k])) { poly_center = poly_center + box_b_corners[k]; cross_points[cnt] = box_b_corners[k]; cnt++; #ifdef DEBUG printf("b corners in a: corner_b(%.3f, %.3f)", cross_points[cnt - 1].x, cross_points[cnt - 1].y); #endif } if (check_in_box2d(box_b, box_a_corners[k])) { poly_center = poly_center + box_a_corners[k]; cross_points[cnt] = box_a_corners[k]; cnt++; #ifdef DEBUG printf("a corners in b: corner_a(%.3f, %.3f)", cross_points[cnt - 1].x, cross_points[cnt - 1].y); #endif } } 
poly_center.x /= cnt; poly_center.y /= cnt; // sort the points of polygon Point temp; for (int j = 0; j < cnt - 1; j++) { for (int i = 0; i < cnt - j - 1; i++) { if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)) { temp = cross_points[i]; cross_points[i] = cross_points[i + 1]; cross_points[i + 1] = temp; } } } #ifdef DEBUG printf("cnt=%d\n", cnt); for (int i = 0; i < cnt; i++) { printf("All cross point %d: (%.3f, %.3f)\n", i, cross_points[i].x, cross_points[i].y); } #endif // get the overlap areas float area = 0; for (int k = 0; k < cnt - 1; k++) { area += cross(cross_points[k] - cross_points[0], cross_points[k + 1] - cross_points[0]); } return fabs(area) / 2.0; } __device__ inline float iou_bev(const float *box_a, const float *box_b) { // params box_a: [x, y, z, dx, dy, dz, heading] // params box_b: [x, y, z, dx, dy, dz, heading] float sa = box_a[3] * box_a[4]; float sb = box_b[3] * box_b[4]; float s_overlap = box_overlap(box_a, box_b); return s_overlap / fmaxf(sa + sb - s_overlap, EPS); } __global__ void boxes_overlap_kernel(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap) { // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading] const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y; const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (a_idx >= num_a || b_idx >= num_b) { return; } const float *cur_box_a = boxes_a + a_idx * 7; const float *cur_box_b = boxes_b + b_idx * 7; float s_overlap = box_overlap(cur_box_a, cur_box_b); ans_overlap[a_idx * num_b + b_idx] = s_overlap; } __global__ void boxes_iou_bev_kernel(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou) { // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading] const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y; const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (a_idx >= num_a || b_idx >= num_b) { return; } const float *cur_box_a = boxes_a + a_idx * 7; const float *cur_box_b = boxes_b + b_idx * 7; float cur_iou_bev = iou_bev(cur_box_a, cur_box_b); ans_iou[a_idx * num_b + b_idx] = cur_iou_bev; } __global__ void nms_kernel(const int boxes_num, const float nms_overlap_thresh, const float *boxes, unsigned long long *mask) { // params: boxes (N, 7) [x, y, z, dx, dy, dz, heading] // params: mask (N, N/THREADS_PER_BLOCK_NMS) const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 7 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 0]; block_boxes[threadIdx.x * 7 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 1]; block_boxes[threadIdx.x * 7 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 2]; block_boxes[threadIdx.x * 7 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 3]; block_boxes[threadIdx.x * 7 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 4]; block_boxes[threadIdx.x * 7 + 5] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 5]; block_boxes[threadIdx.x * 7 + 6] = boxes[(THREADS_PER_BLOCK_NMS * 
col_start + threadIdx.x) * 7 + 6];
  }
  __syncthreads();

  if (threadIdx.x < row_size) {
    const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x;
    const float *cur_box = boxes + cur_box_idx * 7;

    int i = 0;
    unsigned long long t = 0;
    int start = 0;
    if (row_start == col_start) {
      start = threadIdx.x + 1;
    }
    for (i = start; i < col_size; i++) {
      if (iou_bev(cur_box, block_boxes + i * 7) > nms_overlap_thresh) {
        t |= 1ULL << i;
      }
    }
    const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
    mask[cur_box_idx * col_blocks + col_start] = t;
  }
}

__device__ inline float iou_normal(float const *const a, float const *const b) {
  // params: a: [x, y, z, dx, dy, dz, heading]
  // params: b: [x, y, z, dx, dy, dz, heading]
  float left = fmaxf(a[0] - a[3] / 2, b[0] - b[3] / 2),
        right = fminf(a[0] + a[3] / 2, b[0] + b[3] / 2);
  float top = fmaxf(a[1] - a[4] / 2, b[1] - b[4] / 2),
        bottom = fminf(a[1] + a[4] / 2, b[1] + b[4] / 2);
  float width = fmaxf(right - left, 0.f), height = fmaxf(bottom - top, 0.f);
  float interS = width * height;
  float Sa = a[3] * a[4];
  float Sb = b[3] * b[4];
  return interS / fmaxf(Sa + Sb - interS, EPS);
}

__global__ void nms_normal_kernel(const int boxes_num,
                                  const float nms_overlap_thresh,
                                  const float *boxes,
                                  unsigned long long *mask) {
  // params: boxes (N, 7) [x, y, z, dx, dy, dz, heading]
  // params: mask (N, N/THREADS_PER_BLOCK_NMS)
  const int row_start = blockIdx.y;
  const int col_start = blockIdx.x;

  // if (row_start > col_start) return;

  const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS,
                             THREADS_PER_BLOCK_NMS);
  const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS,
                             THREADS_PER_BLOCK_NMS);

  __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7];

  if (threadIdx.x < col_size) {
    block_boxes[threadIdx.x * 7 + 0] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 0];
    block_boxes[threadIdx.x * 7 + 1] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 1];
    block_boxes[threadIdx.x * 7 + 2] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 2];
    block_boxes[threadIdx.x * 7 + 3] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 3];
    block_boxes[threadIdx.x * 7 + 4] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 4];
    block_boxes[threadIdx.x * 7 + 5] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 5];
    block_boxes[threadIdx.x * 7 + 6] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 6];
  }
  __syncthreads();

  if (threadIdx.x < row_size) {
    const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x;
    const float *cur_box = boxes + cur_box_idx * 7;

    int i = 0;
    unsigned long long t = 0;
    int start = 0;
    if (row_start == col_start) {
      start = threadIdx.x + 1;
    }
    for (i = start; i < col_size; i++) {
      if (iou_normal(cur_box, block_boxes + i * 7) > nms_overlap_thresh) {
        t |= 1ULL << i;
      }
    }
    const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
    mask[cur_box_idx * col_blocks + col_start] = t;
  }
}

void boxesoverlapLauncher(const int num_a, const float *boxes_a,
                          const int num_b, const float *boxes_b,
                          float *ans_overlap) {
  dim3 blocks(DIVUP(num_b, THREADS_PER_BLOCK),
              DIVUP(num_a, THREADS_PER_BLOCK));  // blockIdx.x(col), blockIdx.y(row)
  dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK);

  boxes_overlap_kernel<<<blocks, threads>>>(num_a, boxes_a, num_b, boxes_b,
                                            ans_overlap);
#ifdef DEBUG
  cudaDeviceSynchronize();  // for using printf in kernel function
#endif
}

void boxesioubevLauncher(const int num_a, const float *boxes_a,
                         const int num_b, const float *boxes_b,
                         float *ans_iou) {
  dim3 blocks(DIVUP(num_b, THREADS_PER_BLOCK),
              DIVUP(num_a, THREADS_PER_BLOCK));  // blockIdx.x(col), blockIdx.y(row)
  dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK);

  boxes_iou_bev_kernel<<<blocks, threads>>>(num_a, boxes_a, num_b, boxes_b,
                                            ans_iou);
#ifdef DEBUG
  cudaDeviceSynchronize();  // for using printf in kernel function
#endif
}

void nmsLauncher(const float *boxes, unsigned long long *mask, int boxes_num,
                 float nms_overlap_thresh) {
  dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS),
              DIVUP(boxes_num, THREADS_PER_BLOCK_NMS));
  dim3 threads(THREADS_PER_BLOCK_NMS);
  nms_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes, mask);
}

void nmsNormalLauncher(const float *boxes, unsigned long long *mask,
                       int boxes_num, float nms_overlap_thresh) {
  dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS),
              DIVUP(boxes_num, THREADS_PER_BLOCK_NMS));
  dim3 threads(THREADS_PER_BLOCK_NMS);
  nms_normal_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes,
                                         mask);
}
0
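The NMS kernels above only fill a bitwise suppression mask; picking the surviving boxes happens on the host. Below is a minimal host-side sketch of that decoding step, assuming the mask buffer has already been copied back from the GPU, boxes were sorted by descending score before the kernel ran, and THREADS_PER_BLOCK_NMS is the same 64-thread constant the kernels were launched with (its definition lives earlier in the kernel file; the value here is an assumption).

#include <vector>

// Assumption: must match the THREADS_PER_BLOCK_NMS used to launch the kernels.
static const int kThreadsPerBlockNMS = 64;

static inline int divup(int m, int n) { return m / n + (m % n > 0); }

// mask: boxes_num x divup(boxes_num, kThreadsPerBlockNMS) words, on the CPU.
std::vector<int> decode_nms_mask(const unsigned long long *mask,
                                 int boxes_num) {
  const int col_blocks = divup(boxes_num, kThreadsPerBlockNMS);
  std::vector<unsigned long long> remv(col_blocks, 0);  // suppression bits so far
  std::vector<int> keep;
  for (int i = 0; i < boxes_num; ++i) {
    const int nblock = i / kThreadsPerBlockNMS;
    const int inblock = i % kThreadsPerBlockNMS;
    if (!(remv[nblock] & (1ULL << inblock))) {
      keep.push_back(i);  // box i survives; absorb everything it suppresses
      const unsigned long long *p = mask + (size_t)i * col_blocks;
      for (int j = nblock; j < col_blocks; ++j) {
        remv[j] |= p[j];
      }
    }
  }
  return keep;
}

The running remv words accumulate the suppression bits of every kept box, so each candidate is effectively tested against all higher-scoring survivors in O(boxes_num * col_blocks) word operations.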
apollo_public_repos/apollo-model-centerpoint/deploy/caddn/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/caddn/cpp/custom_ops/iou3d_nms.h
#ifndef IOU3D_NMS_H
#define IOU3D_NMS_H

// #include <paddle/extension.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <vector>
#include "paddle/include/experimental/ext_all.h"

int boxes_overlap_bev_gpu(paddle::Tensor boxes_a, paddle::Tensor boxes_b,
                          paddle::Tensor ans_overlap);
int boxes_iou_bev_gpu(paddle::Tensor boxes_a, paddle::Tensor boxes_b,
                      paddle::Tensor ans_iou);
std::vector<paddle::Tensor> nms_gpu(const paddle::Tensor& boxes,
                                    float nms_overlap_thresh);
int nms_normal_gpu(paddle::Tensor boxes, paddle::Tensor keep,
                   float nms_overlap_thresh);

#endif
0
apollo_public_repos/apollo-model-centerpoint/deploy/caddn/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/caddn/cpp/custom_ops/iou3d_cpu.h
#ifndef IOU3D_CPU_H
#define IOU3D_CPU_H

#include <cuda.h>
#include <cuda_runtime_api.h>
#include <vector>
#include "paddle/include/experimental/ext_all.h"

int boxes_iou_bev_cpu(paddle::Tensor boxes_a_tensor,
                      paddle::Tensor boxes_b_tensor,
                      paddle::Tensor ans_iou_tensor);

#endif
0
apollo_public_repos/apollo-model-centerpoint/deploy/caddn/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/caddn/cpp/custom_ops/grid_sample_3d.h
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#ifndef GRID_SAMPLE_3D_H
#define GRID_SAMPLE_3D_H

#include <cassert>
#include <cmath>
#include <vector>

#define HOST_DEVICE __host__ __device__
#define HOST_DEVICE_INLINE HOST_DEVICE __forceinline__

enum class Mode { bilinear, nearest };

enum class PaddingMode { zeros, border, reflect };

namespace {}

#endif
0
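PaddingMode::reflect declared in this header folds out-of-range sampling coordinates back into the valid interval by bouncing them off the boundaries. A small standalone C++ sketch of that folding, mirroring the ReflectIndexes logic implemented in grid_sample_3d.cu (the size-5 axis and sample values are illustrative only):

#include <cmath>
#include <cstdio>

// Reflect `in` into [twice_low/2, twice_high/2], flipping direction on each bounce.
float reflect(float in, int twice_low, int twice_high) {
  if (twice_low == twice_high) return 0.f;
  float min = twice_low / 2.f;
  float span = (twice_high - twice_low) / 2.f;
  in = std::fabs(in - min);
  float extra = std::fmod(in, span);
  int flips = static_cast<int>(std::floor(in / span));
  return (flips % 2 == 0) ? extra + min : span - extra + min;
}

int main() {
  // align_corners=true on a size-5 axis reflects across [0, 4]:
  std::printf("%.1f\n", reflect(5.5f, 0, 2 * (5 - 1)));   // 2.5 (5.5 bounces off 4)
  std::printf("%.1f\n", reflect(-1.0f, 0, 2 * (5 - 1)));  // 1.0 (-1 bounces off 0)
  return 0;
}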
apollo_public_repos/apollo-model-centerpoint/deploy/caddn/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/caddn/cpp/custom_ops/grid_sample_3d.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <cuda.h> #include "grid_sample_3d.h" #include "paddle/include/experimental/ext_all.h" #define CHECK_INPUT_GPU(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.") static __forceinline__ __device__ bool InBounds3D(int64_t d, int64_t h, int64_t w, int64_t D, int64_t H, int64_t W) { return d >= 0 && d < D && h >= 0 && h < H && w >= 0 && w < W; } #define CUDA_KERNEL_LOOP_TYPE(i, n, index_type) \ index_type _i_n_d_e_x = blockIdx.x * blockDim.x + threadIdx.x; \ for (index_type i = _i_n_d_e_x; _i_n_d_e_x < (n); \ _i_n_d_e_x += blockDim.x * gridDim.x, i = _i_n_d_e_x) #define CUDA_KERNEL_LOOP(i, n) CUDA_KERNEL_LOOP_TYPE(i, n, int) template <typename T> static __forceinline__ __device__ T Unnormalize(T coord, int size, bool align_corners) { if (align_corners) { return ((coord + 1.f) / 2) * (size - 1); } else { return ((coord + 1.f) * size - 1) / 2; } } template <typename T> static __forceinline__ __device__ T ClipIndexes(T in, int max_value) { return min(static_cast<T>(max_value), max(in, static_cast<T>(0))); } template <typename T> static __forceinline__ __device__ T ReflectIndexes(T in, int twice_low, int twice_high) { if (twice_low == twice_high) { return static_cast<T>(0); } T min = static_cast<T>(twice_low) / 2; T span = static_cast<T>(twice_high - twice_low) / 2; in = fabs(in - min); T extra = fmod(in, span); int flips = static_cast<int>(floor(in / span)); if (flips % 2 == 0) { return extra + min; } else { return span - extra + min; } } template <typename T> static __forceinline__ __device__ T ComputePositions(T coord, int size, PaddingMode padding_mode, bool align_corners) { coord = Unnormalize<T>(coord, size, align_corners); if (padding_mode == PaddingMode::border) { coord = ClipIndexes(coord, size - 1); } else if (padding_mode == PaddingMode::reflect) { if (align_corners) { coord = ReflectIndexes(coord, 0, 2 * (size - 1)); } else { coord = ReflectIndexes(coord, -1, 2 * size - 1); } coord = ClipIndexes(coord, size - 1); } return coord; } template <typename T, typename index_t> __global__ void GridSample3DCudaKernel( const index_t nthreads, index_t out_c, index_t out_d, index_t out_h, index_t out_w, index_t in_d, index_t in_h, index_t in_w, const T* input, const T* grid, T* output, const Mode interpolation_mode, const PaddingMode padding_mode, bool align_corners) { // printf("size: %d, %d, %d, %d, %d, %d \n", out_c, out_d, out_w, out_h, in_d, // in_w); index_t inp_sW = 1; index_t inp_sH = in_w; index_t inp_sD = in_h * in_w; index_t inp_sC = in_d * inp_sD; index_t inp_sN = out_c * inp_sC; index_t grid_sCoor = 1; index_t grid_sW = 3; index_t grid_sH = out_w * grid_sW; index_t grid_sD = out_h * grid_sH; index_t grid_sN = out_d * grid_sD; index_t out_sW = 1; index_t out_sH = out_w; index_t out_sD = out_h * out_w; index_t out_sC = out_d * out_sD; index_t out_sN = out_c * out_sC; CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) { const index_t w = index % out_w; const index_t h = 
(index / out_w) % out_h; const index_t d = (index / (out_h * out_w)) % out_d; const index_t n = index / (out_d * out_h * out_w); const index_t grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW; // get the corresponding input x, y, z co-ordinates from grid T ix = grid[grid_offset]; T iy = grid[grid_offset + grid_sCoor]; T iz = grid[grid_offset + 2 * grid_sCoor]; ix = ComputePositions(ix, in_w, padding_mode, align_corners); iy = ComputePositions(iy, in_h, padding_mode, align_corners); iz = ComputePositions(iz, in_d, padding_mode, align_corners); // printf("ix: %f, iy: %f, iz: %f \n", ix, iy, iz); if (interpolation_mode == Mode::bilinear) { // get corner pixel values from (x, y, z) // for 4d, we used north-east-south-west // for 5d, we add top-bottom index_t ix_tnw = static_cast<index_t>(std::floor(ix)); index_t iy_tnw = static_cast<index_t>(std::floor(iy)); index_t iz_tnw = static_cast<index_t>(std::floor(iz)); index_t ix_tne = ix_tnw + 1; index_t iy_tne = iy_tnw; index_t iz_tne = iz_tnw; index_t ix_tsw = ix_tnw; index_t iy_tsw = iy_tnw + 1; index_t iz_tsw = iz_tnw; index_t ix_tse = ix_tnw + 1; index_t iy_tse = iy_tnw + 1; index_t iz_tse = iz_tnw; index_t ix_bnw = ix_tnw; index_t iy_bnw = iy_tnw; index_t iz_bnw = iz_tnw + 1; index_t ix_bne = ix_tnw + 1; index_t iy_bne = iy_tnw; index_t iz_bne = iz_tnw + 1; index_t ix_bsw = ix_tnw; index_t iy_bsw = iy_tnw + 1; index_t iz_bsw = iz_tnw + 1; index_t ix_bse = ix_tnw + 1; index_t iy_bse = iy_tnw + 1; index_t iz_bse = iz_tnw + 1; // get surfaces to each neighbor: T tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz); T tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz); T tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz); T tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz); T bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse); T bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw); T bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne); T bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw); auto inp_ptr_NC = input + n * inp_sN; auto out_ptr_NCDHW = output + n * out_sN + d * out_sD + h * out_sH + w * out_sW; for (index_t c = 0; c < out_c; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) { *out_ptr_NCDHW = static_cast<T>(0); if (InBounds3D(iz_tnw, iy_tnw, ix_tnw, in_d, in_h, in_w)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW] * tnw; } if (InBounds3D(iz_tne, iy_tne, ix_tne, in_d, in_h, in_w)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW] * tne; } if (InBounds3D(iz_tsw, iy_tsw, ix_tsw, in_d, in_h, in_w)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW] * tsw; } if (InBounds3D(iz_tse, iy_tse, ix_tse, in_d, in_h, in_w)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW] * tse; } if (InBounds3D(iz_bnw, iy_bnw, ix_bnw, in_d, in_h, in_w)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW] * bnw; } if (InBounds3D(iz_bne, iy_bne, ix_bne, in_d, in_h, in_w)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW] * bne; } if (InBounds3D(iz_bsw, iy_bsw, ix_bsw, in_d, in_h, in_w)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW] * bsw; } if (InBounds3D(iz_bse, iy_bse, ix_bse, in_d, in_h, in_w)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW] * bse; } } } else if (interpolation_mode == Mode::nearest) { index_t ix_nearest = static_cast<index_t>(std::round(ix)); index_t 
iy_nearest = static_cast<index_t>(std::round(iy)); index_t iz_nearest = static_cast<index_t>(std::round(iz)); // assign nearest neighor pixel value to output pixel auto inp_ptr_NC = input + n * inp_sN; auto out_ptr_NCDHW = output + n * out_sN + d * out_sD + h * out_sH + w * out_sW; for (index_t c = 0; c < out_c; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) { if (InBounds3D(iz_nearest, iy_nearest, ix_nearest, in_d, in_h, in_w)) { *out_ptr_NCDHW = inp_ptr_NC[iz_nearest * inp_sD + iy_nearest * inp_sH + ix_nearest * inp_sW]; } else { *out_ptr_NCDHW = static_cast<T>(0); } } } } } std::vector<paddle::Tensor> GridSample3DCUDAForward( const paddle::Tensor& x, const paddle::Tensor& grid, const std::string& mode, const std::string& padding_mode, bool align_corners) { CHECK_INPUT_GPU(x); CHECK_INPUT_GPU(grid); PaddingMode enum_padding_mode; Mode enum_mode; if (padding_mode == "border") { enum_padding_mode = PaddingMode::border; } else if (padding_mode == "reflection") { enum_padding_mode = PaddingMode::reflect; } else { enum_padding_mode = PaddingMode::zeros; } if (mode == "nearest") { enum_mode = Mode::nearest; } else { enum_mode = Mode::bilinear; } const int n = grid.shape()[0]; const int out_d = grid.shape()[1]; const int out_h = grid.shape()[2]; const int out_w = grid.shape()[3]; const int c = x.shape()[1]; const int in_d = x.shape()[2]; const int in_h = x.shape()[3]; const int in_w = x.shape()[4]; auto output = paddle::full({n, c, out_d, out_h, out_w}, 0, paddle::DataType::FLOAT32, paddle::GPUPlace()); const int count = static_cast<int>(n * out_d * out_h * out_w); int max_threads_per_block = 512; int block_num = (count - 1) / max_threads_per_block + 1; // printf("size: %d, %d, %d, %d, %d, %d \n", n, c, out_d, out_h, count, // block_num); GridSample3DCudaKernel<float, int> <<<block_num, max_threads_per_block, 0, x.stream()>>>( count, c, out_d, out_h, out_w, in_d, in_h, in_w, x.data<float>(), grid.data<float>(), output.data<float>(), enum_mode, enum_padding_mode, align_corners); cudaError_t error_check; error_check = cudaGetLastError(); if (error_check != cudaSuccess) { printf("%s\n", cudaGetErrorString(error_check)); } // printf("size: %d, %d, %d, %d, %d, %d \n", n, c, out_d, out_h, count, // block_num); return {output}; } template <typename T> static __forceinline__ __device__ T UnnormalizeWithMask(T coord, int size, bool align_corners, T* grad_in) { if (align_corners) { *grad_in = static_cast<T>(size - 1) / 2; return ((coord + 1.f) / 2) * (size - 1); } else { *grad_in = static_cast<T>(size) / 2; return ((coord + 1.f) * size - 1) / 2; } } template <typename T> static __forceinline__ __device__ T ClipIndexesWithMask(T in, int clip_limit, T* grad_in) { if (in <= static_cast<T>(0)) { *grad_in = static_cast<T>(0); return static_cast<T>(0); } else { T max = static_cast<T>(clip_limit - 1); if (in >= max) { *grad_in = static_cast<T>(0); return max; } else { *grad_in = static_cast<T>(1); return in; } } } template <typename T> static __forceinline__ __device__ T ReflectIndexesWithMask(T in, int twice_low, int twice_high, T* grad_in) { if (twice_low == twice_high) { *grad_in = static_cast<T>(0); return static_cast<T>(0); } int grad_in_mult_; T min = static_cast<T>(twice_low) / 2; T span = static_cast<T>(twice_high - twice_low) / 2; in = in - min; if (in < static_cast<T>(0)) { grad_in_mult_ = -1; in = -in; } else { grad_in_mult_ = 1; } T extra = fmod(in, span); int flips = static_cast<int>(floor(in / span)); if (flips % 2 == 0) { *grad_in = static_cast<T>(grad_in_mult_); return extra + min; } else { 
*grad_in = static_cast<T>(-grad_in_mult_); return span - extra + min; } } template <typename T> static __forceinline__ __device__ T ComputePositionsWithMask(T coord, int size, PaddingMode padding_mode, bool align_corners, T* grad_in) { T grad_clip, grad_refl; coord = UnnormalizeWithMask<T>(coord, size, align_corners, grad_in); if (padding_mode == PaddingMode::border) { coord = ClipIndexesWithMask(coord, size, &grad_clip); *grad_in = (*grad_in) * grad_clip; } else if (padding_mode == PaddingMode::reflect) { if (align_corners) { coord = ReflectIndexesWithMask(coord, 0, 2 * (size - 1), &grad_refl); } else { coord = ReflectIndexesWithMask(coord, -1, 2 * size - 1, &grad_refl); } coord = ClipIndexesWithMask(coord, size, &grad_clip); *grad_in = (*grad_in) * grad_refl * grad_clip; } return coord; } template <typename T> static __forceinline__ __device__ void AtomicAdd3D( T* data, int64_t d, int64_t h, int64_t w, int64_t sD, int64_t sH, int64_t sW, int64_t D, int64_t H, int64_t W, T delta) { if (InBounds3D(d, h, w, D, H, W)) { atomicAdd(data + d * sD + h * sH + w * sW, delta); } } template <typename T, typename index_t> __global__ void GridSample3DCudaBackwardKernel( const index_t nthreads, const T* grad_output, const T* input, const T* grid, index_t out_c, index_t out_d, index_t out_h, index_t out_w, index_t in_d, index_t in_h, index_t in_w, T* grad_input, T* grad_grid, const Mode mode, const PaddingMode padding_mode, bool align_corners) { index_t inp_sW = 1; index_t inp_sH = in_w; index_t inp_sD = in_h * in_w; index_t inp_sC = in_d * inp_sD; index_t inp_sN = out_c * inp_sC; index_t grid_sCoor = 1; index_t grid_sW = 3; index_t grid_sH = out_w * grid_sW; index_t grid_sD = out_h * grid_sH; index_t grid_sN = out_d * grid_sD; index_t gOut_sW = 1; index_t gOut_sH = out_w; index_t gOut_sD = out_h * out_w; index_t gOut_sC = out_d * gOut_sD; index_t gOut_sN = out_c * gOut_sC; CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) { const index_t w = index % out_w; const index_t h = (index / out_w) % out_h; const index_t d = (index / (out_h * out_w)) % out_d; const index_t n = index / (out_d * out_h * out_w); const auto grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW; // get the corresponding input x, y, z co-ordinates from grid T ix = grid[grid_offset]; T iy = grid[grid_offset + grid_sCoor]; T iz = grid[grid_offset + 2 * grid_sCoor]; // multipliers for gradients on ix, iy, and iz T gix_mult, giy_mult, giz_mult; ix = ComputePositionsWithMask(ix, in_w, padding_mode, align_corners, &gix_mult); iy = ComputePositionsWithMask(iy, in_h, padding_mode, align_corners, &giy_mult); iz = ComputePositionsWithMask(iz, in_d, padding_mode, align_corners, &giz_mult); if (mode == Mode::bilinear) { // get corner pixel values from (x, y, z) // for 4d, we used north-east-south-west // for 5d, we add top-bottom index_t ix_tnw = static_cast<index_t>(std::floor(ix)); index_t iy_tnw = static_cast<index_t>(std::floor(iy)); index_t iz_tnw = static_cast<index_t>(std::floor(iz)); index_t ix_tne = ix_tnw + 1; index_t iy_tne = iy_tnw; index_t iz_tne = iz_tnw; index_t ix_tsw = ix_tnw; index_t iy_tsw = iy_tnw + 1; index_t iz_tsw = iz_tnw; index_t ix_tse = ix_tnw + 1; index_t iy_tse = iy_tnw + 1; index_t iz_tse = iz_tnw; index_t ix_bnw = ix_tnw; index_t iy_bnw = iy_tnw; index_t iz_bnw = iz_tnw + 1; index_t ix_bne = ix_tnw + 1; index_t iy_bne = iy_tnw; index_t iz_bne = iz_tnw + 1; index_t ix_bsw = ix_tnw; index_t iy_bsw = iy_tnw + 1; index_t iz_bsw = iz_tnw + 1; index_t ix_bse = ix_tnw + 1; index_t iy_bse = iy_tnw + 1; index_t 
iz_bse = iz_tnw + 1; // get surfaces to each neighbor: T tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz); T tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz); T tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz); T tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz); T bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse); T bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw); T bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne); T bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw); T gix = static_cast<T>(0), giy = static_cast<T>(0), giz = static_cast<T>(0); index_t gOut_offset = n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW; index_t inp_offset_NC = n * inp_sN; T* gInp_ptr_NC = grad_input + n * inp_sN; for (index_t c = 0; c < out_c; ++c, gOut_offset += gOut_sC, gInp_ptr_NC += inp_sC, inp_offset_NC += inp_sC) { T gOut = grad_output[gOut_offset]; AtomicAdd3D(gInp_ptr_NC, iz_tnw, iy_tnw, ix_tnw, inp_sD, inp_sH, inp_sW, in_d, in_h, in_w, tnw * gOut); AtomicAdd3D(gInp_ptr_NC, iz_tne, iy_tne, ix_tne, inp_sD, inp_sH, inp_sW, in_d, in_h, in_w, tne * gOut); AtomicAdd3D(gInp_ptr_NC, iz_tsw, iy_tsw, ix_tsw, inp_sD, inp_sH, inp_sW, in_d, in_h, in_w, tsw * gOut); AtomicAdd3D(gInp_ptr_NC, iz_tse, iy_tse, ix_tse, inp_sD, inp_sH, inp_sW, in_d, in_h, in_w, tse * gOut); AtomicAdd3D(gInp_ptr_NC, iz_bnw, iy_bnw, ix_bnw, inp_sD, inp_sH, inp_sW, in_d, in_h, in_w, bnw * gOut); AtomicAdd3D(gInp_ptr_NC, iz_bne, iy_bne, ix_bne, inp_sD, inp_sH, inp_sW, in_d, in_h, in_w, bne * gOut); AtomicAdd3D(gInp_ptr_NC, iz_bsw, iy_bsw, ix_bsw, inp_sD, inp_sH, inp_sW, in_d, in_h, in_w, bsw * gOut); AtomicAdd3D(gInp_ptr_NC, iz_bse, iy_bse, ix_bse, inp_sD, inp_sH, inp_sW, in_d, in_h, in_w, bse * gOut); // calculate grad_grid if (InBounds3D(iz_tnw, iy_tnw, ix_tnw, in_d, in_h, in_w)) { T tnw_val = input[inp_offset_NC + iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW]; gix -= tnw_val * (iy_bse - iy) * (iz_bse - iz) * gOut; giy -= tnw_val * (ix_bse - ix) * (iz_bse - iz) * gOut; giz -= tnw_val * (ix_bse - ix) * (iy_bse - iy) * gOut; } if (InBounds3D(iz_tne, iy_tne, ix_tne, in_d, in_h, in_w)) { T tne_val = input[inp_offset_NC + iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW]; gix += tne_val * (iy_bsw - iy) * (iz_bsw - iz) * gOut; giy -= tne_val * (ix - ix_bsw) * (iz_bsw - iz) * gOut; giz -= tne_val * (ix - ix_bsw) * (iy_bsw - iy) * gOut; } if (InBounds3D(iz_tsw, iy_tsw, ix_tsw, in_d, in_h, in_w)) { T tsw_val = input[inp_offset_NC + iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW]; gix -= tsw_val * (iy - iy_bne) * (iz_bne - iz) * gOut; giy += tsw_val * (ix_bne - ix) * (iz_bne - iz) * gOut; giz -= tsw_val * (ix_bne - ix) * (iy - iy_bne) * gOut; } if (InBounds3D(iz_tse, iy_tse, ix_tse, in_d, in_h, in_w)) { T tse_val = input[inp_offset_NC + iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW]; gix += tse_val * (iy - iy_bnw) * (iz_bnw - iz) * gOut; giy += tse_val * (ix - ix_bnw) * (iz_bnw - iz) * gOut; giz -= tse_val * (ix - ix_bnw) * (iy - iy_bnw) * gOut; } if (InBounds3D(iz_bnw, iy_bnw, ix_bnw, in_d, in_h, in_w)) { T bnw_val = input[inp_offset_NC + iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW]; gix -= bnw_val * (iy_tse - iy) * (iz - iz_tse) * gOut; giy -= bnw_val * (ix_tse - ix) * (iz - iz_tse) * gOut; giz += bnw_val * (ix_tse - ix) * (iy_tse - iy) * gOut; } if (InBounds3D(iz_bne, iy_bne, ix_bne, in_d, in_h, in_w)) { T bne_val = input[inp_offset_NC + iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW]; gix += bne_val * (iy_tsw - iy) * (iz - iz_tsw) * gOut; giy -= bne_val * (ix - ix_tsw) * (iz - iz_tsw) * 
gOut; giz += bne_val * (ix - ix_tsw) * (iy_tsw - iy) * gOut; } if (InBounds3D(iz_bsw, iy_bsw, ix_bsw, in_d, in_h, in_w)) { T bsw_val = input[inp_offset_NC + iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW]; gix -= bsw_val * (iy - iy_tne) * (iz - iz_tne) * gOut; giy += bsw_val * (ix_tne - ix) * (iz - iz_tne) * gOut; giz += bsw_val * (ix_tne - ix) * (iy - iy_tne) * gOut; } if (InBounds3D(iz_bse, iy_bse, ix_bse, in_d, in_h, in_w)) { T bse_val = input[inp_offset_NC + iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW]; gix += bse_val * (iy - iy_tnw) * (iz - iz_tnw) * gOut; giy += bse_val * (ix - ix_tnw) * (iz - iz_tnw) * gOut; giz += bse_val * (ix - ix_tnw) * (iy - iy_tnw) * gOut; } } if (grad_grid != nullptr) { T* gGrid_ptr_NDHW = grad_grid + index * grid_sW; gGrid_ptr_NDHW[0] = gix_mult * gix; gGrid_ptr_NDHW[1] = giy_mult * giy; gGrid_ptr_NDHW[2] = giz_mult * giz; } } else if (mode == Mode::nearest) { auto ix_nearest = static_cast<index_t>(std::round(ix)); auto iy_nearest = static_cast<index_t>(std::round(iy)); auto iz_nearest = static_cast<index_t>(std::round(iz)); // assign nearest neighor pixel value to output pixel index_t gOut_offset = n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW; T* gInp_ptr_NC = grad_input + n * inp_sN; for (index_t c = 0; c < out_c; ++c, gOut_offset += gOut_sC, gInp_ptr_NC += inp_sC) { AtomicAdd3D(gInp_ptr_NC, iz_nearest, iy_nearest, ix_nearest, inp_sD, inp_sH, inp_sW, in_d, in_h, in_w, grad_output[gOut_offset]); } if (grad_grid != nullptr) { T* gGrid_ptr_NDHW = grad_grid + index * grid_sW; gGrid_ptr_NDHW[0] = static_cast<T>(0); gGrid_ptr_NDHW[1] = static_cast<T>(0); gGrid_ptr_NDHW[2] = static_cast<T>(0); } } } } std::vector<paddle::Tensor> GridSample3DCUDABackward( const paddle::Tensor& x, const paddle::Tensor& grid, const paddle::Tensor& grad_out, const std::string& mode, const std::string& padding_mode, bool align_corners) { PaddingMode enum_padding_mode; Mode enum_mode; if (padding_mode == "border") { enum_padding_mode = PaddingMode::border; } else if (padding_mode == "reflection") { enum_padding_mode = PaddingMode::reflect; } else { enum_padding_mode = PaddingMode::zeros; } if (mode == "nearest") { enum_mode = Mode::nearest; } else { enum_mode = Mode::bilinear; } const int out_d = grid.shape()[1]; const int out_h = grid.shape()[2]; const int out_w = grid.shape()[3]; const int n = x.shape()[0]; const int c = x.shape()[1]; const int in_d = x.shape()[2]; const int in_h = x.shape()[3]; const int in_w = x.shape()[4]; auto grid_grad_output = paddle::empty({n, out_d, out_h, out_w, 3}, paddle::DataType::FLOAT32, paddle::GPUPlace()); auto x_grad_output = paddle::full({n, c, in_d, in_h, in_w}, 0, paddle::DataType::FLOAT32, paddle::GPUPlace()); const int count = static_cast<int>(n * out_d * out_h * out_w); int max_threads_per_block = 512; int block_num = (count - 1) / max_threads_per_block + 1; GridSample3DCudaBackwardKernel<float, int> <<<block_num, max_threads_per_block, 0, x.stream()>>>( count, grad_out.data<float>(), x.data<float>(), grid.data<float>(), c, out_d, out_h, out_w, in_d, in_h, in_w, x_grad_output.data<float>(), grid_grad_output.data<float>(), enum_mode, enum_padding_mode, align_corners); return {x_grad_output}; }
0
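The forward and backward kernels above share one convention: grid coordinates in [-1, 1] are mapped to input voxel indices by Unnormalize, with align_corners deciding whether the extremes land on voxel centers or voxel edges. A standalone sketch of just that mapping, reproducing the arithmetic from the kernels:

#include <cstdio>

float unnormalize(float coord, int size, bool align_corners) {
  if (align_corners) {
    return ((coord + 1.f) / 2) * (size - 1);  // -1 -> 0, +1 -> size-1 (centers)
  }
  return ((coord + 1.f) * size - 1) / 2;      // -1 -> -0.5, +1 -> size-0.5 (edges)
}

int main() {
  // With a 5-voxel axis the two conventions place the grid corners differently.
  std::printf("%.2f %.2f\n", unnormalize(-1.f, 5, true),
              unnormalize(1.f, 5, true));   // 0.00 4.00
  std::printf("%.2f %.2f\n", unnormalize(-1.f, 5, false),
              unnormalize(1.f, 5, false));  // -0.50 4.50
  return 0;
}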
apollo_public_repos/apollo-model-centerpoint/deploy/caddn/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/caddn/cpp/custom_ops/iou3d_nms_api.cpp
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <vector>

#include "iou3d_cpu.h"
#include "iou3d_nms.h"
#include "paddle/include/experimental/ext_all.h"

std::vector<std::vector<int64_t>> NMSInferShape(
    std::vector<int64_t> boxes_shape) {
  int64_t keep_num = 1;
  return {{boxes_shape[0]}, {keep_num}};
}

std::vector<paddle::DataType> NMSInferDtype(paddle::DataType boxes_dtype) {
  return {paddle::DataType::INT64, paddle::DataType::INT64};
}

// PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
//   m.def("boxes_overlap_bev_gpu", &boxes_overlap_bev_gpu, "oriented boxes overlap");
//   m.def("boxes_iou_bev_gpu", &boxes_iou_bev_gpu, "oriented boxes iou");
//   m.def("nms_gpu", &nms_gpu, "oriented nms gpu");
//   m.def("nms_normal_gpu", &nms_normal_gpu, "nms gpu");
//   m.def("boxes_iou_bev_cpu", &boxes_iou_bev_cpu, "oriented boxes iou");
// }

PD_BUILD_OP(nms_gpu)
    .Inputs({"boxes"})
    .Outputs({"keep", "num_to_keep"})
    .Attrs({"nms_overlap_thresh: float"})
    .SetKernelFn(PD_KERNEL(nms_gpu))
    .SetInferDtypeFn(PD_INFER_DTYPE(NMSInferDtype))
    .SetInferShapeFn(PD_INFER_SHAPE(NMSInferShape));
0
apollo_public_repos/apollo-model-centerpoint/deploy/caddn/cpp/cmake
apollo_public_repos/apollo-model-centerpoint/deploy/caddn/cpp/cmake/external/boost.cmake
include(ExternalProject)

set(BOOST_PROJECT "extern_boost")
# To release PaddlePaddle as a pip package, we have to follow the
# manylinux1 standard, which features as old Linux kernels and
# compilers as possible and recommends CentOS 5. Indeed, the earliest
# CentOS version that works with NVIDIA CUDA is CentOS 6. And a new
# version of boost, say, 1.66.0, doesn't build on CentOS 6. We
# checked that the devtools package of CentOS 6 installs boost 1.41.0.
# So we use 1.41.0 here.
set(BOOST_VER "1.41.0")
set(BOOST_TAR "boost_1_41_0" CACHE STRING "" FORCE)
set(BOOST_URL "http://paddlepaddledeps.bj.bcebos.com/${BOOST_TAR}.tar.gz" CACHE STRING "" FORCE)

MESSAGE(STATUS "BOOST_TAR: ${BOOST_TAR}, BOOST_URL: ${BOOST_URL}")

set(BOOST_SOURCES_DIR ${THIRD_PARTY_PATH}/boost)
set(BOOST_DOWNLOAD_DIR "${BOOST_SOURCES_DIR}/src/${BOOST_PROJECT}")

set(BOOST_INCLUDE_DIR "${BOOST_DOWNLOAD_DIR}" CACHE PATH "boost include directory." FORCE)
set_directory_properties(PROPERTIES CLEAN_NO_CUSTOM 1)
include_directories(${BOOST_INCLUDE_DIR})

ExternalProject_Add(
    ${BOOST_PROJECT}
    ${EXTERNAL_PROJECT_LOG_ARGS}
    DOWNLOAD_DIR         ${BOOST_DOWNLOAD_DIR}
    URL                  ${BOOST_URL}
    DOWNLOAD_NO_PROGRESS 1
    PREFIX               ${BOOST_SOURCES_DIR}
    CONFIGURE_COMMAND    ""
    BUILD_COMMAND        ""
    INSTALL_COMMAND      ""
    UPDATE_COMMAND       ""
)

if (${CMAKE_VERSION} VERSION_LESS "3.3.0" OR NOT WIN32)
    set(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/boost_dummy.c)
    file(WRITE ${dummyfile} "const char *dummy = \"${dummyfile}\";")
    add_library(boost STATIC ${dummyfile})
else()
    add_library(boost INTERFACE)
endif()

add_dependencies(boost ${BOOST_PROJECT})
set(Boost_INCLUDE_DIR ${BOOST_INCLUDE_DIR})
0
apollo_public_repos/apollo-model-centerpoint/deploy/squeezesegv3
apollo_public_repos/apollo-model-centerpoint/deploy/squeezesegv3/python/infer.py
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import cv2 import numpy as np import paddle from paddle.inference import Config, create_predictor from paddle3d import transforms as T from paddle3d.sample import Sample from paddle3d.transforms.normalize import NormalizeRangeImage from paddle3d.transforms.reader import LoadSemanticKITTIRange def parse_args(): parser = argparse.ArgumentParser() parser.add_argument( "--model_file", type=str, help="Model filename, Specify this when your model is a combined model.", required=True) parser.add_argument( "--params_file", type=str, help= "Parameter filename, Specify this when your model is a combined model.", required=True) parser.add_argument( '--lidar_file', type=str, help='The lidar path.', required=True) parser.add_argument( '--img_mean', type=str, help='The mean value of range-view image.', required=True) parser.add_argument( '--img_std', type=str, help='The variance value of range-view image.', required=True) parser.add_argument("--gpu_id", type=int, default=0, help="GPU card id.") parser.add_argument( "--use_trt", type=int, default=0, help="Whether to use tensorrt to accelerate when using gpu.") parser.add_argument( "--trt_precision", type=int, default=0, help="Precision type of tensorrt, 0: kFloat32, 1: kHalf.") parser.add_argument( "--trt_use_static", type=int, default=0, help="Whether to load the tensorrt graph optimization from a disk path." 
) parser.add_argument( "--trt_static_dir", type=str, help="Path of a tensorrt graph optimization directory.") return parser.parse_args() def preprocess(file_path, img_mean, img_std): if isinstance(img_mean, str): img_mean = eval(img_mean) if isinstance(img_std, str): img_std = eval(img_std) sample = Sample(path=file_path, modality="lidar") transforms = T.Compose([ LoadSemanticKITTIRange(project_label=False), NormalizeRangeImage(mean=img_mean, std=img_std) ]) sample = transforms(sample) if "proj_mask" in sample.meta: sample.data *= sample.meta.pop("proj_mask") return np.expand_dims(sample.data, 0), sample.meta.proj_x, sample.meta.proj_y def init_predictor(model_file, params_file, gpu_id=0, use_trt=False, trt_precision=0, trt_use_static=False, trt_static_dir=None): config = Config(model_file, params_file) config.enable_memory_optim() config.enable_use_gpu(1000, gpu_id) if use_trt: precision_mode = paddle.inference.PrecisionType.Float32 if trt_precision == 1: precision_mode = paddle.inference.PrecisionType.Half config.enable_tensorrt_engine( workspace_size=1 << 20, max_batch_size=1, min_subgraph_size=3, precision_mode=precision_mode, use_static=trt_use_static, use_calib_mode=False) if trt_use_static: config.set_optim_cache_dir(trt_static_dir) predictor = create_predictor(config) return predictor def run(predictor, points): # copy img data to input tensor input_names = predictor.get_input_names() input_tensor = predictor.get_input_handle(input_names[0]) input_tensor.reshape(points.shape) input_tensor.copy_from_cpu(points.copy()) # do the inference predictor.run() results = [] # get out data from output tensor output_names = predictor.get_output_names() output_tensor = predictor.get_output_handle(output_names[0]) pred_label = output_tensor.copy_to_cpu() return pred_label[0] def postprocess(pred_img_label, proj_x, proj_y): return pred_img_label[proj_y, proj_x] def main(args): predictor = init_predictor(args.model_file, args.params_file, args.gpu_id, args.use_trt, args.trt_precision, args.trt_use_static, args.trt_static_dir) range_img, proj_x, proj_y = preprocess(args.lidar_file, args.img_mean, args.img_std) pred_img_label = run(predictor, range_img) pred_point_label = postprocess(pred_img_label, proj_x, proj_y) return pred_point_label if __name__ == '__main__': args = parse_args() main(args)
0
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/python/infer.py
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import cv2 import numpy as np import paddle from paddle.inference import Config, create_predictor from paddle3d.ops.voxelize import hard_voxelize from paddle3d.ops.pointnet2_ops import ball_query, grouping_operation, farthest_point_sample from paddle3d.ops.iou3d_nms_cuda import nms_gpu def parse_args(): parser = argparse.ArgumentParser() parser.add_argument( "--model_file", type=str, help="Model filename, Specify this when your model is a combined model.", required=True) parser.add_argument( "--params_file", type=str, help= "Parameter filename, Specify this when your model is a combined model.", required=True) parser.add_argument( '--lidar_file', type=str, help='The lidar path.', required=True) parser.add_argument( "--num_point_dim", type=int, default=4, help="Dimension of a point in the lidar file.") parser.add_argument( "--point_cloud_range", dest='point_cloud_range', nargs='+', help="Range of point cloud for voxelize operation.", type=float, default=None) parser.add_argument("--gpu_id", type=int, default=0, help="GPU card id.") parser.add_argument( "--use_trt", type=int, default=0, help="Whether to use tensorrt to accelerate when using gpu.") parser.add_argument( "--trt_precision", type=int, default=0, help="Precision type of tensorrt, 0: kFloat32, 1: kHalf.") parser.add_argument( "--trt_use_static", type=int, default=0, help="Whether to load the tensorrt graph optimization from a disk path." 
) parser.add_argument( "--trt_static_dir", type=str, help="Path of a tensorrt graph optimization directory.") parser.add_argument( "--collect_shape_info", type=int, default=0, help="Whether to collect dynamic shape before using tensorrt.") parser.add_argument( "--dynamic_shape_file", type=str, default="", help="Path of a dynamic shape file for tensorrt.") return parser.parse_args() def read_point(file_path, num_point_dim): points = np.fromfile(file_path, np.float32).reshape(-1, num_point_dim) points = points[:, :4] return points def filter_points_outside_range(points, point_cloud_range): limit_range = np.asarray(point_cloud_range, dtype=np.float32) mask = (points[:, 0] >= limit_range[0]) & (points[:, 0] <= limit_range[3]) \ & (points[:, 1] >= limit_range[1]) & (points[:, 1] <= limit_range[4]) points = points[mask] return points def preprocess(file_path, num_point_dim, point_cloud_range): points = read_point(file_path, num_point_dim) points = filter_points_outside_range(points, point_cloud_range) return points def init_predictor(model_file, params_file, gpu_id=0, use_trt=False, trt_precision=0, trt_use_static=False, trt_static_dir=None, collect_shape_info=False, dynamic_shape_file=None): config = Config(model_file, params_file) config.enable_memory_optim() config.enable_use_gpu(1000, gpu_id) if use_trt: precision_mode = paddle.inference.PrecisionType.Float32 if trt_precision == 1: precision_mode = paddle.inference.PrecisionType.Half config.enable_tensorrt_engine( workspace_size=1 << 30, max_batch_size=1, min_subgraph_size=20, precision_mode=precision_mode, use_static=trt_use_static, use_calib_mode=False) if collect_shape_info: config.collect_shape_range_info(dynamic_shape_file) else: config.enable_tuned_tensorrt_dynamic_shape(dynamic_shape_file, True) if trt_use_static: config.set_optim_cache_dir(trt_static_dir) predictor = create_predictor(config) return predictor def parse_result(box3d_lidar, label_preds, scores): num_bbox3d, bbox3d_dims = box3d_lidar.shape for box_idx in range(num_bbox3d): # filter fake results: score = -1 if scores[box_idx] < 0: continue print( "Score: {} Label: {} Box(x_c, y_c, z_c, w, l, h, -rot): {} {} {} {} {} {} {}" .format(scores[box_idx], label_preds[box_idx], box3d_lidar[box_idx, 0], box3d_lidar[box_idx, 1], box3d_lidar[box_idx, 2], box3d_lidar[box_idx, 3], box3d_lidar[box_idx, 4], box3d_lidar[box_idx, 5], box3d_lidar[box_idx, 6])) def run(predictor, points): # copy img data to input tensor input_names = predictor.get_input_names() for i, name in enumerate(input_names): if name == "data": input_tensor = predictor.get_input_handle(name) input_tensor.reshape(points.shape) input_tensor.copy_from_cpu(points.copy()) # do the inference predictor.run() results = [] # get out data from output tensor output_names = predictor.get_output_names() for i, name in enumerate(output_names): output_tensor = predictor.get_output_handle(name) if i == 0: box3d_lidar = output_tensor.copy_to_cpu() elif i == 1: scores = output_tensor.copy_to_cpu() elif i == 2: label_preds = output_tensor.copy_to_cpu() return box3d_lidar, label_preds, scores def main(args): predictor = init_predictor(args.model_file, args.params_file, args.gpu_id, args.use_trt, args.trt_precision, args.trt_use_static, args.trt_static_dir, args.collect_shape_info, args.dynamic_shape_file) points = preprocess(args.lidar_file, args.num_point_dim, args.point_cloud_range) box3d_lidar, label_preds, scores = run(predictor, points) parse_result(box3d_lidar, label_preds, scores) if __name__ == '__main__': args = parse_args() 
main(args)
0
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/main.cc
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <gflags/gflags.h> #include <glog/logging.h> #include <chrono> #include <cmath> #include <fstream> #include <iostream> #include <numeric> #include <string> #include "paddle/include/paddle_inference_api.h" using paddle_infer::Config; using paddle_infer::CreatePredictor; using paddle_infer::Predictor; DEFINE_string(model_file, "", "Path of a inference model"); DEFINE_string(params_file, "", "Path of a inference params"); DEFINE_string(lidar_file, "", "Path of a lidar file to be predicted"); DEFINE_int32(num_point_dim, 4, "Dimension of a point in the lidar file"); DEFINE_string(point_cloud_range, "", "Range of point cloud for voxelize operation"); DEFINE_int32(gpu_id, 0, "GPU card id"); DEFINE_int32(use_trt, 0, "Whether to use tensorrt to accelerate when using gpu"); DEFINE_int32(trt_precision, 0, "Precision type of tensorrt, 0: kFloat32, 1: kHalf"); DEFINE_int32( trt_use_static, 0, "Whether to load the tensorrt graph optimization from a disk path"); DEFINE_string(trt_static_dir, "", "Path of a tensorrt graph optimization directory"); DEFINE_int32(collect_shape_info, 0, "Whether to collect dynamic shape before using tensorrt"); DEFINE_string(dynamic_shape_file, "", "Path of a dynamic shape file for tensorrt"); void parse_string_to_vector(const std::string &str, std::vector<float> *vec) { std::stringstream ss(str); float number; while (ss >> number) { vec->push_back(number); } } bool read_point(const std::string &file_path, const int num_point_dim, void **buffer, int *num_points) { std::ifstream file_in(file_path, std::ios::in | std::ios::binary); if (num_point_dim < 4) { LOG(ERROR) << "Point dimension must not be less than 4, but recieved " << "num_point_dim is " << num_point_dim << ".\n"; } if (!file_in) { LOG(ERROR) << "Failed to read file: " << file_path << "\n"; return false; } std::streampos file_size; file_in.seekg(0, std::ios::end); file_size = file_in.tellg(); file_in.seekg(0, std::ios::beg); *buffer = malloc(file_size); if (*buffer == nullptr) { LOG(ERROR) << "Failed to malloc memory of size: " << file_size << "\n"; return false; } file_in.read(reinterpret_cast<char *>(*buffer), file_size); file_in.close(); if (file_size / sizeof(float) % num_point_dim != 0) { LOG(ERROR) << "Loaded file size (" << file_size << ") is not evenly divisible by num_point_dim (" << num_point_dim << ")\n"; return false; } *num_points = file_size / sizeof(float) / num_point_dim; return true; } void mask_points_outside_range(const float *points, const int num_points, const std::vector<float> &point_cloud_range, const int num_point_dim, std::vector<float> *selected_points) { for (int i = 0; i < num_points; i += num_point_dim) { float pt_x = points[i]; float pt_y = points[i + 1]; // in [-x, x] and [-y, y] range if ((pt_x >= point_cloud_range[0]) && (pt_x <= point_cloud_range[3]) && (pt_y >= point_cloud_range[1]) && (pt_y <= point_cloud_range[4])) { for (int d = 0; d < num_point_dim; ++d) { 
selected_points->push_back(points[i + d]); } } } } bool preprocess(const std::string &file_path, const int num_point_dim, const std::vector<float> &point_cloud_range, std::vector<int> *points_shape, std::vector<float> *points_data) { void *buffer = nullptr; int num_points = 0; if (!read_point(file_path, num_point_dim, &buffer, &num_points)) { return false; } float *points = static_cast<float *>(buffer); std::vector<float> masked_points; mask_points_outside_range(points, num_points, point_cloud_range, num_point_dim, &masked_points); points_data->assign(masked_points.begin(), masked_points.end()); points_shape->push_back(masked_points.size() / num_point_dim); points_shape->push_back(num_point_dim); free(points); return true; } std::shared_ptr<paddle_infer::Predictor> create_predictor( const std::string &model_path, const std::string &params_path, const int gpu_id, const int use_trt, const int trt_precision, const int trt_use_static, const std::string trt_static_dir, const int collect_shape_info, const std::string dynamic_shape_file) { paddle::AnalysisConfig config; config.EnableUseGpu(1000, gpu_id); config.SetModel(model_path, params_path); if (use_trt) { paddle::AnalysisConfig::Precision precision; if (trt_precision == 0) { precision = paddle_infer::PrecisionType::kFloat32; } else if (trt_precision == 1) { precision = paddle_infer::PrecisionType::kHalf; } else { LOG(ERROR) << "Tensorrt type can only support 0 or 1, but recieved is" << trt_precision << "\n"; return nullptr; } config.EnableTensorRtEngine(1 << 30, 1, 20, precision, trt_use_static, false); if (dynamic_shape_file == "") { LOG(ERROR) << "dynamic_shape_file should be set, but recieved is " << dynamic_shape_file << "\n"; return nullptr; } if (collect_shape_info) { config.CollectShapeRangeInfo(dynamic_shape_file); } else { config.EnableTunedTensorRtDynamicShape(dynamic_shape_file, true); } if (trt_use_static) { if (trt_static_dir == "") { LOG(ERROR) << "trt_static_dir should be set, but recieved is " << trt_static_dir << "\n"; return nullptr; } config.SetOptimCacheDir(trt_static_dir); } } config.SwitchIrOptim(true); return paddle_infer::CreatePredictor(config); } void run(Predictor *predictor, const std::vector<int> &points_shape, const std::vector<float> &points_data, std::vector<float> *box3d_lidar, std::vector<int64_t> *label_preds, std::vector<float> *scores) { auto input_names = predictor->GetInputNames(); for (const auto &tensor_name : input_names) { auto in_tensor = predictor->GetInputHandle(tensor_name); if (tensor_name == "data") { in_tensor->Reshape(points_shape); in_tensor->CopyFromCpu(points_data.data()); } } CHECK(predictor->Run()); auto output_names = predictor->GetOutputNames(); for (size_t i = 0; i != output_names.size(); i++) { auto output = predictor->GetOutputHandle(output_names[i]); std::vector<int> output_shape = output->shape(); int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1, std::multiplies<int>()); if (i == 0) { box3d_lidar->resize(out_num); output->CopyToCpu(box3d_lidar->data()); } else if (i == 1) { scores->resize(out_num); output->CopyToCpu(scores->data()); } else if (i == 2) { label_preds->resize(out_num); output->CopyToCpu(label_preds->data()); } } } bool parse_result(const std::vector<float> &box3d_lidar, const std::vector<int64_t> &label_preds, const std::vector<float> &scores) { int num_bbox3d = scores.size(); for (size_t box_idx = 0; box_idx != num_bbox3d; ++box_idx) { // filter fake results: score = -1 if (scores[box_idx] < 0) { continue; } LOG(INFO) << "Score: " << 
scores[box_idx] << " Label: " << label_preds[box_idx] << " "; LOG(INFO) << "Box (x_c, y_c, z_c, w, l, h, -rot): " << box3d_lidar[box_idx * 7 + 0] << " " << box3d_lidar[box_idx * 7 + 1] << " " << box3d_lidar[box_idx * 7 + 2] << " " << box3d_lidar[box_idx * 7 + 3] << " " << box3d_lidar[box_idx * 7 + 4] << " " << box3d_lidar[box_idx * 7 + 5] << " " << box3d_lidar[box_idx * 7 + 6] << "\n"; } return true; } int main(int argc, char *argv[]) { google::ParseCommandLineFlags(&argc, &argv, true); if (FLAGS_model_file == "" || FLAGS_params_file == "" || FLAGS_lidar_file == "" || FLAGS_point_cloud_range == "") { LOG(INFO) << "Missing required parameter" << "\n"; LOG(INFO) << "Usage: " << std::string(argv[0]) << " --model_file ${MODEL_FILE} " << "--params_file ${PARAMS_FILE} " << "--lidar_file ${LIDAR_FILE}" << "--point_cloud_range ${POINT_CLOUD_RANGE} " << "\n"; return -1; } auto predictor = create_predictor( FLAGS_model_file, FLAGS_params_file, FLAGS_gpu_id, FLAGS_use_trt, FLAGS_trt_precision, FLAGS_trt_use_static, FLAGS_trt_static_dir, FLAGS_collect_shape_info, FLAGS_dynamic_shape_file); if (predictor == nullptr) { return 0; } std::vector<float> point_cloud_range; parse_string_to_vector(FLAGS_point_cloud_range, &point_cloud_range); std::vector<int> points_shape; std::vector<float> points_data; if (!preprocess(FLAGS_lidar_file, FLAGS_num_point_dim, point_cloud_range, &points_shape, &points_data)) { LOG(ERROR) << "Failed to preprocess!\n"; return 0; } std::vector<float> box3d_lidar; std::vector<int64_t> label_preds; std::vector<float> scores; run(predictor.get(), points_shape, points_data, &box3d_lidar, &label_preds, &scores); parse_result(box3d_lidar, label_preds, scores); return 0; }
0
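One detail worth flagging in mask_points_outside_range above: read_point returns the number of points, while the filter loop advances a float offset by num_point_dim but bounds it by num_points, so only num_points / num_point_dim points are ever inspected. A standalone sketch of the same x/y range test with the loop bounded by the total float count (names are illustrative, not a drop-in patch):

#include <vector>

// points: num_points * num_point_dim floats; range: [x0, y0, z0, x1, y1, z1].
void filter_points_in_range(const float *points, int num_points,
                            const std::vector<float> &range,
                            int num_point_dim,
                            std::vector<float> *selected) {
  // Bound by the total float count so every point is visited exactly once.
  for (int i = 0; i < num_points * num_point_dim; i += num_point_dim) {
    const float x = points[i];
    const float y = points[i + 1];
    if (x >= range[0] && x <= range[3] && y >= range[1] && y <= range[4]) {
      for (int d = 0; d < num_point_dim; ++d) {
        selected->push_back(points[i + d]);
      }
    }
  }
}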
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/CMakeLists.txt
cmake_minimum_required(VERSION 3.0) project(cpp_inference_demo CXX C) option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." ON) option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." ON) option(USE_TENSORRT "Compile demo with TensorRT." ON) option(CUSTOM_OPERATOR_FILES "List of file names for custom operators" "") execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpfullversion -dumpversion OUTPUT_VARIABLE GCC_VERSION) string(REGEX MATCHALL "[0-9]+" GCC_VERSION_COMPONENTS ${GCC_VERSION}) list(GET GCC_VERSION_COMPONENTS 0 GCC_MAJOR) list(GET GCC_VERSION_COMPONENTS 1 GCC_MINOR) set(GCC_VERSION "${GCC_MAJOR}.${GCC_MINOR}") if (GCC_VERSION LESS "8.0") set(CMAKE_CXX_FLAGS "-Wl,--no-as-needed") endif() set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake") include(external/boost) if(WITH_GPU) find_package(CUDA REQUIRED) add_definitions("-DPADDLE_WITH_CUDA") endif() if(NOT WITH_STATIC_LIB) add_definitions("-DPADDLE_WITH_SHARED_LIB") else() # PD_INFER_DECL is mainly used to set the dllimport/dllexport attribute in dynamic library mode. # Set it to empty in static library mode to avoid compilation issues. add_definitions("/DPD_INFER_DECL=") endif() macro(safe_set_static_flag) foreach(flag_var CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO) if(${flag_var} MATCHES "/MD") string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}") endif(${flag_var} MATCHES "/MD") endforeach(flag_var) endmacro() if(NOT DEFINED PADDLE_LIB) message(FATAL_ERROR "please set PADDLE_LIB with -DPADDLE_LIB=/path/paddle/lib") endif() if(NOT DEFINED DEMO_NAME) message(FATAL_ERROR "please set DEMO_NAME with -DDEMO_NAME=demo_name") endif() include_directories("${PADDLE_LIB}/") set(PADDLE_LIB_THIRD_PARTY_PATH "${PADDLE_LIB}/third_party/install/") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}protobuf/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}glog/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}gflags/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}xxhash/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}onnxruntime/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}paddle2onnx/include") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}protobuf/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}glog/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}gflags/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}xxhash/lib") link_directories("${PADDLE_LIB}/paddle/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}onnxruntime/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}paddle2onnx/lib") if (WIN32) add_definitions("/DGOOGLE_GLOG_DLL_DECL=") option(MSVC_STATIC_CRT "use static C Runtime library by default" ON) if (MSVC_STATIC_CRT) if (WITH_MKL) set(FLAG_OPENMP "/openmp") endif() set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /bigobj /MTd ${FLAG_OPENMP}") set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /bigobj /MT ${FLAG_OPENMP}") set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /bigobj /MTd ${FLAG_OPENMP}") set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /bigobj /MT ${FLAG_OPENMP}") safe_set_static_flag() if (WITH_STATIC_LIB) add_definitions(-DSTATIC_LIB) endif() endif() else() if(WITH_MKL) set(FLAG_OPENMP "-fopenmp") endif() set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14 ${FLAG_OPENMP}") endif() if(WITH_GPU) if(NOT WIN32) set(CUDA_LIB "/usr/local/cuda/lib64/" CACHE STRING "CUDA Library") else() if(CUDA_LIB 
STREQUAL "") set(CUDA_LIB "C:\\Program\ Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v8.0\\lib\\x64") endif() endif(NOT WIN32) endif() if (USE_TENSORRT AND WITH_GPU) set(TENSORRT_ROOT "" CACHE STRING "The root directory of TensorRT library") if("${TENSORRT_ROOT}" STREQUAL "") message(FATAL_ERROR "The TENSORRT_ROOT is empty, you must assign it a value with CMake command. Such as: -DTENSORRT_ROOT=TENSORRT_ROOT_PATH ") endif() set(TENSORRT_INCLUDE_DIR ${TENSORRT_ROOT}/include) set(TENSORRT_LIB_DIR ${TENSORRT_ROOT}/lib) endif() if (NOT WIN32) if (USE_TENSORRT AND WITH_GPU) include_directories("${TENSORRT_INCLUDE_DIR}") link_directories("${TENSORRT_LIB_DIR}") endif() endif(NOT WIN32) if(WITH_MKL) set(MATH_LIB_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}mklml") include_directories("${MATH_LIB_PATH}/include") if(WIN32) set(MATH_LIB ${MATH_LIB_PATH}/lib/mklml${CMAKE_STATIC_LIBRARY_SUFFIX} ${MATH_LIB_PATH}/lib/libiomp5md${CMAKE_STATIC_LIBRARY_SUFFIX}) else() set(MATH_LIB ${MATH_LIB_PATH}/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX} ${MATH_LIB_PATH}/lib/libiomp5${CMAKE_SHARED_LIBRARY_SUFFIX}) endif() set(MKLDNN_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}mkldnn") if(EXISTS ${MKLDNN_PATH}) include_directories("${MKLDNN_PATH}/include") if(WIN32) set(MKLDNN_LIB ${MKLDNN_PATH}/lib/mkldnn.lib) else(WIN32) set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.0) endif(WIN32) endif() else() set(OPENBLAS_LIB_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}openblas") include_directories("${OPENBLAS_LIB_PATH}/include/openblas") if(WIN32) set(MATH_LIB ${OPENBLAS_LIB_PATH}/lib/openblas${CMAKE_STATIC_LIBRARY_SUFFIX}) else() set(MATH_LIB ${OPENBLAS_LIB_PATH}/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX}) endif() endif() if(WITH_STATIC_LIB) set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX}) else() if(WIN32) set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX}) else() set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_SHARED_LIBRARY_SUFFIX}) endif() endif() if (NOT WIN32) if (GCC_VERSION LESS "8.0") set(EXTERNAL_LIB ${EXTERNAL_LIB} "-lssl -lcrypto -lz -lleveldb -lsnappy") endif() set(EXTERNAL_LIB ${EXTERNAL_LIB} "-lrt -ldl -lpthread") set(DEPS ${DEPS} ${MATH_LIB} ${MKLDNN_LIB} glog gflags protobuf xxhash ${EXTERNAL_LIB}) else() set(DEPS ${DEPS} ${MATH_LIB} ${MKLDNN_LIB} glog gflags_static libprotobuf xxhash ${EXTERNAL_LIB}) set(DEPS ${DEPS} shlwapi.lib) endif(NOT WIN32) if(WITH_GPU) if(NOT WIN32) if (USE_TENSORRT) set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer${CMAKE_SHARED_LIBRARY_SUFFIX}) set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX}) endif() set(DEPS ${DEPS} ${CUDA_LIB}/libcudart${CMAKE_SHARED_LIBRARY_SUFFIX}) else() if(USE_TENSORRT) set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/nvinfer${CMAKE_STATIC_LIBRARY_SUFFIX}) set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_STATIC_LIBRARY_SUFFIX}) endif() set(DEPS ${DEPS} ${CUDA_LIB}/cudart${CMAKE_STATIC_LIBRARY_SUFFIX} ) set(DEPS ${DEPS} ${CUDA_LIB}/cublas${CMAKE_STATIC_LIBRARY_SUFFIX} ) set(DEPS ${DEPS} ${CUDA_LIB}/cudnn${CMAKE_STATIC_LIBRARY_SUFFIX} ) endif() endif() cuda_add_library(pd_infer_custom_op ${CUSTOM_OPERATOR_FILES} SHARED) add_executable(${DEMO_NAME} ${DEMO_NAME}.cc) if (GCC_VERSION GREATER_EQUAL "8.0") set(DEPS ${DEPS} libssl.a libcrypto.a libz.a libleveldb.a libsnappy.a) endif() set(DEPS ${DEPS} boost pd_infer_custom_op)# libssl.a libcrypto.a libz.a libleveldb.a libsnappy.a) if(WIN32) if(USE_TENSORRT) add_custom_command(TARGET ${DEMO_NAME} POST_BUILD COMMAND 
${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer${CMAKE_SHARED_LIBRARY_SUFFIX} ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE} COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX} ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE} ) endif() if(WITH_MKL) add_custom_command(TARGET ${DEMO_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/mklml.dll ${CMAKE_BINARY_DIR}/Release COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/libiomp5md.dll ${CMAKE_BINARY_DIR}/Release COMMAND ${CMAKE_COMMAND} -E copy ${MKLDNN_PATH}/lib/mkldnn.dll ${CMAKE_BINARY_DIR}/Release ) else() add_custom_command(TARGET ${DEMO_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy ${OPENBLAS_LIB_PATH}/lib/openblas.dll ${CMAKE_BINARY_DIR}/Release ) endif() if(NOT WITH_STATIC_LIB) add_custom_command(TARGET ${DEMO_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy "${PADDLE_LIB}/paddle/lib/paddle_fluid.dll" ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE} ) endif() endif() target_link_libraries(${DEMO_NAME} ${DEPS})
0
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/compile.sh
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

mkdir -p build
cd build
rm -rf *

DEMO_NAME=main

WITH_MKL=ON
WITH_GPU=ON
USE_TENSORRT=OFF

LIB_DIR=/centerpoint/kaihuo/Paddle/build/paddle_inference_install_dir
CUDNN_LIB=/usr/lib/x86_64-linux-gnu
CUDA_LIB=/usr/local/cuda/lib64
TENSORRT_ROOT=/centerpoint/two_three/Paddle/TensorRT-8.2.5.1

CUSTOM_OPERATOR_FILES="custom_ops/voxel/voxelize_op.cc;custom_ops/voxel/voxelize_op.cu;custom_ops/iou3d_nms/iou3d_cpu.cpp;custom_ops/iou3d_nms/iou3d_nms_api.cpp;custom_ops/iou3d_nms/iou3d_nms.cpp;custom_ops/iou3d_nms/iou3d_nms_kernel.cu;custom_ops/pointnet2/sampling_gpu.cu;custom_ops/pointnet2/sampling.cc;custom_ops/pointnet2/ball_query_gpu.cu;custom_ops/pointnet2/ball_query.cc;custom_ops/pointnet2/group_points.cc;custom_ops/pointnet2/group_points_gpu.cu"

cmake .. -DPADDLE_LIB=${LIB_DIR} \
  -DWITH_MKL=${WITH_MKL} \
  -DDEMO_NAME=${DEMO_NAME} \
  -DWITH_GPU=${WITH_GPU} \
  -DWITH_STATIC_LIB=OFF \
  -DUSE_TENSORRT=${USE_TENSORRT} \
  -DCUDNN_LIB=${CUDNN_LIB} \
  -DCUDA_LIB=${CUDA_LIB} \
  -DTENSORRT_ROOT=${TENSORRT_ROOT} \
  -DCUSTOM_OPERATOR_FILES=${CUSTOM_OPERATOR_FILES}

make -j
0
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/custom_ops
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/custom_ops/pointnet2/group_points_gpu.cu
/*
Stacked-batch-data version of point grouping, modified from the original
implementation of official PointNet++ codes.
Written by Shaoshuai Shi
All Rights Reserved 2019-2020.
*/

#include <cstdio>   // fprintf (added: used by the error handling below)
#include <cstdlib>  // exit

#include "paddle/include/experimental/ext_all.h"

#define THREADS_PER_BLOCK 256
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))

__global__ void group_points_grad_kernel_stack(
    int B, int M, int C, int N, int nsample, const float *grad_out,
    const int *idx, const int *idx_batch_cnt, const int *features_batch_cnt,
    float *grad_features) {
  // :param grad_out: (M1 + M2 ..., C, nsample) tensor of the gradients of the
  //     output from forward
  // :param idx: (M1 + M2 ..., nsample) tensor containing the indices of
  //     features to group with
  // :param idx_batch_cnt: (batch_size) [M1 + M2 ...] tensor containing the
  //     indices of features to group with
  // :param features_batch_cnt: (batch_size) [N1 + N2 ...] tensor containing
  //     the indices of features to group with
  // :return:
  //     grad_features: (N1 + N2 ..., C) gradient of the features
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int sample_idx = index % nsample;
  int C_idx = (index / nsample) % C;
  int pt_idx = (index / nsample / C);

  if (pt_idx >= M || C_idx >= C || sample_idx >= nsample) return;

  int bs_idx = 0, pt_cnt = idx_batch_cnt[0];
  for (int k = 1; k < B; k++) {
    if (pt_idx < pt_cnt) break;
    pt_cnt += idx_batch_cnt[k];
    bs_idx = k;
  }

  int features_batch_start_idx = 0;
  for (int k = 0; k < bs_idx; k++)
    features_batch_start_idx += features_batch_cnt[k];

  grad_out += pt_idx * C * nsample + C_idx * nsample + sample_idx;
  idx += pt_idx * nsample + sample_idx;
  grad_features += (features_batch_start_idx + idx[0]) * C + C_idx;

  atomicAdd(grad_features, grad_out[0]);
}

void group_points_grad_kernel_launcher_stack(
    int B, int M, int C, int N, int nsample, const float *grad_out,
    const int *idx, const int *idx_batch_cnt, const int *features_batch_cnt,
    float *grad_features) {
  // :param grad_out: (M1 + M2 ..., C, nsample) tensor of the gradients of the
  //     output from forward
  // :param idx: (M1 + M2 ..., nsample) tensor containing the indices of
  //     features to group with
  // :param idx_batch_cnt: (batch_size) [M1 + M2 ...] tensor containing the
  //     indices of features to group with
  // :param features_batch_cnt: (batch_size) [N1 + N2 ...] tensor containing
  //     the indices of features to group with
  // :return:
  //     grad_features: (N1 + N2 ..., C) gradient of the features
  cudaError_t err;
  // dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b);
  dim3 blocks(DIVUP(M * C * nsample,
                    THREADS_PER_BLOCK));  // blockIdx.x(col), blockIdx.y(row)
  dim3 threads(THREADS_PER_BLOCK);
  group_points_grad_kernel_stack<<<blocks, threads>>>(
      B, M, C, N, nsample, grad_out, idx, idx_batch_cnt, features_batch_cnt,
      grad_features);

  err = cudaGetLastError();
  if (cudaSuccess != err) {
    fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
}

__global__ void group_points_kernel_stack(int B, int M, int C, int nsample,
                                          const float *features,
                                          const int *features_batch_cnt,
                                          const int *idx,
                                          const int *idx_batch_cnt,
                                          float *out) {
  // :param features: (N1 + N2 ..., C) tensor of features to group
  // :param features_batch_cnt: (batch_size) [N1 + N2 ...] tensor containing
  //     the indices of features to group with
  // :param idx: (M1 + M2 ..., nsample) tensor containing the indices of
  //     features to group with
  // :param idx_batch_cnt: (batch_size) [M1 + M2 ...] tensor containing the
  //     indices of features to group with
  // :return:
  //     output: (M1 + M2, C, nsample) tensor
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int sample_idx = index % nsample;
  int C_idx = (index / nsample) % C;
  int pt_idx = (index / nsample / C);

  if (pt_idx >= M || C_idx >= C || sample_idx >= nsample) return;

  int bs_idx = 0, pt_cnt = idx_batch_cnt[0];
  for (int k = 1; k < B; k++) {
    if (pt_idx < pt_cnt) break;
    pt_cnt += idx_batch_cnt[k];
    bs_idx = k;
  }

  int features_batch_start_idx = 0;
  for (int k = 0; k < bs_idx; k++)
    features_batch_start_idx += features_batch_cnt[k];
  features += features_batch_start_idx * C;

  idx += pt_idx * nsample + sample_idx;
  int in_idx = idx[0] * C + C_idx;
  int out_idx = pt_idx * C * nsample + C_idx * nsample + sample_idx;

  out[out_idx] = features[in_idx];
}

void group_points_kernel_launcher_stack(const int B, const int M, const int C,
                                        const int nsample,
                                        const float *features,
                                        const int *features_batch_cnt,
                                        const int *idx,
                                        const int *idx_batch_cnt, float *out) {
  // :param features: (N1 + N2 ..., C) tensor of features to group
  // :param features_batch_cnt: (batch_size) [N1 + N2 ...] tensor containing
  //     the indices of features to group with
  // :param idx: (M1 + M2 ..., nsample) tensor containing the indices of
  //     features to group with
  // :param idx_batch_cnt: (batch_size) [M1 + M2 ...] tensor containing the
  //     indices of features to group with
  // :return:
  //     output: (M1 + M2, C, nsample) tensor
  cudaError_t err;
  dim3 blocks(DIVUP(M * C * nsample,
                    THREADS_PER_BLOCK));  // blockIdx.x(col), blockIdx.y(row)
  dim3 threads(THREADS_PER_BLOCK);
  group_points_kernel_stack<<<blocks, threads>>>(
      B, M, C, nsample, features, features_batch_cnt, idx, idx_batch_cnt, out);

  // cudaDeviceSynchronize();  // for using printf in kernel function
  err = cudaGetLastError();
  if (cudaSuccess != err) {
    fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
}
0
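The forward kernel above gathers each neighbor's feature row from the stacked (N1 + N2 ..., C) tensor into an (M1 + M2 ..., C, nsample) output, resolving the per-sample offset from the two batch-count tensors. A minimal NumPy sketch of that contract, useful as a unit-test reference for the custom op; the function name is mine, and it assumes every idx entry is a valid index (>= 0):

import numpy as np

def group_points_stack_ref(features, features_batch_cnt, idx, idx_batch_cnt):
    # features: (N1+N2..., C) stacked features, idx: (M1+M2..., nsample)
    # *_batch_cnt: (B,) counts per sample; assumes all idx entries are >= 0
    M, nsample = idx.shape
    out = np.zeros((M, features.shape[1], nsample), dtype=features.dtype)
    feat_starts = np.concatenate(([0], np.cumsum(features_batch_cnt)[:-1]))
    center_ends = np.cumsum(idx_batch_cnt)
    for pt in range(M):
        bs = int(np.searchsorted(center_ends, pt, side="right"))  # batch of this center
        for s in range(nsample):
            out[pt, :, s] = features[feat_starts[bs] + idx[pt, s]]
    return out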
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/custom_ops
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/custom_ops/pointnet2/sampling.cc
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <vector> #include "paddle/include/experimental/ext_all.h" #define CHECK_INPUT(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.") // cuda launcher declaration void farthest_point_sampling_kernel_launcher(int b, int n, int m, const float *dataset, float *temp, int *idxs); // op forward wrapper std::vector<paddle::Tensor> farthest_point_sampling_cuda_forward( const paddle::Tensor &points_tensor, const int &npoints) { // points_tensor: (B, N, 3) // tmp_tensor: (B, N) // output: // idx_tensor: (B, npoints) const int b = points_tensor.shape()[0]; const int n = points_tensor.shape()[1]; auto *points = points_tensor.data<float>(); auto temp_tensor = paddle::full({b, n}, 1e10, paddle::DataType::FLOAT32, paddle::GPUPlace()); auto idx_tensor = paddle::empty({b, npoints}, paddle::DataType::INT32, paddle::GPUPlace()); auto *temp = temp_tensor.data<float>(); auto *idx = idx_tensor.data<int>(); farthest_point_sampling_kernel_launcher(b, n, npoints, points, temp, idx); return {idx_tensor}; } // shape infer std::vector<std::vector<int64_t>> FPSInferShape( std::vector<int64_t> points_shape, const int &npoints) { return {{points_shape[0], npoints}}; } // dtype infer std::vector<paddle::DataType> FPSInferDtype(paddle::DataType points_dtype) { return {paddle::DataType::INT32}; } // build op forward PD_BUILD_OP(farthest_point_sample) .Inputs({"points_tensor"}) .Outputs({"idx_tensor"}) .Attrs({"npoints: int"}) .SetKernelFn(PD_KERNEL(farthest_point_sampling_cuda_forward)) .SetInferShapeFn(PD_INFER_SHAPE(FPSInferShape)) .SetInferDtypeFn(PD_INFER_DTYPE(FPSInferDtype));
0
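farthest_point_sample greedily picks npoints indices so that each new pick maximizes its distance to the already-picked set; the temp tensor initialised to 1e10 is each point's running minimum squared distance to that set. A hedged pure-NumPy reference of the same algorithm (illustrative name, O(B * npoints * N), no CUDA; like the kernel, index 0 is always picked first):

import numpy as np

def farthest_point_sample_ref(points, npoints):
    # points: (B, N, 3) -> (B, npoints) int32 indices
    B, N, _ = points.shape
    idx = np.zeros((B, npoints), dtype=np.int32)
    for b in range(B):
        temp = np.full(N, 1e10, dtype=np.float32)  # running min sq-distance to picked set
        old = 0
        for j in range(1, npoints):
            d = np.sum((points[b] - points[b, old]) ** 2, axis=1)
            temp = np.minimum(temp, d)
            old = int(np.argmax(temp))             # farthest remaining point
            idx[b, j] = old
    return idx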
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/custom_ops
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/custom_ops/pointnet2/ball_query_gpu.cu
/* Stacked-batch-data version of ball query, modified from the original implementation of official PointNet++ codes. Written by Shaoshuai Shi All Rights Reserved 2019-2020. */ #include "paddle/include/experimental/ext_all.h" #define THREADS_PER_BLOCK 256 #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) __global__ void ball_query_kernel_stack(int B, int M, float radius, int nsample, const float *new_xyz, const int *new_xyz_batch_cnt, const float *xyz, const int *xyz_batch_cnt, int *idx) { // :param xyz: (N1 + N2 ..., 3) xyz coordinates of the features // :param xyz_batch_cnt: (batch_size), [N1, N2, ...] // :param new_xyz: (M1 + M2 ..., 3) centers of the ball query // :param new_xyz_batch_cnt: (batch_size), [M1, M2, ...] // output: // idx: (M, nsample) int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; if (pt_idx >= M) return; int bs_idx = 0, pt_cnt = new_xyz_batch_cnt[0]; for (int k = 1; k < B; k++) { if (pt_idx < pt_cnt) break; pt_cnt += new_xyz_batch_cnt[k]; bs_idx = k; } int xyz_batch_start_idx = 0; for (int k = 0; k < bs_idx; k++) xyz_batch_start_idx += xyz_batch_cnt[k]; // for (int k = 0; k < bs_idx; k++) new_xyz_batch_start_idx += // new_xyz_batch_cnt[k]; new_xyz += pt_idx * 3; xyz += xyz_batch_start_idx * 3; idx += pt_idx * nsample; float radius2 = radius * radius; float new_x = new_xyz[0]; float new_y = new_xyz[1]; float new_z = new_xyz[2]; int n = xyz_batch_cnt[bs_idx]; int cnt = 0; for (int k = 0; k < n; ++k) { float x = xyz[k * 3 + 0]; float y = xyz[k * 3 + 1]; float z = xyz[k * 3 + 2]; float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + (new_z - z) * (new_z - z); if (d2 < radius2) { if (cnt == 0) { for (int l = 0; l < nsample; ++l) { idx[l] = k; } } idx[cnt] = k; ++cnt; if (cnt >= nsample) break; } } if (cnt == 0) idx[0] = -1; } void ball_query_kernel_launcher_stack(const int B, const int M, const float radius, const int nsample, const float *new_xyz, const int *new_xyz_batch_cnt, const float *xyz, const int *xyz_batch_cnt, int *idx) { // :param xyz: (N1 + N2 ..., 3) xyz coordinates of the features // :param xyz_batch_cnt: (batch_size), [N1, N2, ...] // :param new_xyz: (M1 + M2 ..., 3) centers of the ball query // :param new_xyz_batch_cnt: (batch_size), [M1, M2, ...] // output: // idx: (M, nsample) cudaError_t err; dim3 blocks(DIVUP(M, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); ball_query_kernel_stack<<<blocks, threads>>>(B, M, radius, nsample, new_xyz, new_xyz_batch_cnt, xyz, xyz_batch_cnt, idx); // cudaDeviceSynchronize(); // for using printf in kernel function err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); exit(-1); } }
0
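Per query center, the kernel scans only that sample's slice of the stacked xyz tensor, records up to nsample indices inside the radius, pre-fills every slot with the first hit so short neighborhoods come out padded, and writes -1 in slot 0 when the ball is empty. A NumPy sketch of the same contract (function name is illustrative):

import numpy as np

def ball_query_stack_ref(new_xyz, new_xyz_batch_cnt, xyz, xyz_batch_cnt,
                         radius, nsample):
    # new_xyz: (M1+M2..., 3) centers; xyz: (N1+N2..., 3) stacked points
    M = new_xyz.shape[0]
    idx = np.zeros((M, nsample), dtype=np.int32)
    center_ends = np.cumsum(new_xyz_batch_cnt)
    xyz_starts = np.concatenate(([0], np.cumsum(xyz_batch_cnt)[:-1]))
    for pt in range(M):
        bs = int(np.searchsorted(center_ends, pt, side="right"))
        pts = xyz[xyz_starts[bs]: xyz_starts[bs] + xyz_batch_cnt[bs]]
        d2 = np.sum((pts - new_xyz[pt]) ** 2, axis=1)
        neighbors = np.flatnonzero(d2 < radius ** 2)[:nsample]
        if neighbors.size == 0:
            idx[pt, 0] = -1                 # kernel's "no neighbor" sentinel
        else:
            idx[pt, :] = neighbors[0]       # pad with the first hit, like the kernel
            idx[pt, :neighbors.size] = neighbors
    return idx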
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/custom_ops
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/custom_ops/pointnet2/sampling_gpu.cu
#include <cmath> #include "paddle/include/experimental/ext_all.h" #define TOTAL_THREADS 1024 #define THREADS_PER_BLOCK 256 #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) inline int opt_n_threads(int work_size) { const int pow_2 = std::log(static_cast<double>(work_size)) / std::log(2.0); return max(min(1 << pow_2, TOTAL_THREADS), 1); } __device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i, int idx1, int idx2) { const float v1 = dists[idx1], v2 = dists[idx2]; const int i1 = dists_i[idx1], i2 = dists_i[idx2]; dists[idx1] = max(v1, v2); dists_i[idx1] = v2 > v1 ? i2 : i1; } template <unsigned int block_size> __global__ void farthest_point_sampling_kernel( int b, int n, int m, const float *__restrict__ dataset, float *__restrict__ temp, int *__restrict__ idxs) { // dataset: (B, N, 3) // tmp: (B, N) // output: // idx: (B, M) if (m <= 0) return; __shared__ float dists[block_size]; __shared__ int dists_i[block_size]; int batch_index = blockIdx.x; dataset += batch_index * n * 3; temp += batch_index * n; idxs += batch_index * m; int tid = threadIdx.x; const int stride = block_size; int old = 0; if (threadIdx.x == 0) idxs[0] = old; __syncthreads(); for (int j = 1; j < m; j++) { int besti = 0; float best = -1; float x1 = dataset[old * 3 + 0]; float y1 = dataset[old * 3 + 1]; float z1 = dataset[old * 3 + 2]; for (int k = tid; k < n; k += stride) { float x2, y2, z2; x2 = dataset[k * 3 + 0]; y2 = dataset[k * 3 + 1]; z2 = dataset[k * 3 + 2]; float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1); float d2 = min(d, temp[k]); temp[k] = d2; besti = d2 > best ? k : besti; best = d2 > best ? d2 : best; } dists[tid] = best; dists_i[tid] = besti; __syncthreads(); if (block_size >= 1024) { if (tid < 512) { __update(dists, dists_i, tid, tid + 512); } __syncthreads(); } if (block_size >= 512) { if (tid < 256) { __update(dists, dists_i, tid, tid + 256); } __syncthreads(); } if (block_size >= 256) { if (tid < 128) { __update(dists, dists_i, tid, tid + 128); } __syncthreads(); } if (block_size >= 128) { if (tid < 64) { __update(dists, dists_i, tid, tid + 64); } __syncthreads(); } if (block_size >= 64) { if (tid < 32) { __update(dists, dists_i, tid, tid + 32); } __syncthreads(); } if (block_size >= 32) { if (tid < 16) { __update(dists, dists_i, tid, tid + 16); } __syncthreads(); } if (block_size >= 16) { if (tid < 8) { __update(dists, dists_i, tid, tid + 8); } __syncthreads(); } if (block_size >= 8) { if (tid < 4) { __update(dists, dists_i, tid, tid + 4); } __syncthreads(); } if (block_size >= 4) { if (tid < 2) { __update(dists, dists_i, tid, tid + 2); } __syncthreads(); } if (block_size >= 2) { if (tid < 1) { __update(dists, dists_i, tid, tid + 1); } __syncthreads(); } old = dists_i[0]; if (tid == 0) idxs[j] = old; } } void farthest_point_sampling_kernel_launcher(int b, int n, int m, const float *dataset, float *temp, int *idxs) { // dataset: (B, N, 3) // tmp: (B, N) // output: // idx: (B, M) cudaError_t err; unsigned int n_threads = opt_n_threads(n); switch (n_threads) { case 1024: farthest_point_sampling_kernel<1024> <<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 512: farthest_point_sampling_kernel<512> <<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 256: farthest_point_sampling_kernel<256> <<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 128: farthest_point_sampling_kernel<128> <<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 64: farthest_point_sampling_kernel<64> <<<b, n_threads>>>(b, n, m, dataset, temp, 
idxs); break; case 32: farthest_point_sampling_kernel<32> <<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 16: farthest_point_sampling_kernel<16> <<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 8: farthest_point_sampling_kernel<8> <<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 4: farthest_point_sampling_kernel<4> <<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 2: farthest_point_sampling_kernel<2> <<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 1: farthest_point_sampling_kernel<1> <<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; default: farthest_point_sampling_kernel<512> <<<b, n_threads>>>(b, n, m, dataset, temp, idxs); } err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); exit(-1); } }
0
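The launcher dispatches a templated kernel whose block size must be a compile-time constant, so opt_n_threads rounds the point count down to a power of two clamped to [1, TOTAL_THREADS]; the in-block argmax is then a shared-memory tree reduction over exactly block_size slots. A one-function Python mirror of the block-size choice, for sanity-checking (assumes work_size >= 1, as the C++ helper implicitly does):

import math

def opt_n_threads(work_size, total_threads=1024):
    # round down to a power of two, clamp to [1, total_threads]
    pow_2 = int(math.log(work_size) / math.log(2.0))
    return max(min(1 << pow_2, total_threads), 1)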
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/custom_ops
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/custom_ops/pointnet2/ball_query.cc
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <vector> #include "paddle/include/experimental/ext_all.h" #define CHECK_INPUT(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.") // cuda launcher declaration void ball_query_kernel_launcher_stack(const int b, const int m, const float radius, const int nsample, const float *new_xyz, const int *new_xyz_batch_cnt, const float *xyz, const int *xyz_batch_cnt, int *idx); // op forward wrapper std::vector<paddle::Tensor> ball_query_cuda_forward( const paddle::Tensor &new_xyz_tensor, const paddle::Tensor &new_xyz_batch_cnt_tensor, const paddle::Tensor &xyz_tensor, const paddle::Tensor &xyz_batch_cnt_tensor, const float radius, const int nsample) { CHECK_INPUT(new_xyz_tensor); CHECK_INPUT(new_xyz_batch_cnt_tensor); CHECK_INPUT(xyz_tensor); CHECK_INPUT(xyz_batch_cnt_tensor); const int b = xyz_batch_cnt_tensor.shape()[0]; const int m = new_xyz_tensor.shape()[0]; const float *new_xyz = new_xyz_tensor.data<float>(); const int *new_xyz_batch_cnt = new_xyz_batch_cnt_tensor.data<int>(); const float *xyz = xyz_tensor.data<float>(); const int *xyz_batch_cnt = xyz_batch_cnt_tensor.data<int>(); auto idx_tensor = paddle::full({m, nsample}, 0, paddle::DataType::INT32, paddle::GPUPlace()); int *idx = idx_tensor.data<int>(); ball_query_kernel_launcher_stack(b, m, radius, nsample, new_xyz, new_xyz_batch_cnt, xyz, xyz_batch_cnt, idx); return {idx_tensor}; } // shape infer std::vector<std::vector<int64_t>> BallQueryInferShape( std::vector<int64_t> new_xyz_shape, std::vector<int64_t> new_xyz_batch_cnt_shape, std::vector<int64_t> xyz_shape, std::vector<int64_t> xyz_batch_cnt_shape, const float radius, const int nsample) { return {{new_xyz_shape[0], nsample}}; } // data type infer std::vector<paddle::DataType> BallQueryInferDtype( paddle::DataType new_xyz_type, paddle::DataType new_xyz_batch_cnt_type, paddle::DataType xyz_type, paddle::DataType xyz_batch_cnt_type) { return {paddle::DataType::INT32}; } // build forward op PD_BUILD_OP(ball_query) .Inputs({"new_xyz_tensor", "new_xyz_batch_cnt_tensor", "xyz_tensor", "xyz_batch_cnt_tensor"}) .Outputs({"idx_tensor"}) .Attrs({"radius: float", "nsample: int"}) .SetKernelFn(PD_KERNEL(ball_query_cuda_forward)) .SetInferShapeFn(PD_INFER_SHAPE(BallQueryInferShape)) .SetInferDtypeFn(PD_INFER_DTYPE(BallQueryInferDtype));
0
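PD_BUILD_OP exposes ball_query to Python with inputs first and attrs after, in declaration order. One plausible way to JIT-build and call it is sketched below; the source paths, shapes, and build setup are illustrative (the repo may ship its own build script), and all four tensors must live on the GPU to pass CHECK_INPUT:

# hedged sketch: JIT-build the custom op and call it from Python
import paddle
from paddle.utils.cpp_extension import load

pointnet2 = load(name="pointnet2_ops",
                 sources=["ball_query.cc", "ball_query_gpu.cu"])

# stacked layout: two clouds of 1024 points, 256 query centers each
xyz = paddle.rand([2048, 3])
xyz_batch_cnt = paddle.to_tensor([1024, 1024], dtype="int32")
new_xyz = paddle.rand([512, 3])
new_xyz_batch_cnt = paddle.to_tensor([256, 256], dtype="int32")

# attrs follow the PD_BUILD_OP order: radius, then nsample
idx = pointnet2.ball_query(new_xyz, new_xyz_batch_cnt,
                           xyz, xyz_batch_cnt, 0.8, 16)
print(idx.shape)  # [512, 16]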
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/custom_ops
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/custom_ops/pointnet2/group_points.cc
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <vector> #include "paddle/include/experimental/ext_all.h" #define CHECK_INPUT(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.") // cuda launcher declaration void group_points_kernel_launcher_stack(const int B, const int M, const int C, const int nsample, const float *features, const int *features_batch_cnt, const int *idx, const int *idx_batch_cnt, float *out); void group_points_grad_kernel_launcher_stack( const int B, const int M, const int C, const int N, const int nsample, const float *grad_out, const int *idx, const int *idx_batch_cnt, const int *features_batch_cnt, float *grad_features); // op forward wrapper std::vector<paddle::Tensor> group_points_cuda_forward( const paddle::Tensor &features_tensor, const paddle::Tensor &features_batch_cnt_tensor, const paddle::Tensor &idx_tensor, const paddle::Tensor &idx_batch_cnt_tensor) { CHECK_INPUT(features_tensor); CHECK_INPUT(features_batch_cnt_tensor); CHECK_INPUT(idx_tensor); CHECK_INPUT(idx_batch_cnt_tensor); const int m = idx_tensor.shape()[0]; const int nsample = idx_tensor.shape()[1]; const int n = features_tensor.shape()[0]; const int c = features_tensor.shape()[1]; const int b = idx_batch_cnt_tensor.shape()[0]; const float *features = features_tensor.data<float>(); const int *features_batch_cnt = features_batch_cnt_tensor.data<int>(); const int *idx = idx_tensor.data<int>(); const int *idx_batch_cnt = idx_batch_cnt_tensor.data<int>(); auto out_tensor = paddle::empty({m, c, nsample}, paddle::DataType::FLOAT32, paddle::GPUPlace()); float *out = out_tensor.data<float>(); group_points_kernel_launcher_stack( b, m, c, nsample, features, features_batch_cnt, idx, idx_batch_cnt, out); return {out_tensor}; } // op backward wrapper std::vector<paddle::Tensor> group_points_cuda_backward( const paddle::Tensor &grad_out_tensor, const paddle::Tensor &features_tensor, const paddle::Tensor &features_batch_cnt_tensor, const paddle::Tensor &idx_tensor, const paddle::Tensor &idx_batch_cnt_tensor) { CHECK_INPUT(grad_out_tensor); CHECK_INPUT(features_tensor); CHECK_INPUT(features_batch_cnt_tensor); CHECK_INPUT(idx_tensor); CHECK_INPUT(idx_batch_cnt_tensor); const int m = idx_tensor.shape()[0]; const int nsample = idx_tensor.shape()[1]; const int n = features_tensor.shape()[0]; const int c = features_tensor.shape()[1]; const int b = idx_batch_cnt_tensor.shape()[0]; const float *grad_out = grad_out_tensor.data<float>(); const int *features_batch_cnt = features_batch_cnt_tensor.data<int>(); const int *idx = idx_tensor.data<int>(); const int *idx_batch_cnt = idx_batch_cnt_tensor.data<int>(); auto grad_features_tensor = paddle::full({n, c}, 0., paddle::DataType::FLOAT32, paddle::GPUPlace()); float *grad_features = grad_features_tensor.data<float>(); group_points_grad_kernel_launcher_stack(b, m, c, n, nsample, grad_out, idx, idx_batch_cnt, features_batch_cnt, grad_features); return {grad_features_tensor}; } // shape infer 
std::vector<std::vector<int64_t>> GroupInferShape( std::vector<int64_t> features_shape, std::vector<int64_t> features_batch_cnt_shape, std::vector<int64_t> idx_shape, std::vector<int64_t> idx_batch_cnt_shape) { const int m = idx_shape[0]; const int nsample = idx_shape[1]; const int c = features_shape[1]; return {{m, c, nsample}}; } // data type infer std::vector<paddle::DataType> GroupInferDtype( paddle::DataType features_dtype, paddle::DataType features_batch_cnt_dtype, paddle::DataType idx_dtype, paddle::DataType idx_batch_cnt_dtype) { return {features_dtype}; } // build forward op PD_BUILD_OP(grouping_operation) .Inputs({"features_tensor", "features_batch_cnt_tensor", "idx_tensor", "idx_batch_cnt_tensor"}) .Outputs({"out_tensor"}) .SetKernelFn(PD_KERNEL(group_points_cuda_forward)) .SetInferShapeFn(PD_INFER_SHAPE(GroupInferShape)) .SetInferDtypeFn(PD_INFER_DTYPE(GroupInferDtype)); // build backward op PD_BUILD_GRAD_OP(grouping_operation) .Inputs({paddle::Grad("out_tensor"), "features_tensor", "features_batch_cnt_tensor", "idx_tensor", "idx_batch_cnt_tensor"}) .Outputs({paddle::Grad("features_tensor")}) .SetKernelFn(PD_KERNEL(group_points_cuda_backward));
0
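The backward pass scatter-adds each grouped gradient column back into its source point's feature row, which is why the CUDA kernel uses atomicAdd: several centers can group the same point. An illustrative NumPy equivalent using np.add.at (name and signature are mine, not the repo's):

import numpy as np

def group_points_grad_ref(grad_out, idx, idx_batch_cnt, features_batch_cnt, N):
    # grad_out: (M, C, nsample) -> grad_features: (N, C)
    M, C, nsample = grad_out.shape
    grad_features = np.zeros((N, C), dtype=grad_out.dtype)
    feat_starts = np.concatenate(([0], np.cumsum(features_batch_cnt)[:-1]))
    center_ends = np.cumsum(idx_batch_cnt)
    for pt in range(M):
        bs = int(np.searchsorted(center_ends, pt, side="right"))
        rows = feat_starts[bs] + idx[pt]                  # (nsample,) target rows
        np.add.at(grad_features, rows, grad_out[pt].T)    # scatter-add, like atomicAdd
    return grad_features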
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/custom_ops
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/custom_ops/voxel/voxelize_op.cc
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <vector> #include "paddle/include/experimental/ext_all.h" template <typename T, typename T_int> bool hard_voxelize_cpu_kernel( const T *points, const float point_cloud_range_x_min, const float point_cloud_range_y_min, const float point_cloud_range_z_min, const float voxel_size_x, const float voxel_size_y, const float voxel_size_z, const int grid_size_x, const int grid_size_y, const int grid_size_z, const int64_t num_points, const int num_point_dim, const int max_num_points_in_voxel, const int max_voxels, T *voxels, T_int *coords, T_int *num_points_per_voxel, T_int *grid_idx_to_voxel_idx, T_int *num_voxels) { std::fill(voxels, voxels + max_voxels * max_num_points_in_voxel * num_point_dim, static_cast<T>(0)); num_voxels[0] = 0; int voxel_idx, grid_idx, curr_num_point; int coord_x, coord_y, coord_z; for (int point_idx = 0; point_idx < num_points; ++point_idx) { coord_x = floor( (points[point_idx * num_point_dim + 0] - point_cloud_range_x_min) / voxel_size_x); coord_y = floor( (points[point_idx * num_point_dim + 1] - point_cloud_range_y_min) / voxel_size_y); coord_z = floor( (points[point_idx * num_point_dim + 2] - point_cloud_range_z_min) / voxel_size_z); if (coord_x < 0 || coord_x > grid_size_x || coord_x == grid_size_x) { continue; } if (coord_y < 0 || coord_y > grid_size_y || coord_y == grid_size_y) { continue; } if (coord_z < 0 || coord_z > grid_size_z || coord_z == grid_size_z) { continue; } grid_idx = coord_z * grid_size_y * grid_size_x + coord_y * grid_size_x + coord_x; voxel_idx = grid_idx_to_voxel_idx[grid_idx]; if (voxel_idx == -1) { voxel_idx = num_voxels[0]; if (num_voxels[0] == max_voxels || num_voxels[0] > max_voxels) { continue; } num_voxels[0]++; grid_idx_to_voxel_idx[grid_idx] = voxel_idx; coords[voxel_idx * 3 + 0] = coord_z; coords[voxel_idx * 3 + 1] = coord_y; coords[voxel_idx * 3 + 2] = coord_x; } curr_num_point = num_points_per_voxel[voxel_idx]; if (curr_num_point < max_num_points_in_voxel) { for (int j = 0; j < num_point_dim; ++j) { voxels[voxel_idx * max_num_points_in_voxel * num_point_dim + curr_num_point * num_point_dim + j] = points[point_idx * num_point_dim + j]; } num_points_per_voxel[voxel_idx] = curr_num_point + 1; } } return true; } std::vector<paddle::Tensor> hard_voxelize_cpu( const paddle::Tensor &points, const std::vector<float> &voxel_size, const std::vector<float> &point_cloud_range, const int max_num_points_in_voxel, const int max_voxels) { auto num_points = points.shape()[0]; auto num_point_dim = points.shape()[1]; const float voxel_size_x = voxel_size[0]; const float voxel_size_y = voxel_size[1]; const float voxel_size_z = voxel_size[2]; const float point_cloud_range_x_min = point_cloud_range[0]; const float point_cloud_range_y_min = point_cloud_range[1]; const float point_cloud_range_z_min = point_cloud_range[2]; int grid_size_x = static_cast<int>( round((point_cloud_range[3] - point_cloud_range[0]) / voxel_size_x)); int grid_size_y = 
static_cast<int>( round((point_cloud_range[4] - point_cloud_range[1]) / voxel_size_y)); int grid_size_z = static_cast<int>( round((point_cloud_range[5] - point_cloud_range[2]) / voxel_size_z)); auto voxels = paddle::empty({max_voxels, max_num_points_in_voxel, num_point_dim}, paddle::DataType::FLOAT32, paddle::CPUPlace()); auto coords = paddle::full({max_voxels, 3}, 0, paddle::DataType::INT32, paddle::CPUPlace()); auto *coords_data = coords.data<int>(); auto num_points_per_voxel = paddle::full( {max_voxels}, 0, paddle::DataType::INT32, paddle::CPUPlace()); auto *num_points_per_voxel_data = num_points_per_voxel.data<int>(); std::fill(num_points_per_voxel_data, num_points_per_voxel_data + num_points_per_voxel.size(), static_cast<int>(0)); auto num_voxels = paddle::full({1}, 0, paddle::DataType::INT32, paddle::CPUPlace()); auto *num_voxels_data = num_voxels.data<int>(); auto grid_idx_to_voxel_idx = paddle::full({grid_size_z, grid_size_y, grid_size_x}, -1, paddle::DataType::INT32, paddle::CPUPlace()); auto *grid_idx_to_voxel_idx_data = grid_idx_to_voxel_idx.data<int>(); PD_DISPATCH_FLOATING_TYPES( points.type(), "hard_voxelize_cpu_kernel", ([&] { hard_voxelize_cpu_kernel<data_t, int>( points.data<data_t>(), point_cloud_range_x_min, point_cloud_range_y_min, point_cloud_range_z_min, voxel_size_x, voxel_size_y, voxel_size_z, grid_size_x, grid_size_y, grid_size_z, num_points, num_point_dim, max_num_points_in_voxel, max_voxels, voxels.data<data_t>(), coords_data, num_points_per_voxel_data, grid_idx_to_voxel_idx_data, num_voxels_data); })); return {voxels, coords, num_points_per_voxel, num_voxels}; } #ifdef PADDLE_WITH_CUDA std::vector<paddle::Tensor> hard_voxelize_cuda( const paddle::Tensor &points, const std::vector<float> &voxel_size, const std::vector<float> &point_cloud_range, int max_num_points_in_voxel, int max_voxels); #endif std::vector<paddle::Tensor> hard_voxelize( const paddle::Tensor &points, const std::vector<float> &voxel_size, const std::vector<float> &point_cloud_range, const int max_num_points_in_voxel, const int max_voxels) { if (points.is_cpu()) { return hard_voxelize_cpu(points, voxel_size, point_cloud_range, max_num_points_in_voxel, max_voxels); #ifdef PADDLE_WITH_CUDA } else if (points.is_gpu() || points.is_gpu_pinned()) { return hard_voxelize_cuda(points, voxel_size, point_cloud_range, max_num_points_in_voxel, max_voxels); #endif } else { PD_THROW( "Unsupported device type for hard_voxelize " "operator."); } } std::vector<std::vector<int64_t>> HardInferShape( std::vector<int64_t> points_shape, const std::vector<float> &voxel_size, const std::vector<float> &point_cloud_range, const int &max_num_points_in_voxel, const int &max_voxels) { return {{max_voxels, max_num_points_in_voxel, points_shape[1]}, {max_voxels, 3}, {max_voxels}, {1}}; } std::vector<paddle::DataType> HardInferDtype(paddle::DataType points_dtype) { return {points_dtype, paddle::DataType::INT32, paddle::DataType::INT32, paddle::DataType::INT32}; } PD_BUILD_OP(hard_voxelize) .Inputs({"POINTS"}) .Outputs({"VOXELS", "COORS", "NUM_POINTS_PER_VOXEL", "num_voxels"}) .SetKernelFn(PD_KERNEL(hard_voxelize)) .Attrs({"voxel_size: std::vector<float>", "point_cloud_range: std::vector<float>", "max_num_points_in_voxel: int", "max_voxels: int"}) .SetInferShapeFn(PD_INFER_SHAPE(HardInferShape)) .SetInferDtypeFn(PD_INFER_DTYPE(HardInferDtype));
0
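The CPU kernel assigns voxels first-come-first-served: out-of-range points are dropped, the first point landing in a grid cell allocates a voxel (until max_voxels is hit), later points append until max_num_points_in_voxel, and coords are stored as (z, y, x). A dictionary-based NumPy sketch of that policy (hypothetical helper, expects array-like inputs):

import numpy as np

def hard_voxelize_ref(points, voxel_size, pc_range, max_pts, max_voxels):
    # points: (P, D) float array with xyz in the first three columns
    # voxel_size: (3,), pc_range: (6,) [x_min, y_min, z_min, x_max, y_max, z_max]
    voxel_size = np.asarray(voxel_size, dtype=np.float64)
    pc_range = np.asarray(pc_range, dtype=np.float64)
    grid = np.round((pc_range[3:] - pc_range[:3]) / voxel_size).astype(int)
    voxels = np.zeros((max_voxels, max_pts, points.shape[1]), dtype=np.float32)
    coords = np.zeros((max_voxels, 3), dtype=np.int32)
    num_per_voxel = np.zeros(max_voxels, dtype=np.int32)
    grid_to_voxel = {}
    for p in points:
        c = np.floor((p[:3] - pc_range[:3]) / voxel_size).astype(int)
        if np.any(c < 0) or np.any(c >= grid):
            continue                           # point falls outside the range
        key = (c[2], c[1], c[0])               # stored as (z, y, x), like the op
        if key not in grid_to_voxel:
            if len(grid_to_voxel) >= max_voxels:
                continue                       # voxel budget exhausted
            grid_to_voxel[key] = len(grid_to_voxel)
            coords[grid_to_voxel[key]] = key
        v = grid_to_voxel[key]
        if num_per_voxel[v] < max_pts:         # extra points in a full voxel are dropped
            voxels[v, num_per_voxel[v]] = p
            num_per_voxel[v] += 1
    return voxels, coords, num_per_voxel, len(grid_to_voxel)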
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/custom_ops
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/custom_ops/voxel/voxelize_op.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/include/experimental/ext_all.h" #define CHECK_INPUT_CUDA(x) \ PD_CHECK(x.is_gpu() || x.is_gpu_pinned(), #x " must be a GPU Tensor.") #define CUDA_KERNEL_LOOP(i, n) \ for (auto i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) template <typename T, typename T_int> __global__ void map_point_to_grid_kernel( const T *points, const float point_cloud_range_x_min, const float point_cloud_range_y_min, const float point_cloud_range_z_min, const float voxel_size_x, const float voxel_size_y, const float voxel_size_z, const int grid_size_x, const int grid_size_y, const int grid_size_z, const int64_t num_points, const int num_point_dim, const int max_num_points_in_voxel, T_int *points_to_grid_idx, T_int *points_to_num_idx, T_int *num_points_in_grid, int *points_valid) { int64_t point_idx = blockIdx.x * blockDim.x + threadIdx.x; if (point_idx > num_points || point_idx == num_points) { return; } int coord_x = floor((points[point_idx * num_point_dim + 0] - point_cloud_range_x_min) / voxel_size_x); int coord_y = floor((points[point_idx * num_point_dim + 1] - point_cloud_range_y_min) / voxel_size_y); int coord_z = floor((points[point_idx * num_point_dim + 2] - point_cloud_range_z_min) / voxel_size_z); if (coord_x < 0 || coord_x > grid_size_x || coord_x == grid_size_x) { return; } if (coord_y < 0 || coord_y > grid_size_y || coord_y == grid_size_y) { return; } if (coord_z < 0 || coord_z > grid_size_z || coord_z == grid_size_z) { return; } int grid_idx = coord_z * grid_size_y * grid_size_x + coord_y * grid_size_x + coord_x; T_int num = atomicAdd(num_points_in_grid + grid_idx, 1); if (num < max_num_points_in_voxel) { points_to_num_idx[point_idx] = num; points_to_grid_idx[point_idx] = grid_idx; atomicMin(points_valid + grid_idx, static_cast<int>(point_idx)); } } template <typename T_int> __global__ void update_points_flag(const int *points_valid, const T_int *points_to_grid_idx, const int num_points, int *points_flag) { int tid = threadIdx.x + blockIdx.x * blockDim.x; for (int i = tid; i < num_points; i += gridDim.x * blockDim.x) { T_int grid_idx = points_to_grid_idx[i]; if (grid_idx >= 0) { int id = points_valid[grid_idx]; if (id != num_points && id == i) { points_flag[i] = 1; } } } } template <typename T_int> __global__ void get_voxel_idx_kernel(const int *points_flag, const T_int *points_to_grid_idx, const int *points_flag_prefix_sum, const int num_points, const int max_voxels, T_int *num_voxels, T_int *grid_idx_to_voxel_idx) { int tid = threadIdx.x + blockIdx.x * blockDim.x; for (int i = tid; i < num_points; i += gridDim.x * blockDim.x) { if (points_flag[i] == 1) { T_int grid_idx = points_to_grid_idx[i]; int num = points_flag_prefix_sum[i]; if (num < max_voxels) { grid_idx_to_voxel_idx[grid_idx] = num; } } if (i == num_points - 1) { int num = points_flag_prefix_sum[i] + points_flag[i]; if (num < max_voxels) { num_voxels[0] = num; } else { num_voxels[0] 
= max_voxels; } } } } template <typename T> __global__ void init_voxels_kernel(const int64_t num, T *voxels) { int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx > num || idx == num) { return; } voxels[idx] = static_cast<T>(0); } template <typename T, typename T_int> __global__ void assign_voxels_kernel( const T *points, const T_int *points_to_grid_idx, const T_int *points_to_num_idx, const T_int *grid_idx_to_voxel_idx, const int64_t num_points, const int num_point_dim, const int max_num_points_in_voxel, T *voxels) { int64_t point_idx = blockIdx.x * blockDim.x + threadIdx.x; if (point_idx > num_points || point_idx == num_points) { return; } T_int grid_idx = points_to_grid_idx[point_idx]; T_int num_idx = points_to_num_idx[point_idx]; if (grid_idx > -1 && num_idx > -1) { T_int voxel_idx = grid_idx_to_voxel_idx[grid_idx]; if (voxel_idx > -1) { for (int64_t i = 0; i < num_point_dim; ++i) { voxels[voxel_idx * max_num_points_in_voxel * num_point_dim + num_idx * num_point_dim + i] = points[point_idx * num_point_dim + i]; } } } } template <typename T, typename T_int> __global__ void assign_coords_kernel(const T_int *grid_idx_to_voxel_idx, const T_int *num_points_in_grid, const int num_grids, const int grid_size_x, const int grid_size_y, const int grid_size_z, const int max_num_points_in_voxel, T *coords, T *num_points_per_voxel) { int64_t grid_idx = blockIdx.x * blockDim.x + threadIdx.x; if (grid_idx > num_grids || grid_idx == num_grids) { return; } T_int voxel_idx = grid_idx_to_voxel_idx[grid_idx]; if (voxel_idx > -1) { T_int coord_z = grid_idx / grid_size_x / grid_size_y; T_int coord_y = (grid_idx - coord_z * grid_size_x * grid_size_y) / grid_size_x; T_int coord_x = grid_idx - coord_z * grid_size_x * grid_size_y - coord_y * grid_size_x; coords[voxel_idx * 3 + 0] = coord_z; coords[voxel_idx * 3 + 1] = coord_y; coords[voxel_idx * 3 + 2] = coord_x; num_points_per_voxel[voxel_idx] = min(num_points_in_grid[grid_idx], max_num_points_in_voxel); } } std::vector<paddle::Tensor> hard_voxelize_cuda( const paddle::Tensor &points, const std::vector<float> &voxel_size, const std::vector<float> &point_cloud_range, int max_num_points_in_voxel, int max_voxels) { // check device CHECK_INPUT_CUDA(points); int64_t num_points = points.shape()[0]; int64_t num_point_dim = points.shape()[1]; const float voxel_size_x = voxel_size[0]; const float voxel_size_y = voxel_size[1]; const float voxel_size_z = voxel_size[2]; const float point_cloud_range_x_min = point_cloud_range[0]; const float point_cloud_range_y_min = point_cloud_range[1]; const float point_cloud_range_z_min = point_cloud_range[2]; int grid_size_x = static_cast<int>( round((point_cloud_range[3] - point_cloud_range[0]) / voxel_size_x)); int grid_size_y = static_cast<int>( round((point_cloud_range[4] - point_cloud_range[1]) / voxel_size_y)); int grid_size_z = static_cast<int>( round((point_cloud_range[5] - point_cloud_range[2]) / voxel_size_z)); int num_grids = grid_size_x * grid_size_y * grid_size_z; auto voxels = paddle::empty({max_voxels, max_num_points_in_voxel, num_point_dim}, paddle::DataType::FLOAT32, paddle::GPUPlace()); auto coords = paddle::full({max_voxels, 3}, 0, paddle::DataType::INT32, paddle::GPUPlace()); auto *coords_data = coords.data<int>(); auto num_points_per_voxel = paddle::full( {max_voxels}, 0, paddle::DataType::INT32, paddle::GPUPlace()); auto *num_points_per_voxel_data = num_points_per_voxel.data<int>(); auto points_to_grid_idx = paddle::full( {num_points}, -1, paddle::DataType::INT32, paddle::GPUPlace()); auto 
*points_to_grid_idx_data = points_to_grid_idx.data<int>(); auto points_to_num_idx = paddle::full( {num_points}, -1, paddle::DataType::INT32, paddle::GPUPlace()); auto *points_to_num_idx_data = points_to_num_idx.data<int>(); auto num_points_in_grid = paddle::full({grid_size_z, grid_size_y, grid_size_x}, 0, paddle::DataType::INT32, paddle::GPUPlace()); auto *num_points_in_grid_data = num_points_in_grid.data<int>(); auto grid_idx_to_voxel_idx = paddle::full({grid_size_z, grid_size_y, grid_size_x}, -1, paddle::DataType::INT32, paddle::GPUPlace()); auto *grid_idx_to_voxel_idx_data = grid_idx_to_voxel_idx.data<int>(); auto num_voxels = paddle::full({1}, 0, paddle::DataType::INT32, paddle::GPUPlace()); auto *num_voxels_data = num_voxels.data<int>(); auto points_valid = paddle::full({grid_size_z * grid_size_y * grid_size_x}, static_cast<int>(num_points), paddle::DataType::INT32, paddle::GPUPlace()); int *points_valid_data = points_valid.data<int>(); auto points_flag = paddle::full({num_points}, 0, paddle::DataType::INT32, paddle::GPUPlace()); // 1. Find the grid index for each point, compute the // number of points in each grid int64_t threads = 512; int64_t blocks = (num_points + threads - 1) / threads; PD_DISPATCH_FLOATING_TYPES( points.type(), "map_point_to_grid_kernel", ([&] { map_point_to_grid_kernel<data_t, int> <<<blocks, threads, 0, points.stream()>>>( points.data<data_t>(), point_cloud_range_x_min, point_cloud_range_y_min, point_cloud_range_z_min, voxel_size_x, voxel_size_y, voxel_size_z, grid_size_x, grid_size_y, grid_size_z, num_points, num_point_dim, max_num_points_in_voxel, points_to_grid_idx_data, points_to_num_idx_data, num_points_in_grid_data, points_valid_data); })); // 2. Find the number of non-zero voxels int *points_flag_data = points_flag.data<int>(); threads = 512; blocks = (num_points + threads - 1) / threads; update_points_flag<int><<<blocks, threads, 0, points.stream()>>>( points_valid_data, points_to_grid_idx_data, num_points, points_flag_data); auto points_flag_prefix_sum = paddle::experimental::cumsum(points_flag, 0, false, true, false); int *points_flag_prefix_sum_data = points_flag_prefix_sum.data<int>(); get_voxel_idx_kernel<int><<<blocks, threads, 0, points.stream()>>>( points_flag_data, points_to_grid_idx_data, points_flag_prefix_sum_data, num_points, max_voxels, num_voxels_data, grid_idx_to_voxel_idx_data); // 3. Store points to voxels coords and num_points_per_voxel int64_t num = max_voxels * max_num_points_in_voxel * num_point_dim; threads = 512; blocks = (num + threads - 1) / threads; PD_DISPATCH_FLOATING_TYPES(points.type(), "init_voxels_kernel", ([&] { init_voxels_kernel<data_t> <<<blocks, threads, 0, points.stream()>>>( num, voxels.data<data_t>()); })); threads = 512; blocks = (num_points + threads - 1) / threads; PD_DISPATCH_FLOATING_TYPES( points.type(), "assign_voxels_kernel", ([&] { assign_voxels_kernel<data_t, int> <<<blocks, threads, 0, points.stream()>>>( points.data<data_t>(), points_to_grid_idx_data, points_to_num_idx_data, grid_idx_to_voxel_idx_data, num_points, num_point_dim, max_num_points_in_voxel, voxels.data<data_t>()); })); // 4. Store coords, num_points_per_voxel blocks = (num_grids + threads - 1) / threads; assign_coords_kernel<int><<<blocks, threads, 0, points.stream()>>>( grid_idx_to_voxel_idx_data, num_points_in_grid_data, num_grids, grid_size_x, grid_size_y, grid_size_z, max_num_points_in_voxel, coords_data, num_points_per_voxel_data); return {voxels, coords, num_points_per_voxel, num_voxels}; }
0
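On GPU, the serial "first point allocates the voxel" policy is replaced by three parallel steps: atomicMin elects one representative point per occupied cell (points_valid), those representatives are flagged, and an exclusive prefix sum over the flags yields dense voxel ids clamped to max_voxels. A NumPy sketch of the flag-and-scan steps under hypothetical names:

import numpy as np

def assign_voxel_ids(points_to_grid_idx, num_grids, max_voxels):
    # points_to_grid_idx: (P,) grid cell per point, -1 for out-of-range points
    n = len(points_to_grid_idx)
    first_point = np.full(num_grids, n, dtype=np.int64)    # GPU: atomicMin race
    for i, g in enumerate(points_to_grid_idx):
        if g >= 0:
            first_point[g] = min(first_point[g], i)
    flags = np.array([1 if g >= 0 and first_point[g] == i else 0
                      for i, g in enumerate(points_to_grid_idx)])
    prefix = np.cumsum(flags) - flags                      # exclusive prefix sum
    grid_to_voxel = np.full(num_grids, -1, dtype=np.int64)
    for i, g in enumerate(points_to_grid_idx):
        if flags[i] and prefix[i] < max_voxels:
            grid_to_voxel[g] = prefix[i]                   # dense voxel id
    return grid_to_voxel, min(int(flags.sum()), max_voxels)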
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/custom_ops
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/custom_ops/iou3d_nms/iou3d_cpu.cpp
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* 3D Rotated IoU Calculation (CPU) Written by Shaoshuai Shi All Rights Reserved 2020. */ #include "iou3d_cpu.h" #include <cuda.h> #include <cuda_runtime_api.h> #include <math.h> #include <stdio.h> #include <vector> #include "paddle/include/experimental/ext_all.h" inline float min(float a, float b) { return a > b ? b : a; } inline float max(float a, float b) { return a > b ? a : b; } const float EPS = 1e-8; struct Point { float x, y; __device__ Point() {} __device__ Point(double _x, double _y) { x = _x, y = _y; } __device__ void set(float _x, float _y) { x = _x; y = _y; } __device__ Point operator+(const Point &b) const { return Point(x + b.x, y + b.y); } __device__ Point operator-(const Point &b) const { return Point(x - b.x, y - b.y); } }; inline float cross(const Point &a, const Point &b) { return a.x * b.y - a.y * b.x; } inline float cross(const Point &p1, const Point &p2, const Point &p0) { return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y); } inline int check_rect_cross(const Point &p1, const Point &p2, const Point &q1, const Point &q2) { int ret = min(p1.x, p2.x) <= max(q1.x, q2.x) && min(q1.x, q2.x) <= max(p1.x, p2.x) && min(p1.y, p2.y) <= max(q1.y, q2.y) && min(q1.y, q2.y) <= max(p1.y, p2.y); return ret; } inline int check_in_box2d(const float *box, const Point &p) { // params: (7) [x, y, z, dx, dy, dz, heading] const float MARGIN = 1e-2; float center_x = box[0], center_y = box[1]; float angle_cos = cos(-box[6]), angle_sin = sin(-box[6]); // rotate the point in the opposite direction of box float rot_x = (p.x - center_x) * angle_cos + (p.y - center_y) * (-angle_sin); float rot_y = (p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos; return (fabs(rot_x) < box[3] / 2 + MARGIN && fabs(rot_y) < box[4] / 2 + MARGIN); } inline int intersection(const Point &p1, const Point &p0, const Point &q1, const Point &q0, Point &ans) { // fast exclusion if (check_rect_cross(p0, p1, q0, q1) == 0) return 0; // check cross standing float s1 = cross(q0, p1, p0); float s2 = cross(p1, q1, p0); float s3 = cross(p0, q1, q0); float s4 = cross(q1, p1, q0); if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0; // calculate intersection of two lines float s5 = cross(q1, p1, p0); if (fabs(s5 - s1) > EPS) { ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1); ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1); } else { float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y; float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y; float D = a0 * b1 - a1 * b0; ans.x = (b0 * c1 - b1 * c0) / D; ans.y = (a1 * c0 - a0 * c1) / D; } return 1; } inline void rotate_around_center(const Point &center, const float angle_cos, const float angle_sin, Point &p) { float new_x = (p.x - center.x) * angle_cos + (p.y - center.y) * (-angle_sin) + center.x; float new_y = (p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y; p.set(new_x, new_y); } inline int point_cmp(const 
Point &a, const Point &b, const Point &center) { return atan2(a.y - center.y, a.x - center.x) > atan2(b.y - center.y, b.x - center.x); } inline float box_overlap(const float *box_a, const float *box_b) { // params: box_a (7) [x, y, z, dx, dy, dz, heading] // params: box_b (7) [x, y, z, dx, dy, dz, heading] // float a_x1 = box_a[0], a_y1 = box_a[1], a_x2 = box_a[2], a_y2 = // box_a[3], a_angle = box_a[4]; // float b_x1 = box_b[0], b_y1 = box_b[1], b_x2 = box_b[2], b_y2 = // box_b[3], b_angle = box_b[4]; float a_angle = box_a[6], b_angle = box_b[6]; float a_dx_half = box_a[3] / 2, b_dx_half = box_b[3] / 2, a_dy_half = box_a[4] / 2, b_dy_half = box_b[4] / 2; float a_x1 = box_a[0] - a_dx_half, a_y1 = box_a[1] - a_dy_half; float a_x2 = box_a[0] + a_dx_half, a_y2 = box_a[1] + a_dy_half; float b_x1 = box_b[0] - b_dx_half, b_y1 = box_b[1] - b_dy_half; float b_x2 = box_b[0] + b_dx_half, b_y2 = box_b[1] + b_dy_half; Point center_a(box_a[0], box_a[1]); Point center_b(box_b[0], box_b[1]); Point box_a_corners[5]; box_a_corners[0].set(a_x1, a_y1); box_a_corners[1].set(a_x2, a_y1); box_a_corners[2].set(a_x2, a_y2); box_a_corners[3].set(a_x1, a_y2); Point box_b_corners[5]; box_b_corners[0].set(b_x1, b_y1); box_b_corners[1].set(b_x2, b_y1); box_b_corners[2].set(b_x2, b_y2); box_b_corners[3].set(b_x1, b_y2); // get oriented corners float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle); float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle); for (int k = 0; k < 4; k++) { rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]); rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]); } box_a_corners[4] = box_a_corners[0]; box_b_corners[4] = box_b_corners[0]; // get intersection of lines Point cross_points[16]; Point poly_center; int cnt = 0, flag = 0; poly_center.set(0, 0); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { flag = intersection(box_a_corners[i + 1], box_a_corners[i], box_b_corners[j + 1], box_b_corners[j], cross_points[cnt]); if (flag) { poly_center = poly_center + cross_points[cnt]; cnt++; } } } // check corners for (int k = 0; k < 4; k++) { if (check_in_box2d(box_a, box_b_corners[k])) { poly_center = poly_center + box_b_corners[k]; cross_points[cnt] = box_b_corners[k]; cnt++; } if (check_in_box2d(box_b, box_a_corners[k])) { poly_center = poly_center + box_a_corners[k]; cross_points[cnt] = box_a_corners[k]; cnt++; } } poly_center.x /= cnt; poly_center.y /= cnt; // sort the points of polygon Point temp; for (int j = 0; j < cnt - 1; j++) { for (int i = 0; i < cnt - j - 1; i++) { if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)) { temp = cross_points[i]; cross_points[i] = cross_points[i + 1]; cross_points[i + 1] = temp; } } } // get the overlap areas float area = 0; for (int k = 0; k < cnt - 1; k++) { area += cross(cross_points[k] - cross_points[0], cross_points[k + 1] - cross_points[0]); } return fabs(area) / 2.0; } inline float iou_bev(const float *box_a, const float *box_b) { // params: box_a (7) [x, y, z, dx, dy, dz, heading] // params: box_b (7) [x, y, z, dx, dy, dz, heading] float sa = box_a[3] * box_a[4]; float sb = box_b[3] * box_b[4]; float s_overlap = box_overlap(box_a, box_b); return s_overlap / fmaxf(sa + sb - s_overlap, EPS); } std::vector<paddle::Tensor> boxes_iou_bev_cpu( const paddle::Tensor &boxes_a_tensor, const paddle::Tensor &boxes_b_tensor) { // params boxes_a_tensor: (N, 7) [x, y, z, dx, dy, dz, heading] // params boxes_b_tensor: (M, 7) [x, y, z, dx, dy, dz, heading] // params ans_iou_tensor: (N, M) 
int num_boxes_a = boxes_a_tensor.shape()[0]; int num_boxes_b = boxes_b_tensor.shape()[0]; const float *boxes_a = boxes_a_tensor.data<float>(); const float *boxes_b = boxes_b_tensor.data<float>(); auto ans_iou_tensor = paddle::empty({num_boxes_a, num_boxes_b}, paddle::DataType::FLOAT32, paddle::CPUPlace()); float *ans_iou = ans_iou_tensor.data<float>(); for (int i = 0; i < num_boxes_a; i++) { for (int j = 0; j < num_boxes_b; j++) { ans_iou[i * num_boxes_b + j] = iou_bev(boxes_a + i * 7, boxes_b + j * 7); } } return {ans_iou_tensor}; }
0
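iou_bev treats boxes as rotated rectangles in the BEV plane: rotate the corners about the center, collect edge intersections plus contained corners, sort them around the centroid, and take the polygon area via the shoelace formula. For testing, the same quantity can be cross-checked with shapely (an extra dependency the repo does not require; the function name is mine):

import numpy as np
from shapely.geometry import Polygon

def iou_bev_shapely(box_a, box_b):
    # box: [x, y, z, dx, dy, dz, heading]; the z extent is ignored in BEV IoU
    def corners(b):
        x, y, dx, dy, r = b[0], b[1], b[3], b[4], b[6]
        c, s = np.cos(r), np.sin(r)
        pts = np.array([[-dx / 2, -dy / 2], [dx / 2, -dy / 2],
                        [dx / 2, dy / 2], [-dx / 2, dy / 2]])
        return pts @ np.array([[c, s], [-s, c]]) + np.array([x, y])
    pa, pb = Polygon(corners(box_a)), Polygon(corners(box_b))
    inter = pa.intersection(pb).area
    return inter / max(pa.area + pb.area - inter, 1e-8)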
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/custom_ops
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/custom_ops/iou3d_nms/iou3d_nms.cpp
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* 3D IoU Calculation and Rotated NMS(modified from 2D NMS written by others) Written by Shaoshuai Shi All Rights Reserved 2019-2020. */ #include "iou3d_nms.h" #include <cuda.h> #include <cuda_runtime_api.h> #include <vector> #include "paddle/include/experimental/ext_all.h" #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) const int THREADS_PER_BLOCK_NMS = sizeof(int64_t) * 8; void BoxesOverlapLauncher(const cudaStream_t &stream, const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap); void BoxesIouBevLauncher(const cudaStream_t &stream, const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou); void NmsLauncher(const cudaStream_t &stream, const float *boxes, int64_t *mask, int boxes_num, float nms_overlap_thresh); void NmsNormalLauncher(const cudaStream_t &stream, const float *boxes, int64_t *mask, int boxes_num, float nms_overlap_thresh); std::vector<paddle::Tensor> boxes_overlap_bev_gpu( const paddle::Tensor &boxes_a, const paddle::Tensor &boxes_b) { // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading] // params ans_overlap: (N, M) int num_a = boxes_a.shape()[0]; int num_b = boxes_b.shape()[0]; const float *boxes_a_data = boxes_a.data<float>(); const float *boxes_b_data = boxes_b.data<float>(); auto ans_overlap = paddle::empty({num_a, num_b}, paddle::DataType::FLOAT32, paddle::GPUPlace()); float *ans_overlap_data = ans_overlap.data<float>(); BoxesOverlapLauncher(boxes_a.stream(), num_a, boxes_a_data, num_b, boxes_b_data, ans_overlap_data); return {ans_overlap}; } std::vector<paddle::Tensor> boxes_iou_bev_gpu( const paddle::Tensor &boxes_a_tensor, const paddle::Tensor &boxes_b_tensor) { // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading] // params ans_overlap: (N, M) int num_a = boxes_a_tensor.shape()[0]; int num_b = boxes_b_tensor.shape()[0]; const float *boxes_a_data = boxes_a_tensor.data<float>(); const float *boxes_b_data = boxes_b_tensor.data<float>(); auto ans_iou_tensor = paddle::empty({num_a, num_b}, paddle::DataType::FLOAT32, paddle::GPUPlace()); float *ans_iou_data = ans_iou_tensor.data<float>(); BoxesIouBevLauncher(boxes_a_tensor.stream(), num_a, boxes_a_data, num_b, boxes_b_data, ans_iou_data); return {ans_iou_tensor}; } std::vector<paddle::Tensor> nms_gpu(const paddle::Tensor &boxes, float nms_overlap_thresh) { // params boxes: (N, 7) [x, y, z, dx, dy, dz, heading] auto keep = paddle::empty({boxes.shape()[0]}, paddle::DataType::INT32, paddle::CPUPlace()); auto num_to_keep_tensor = paddle::empty({1}, paddle::DataType::INT32, paddle::CPUPlace()); int *num_to_keep_data = num_to_keep_tensor.data<int>(); int boxes_num = boxes.shape()[0]; const float *boxes_data = boxes.data<float>(); int *keep_data = keep.data<int>(); const int col_blocks = DIVUP(boxes_num, 
THREADS_PER_BLOCK_NMS); // int64_t *mask_data = NULL; // CHECK_ERROR(cudaMalloc((void**)&mask_data, boxes_num * col_blocks * // sizeof(int64_t))); auto mask = paddle::empty({boxes_num * col_blocks}, paddle::DataType::INT64, paddle::GPUPlace()); int64_t *mask_data = mask.data<int64_t>(); NmsLauncher(boxes.stream(), boxes_data, mask_data, boxes_num, nms_overlap_thresh); // std::vector<int64_t> mask_cpu(boxes_num * col_blocks); // CHECK_ERROR(cudaMemcpy(&mask_cpu[0], mask_data, boxes_num * col_blocks * // sizeof(int64_t), // cudaMemcpyDeviceToHost)); const paddle::Tensor mask_cpu_tensor = mask.copy_to(paddle::CPUPlace(), true); const int64_t *mask_cpu = mask_cpu_tensor.data<int64_t>(); // cudaFree(mask_data); int64_t remv_cpu[col_blocks]; memset(remv_cpu, 0, col_blocks * sizeof(int64_t)); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / THREADS_PER_BLOCK_NMS; int inblock = i % THREADS_PER_BLOCK_NMS; if (!(remv_cpu[nblock] & (1ULL << inblock))) { keep_data[num_to_keep++] = i; const int64_t *p = &mask_cpu[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv_cpu[j] |= p[j]; } } } num_to_keep_data[0] = num_to_keep; if (cudaSuccess != cudaGetLastError()) printf("Error!\n"); return {keep, num_to_keep_tensor}; } std::vector<paddle::Tensor> nms_normal_gpu(const paddle::Tensor &boxes, float nms_overlap_thresh) { // params boxes: (N, 7) [x, y, z, dx, dy, dz, heading] // params keep: (N) auto keep = paddle::empty({boxes.shape()[0]}, paddle::DataType::INT32, paddle::CPUPlace()); auto num_to_keep_tensor = paddle::empty({1}, paddle::DataType::INT32, paddle::CPUPlace()); int *num_to_keep_data = num_to_keep_tensor.data<int>(); int boxes_num = boxes.shape()[0]; const float *boxes_data = boxes.data<float>(); int *keep_data = keep.data<int>(); const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); // int64_t *mask_data = NULL; // CHECK_ERROR(cudaMalloc((void**)&mask_data, boxes_num * col_blocks * // sizeof(int64_t))); auto mask = paddle::empty({boxes_num * col_blocks}, paddle::DataType::INT64, paddle::GPUPlace()); int64_t *mask_data = mask.data<int64_t>(); NmsNormalLauncher(boxes.stream(), boxes_data, mask_data, boxes_num, nms_overlap_thresh); // int64_t mask_cpu[boxes_num * col_blocks]; // int64_t *mask_cpu = new int64_t [boxes_num * col_blocks]; // std::vector<int64_t> mask_cpu(boxes_num * col_blocks); // CHECK_ERROR(cudaMemcpy(&mask_cpu[0], mask_data, boxes_num * col_blocks * // sizeof(int64_t), // cudaMemcpyDeviceToHost)); // cudaFree(mask_data); const paddle::Tensor mask_cpu_tensor = mask.copy_to(paddle::CPUPlace(), true); const int64_t *mask_cpu = mask_cpu_tensor.data<int64_t>(); int64_t remv_cpu[col_blocks]; memset(remv_cpu, 0, col_blocks * sizeof(int64_t)); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / THREADS_PER_BLOCK_NMS; int inblock = i % THREADS_PER_BLOCK_NMS; if (!(remv_cpu[nblock] & (1ULL << inblock))) { keep_data[num_to_keep++] = i; const int64_t *p = &mask_cpu[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv_cpu[j] |= p[j]; } } } num_to_keep_data[0] = num_to_keep; if (cudaSuccess != cudaGetLastError()) { printf("Error!\n"); } return {keep, num_to_keep_tensor}; }
0
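nms_gpu computes a boxes_num x col_blocks overlap bitmask on the device (bit j of block blk in row i means box blk*64 + j overlaps box i beyond the threshold) and then runs the greedy suppression serially on the host. A Python rendering of that host-side decode loop, given the mask as a flat int64 array sorted by score (illustrative name):

def decode_nms_mask(mask, boxes_num, bits=64):
    # mask: flat (boxes_num * col_blocks,) int64 bitmask from the CUDA kernel
    col_blocks = (boxes_num + bits - 1) // bits
    remv = [0] * col_blocks
    keep = []
    for i in range(boxes_num):
        nblock, inblock = divmod(i, bits)
        if not (remv[nblock] >> inblock) & 1:        # box i not suppressed yet
            keep.append(i)
            for j in range(nblock, col_blocks):      # suppress everything i overlaps
                remv[j] |= int(mask[i * col_blocks + j]) & ((1 << bits) - 1)
    return keep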
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/custom_ops
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/custom_ops/iou3d_nms/iou3d_nms_kernel.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* 3D IoU Calculation and Rotated NMS(modified from 2D NMS written by others) Written by Shaoshuai Shi All Rights Reserved 2019-2020. */ #include <stdio.h> #define THREADS_PER_BLOCK 16 #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) // #define DEBUG const int THREADS_PER_BLOCK_NMS = sizeof(int64_t) * 8; const float EPS = 1e-8; struct Point { float x, y; __device__ Point() {} __device__ Point(double _x, double _y) { x = _x, y = _y; } __device__ void set(float _x, float _y) { x = _x; y = _y; } __device__ Point operator+(const Point &b) const { return Point(x + b.x, y + b.y); } __device__ Point operator-(const Point &b) const { return Point(x - b.x, y - b.y); } }; __device__ inline float cross(const Point &a, const Point &b) { return a.x * b.y - a.y * b.x; } __device__ inline float cross(const Point &p1, const Point &p2, const Point &p0) { return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y); } __device__ int check_rect_cross(const Point &p1, const Point &p2, const Point &q1, const Point &q2) { int ret = min(p1.x, p2.x) <= max(q1.x, q2.x) && min(q1.x, q2.x) <= max(p1.x, p2.x) && min(p1.y, p2.y) <= max(q1.y, q2.y) && min(q1.y, q2.y) <= max(p1.y, p2.y); return ret; } __device__ inline int check_in_box2d(const float *box, const Point &p) { // params: (7) [x, y, z, dx, dy, dz, heading] const float MARGIN = 1e-2; float center_x = box[0], center_y = box[1]; float angle_cos = cos(-box[6]), angle_sin = sin(-box[6]); // rotate the point in the opposite direction of box float rot_x = (p.x - center_x) * angle_cos + (p.y - center_y) * (-angle_sin); float rot_y = (p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos; return (fabs(rot_x) < box[3] / 2 + MARGIN && fabs(rot_y) < box[4] / 2 + MARGIN); } __device__ inline int intersection(const Point &p1, const Point &p0, const Point &q1, const Point &q0, Point &ans) { // fast exclusion if (check_rect_cross(p0, p1, q0, q1) == 0) return 0; // check cross standing float s1 = cross(q0, p1, p0); float s2 = cross(p1, q1, p0); float s3 = cross(p0, q1, q0); float s4 = cross(q1, p1, q0); if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0; // calculate intersection of two lines float s5 = cross(q1, p1, p0); if (fabs(s5 - s1) > EPS) { ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1); ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1); } else { float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y; float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y; float D = a0 * b1 - a1 * b0; ans.x = (b0 * c1 - b1 * c0) / D; ans.y = (a1 * c0 - a0 * c1) / D; } return 1; } __device__ inline void rotate_around_center(const Point &center, const float angle_cos, const float angle_sin, Point &p) { float new_x = (p.x - center.x) * angle_cos + (p.y - center.y) * (-angle_sin) + center.x; float new_y = (p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y; p.set(new_x, new_y); } __device__ inline int point_cmp(const Point &a, 
const Point &b, const Point &center) { return atan2(a.y - center.y, a.x - center.x) > atan2(b.y - center.y, b.x - center.x); } __device__ inline float box_overlap(const float *box_a, const float *box_b) { // params box_a: [x, y, z, dx, dy, dz, heading] // params box_b: [x, y, z, dx, dy, dz, heading] float a_angle = box_a[6], b_angle = box_b[6]; float a_dx_half = box_a[3] / 2, b_dx_half = box_b[3] / 2, a_dy_half = box_a[4] / 2, b_dy_half = box_b[4] / 2; float a_x1 = box_a[0] - a_dx_half, a_y1 = box_a[1] - a_dy_half; float a_x2 = box_a[0] + a_dx_half, a_y2 = box_a[1] + a_dy_half; float b_x1 = box_b[0] - b_dx_half, b_y1 = box_b[1] - b_dy_half; float b_x2 = box_b[0] + b_dx_half, b_y2 = box_b[1] + b_dy_half; Point center_a(box_a[0], box_a[1]); Point center_b(box_b[0], box_b[1]); #ifdef DEBUG printf( "a: (%.3f, %.3f, %.3f, %.3f, %.3f), b: (%.3f, %.3f, %.3f, %.3f, %.3f)\n", a_x1, a_y1, a_x2, a_y2, a_angle, b_x1, b_y1, b_x2, b_y2, b_angle); printf("center a: (%.3f, %.3f), b: (%.3f, %.3f)\n", center_a.x, center_a.y, center_b.x, center_b.y); #endif Point box_a_corners[5]; box_a_corners[0].set(a_x1, a_y1); box_a_corners[1].set(a_x2, a_y1); box_a_corners[2].set(a_x2, a_y2); box_a_corners[3].set(a_x1, a_y2); Point box_b_corners[5]; box_b_corners[0].set(b_x1, b_y1); box_b_corners[1].set(b_x2, b_y1); box_b_corners[2].set(b_x2, b_y2); box_b_corners[3].set(b_x1, b_y2); // get oriented corners float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle); float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle); for (int k = 0; k < 4; k++) { #ifdef DEBUG printf("before corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y); #endif rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]); rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]); #ifdef DEBUG printf("corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y); #endif } box_a_corners[4] = box_a_corners[0]; box_b_corners[4] = box_b_corners[0]; // get intersection of lines Point cross_points[16]; Point poly_center; int cnt = 0, flag = 0; poly_center.set(0, 0); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { flag = intersection(box_a_corners[i + 1], box_a_corners[i], box_b_corners[j + 1], box_b_corners[j], cross_points[cnt]); if (flag) { poly_center = poly_center + cross_points[cnt]; cnt++; #ifdef DEBUG printf( "Cross points (%.3f, %.3f): a(%.3f, %.3f)->(%.3f, %.3f), b(%.3f, " "%.3f)->(%.3f, %.3f) \n", cross_points[cnt - 1].x, cross_points[cnt - 1].y, box_a_corners[i].x, box_a_corners[i].y, box_a_corners[i + 1].x, box_a_corners[i + 1].y, box_b_corners[i].x, box_b_corners[i].y, box_b_corners[i + 1].x, box_b_corners[i + 1].y); #endif } } } // check corners for (int k = 0; k < 4; k++) { if (check_in_box2d(box_a, box_b_corners[k])) { poly_center = poly_center + box_b_corners[k]; cross_points[cnt] = box_b_corners[k]; cnt++; #ifdef DEBUG printf("b corners in a: corner_b(%.3f, %.3f)", cross_points[cnt - 1].x, cross_points[cnt - 1].y); #endif } if (check_in_box2d(box_b, box_a_corners[k])) { poly_center = poly_center + box_a_corners[k]; cross_points[cnt] = box_a_corners[k]; cnt++; #ifdef DEBUG printf("a corners in b: corner_a(%.3f, %.3f)", cross_points[cnt - 1].x, cross_points[cnt - 1].y); #endif } } poly_center.x /= cnt; poly_center.y /= cnt; // sort the points of polygon Point temp; for (int j = 0; j < cnt - 1; j++) { for (int i = 0; i < cnt - j - 1; i++) { if 
(point_cmp(cross_points[i], cross_points[i + 1], poly_center)) { temp = cross_points[i]; cross_points[i] = cross_points[i + 1]; cross_points[i + 1] = temp; } } } #ifdef DEBUG printf("cnt=%d\n", cnt); for (int i = 0; i < cnt; i++) { printf("All cross point %d: (%.3f, %.3f)\n", i, cross_points[i].x, cross_points[i].y); } #endif // get the overlap areas float area = 0; for (int k = 0; k < cnt - 1; k++) { area += cross(cross_points[k] - cross_points[0], cross_points[k + 1] - cross_points[0]); } return fabs(area) / 2.0; } __device__ inline float iou_bev(const float *box_a, const float *box_b) { // params box_a: [x, y, z, dx, dy, dz, heading] // params box_b: [x, y, z, dx, dy, dz, heading] float sa = box_a[3] * box_a[4]; float sb = box_b[3] * box_b[4]; float s_overlap = box_overlap(box_a, box_b); return s_overlap / fmaxf(sa + sb - s_overlap, EPS); } __global__ void boxes_overlap_kernel(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap) { // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading] const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y; const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (a_idx >= num_a || b_idx >= num_b) { return; } const float *cur_box_a = boxes_a + a_idx * 7; const float *cur_box_b = boxes_b + b_idx * 7; float s_overlap = box_overlap(cur_box_a, cur_box_b); ans_overlap[a_idx * num_b + b_idx] = s_overlap; } __global__ void boxes_iou_bev_kernel(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou) { // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading] const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y; const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (a_idx >= num_a || b_idx >= num_b) { return; } const float *cur_box_a = boxes_a + a_idx * 7; const float *cur_box_b = boxes_b + b_idx * 7; float cur_iou_bev = iou_bev(cur_box_a, cur_box_b); ans_iou[a_idx * num_b + b_idx] = cur_iou_bev; } __global__ void nms_kernel(const int boxes_num, const float nms_overlap_thresh, const float *boxes, int64_t *mask) { // params: boxes (N, 7) [x, y, z, dx, dy, dz, heading] // params: mask (N, N/THREADS_PER_BLOCK_NMS) const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 7 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 0]; block_boxes[threadIdx.x * 7 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 1]; block_boxes[threadIdx.x * 7 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 2]; block_boxes[threadIdx.x * 7 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 3]; block_boxes[threadIdx.x * 7 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 4]; block_boxes[threadIdx.x * 7 + 5] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 5]; block_boxes[threadIdx.x * 7 + 6] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 6]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x; const float *cur_box = 
boxes + cur_box_idx * 7; int i = 0; int64_t t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (iou_bev(cur_box, block_boxes + i * 7) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); mask[cur_box_idx * col_blocks + col_start] = t; } } __device__ inline float iou_normal(float const *const a, float const *const b) { // params: a: [x, y, z, dx, dy, dz, heading] // params: b: [x, y, z, dx, dy, dz, heading] float left = fmaxf(a[0] - a[3] / 2, b[0] - b[3] / 2), right = fminf(a[0] + a[3] / 2, b[0] + b[3] / 2); float top = fmaxf(a[1] - a[4] / 2, b[1] - b[4] / 2), bottom = fminf(a[1] + a[4] / 2, b[1] + b[4] / 2); float width = fmaxf(right - left, 0.f), height = fmaxf(bottom - top, 0.f); float interS = width * height; float Sa = a[3] * a[4]; float Sb = b[3] * b[4]; return interS / fmaxf(Sa + Sb - interS, EPS); } __global__ void nms_normal_kernel(const int boxes_num, const float nms_overlap_thresh, const float *boxes, int64_t *mask) { // params: boxes (N, 7) [x, y, z, dx, dy, dz, heading] // params: mask (N, N/THREADS_PER_BLOCK_NMS) const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 7 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 0]; block_boxes[threadIdx.x * 7 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 1]; block_boxes[threadIdx.x * 7 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 2]; block_boxes[threadIdx.x * 7 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 3]; block_boxes[threadIdx.x * 7 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 4]; block_boxes[threadIdx.x * 7 + 5] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 5]; block_boxes[threadIdx.x * 7 + 6] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 6]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x; const float *cur_box = boxes + cur_box_idx * 7; int i = 0; int64_t t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (iou_normal(cur_box, block_boxes + i * 7) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); mask[cur_box_idx * col_blocks + col_start] = t; } } void BoxesOverlapLauncher(const cudaStream_t &stream, const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap) { dim3 blocks( DIVUP(num_b, THREADS_PER_BLOCK), DIVUP(num_a, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK); boxes_overlap_kernel<<<blocks, threads, 0, stream>>>(num_a, boxes_a, num_b, boxes_b, ans_overlap); #ifdef DEBUG cudaDeviceSynchronize(); // for using printf in kernel function #endif } void BoxesIouBevLauncher(const cudaStream_t &stream, const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou) { dim3 blocks( DIVUP(num_b, THREADS_PER_BLOCK), DIVUP(num_a, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 
threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK); boxes_iou_bev_kernel<<<blocks, threads, 0, stream>>>(num_a, boxes_a, num_b, boxes_b, ans_iou); #ifdef DEBUG cudaDeviceSynchronize(); // for using printf in kernel function #endif } void NmsLauncher(const cudaStream_t &stream, const float *boxes, int64_t *mask, int boxes_num, float nms_overlap_thresh) { dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS), DIVUP(boxes_num, THREADS_PER_BLOCK_NMS)); dim3 threads(THREADS_PER_BLOCK_NMS); nms_kernel<<<blocks, threads, 0, stream>>>(boxes_num, nms_overlap_thresh, boxes, mask); } void NmsNormalLauncher(const cudaStream_t &stream, const float *boxes, int64_t *mask, int boxes_num, float nms_overlap_thresh) { dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS), DIVUP(boxes_num, THREADS_PER_BLOCK_NMS)); dim3 threads(THREADS_PER_BLOCK_NMS); nms_normal_kernel<<<blocks, threads, 0, stream>>>( boxes_num, nms_overlap_thresh, boxes, mask); }
0
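A note on the suppression mask that nms_kernel and nms_normal_kernel above produce: each box owns one 64-bit word per column block (THREADS_PER_BLOCK_NMS = sizeof(int64_t) * 8 = 64), and bit i of mask[box * col_blocks + block] is set when the box overlaps box (block * 64 + i) above the threshold. The snippet below is a minimal host-side sketch, not part of the original file: it fakes a tiny mask by hand and decodes it the same greedy way the host code in iou3d_nms.cpp does.

// Hypothetical host-side sketch (not from the repo): decodes the bitmask
// layout written by nms_kernel. The mask contents here are hand-made.
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  const int kBits = 64;  // THREADS_PER_BLOCK_NMS = sizeof(int64_t) * 8
  const int boxes_num = 3;
  const int col_blocks = (boxes_num + kBits - 1) / kBits;  // DIVUP(3, 64) == 1

  // Pretend the kernel found that box 0 suppresses box 1 (IoU above threshold).
  std::vector<int64_t> mask(boxes_num * col_blocks, 0);
  mask[0 * col_blocks + 0] = 1ULL << 1;  // bit 1 set in box 0's word

  // Greedy decode, mirroring the scan loop in iou3d_nms.cpp.
  std::vector<int64_t> remv(col_blocks, 0);
  for (int i = 0; i < boxes_num; ++i) {
    int nblock = i / kBits, inblock = i % kBits;
    if (!(remv[nblock] & (1ULL << inblock))) {
      std::printf("keep box %d\n", i);  // prints 0 and 2; box 1 is suppressed
      for (int j = nblock; j < col_blocks; ++j) remv[j] |= mask[i * col_blocks + j];
    }
  }
  return 0;
}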
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/custom_ops
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/custom_ops/iou3d_nms/iou3d_nms.h
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef IOU3D_NMS_H #define IOU3D_NMS_H #include <cuda.h> #include <cuda_runtime_api.h> #include <vector> #include "paddle/include/experimental/ext_all.h" std::vector<paddle::Tensor> boxes_overlap_bev_gpu( const paddle::Tensor &boxes_a, const paddle::Tensor &boxes_b); std::vector<paddle::Tensor> boxes_iou_bev_gpu( const paddle::Tensor &boxes_a_tensor, const paddle::Tensor &boxes_b_tensor); std::vector<paddle::Tensor> nms_gpu(const paddle::Tensor &boxes, float nms_overlap_thresh); std::vector<paddle::Tensor> nms_normal_gpu(const paddle::Tensor &boxes, float nms_overlap_thresh); #endif
0
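For orientation, here is a hedged sketch of driving the nms_gpu entry point declared above from host code. It is composed only from calls that appear elsewhere in these files (paddle::empty, copy_to, data<T>()); the helper name run_rotated_nms and the example flow are illustrative assumptions, not repo API.

// Hypothetical usage sketch for nms_gpu; not part of the repo. Builds an
// (N, 7) [x, y, z, dx, dy, dz, heading] tensor on CPU, moves it to the GPU,
// runs rotated NMS, and reads the kept indices (which nms_gpu returns on CPU).
#include <algorithm>
#include <vector>
#include "iou3d_nms.h"

std::vector<int> run_rotated_nms(const std::vector<float> &flat_boxes,
                                 float thresh) {
  int n = static_cast<int>(flat_boxes.size() / 7);
  auto cpu_boxes = paddle::empty({n, 7}, paddle::DataType::FLOAT32,
                                 paddle::CPUPlace());
  std::copy(flat_boxes.begin(), flat_boxes.end(), cpu_boxes.data<float>());
  auto gpu_boxes = cpu_boxes.copy_to(paddle::GPUPlace(), /*blocking=*/true);

  auto outs = nms_gpu(gpu_boxes, thresh);  // {keep (N), num_to_keep (1)}
  int num_to_keep = outs[1].data<int>()[0];
  const int *keep = outs[0].data<int>();
  return std::vector<int>(keep, keep + num_to_keep);
}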
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/custom_ops
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/custom_ops/iou3d_nms/iou3d_cpu.h
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef IOU3D_CPU_H #define IOU3D_CPU_H #include <cuda.h> #include <cuda_runtime_api.h> #include <vector> #include "paddle/include/experimental/ext_all.h" std::vector<paddle::Tensor> boxes_iou_bev_cpu( const paddle::Tensor& boxes_a_tensor, const paddle::Tensor& boxes_b_tensor); #endif
0
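A small worked check of the boxes_iou_bev_cpu contract declared above, with assumed box values: two axis-aligned 2 x 4 boxes whose centers sit 1 m apart in x overlap in a 1 x 4 strip, so the BEV IoU is 4 / (8 + 8 - 4) = 1/3. The snippet is a hypothetical sanity test, not part of the repo.

// Hypothetical sanity check for boxes_iou_bev_cpu; not part of the repo.
#include <algorithm>
#include <cstdio>
#include "iou3d_cpu.h"

int main() {
  // [x, y, z, dx, dy, dz, heading]; z and dz are ignored by the BEV IoU.
  float a[7] = {0.f, 0.f, 0.f, 2.f, 4.f, 1.5f, 0.f};
  float b[7] = {1.f, 0.f, 0.f, 2.f, 4.f, 1.5f, 0.f};

  auto boxes_a = paddle::empty({1, 7}, paddle::DataType::FLOAT32, paddle::CPUPlace());
  auto boxes_b = paddle::empty({1, 7}, paddle::DataType::FLOAT32, paddle::CPUPlace());
  std::copy(a, a + 7, boxes_a.data<float>());
  std::copy(b, b + 7, boxes_b.data<float>());

  auto iou = boxes_iou_bev_cpu(boxes_a, boxes_b)[0];
  std::printf("IoU = %.4f\n", iou.data<float>()[0]);  // expected ~0.3333
  return 0;
}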
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/custom_ops
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/custom_ops/iou3d_nms/iou3d_nms_api.cpp
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <cuda.h> #include <cuda_runtime_api.h> #include <vector> #include "iou3d_cpu.h" #include "iou3d_nms.h" #include "paddle/include/experimental/ext_all.h" std::vector<paddle::DataType> BoxesIouBevCpuInferDtype( paddle::DataType boxes_a_dtype, paddle::DataType boxes_b_dtype) { return {boxes_a_dtype}; } std::vector<std::vector<int64_t>> BoxesIouBevCpuInferShape( std::vector<int64_t> boxes_a_shape, std::vector<int64_t> boxes_b_shape) { return {{boxes_a_shape[0], boxes_b_shape[0]}}; } std::vector<paddle::DataType> NmsInferDtype(paddle::DataType boxes_dtype) { return {paddle::DataType::INT64, paddle::DataType::INT64}; } std::vector<std::vector<int64_t>> NmsInferShape( std::vector<int64_t> boxes_shape) { return {{boxes_shape[0]}, {1}}; } std::vector<paddle::DataType> NmsNormalInferDtype( paddle::DataType boxes_dtype) { return {paddle::DataType::INT64, paddle::DataType::INT64}; } std::vector<std::vector<int64_t>> NmsNormalInferShape( std::vector<int64_t> boxes_shape) { return {{boxes_shape[0]}, {1}}; } std::vector<paddle::DataType> BoxesIouBevGpuInferDtype( paddle::DataType boxes_a_dtype, paddle::DataType boxes_b_dtype) { return {boxes_a_dtype}; } std::vector<std::vector<int64_t>> BoxesIouBevGpuInferShape( std::vector<int64_t> boxes_a_shape, std::vector<int64_t> boxes_b_shape) { return {{boxes_a_shape[0], boxes_b_shape[0]}}; } std::vector<paddle::DataType> BoxesOverlapBevGpuInferDtype( paddle::DataType boxes_a_dtype, paddle::DataType boxes_b_dtype) { return {boxes_a_dtype}; } std::vector<std::vector<int64_t>> BoxesOverlapBevGpuInferShape( std::vector<int64_t> boxes_a_shape, std::vector<int64_t> boxes_b_shape) { return {{boxes_a_shape[0], boxes_b_shape[0]}}; } PD_BUILD_OP(boxes_iou_bev_cpu) .Inputs({"boxes_a_tensor", " boxes_b_tensor"}) .Outputs({"ans_iou_tensor"}) .SetKernelFn(PD_KERNEL(boxes_iou_bev_cpu)) .SetInferDtypeFn(PD_INFER_DTYPE(BoxesIouBevCpuInferDtype)) .SetInferShapeFn(PD_INFER_SHAPE(BoxesIouBevCpuInferShape)); PD_BUILD_OP(boxes_iou_bev_gpu) .Inputs({"boxes_a_tensor", " boxes_b_tensor"}) .Outputs({"ans_iou_tensor"}) .SetKernelFn(PD_KERNEL(boxes_iou_bev_gpu)) .SetInferDtypeFn(PD_INFER_DTYPE(BoxesIouBevGpuInferDtype)) .SetInferShapeFn(PD_INFER_SHAPE(BoxesIouBevGpuInferShape)); PD_BUILD_OP(boxes_overlap_bev_gpu) .Inputs({"boxes_a", " boxes_b"}) .Outputs({"ans_overlap"}) .SetKernelFn(PD_KERNEL(boxes_overlap_bev_gpu)) .SetInferDtypeFn(PD_INFER_DTYPE(BoxesOverlapBevGpuInferDtype)) .SetInferShapeFn(PD_INFER_SHAPE(BoxesOverlapBevGpuInferShape)); PD_BUILD_OP(nms_gpu) .Inputs({"boxes"}) .Outputs({"keep", "num_to_keep"}) .Attrs({"nms_overlap_thresh: float"}) .SetKernelFn(PD_KERNEL(nms_gpu)) .SetInferDtypeFn(PD_INFER_DTYPE(NmsInferDtype)) .SetInferShapeFn(PD_INFER_SHAPE(NmsInferShape)); PD_BUILD_OP(nms_normal_gpu) .Inputs({"boxes"}) .Outputs({"keep", "num_to_keep"}) .Attrs({"nms_overlap_thresh: float"}) .SetInferShapeFn(PD_INFER_SHAPE(NmsNormalInferShape)) 
.SetKernelFn(PD_KERNEL(nms_normal_gpu)) .SetInferDtypeFn(PD_INFER_DTYPE(NmsNormalInferDtype));
0
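The PD_BUILD_OP registrations above split each op into three callbacks: the kernel (SetKernelFn), the output dtypes (SetInferDtypeFn), and the output shapes (SetInferShapeFn), so the framework can allocate outputs before the kernel runs. As a quick illustration of the shape contracts only, the hypothetical snippet below echoes what NmsInferShape and BoxesIouBevGpuInferShape declare for sample inputs; it is not part of the build.

// Hypothetical illustration of the InferShape contracts registered above.
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  std::vector<int64_t> boxes_shape = {128, 7};  // N = 128 boxes
  // NmsInferShape: outputs {keep: {N}, num_to_keep: {1}}
  std::printf("nms_gpu -> keep:{%lld}, num_to_keep:{1}\n",
              static_cast<long long>(boxes_shape[0]));

  std::vector<int64_t> a = {128, 7}, b = {64, 7};
  // BoxesIouBevGpuInferShape: output {{N, M}}
  std::printf("boxes_iou_bev_gpu -> ans_iou:{%lld, %lld}\n",
              static_cast<long long>(a[0]), static_cast<long long>(b[0]));
  return 0;
}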
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/cmake
apollo_public_repos/apollo-model-centerpoint/deploy/pv_rcnn/cpp/cmake/external/boost.cmake
include(ExternalProject) set(BOOST_PROJECT "extern_boost") # To release PaddlePaddle as a pip package, we have to follow the # manylinux1 standard, which features as old Linux kernels and # compilers as possible and recommends CentOS 5. Indeed, the earliest # CentOS version that works with NVIDIA CUDA is CentOS 6. And a new # version of boost, say, 1.66.0, doesn't build on CentOS 6. We # checked that the devtools package of CentOS 6 installs boost 1.41.0. # So we use 1.41.0 here. set(BOOST_VER "1.41.0") set(BOOST_TAR "boost_1_41_0" CACHE STRING "" FORCE) set(BOOST_URL "http://paddlepaddledeps.bj.bcebos.com/${BOOST_TAR}.tar.gz" CACHE STRING "" FORCE) MESSAGE(STATUS "BOOST_TAR: ${BOOST_TAR}, BOOST_URL: ${BOOST_URL}") set(BOOST_SOURCES_DIR ${THIRD_PARTY_PATH}/boost) set(BOOST_DOWNLOAD_DIR "${BOOST_SOURCES_DIR}/src/${BOOST_PROJECT}") set(BOOST_INCLUDE_DIR "${BOOST_DOWNLOAD_DIR}" CACHE PATH "boost include directory." FORCE) set_directory_properties(PROPERTIES CLEAN_NO_CUSTOM 1) include_directories(${BOOST_INCLUDE_DIR}) ExternalProject_Add( ${BOOST_PROJECT} ${EXTERNAL_PROJECT_LOG_ARGS} DOWNLOAD_DIR ${BOOST_DOWNLOAD_DIR} URL ${BOOST_URL} DOWNLOAD_NO_PROGRESS 1 PREFIX ${BOOST_SOURCES_DIR} CONFIGURE_COMMAND "" BUILD_COMMAND "" INSTALL_COMMAND "" UPDATE_COMMAND "" ) if (${CMAKE_VERSION} VERSION_LESS "3.3.0" OR NOT WIN32) set(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/boost_dummy.c) file(WRITE ${dummyfile} "const char *dummy = \"${dummyfile}\";") add_library(boost STATIC ${dummyfile}) else() add_library(boost INTERFACE) endif() add_dependencies(boost ${BOOST_PROJECT}) set(Boost_INCLUDE_DIR ${BOOST_INCLUDE_DIR})
0
apollo_public_repos/apollo-model-centerpoint/deploy/pointpillars
apollo_public_repos/apollo-model-centerpoint/deploy/pointpillars/python/infer.py
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import numba import numpy as np import paddle from paddle.inference import Config, create_predictor from paddle3d.ops.iou3d_nms_cuda import nms_gpu def parse_args(): parser = argparse.ArgumentParser() parser.add_argument( "--model_file", type=str, help="Model filename. Specify this when your model is a combined model.", required=True) parser.add_argument( "--params_file", type=str, help= "Parameter filename. Specify this when your model is a combined model.", required=True) parser.add_argument( '--lidar_file', type=str, help='The lidar path.', required=True) parser.add_argument( "--num_point_dim", type=int, default=4, help="Dimension of a point in the lidar file.") parser.add_argument( "--point_cloud_range", dest='point_cloud_range', nargs='+', help="Range of point cloud for voxelize operation.", type=float, default=None) parser.add_argument( "--voxel_size", dest='voxel_size', nargs='+', help="Size of voxels for voxelize operation.", type=float, default=None) parser.add_argument( "--max_points_in_voxel", type=int, default=100, help="Maximum number of points in a voxel.") parser.add_argument( "--max_voxel_num", type=int, default=12000, help="Maximum number of voxels.") parser.add_argument("--gpu_id", type=int, default=0, help="GPU card id.") parser.add_argument( "--use_trt", type=int, default=0, help="Whether to use tensorrt to accelerate when using gpu.") parser.add_argument( "--trt_precision", type=int, default=0, help="Precision type of tensorrt, 0: kFloat32, 1: kHalf.") parser.add_argument( "--trt_use_static", type=int, default=0, help="Whether to load the tensorrt graph optimization from a disk path."
) parser.add_argument( "--trt_static_dir", type=str, help="Path of a tensorrt graph optimization directory.") parser.add_argument( "--collect_shape_info", type=int, default=0, help="Whether to collect dynamic shape before using tensorrt.") parser.add_argument( "--dynamic_shape_file", type=str, default="", help="Path of a dynamic shape file for tensorrt.") return parser.parse_args() def read_point(file_path, num_point_dim): points = np.fromfile(file_path, np.float32).reshape(-1, num_point_dim) points = points[:, :4] return points @numba.jit(nopython=True) def _points_to_voxel(points, voxel_size, point_cloud_range, grid_size, voxels, coords, num_points_per_voxel, grid_idx_to_voxel_idx, max_points_in_voxel, max_voxel_num): num_voxels = 0 num_points = points.shape[0] # x, y, z coord = np.zeros(shape=(3, ), dtype=np.int32) for point_idx in range(num_points): outside = False for i in range(3): coord[i] = np.floor( (points[point_idx, i] - point_cloud_range[i]) / voxel_size[i]) if coord[i] < 0 or coord[i] >= grid_size[i]: outside = True break if outside: continue voxel_idx = grid_idx_to_voxel_idx[coord[2], coord[1], coord[0]] if voxel_idx == -1: voxel_idx = num_voxels if num_voxels >= max_voxel_num: continue num_voxels += 1 grid_idx_to_voxel_idx[coord[2], coord[1], coord[0]] = voxel_idx coords[voxel_idx, 0:3] = coord[::-1] curr_num_point = num_points_per_voxel[voxel_idx] if curr_num_point < max_points_in_voxel: voxels[voxel_idx, curr_num_point] = points[point_idx] num_points_per_voxel[voxel_idx] = curr_num_point + 1 return num_voxels def hardvoxelize(points, point_cloud_range, voxel_size, max_points_in_voxel, max_voxel_num): num_points, num_point_dim = points.shape[0:2] point_cloud_range = np.array(point_cloud_range) voxel_size = np.array(voxel_size) voxels = np.zeros((max_voxel_num, max_points_in_voxel, num_point_dim), dtype=points.dtype) coords = np.zeros((max_voxel_num, 3), dtype=np.int32) num_points_per_voxel = np.zeros((max_voxel_num, ), dtype=np.int32) grid_size = np.round((point_cloud_range[3:6] - point_cloud_range[0:3]) / voxel_size).astype('int32') grid_size_x, grid_size_y, grid_size_z = grid_size grid_idx_to_voxel_idx = np.full((grid_size_z, grid_size_y, grid_size_x), -1, dtype=np.int32) num_voxels = _points_to_voxel(points, voxel_size, point_cloud_range, grid_size, voxels, coords, num_points_per_voxel, grid_idx_to_voxel_idx, max_points_in_voxel, max_voxel_num) voxels = voxels[:num_voxels] coords = coords[:num_voxels] num_points_per_voxel = num_points_per_voxel[:num_voxels] return voxels, coords, num_points_per_voxel def preprocess(file_path, num_point_dim, point_cloud_range, voxel_size, max_points_in_voxel, max_voxel_num): points = read_point(file_path, num_point_dim) voxels, coords, num_points_per_voxel = hardvoxelize( points, point_cloud_range, voxel_size, max_points_in_voxel, max_voxel_num) return voxels, coords, num_points_per_voxel def init_predictor(model_file, params_file, gpu_id=0, use_trt=False, trt_precision=0, trt_use_static=False, trt_static_dir=None, collect_shape_info=False, dynamic_shape_file=None): config = Config(model_file, params_file) config.enable_memory_optim() config.enable_use_gpu(1000, gpu_id) if use_trt: precision_mode = paddle.inference.PrecisionType.Float32 if trt_precision == 1: precision_mode = paddle.inference.PrecisionType.Half config.enable_tensorrt_engine( workspace_size=1 << 30, max_batch_size=1, min_subgraph_size=10, precision_mode=precision_mode, use_static=trt_use_static, use_calib_mode=False) if collect_shape_info: 
config.collect_shape_range_info(dynamic_shape_file) else: config.enable_tuned_tensorrt_dynamic_shape(dynamic_shape_file, True) if trt_use_static: config.set_optim_cache_dir(trt_static_dir) predictor = create_predictor(config) return predictor def parse_result(box3d_lidar, label_preds, scores): num_bbox3d, bbox3d_dims = box3d_lidar.shape for box_idx in range(num_bbox3d): # filter fake results: score = -1 if scores[box_idx] < 0: continue if bbox3d_dims == 7: print( "Score: {} Label: {} Box(x_c, y_c, z_c, w, l, h, -rot): {} {} {} {} {} {} {}" .format(scores[box_idx], label_preds[box_idx], box3d_lidar[box_idx, 0], box3d_lidar[box_idx, 1], box3d_lidar[box_idx, 2], box3d_lidar[box_idx, 3], box3d_lidar[box_idx, 4], box3d_lidar[box_idx, 5], box3d_lidar[box_idx, 6])) def run(predictor, voxels, coords, num_points_per_voxel): input_names = predictor.get_input_names() for i, name in enumerate(input_names): input_tensor = predictor.get_input_handle(name) if name == "voxels": input_tensor.reshape(voxels.shape) input_tensor.copy_from_cpu(voxels.copy()) elif name == "coords": input_tensor.reshape(coords.shape) input_tensor.copy_from_cpu(coords.copy()) elif name == "num_points_per_voxel": input_tensor.reshape(num_points_per_voxel.shape) input_tensor.copy_from_cpu(num_points_per_voxel.copy()) # do the inference predictor.run() # get out data from output tensor output_names = predictor.get_output_names() for i, name in enumerate(output_names): output_tensor = predictor.get_output_handle(name) if i == 0: box3d_lidar = output_tensor.copy_to_cpu() elif i == 1: label_preds = output_tensor.copy_to_cpu() elif i == 2: scores = output_tensor.copy_to_cpu() return box3d_lidar, label_preds, scores def main(args): predictor = init_predictor(args.model_file, args.params_file, args.gpu_id, args.use_trt, args.trt_precision, args.trt_use_static, args.trt_static_dir, args.collect_shape_info, args.dynamic_shape_file) voxels, coords, num_points_per_voxel = preprocess( args.lidar_file, args.num_point_dim, args.point_cloud_range, args.voxel_size, args.max_points_in_voxel, args.max_voxel_num) box3d_lidar, label_preds, scores = run(predictor, voxels, coords, num_points_per_voxel) parse_result(box3d_lidar, label_preds, scores) if __name__ == '__main__': args = parse_args() main(args)
0
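To make the voxelization arithmetic in _points_to_voxel and hardvoxelize above concrete, the hypothetical C++ snippet below runs the same round/floor computations for one point. The range and voxel size are assumed KITTI-like example values, not defaults taken from this repo's configs.

// Hypothetical illustration of the voxel-coordinate math used above;
// the range and voxel size are assumed example values.
#include <cmath>
#include <cstdio>

int main() {
  const float range_min[3] = {0.f, -39.68f, -3.f};  // x_min, y_min, z_min
  const float range_max[3] = {69.12f, 39.68f, 1.f};
  const float voxel_size[3] = {0.16f, 0.16f, 4.f};

  int grid[3];
  for (int i = 0; i < 3; ++i)  // same round() the preprocessing uses
    grid[i] = static_cast<int>(std::round((range_max[i] - range_min[i]) / voxel_size[i]));
  std::printf("grid = %d x %d x %d\n", grid[0], grid[1], grid[2]);  // 432 x 496 x 1

  const float p[3] = {10.0f, 5.0f, -1.0f};  // one lidar point (x, y, z)
  int coord[3];
  bool inside = true;
  for (int i = 0; i < 3; ++i) {  // floor() per axis, then bounds check
    coord[i] = static_cast<int>(std::floor((p[i] - range_min[i]) / voxel_size[i]));
    inside = inside && coord[i] >= 0 && coord[i] < grid[i];
  }
  if (inside)  // coords are stored reversed, as (z, y, x)
    std::printf("coord (x,y,z) = (%d,%d,%d), stored as (z,y,x)\n",
                coord[0], coord[1], coord[2]);  // (62,279,0)
  return 0;
}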
apollo_public_repos/apollo-model-centerpoint/deploy/pointpillars
apollo_public_repos/apollo-model-centerpoint/deploy/pointpillars/cpp/main.cc
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <gflags/gflags.h> #include <glog/logging.h> #include <chrono> #include <cmath> #include <fstream> #include <iostream> #include <numeric> #include <sstream> #include <string> #include "paddle/include/paddle_inference_api.h" using paddle_infer::Config; using paddle_infer::CreatePredictor; using paddle_infer::Predictor;
DEFINE_string(model_file, "", "Path of an inference model"); DEFINE_string(params_file, "", "Path of an inference params"); DEFINE_string(lidar_file, "", "Path of a lidar file to be predicted"); DEFINE_int32(num_point_dim, 4, "Dimension of a point in the lidar file"); DEFINE_string(point_cloud_range, "", "Range of point cloud for voxelize operation"); DEFINE_string(voxel_size, "", "Size of voxels for voxelize operation"); DEFINE_int32(max_points_in_voxel, 100, "Maximum number of points in a voxel"); DEFINE_int32(max_voxel_num, 12000, "Maximum number of voxels"); DEFINE_int32(gpu_id, 0, "GPU card id"); DEFINE_int32(use_trt, 0, "Whether to use tensorrt to accelerate when using gpu"); DEFINE_int32(trt_precision, 0, "Precision type of tensorrt, 0: kFloat32, 1: kHalf"); DEFINE_int32( trt_use_static, 0, "Whether to load the tensorrt graph optimization from a disk path"); DEFINE_string(trt_static_dir, "", "Path of a tensorrt graph optimization directory"); DEFINE_int32(collect_shape_info, 0, "Whether to collect dynamic shape before using tensorrt"); DEFINE_string(dynamic_shape_file, "", "Path of a dynamic shape file for tensorrt");
bool read_point(const std::string &file_path, const int num_point_dim, void **buffer, int *num_points) { std::ifstream file_in(file_path, std::ios::in | std::ios::binary); if (!file_in) { LOG(ERROR) << "Failed to read file: " << file_path << "\n"; return false; } std::streampos file_size; file_in.seekg(0, std::ios::end); file_size = file_in.tellg(); file_in.seekg(0, std::ios::beg); *buffer = malloc(file_size); if (*buffer == nullptr) { LOG(ERROR) << "Failed to malloc memory of size: " << file_size << "\n"; return false; } file_in.read(reinterpret_cast<char *>(*buffer), file_size); file_in.close(); if (file_size / sizeof(float) % num_point_dim != 0) { LOG(ERROR) << "Loaded file size (" << file_size << ") is not evenly divisible by num_point_dim (" << num_point_dim << ")\n"; return false; } *num_points = file_size / sizeof(float) / num_point_dim; return true; }
bool hard_voxelize(const float point_cloud_range_x_min, const float point_cloud_range_y_min, const float point_cloud_range_z_min, const float voxel_size_x, const float voxel_size_y, const float voxel_size_z, const int grid_size_x, const int grid_size_y, const int grid_size_z, const int max_num_points_in_voxel, const int max_voxels, const float *points, const int num_point_dim, const int num_points, float *voxels, int *coords, int *num_points_per_voxel, int *voxel_num) { voxel_num[0] = 0; int voxel_idx, grid_idx, curr_num_point; int coord_x, coord_y, coord_z; int *grid_idx_to_voxel_idx = new int[grid_size_x * grid_size_y * grid_size_z]; memset(grid_idx_to_voxel_idx, -1, sizeof(int) * grid_size_x * grid_size_y * grid_size_z); for (int point_idx = 0; point_idx < num_points; ++point_idx) { coord_x = floor( (points[point_idx * num_point_dim + 0] - point_cloud_range_x_min) / voxel_size_x); coord_y = floor( (points[point_idx * num_point_dim + 1] - point_cloud_range_y_min) / voxel_size_y); coord_z = floor( (points[point_idx * num_point_dim + 2] - point_cloud_range_z_min) / voxel_size_z); if (coord_x < 0 || coord_x >= grid_size_x) { continue; } if (coord_y < 0 || coord_y >= grid_size_y) { continue; } if (coord_z < 0 || coord_z >= grid_size_z) { continue; } grid_idx = coord_z * grid_size_y * grid_size_x + coord_y * grid_size_x + coord_x; voxel_idx = grid_idx_to_voxel_idx[grid_idx]; if (voxel_idx == -1) { voxel_idx = voxel_num[0]; if (voxel_num[0] >= max_voxels) { continue; } voxel_num[0]++; grid_idx_to_voxel_idx[grid_idx] = voxel_idx; coords[voxel_idx * 3 + 0] = coord_z; coords[voxel_idx * 3 + 1] = coord_y; coords[voxel_idx * 3 + 2] = coord_x; } curr_num_point = num_points_per_voxel[voxel_idx]; if (curr_num_point < max_num_points_in_voxel) { for (int j = 0; j < num_point_dim; ++j) { voxels[voxel_idx * max_num_points_in_voxel * num_point_dim + curr_num_point * num_point_dim + j] = points[point_idx * num_point_dim + j]; } num_points_per_voxel[voxel_idx] = curr_num_point + 1; } } delete[] grid_idx_to_voxel_idx; return true; }
bool preprocess(const std::string &file_path, const int num_point_dim, const float point_cloud_range_x_min, const float point_cloud_range_y_min, const float point_cloud_range_z_min, const float voxel_size_x, const float voxel_size_y, const float voxel_size_z, const int grid_size_x, const int grid_size_y, const int grid_size_z, const int max_num_points_in_voxel, const int max_voxels, std::vector<int> *voxels_shape, std::vector<float> *voxels_data, std::vector<int> *num_points_shape, std::vector<int> *num_points_data, std::vector<int> *coords_shape, std::vector<int> *coords_data) { void *buffer = nullptr; int num_points; if (!read_point(file_path, num_point_dim, &buffer, &num_points)) { return false; } float *points = static_cast<float *>(buffer); float *voxels_ptr = new float[max_voxels * max_num_points_in_voxel * num_point_dim](); int *num_points_ptr = new int[max_voxels](); int *coords_ptr = new int[max_voxels * 3](); int *voxel_num_ptr = new int[1](); hard_voxelize( point_cloud_range_x_min, point_cloud_range_y_min, point_cloud_range_z_min, voxel_size_x, voxel_size_y, voxel_size_z, grid_size_x, grid_size_y, grid_size_z, max_num_points_in_voxel, max_voxels, points, num_point_dim, num_points, voxels_ptr, coords_ptr, num_points_ptr, voxel_num_ptr); free(points); voxels_data->assign( voxels_ptr, voxels_ptr + voxel_num_ptr[0] * max_num_points_in_voxel * num_point_dim); num_points_data->assign(num_points_ptr, num_points_ptr + voxel_num_ptr[0]); coords_data->assign(coords_ptr, coords_ptr + voxel_num_ptr[0] * 3); voxels_shape->push_back(voxel_num_ptr[0]); voxels_shape->push_back(max_num_points_in_voxel); voxels_shape->push_back(num_point_dim); num_points_shape->push_back(voxel_num_ptr[0]); coords_shape->push_back(voxel_num_ptr[0]); coords_shape->push_back(3); // batch_id, z, y, x delete[] voxels_ptr; delete[] num_points_ptr; delete[] coords_ptr; delete[] voxel_num_ptr; return true; }
std::shared_ptr<paddle_infer::Predictor> create_predictor( const std::string &model_path, const std::string &params_path, const int gpu_id, const int use_trt, const int trt_precision, const int trt_use_static, const std::string trt_static_dir, const int collect_shape_info, const std::string dynamic_shape_file) { paddle::AnalysisConfig config; config.EnableUseGpu(1000, gpu_id); config.SetModel(model_path, params_path); if (use_trt) { paddle::AnalysisConfig::Precision precision; if (trt_precision == 0) { precision = paddle_infer::PrecisionType::kFloat32; } else if (trt_precision == 1) { precision = paddle_infer::PrecisionType::kHalf; } else { LOG(ERROR) << "TensorRT precision type only supports 0 or 1, but received " << trt_precision << "\n"; return nullptr; } config.EnableTensorRtEngine(1 << 30, 1, 10, precision, trt_use_static, false); if (dynamic_shape_file == "") { LOG(ERROR) << "dynamic_shape_file should be set, but received: " << dynamic_shape_file << "\n"; return nullptr; } if (collect_shape_info) { config.CollectShapeRangeInfo(dynamic_shape_file); } else { config.EnableTunedTensorRtDynamicShape(dynamic_shape_file, true); } if (trt_use_static) { if (trt_static_dir == "") { LOG(ERROR) << "trt_static_dir should be set, but received: " << trt_static_dir << "\n"; return nullptr; } config.SetOptimCacheDir(trt_static_dir); } } config.SwitchIrOptim(true); return paddle_infer::CreatePredictor(config); }
void run(Predictor *predictor, const std::vector<int> &voxels_shape, const std::vector<float> &voxels_data, const std::vector<int> &coords_shape, const std::vector<int> &coords_data, const std::vector<int> &num_points_shape, const std::vector<int> &num_points_data, std::vector<float> *box3d_lidar, std::vector<int64_t> *label_preds, std::vector<float> *scores) { auto input_names = predictor->GetInputNames(); for (const auto &tensor_name : input_names) { auto in_tensor = predictor->GetInputHandle(tensor_name); if (tensor_name == "voxels") { in_tensor->Reshape(voxels_shape); in_tensor->CopyFromCpu(voxels_data.data()); } else if (tensor_name == "coords") { in_tensor->Reshape(coords_shape); in_tensor->CopyFromCpu(coords_data.data()); } else if (tensor_name == "num_points_per_voxel") { in_tensor->Reshape(num_points_shape); in_tensor->CopyFromCpu(num_points_data.data()); } } CHECK(predictor->Run()); auto output_names = predictor->GetOutputNames(); for (size_t i = 0; i != output_names.size(); i++) { auto output = predictor->GetOutputHandle(output_names[i]); std::vector<int> output_shape = output->shape(); int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1, std::multiplies<int>()); if (i == 0) { box3d_lidar->resize(out_num); output->CopyToCpu(box3d_lidar->data()); } else if (i == 1) { label_preds->resize(out_num); output->CopyToCpu(label_preds->data()); } else if (i == 2) { scores->resize(out_num); output->CopyToCpu(scores->data()); } } }
bool parse_result(const std::vector<float> &box3d_lidar, const std::vector<int64_t> &label_preds, const std::vector<float> &scores) { int num_bbox3d = scores.size(); int bbox3d_dims = box3d_lidar.size() / num_bbox3d; for (int box_idx = 0; box_idx < num_bbox3d; ++box_idx) { // filter fake results: score = -1 if (scores[box_idx] < 0) { continue; } LOG(INFO) << "Score: " << scores[box_idx] << " Label: " << label_preds[box_idx] << " "; if (bbox3d_dims == 7) { LOG(INFO) << "Box (x_c, y_c, z_c, w, l, h, -rot): " << box3d_lidar[box_idx * 7 + 0] << " " << box3d_lidar[box_idx * 7 + 1] << " " << box3d_lidar[box_idx * 7 + 2] << " " << box3d_lidar[box_idx * 7 + 3] << " " << box3d_lidar[box_idx * 7 + 4] << " " <<
box3d_lidar[box_idx * 7 + 5] << " " << box3d_lidar[box_idx * 7 + 6] << "\n"; } } return true; } void parse_string_to_vector(const std::string &str, std::vector<float> *vec) { std::stringstream ss(str); float number; while (ss >> number) { vec->push_back(number); } } int main(int argc, char *argv[]) { google::ParseCommandLineFlags(&argc, &argv, true); if (FLAGS_model_file == "" || FLAGS_params_file == "" || FLAGS_lidar_file == "" || FLAGS_point_cloud_range == "" || FLAGS_voxel_size == "") { LOG(INFO) << "Missing required parameter" << "\n"; LOG(INFO) << "Usage: " << std::string(argv[0]) << " --model_file ${MODEL_FILE} " << "--params_file ${PARAMS_FILE} " << "--lidar_file ${LIDAR_FILE} " << "--point_cloud_range ${POINT_CLOUD_RANGE} " << "--voxel_size ${VOXEL_SIZE} " << "\n"; return -1; } auto predictor = create_predictor( FLAGS_model_file, FLAGS_params_file, FLAGS_gpu_id, FLAGS_use_trt, FLAGS_trt_precision, FLAGS_trt_use_static, FLAGS_trt_static_dir, FLAGS_collect_shape_info, FLAGS_dynamic_shape_file); if (predictor == nullptr) { return 0; } std::vector<float> point_cloud_range; parse_string_to_vector(FLAGS_point_cloud_range, &point_cloud_range); std::vector<float> voxel_size; parse_string_to_vector(FLAGS_voxel_size, &voxel_size); const float point_cloud_range_x_min = point_cloud_range[0]; const float point_cloud_range_y_min = point_cloud_range[1]; const float point_cloud_range_z_min = point_cloud_range[2]; const float voxel_size_x = voxel_size[0]; const float voxel_size_y = voxel_size[1]; const float voxel_size_z = voxel_size[2]; int grid_size_x = static_cast<int>( round((point_cloud_range[3] - point_cloud_range[0]) / voxel_size_x)); int grid_size_y = static_cast<int>( round((point_cloud_range[4] - point_cloud_range[1]) / voxel_size_y)); int grid_size_z = static_cast<int>( round((point_cloud_range[5] - point_cloud_range[2]) / voxel_size_z)); std::vector<int> voxels_shape; std::vector<float> voxels_data; std::vector<int> num_points_shape; std::vector<int> num_points_data; std::vector<int> coords_shape; std::vector<int> coords_data; if (!preprocess(FLAGS_lidar_file, FLAGS_num_point_dim, point_cloud_range_x_min, point_cloud_range_y_min, point_cloud_range_z_min, voxel_size_x, voxel_size_y, voxel_size_z, grid_size_x, grid_size_y, grid_size_z, FLAGS_max_points_in_voxel, FLAGS_max_voxel_num, &voxels_shape, &voxels_data, &num_points_shape, &num_points_data, &coords_shape, &coords_data)) { LOG(ERROR) << "Failed to preprocess!\n"; return 0; } std::vector<float> box3d_lidar; std::vector<int64_t> label_preds; std::vector<float> scores; run(predictor.get(), voxels_shape, voxels_data, coords_shape, coords_data, num_points_shape, num_points_data, &box3d_lidar, &label_preds, &scores); parse_result(box3d_lidar, label_preds, scores); return 0; }
0
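The lidar file that read_point in main.cc loads is just a flat stream of float32 values, num_point_dim per point, with no header. Below is a hedged test helper, not part of the repo, that writes such a file; the file name and point values are made up.

// Hypothetical helper for exercising read_point above: writes two fake
// (x, y, z, intensity) points as raw float32, the layout main.cc expects.
#include <cstdio>
#include <fstream>

int main() {
  const float pts[2][4] = {{10.f, 5.f, -1.f, 0.2f},
                           {12.5f, -3.f, 0.5f, 0.7f}};
  std::ofstream out("fake_lidar.bin", std::ios::binary);
  out.write(reinterpret_cast<const char *>(pts), sizeof(pts));
  out.close();
  // read_point would report: num_points = sizeof(pts) / sizeof(float) / 4 == 2
  std::printf("wrote %zu bytes (2 points x 4 dims x 4 bytes)\n", sizeof(pts));
  return 0;
}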
apollo_public_repos/apollo-model-centerpoint/deploy/pointpillars
apollo_public_repos/apollo-model-centerpoint/deploy/pointpillars/cpp/CMakeLists.txt
cmake_minimum_required(VERSION 3.0) project(cpp_inference_demo CXX C) option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." ON) option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." ON) option(USE_TENSORRT "Compile demo with TensorRT." ON) option(CUSTOM_OPERATOR_FILES "List of file names for custom operators" "") execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpfullversion -dumpversion OUTPUT_VARIABLE GCC_VERSION) string(REGEX MATCHALL "[0-9]+" GCC_VERSION_COMPONENTS ${GCC_VERSION}) list(GET GCC_VERSION_COMPONENTS 0 GCC_MAJOR) list(GET GCC_VERSION_COMPONENTS 1 GCC_MINOR) set(GCC_VERSION "${GCC_MAJOR}.${GCC_MINOR}") if (GCC_VERSION LESS "8.0") set(CMAKE_CXX_FLAGS "-Wl,--no-as-needed") endif() set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake") include(external/boost) if(WITH_GPU) find_package(CUDA REQUIRED) add_definitions("-DPADDLE_WITH_CUDA") endif() if(NOT WITH_STATIC_LIB) add_definitions("-DPADDLE_WITH_SHARED_LIB") else() # PD_INFER_DECL is mainly used to set the dllimport/dllexport attribute in dynamic library mode. # Set it to empty in static library mode to avoid compilation issues. add_definitions("/DPD_INFER_DECL=") endif() macro(safe_set_static_flag) foreach(flag_var CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO) if(${flag_var} MATCHES "/MD") string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}") endif(${flag_var} MATCHES "/MD") endforeach(flag_var) endmacro() if(NOT DEFINED PADDLE_LIB) message(FATAL_ERROR "please set PADDLE_LIB with -DPADDLE_LIB=/path/paddle/lib") endif() if(NOT DEFINED DEMO_NAME) message(FATAL_ERROR "please set DEMO_NAME with -DDEMO_NAME=demo_name") endif() include_directories("${PADDLE_LIB}/") set(PADDLE_LIB_THIRD_PARTY_PATH "${PADDLE_LIB}/third_party/install/") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}protobuf/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}glog/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}gflags/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}xxhash/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}onnxruntime/include") include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}paddle2onnx/include") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}protobuf/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}glog/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}gflags/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}xxhash/lib") link_directories("${PADDLE_LIB}/paddle/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}onnxruntime/lib") link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}paddle2onnx/lib") if (WIN32) add_definitions("/DGOOGLE_GLOG_DLL_DECL=") option(MSVC_STATIC_CRT "use static C Runtime library by default" ON) if (MSVC_STATIC_CRT) if (WITH_MKL) set(FLAG_OPENMP "/openmp") endif() set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /bigobj /MTd ${FLAG_OPENMP}") set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /bigobj /MT ${FLAG_OPENMP}") set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /bigobj /MTd ${FLAG_OPENMP}") set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /bigobj /MT ${FLAG_OPENMP}") safe_set_static_flag() if (WITH_STATIC_LIB) add_definitions(-DSTATIC_LIB) endif() endif() else() if(WITH_MKL) set(FLAG_OPENMP "-fopenmp") endif() set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14 ${FLAG_OPENMP}") endif() if(WITH_GPU) if(NOT WIN32) set(CUDA_LIB "/usr/local/cuda/lib64/" CACHE STRING "CUDA Library") else() if(CUDA_LIB 
STREQUAL "") set(CUDA_LIB "C:\\Program\ Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v8.0\\lib\\x64") endif() endif(NOT WIN32) endif() if (USE_TENSORRT AND WITH_GPU) set(TENSORRT_ROOT "" CACHE STRING "The root directory of TensorRT library") if("${TENSORRT_ROOT}" STREQUAL "") message(FATAL_ERROR "The TENSORRT_ROOT is empty, you must assign it a value with CMake command. Such as: -DTENSORRT_ROOT=TENSORRT_ROOT_PATH ") endif() set(TENSORRT_INCLUDE_DIR ${TENSORRT_ROOT}/include) set(TENSORRT_LIB_DIR ${TENSORRT_ROOT}/lib) endif() if (NOT WIN32) if (USE_TENSORRT AND WITH_GPU) include_directories("${TENSORRT_INCLUDE_DIR}") link_directories("${TENSORRT_LIB_DIR}") endif() endif(NOT WIN32) if(WITH_MKL) set(MATH_LIB_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}mklml") include_directories("${MATH_LIB_PATH}/include") if(WIN32) set(MATH_LIB ${MATH_LIB_PATH}/lib/mklml${CMAKE_STATIC_LIBRARY_SUFFIX} ${MATH_LIB_PATH}/lib/libiomp5md${CMAKE_STATIC_LIBRARY_SUFFIX}) else() set(MATH_LIB ${MATH_LIB_PATH}/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX} ${MATH_LIB_PATH}/lib/libiomp5${CMAKE_SHARED_LIBRARY_SUFFIX}) endif() set(MKLDNN_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}mkldnn") if(EXISTS ${MKLDNN_PATH}) include_directories("${MKLDNN_PATH}/include") if(WIN32) set(MKLDNN_LIB ${MKLDNN_PATH}/lib/mkldnn.lib) else(WIN32) set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.0) endif(WIN32) endif() else() set(OPENBLAS_LIB_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}openblas") include_directories("${OPENBLAS_LIB_PATH}/include/openblas") if(WIN32) set(MATH_LIB ${OPENBLAS_LIB_PATH}/lib/openblas${CMAKE_STATIC_LIBRARY_SUFFIX}) else() set(MATH_LIB ${OPENBLAS_LIB_PATH}/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX}) endif() endif() if(WITH_STATIC_LIB) set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX}) else() if(WIN32) set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX}) else() set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_SHARED_LIBRARY_SUFFIX}) endif() endif() if (NOT WIN32) if (GCC_VERSION LESS "8.0") set(EXTERNAL_LIB ${EXTERNAL_LIB} "-lssl -lcrypto -lz -lleveldb -lsnappy") endif() set(EXTERNAL_LIB ${EXTERNAL_LIB} "-lrt -ldl -lpthread") set(DEPS ${DEPS} ${MATH_LIB} ${MKLDNN_LIB} glog gflags protobuf xxhash ${EXTERNAL_LIB}) else() set(DEPS ${DEPS} ${MATH_LIB} ${MKLDNN_LIB} glog gflags_static libprotobuf xxhash ${EXTERNAL_LIB}) set(DEPS ${DEPS} shlwapi.lib) endif(NOT WIN32) if(WITH_GPU) if(NOT WIN32) if (USE_TENSORRT) set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer${CMAKE_SHARED_LIBRARY_SUFFIX}) set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX}) endif() set(DEPS ${DEPS} ${CUDA_LIB}/libcudart${CMAKE_SHARED_LIBRARY_SUFFIX}) else() if(USE_TENSORRT) set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/nvinfer${CMAKE_STATIC_LIBRARY_SUFFIX}) set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_STATIC_LIBRARY_SUFFIX}) endif() set(DEPS ${DEPS} ${CUDA_LIB}/cudart${CMAKE_STATIC_LIBRARY_SUFFIX} ) set(DEPS ${DEPS} ${CUDA_LIB}/cublas${CMAKE_STATIC_LIBRARY_SUFFIX} ) set(DEPS ${DEPS} ${CUDA_LIB}/cudnn${CMAKE_STATIC_LIBRARY_SUFFIX} ) endif() endif() cuda_add_library(pd_infer_custom_op ${CUSTOM_OPERATOR_FILES} SHARED) add_executable(${DEMO_NAME} ${DEMO_NAME}.cc) if (GCC_VERSION GREATER_EQUAL "8.0") set(DEPS ${DEPS} libssl.a libcrypto.a libz.a libleveldb.a libsnappy.a) endif() set(DEPS ${DEPS} boost pd_infer_custom_op)# libssl.a libcrypto.a libz.a libleveldb.a libsnappy.a) if(WIN32) if(USE_TENSORRT) add_custom_command(TARGET ${DEMO_NAME} POST_BUILD COMMAND 
${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer${CMAKE_SHARED_LIBRARY_SUFFIX} ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE} COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX} ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE} ) endif() if(WITH_MKL) add_custom_command(TARGET ${DEMO_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/mklml.dll ${CMAKE_BINARY_DIR}/Release COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/libiomp5md.dll ${CMAKE_BINARY_DIR}/Release COMMAND ${CMAKE_COMMAND} -E copy ${MKLDNN_PATH}/lib/mkldnn.dll ${CMAKE_BINARY_DIR}/Release ) else() add_custom_command(TARGET ${DEMO_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy ${OPENBLAS_LIB_PATH}/lib/openblas.dll ${CMAKE_BINARY_DIR}/Release ) endif() if(NOT WITH_STATIC_LIB) add_custom_command(TARGET ${DEMO_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy "${PADDLE_LIB}/paddle/lib/paddle_fluid.dll" ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE} ) endif() endif() target_link_libraries(${DEMO_NAME} ${DEPS})
0
apollo_public_repos/apollo-model-centerpoint/deploy/pointpillars
apollo_public_repos/apollo-model-centerpoint/deploy/pointpillars/cpp/compile.sh
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. mkdir -p build cd build rm -rf * DEMO_NAME=main WITH_MKL=ON WITH_GPU=ON USE_TENSORRT=ON LIB_DIR=/home/dependency/paddle_inference_install_dir CUDNN_LIB=/usr/lib/x86_64-linux-gnu CUDA_LIB=/usr/local/cuda/lib64 TENSORRT_ROOT=/home/dependency/TensorRT-8.2.5.1 CUSTOM_OPERATOR_FILES="custom_ops/iou3d_cpu.cpp;custom_ops/iou3d_nms_api.cpp;custom_ops/iou3d_nms.cpp;custom_ops/iou3d_nms_kernel.cu" cmake .. -DPADDLE_LIB=${LIB_DIR} \ -DWITH_MKL=${WITH_MKL} \ -DDEMO_NAME=${DEMO_NAME} \ -DWITH_GPU=${WITH_GPU} \ -DWITH_STATIC_LIB=OFF \ -DUSE_TENSORRT=${USE_TENSORRT} \ -DCUDNN_LIB=${CUDNN_LIB} \ -DCUDA_LIB=${CUDA_LIB} \ -DTENSORRT_ROOT=${TENSORRT_ROOT} \ -DCUSTOM_OPERATOR_FILES=${CUSTOM_OPERATOR_FILES} make -j
0
apollo_public_repos/apollo-model-centerpoint/deploy/pointpillars/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/pointpillars/cpp/custom_ops/iou3d_cpu.cpp
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* 3D Rotated IoU Calculation (CPU) Written by Shaoshuai Shi All Rights Reserved 2020. */ #include "iou3d_cpu.h" #include <cuda.h> #include <cuda_runtime_api.h> #include <math.h> #include <stdio.h> #include <vector> #include "paddle/include/experimental/ext_all.h" inline float min(float a, float b) { return a > b ? b : a; } inline float max(float a, float b) { return a > b ? a : b; } const float EPS = 1e-8; struct Point { float x, y; __device__ Point() {} __device__ Point(double _x, double _y) { x = _x, y = _y; } __device__ void set(float _x, float _y) { x = _x; y = _y; } __device__ Point operator+(const Point &b) const { return Point(x + b.x, y + b.y); } __device__ Point operator-(const Point &b) const { return Point(x - b.x, y - b.y); } }; inline float cross(const Point &a, const Point &b) { return a.x * b.y - a.y * b.x; } inline float cross(const Point &p1, const Point &p2, const Point &p0) { return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y); } inline int check_rect_cross(const Point &p1, const Point &p2, const Point &q1, const Point &q2) { int ret = min(p1.x, p2.x) <= max(q1.x, q2.x) && min(q1.x, q2.x) <= max(p1.x, p2.x) && min(p1.y, p2.y) <= max(q1.y, q2.y) && min(q1.y, q2.y) <= max(p1.y, p2.y); return ret; } inline int check_in_box2d(const float *box, const Point &p) { // params: (7) [x, y, z, dx, dy, dz, heading] const float MARGIN = 1e-2; float center_x = box[0], center_y = box[1]; float angle_cos = cos(-box[6]), angle_sin = sin(-box[6]); // rotate the point in the opposite direction of box float rot_x = (p.x - center_x) * angle_cos + (p.y - center_y) * (-angle_sin); float rot_y = (p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos; return (fabs(rot_x) < box[3] / 2 + MARGIN && fabs(rot_y) < box[4] / 2 + MARGIN); } inline int intersection(const Point &p1, const Point &p0, const Point &q1, const Point &q0, Point &ans) { // fast exclusion if (check_rect_cross(p0, p1, q0, q1) == 0) return 0; // check cross standing float s1 = cross(q0, p1, p0); float s2 = cross(p1, q1, p0); float s3 = cross(p0, q1, q0); float s4 = cross(q1, p1, q0); if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0; // calculate intersection of two lines float s5 = cross(q1, p1, p0); if (fabs(s5 - s1) > EPS) { ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1); ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1); } else { float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y; float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y; float D = a0 * b1 - a1 * b0; ans.x = (b0 * c1 - b1 * c0) / D; ans.y = (a1 * c0 - a0 * c1) / D; } return 1; } inline void rotate_around_center(const Point &center, const float angle_cos, const float angle_sin, Point &p) { float new_x = (p.x - center.x) * angle_cos + (p.y - center.y) * (-angle_sin) + center.x; float new_y = (p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y; p.set(new_x, new_y); } inline int point_cmp(const 
Point &a, const Point &b, const Point &center) { return atan2(a.y - center.y, a.x - center.x) > atan2(b.y - center.y, b.x - center.x); } inline float box_overlap(const float *box_a, const float *box_b) { // params: box_a (7) [x, y, z, dx, dy, dz, heading] // params: box_b (7) [x, y, z, dx, dy, dz, heading] // float a_x1 = box_a[0], a_y1 = box_a[1], a_x2 = box_a[2], a_y2 = // box_a[3], a_angle = box_a[4]; // float b_x1 = box_b[0], b_y1 = box_b[1], b_x2 = box_b[2], b_y2 = // box_b[3], b_angle = box_b[4]; float a_angle = box_a[6], b_angle = box_b[6]; float a_dx_half = box_a[3] / 2, b_dx_half = box_b[3] / 2, a_dy_half = box_a[4] / 2, b_dy_half = box_b[4] / 2; float a_x1 = box_a[0] - a_dx_half, a_y1 = box_a[1] - a_dy_half; float a_x2 = box_a[0] + a_dx_half, a_y2 = box_a[1] + a_dy_half; float b_x1 = box_b[0] - b_dx_half, b_y1 = box_b[1] - b_dy_half; float b_x2 = box_b[0] + b_dx_half, b_y2 = box_b[1] + b_dy_half; Point center_a(box_a[0], box_a[1]); Point center_b(box_b[0], box_b[1]); Point box_a_corners[5]; box_a_corners[0].set(a_x1, a_y1); box_a_corners[1].set(a_x2, a_y1); box_a_corners[2].set(a_x2, a_y2); box_a_corners[3].set(a_x1, a_y2); Point box_b_corners[5]; box_b_corners[0].set(b_x1, b_y1); box_b_corners[1].set(b_x2, b_y1); box_b_corners[2].set(b_x2, b_y2); box_b_corners[3].set(b_x1, b_y2); // get oriented corners float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle); float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle); for (int k = 0; k < 4; k++) { rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]); rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]); } box_a_corners[4] = box_a_corners[0]; box_b_corners[4] = box_b_corners[0]; // get intersection of lines Point cross_points[16]; Point poly_center; int cnt = 0, flag = 0; poly_center.set(0, 0); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { flag = intersection(box_a_corners[i + 1], box_a_corners[i], box_b_corners[j + 1], box_b_corners[j], cross_points[cnt]); if (flag) { poly_center = poly_center + cross_points[cnt]; cnt++; } } } // check corners for (int k = 0; k < 4; k++) { if (check_in_box2d(box_a, box_b_corners[k])) { poly_center = poly_center + box_b_corners[k]; cross_points[cnt] = box_b_corners[k]; cnt++; } if (check_in_box2d(box_b, box_a_corners[k])) { poly_center = poly_center + box_a_corners[k]; cross_points[cnt] = box_a_corners[k]; cnt++; } } poly_center.x /= cnt; poly_center.y /= cnt; // sort the points of polygon Point temp; for (int j = 0; j < cnt - 1; j++) { for (int i = 0; i < cnt - j - 1; i++) { if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)) { temp = cross_points[i]; cross_points[i] = cross_points[i + 1]; cross_points[i + 1] = temp; } } } // get the overlap areas float area = 0; for (int k = 0; k < cnt - 1; k++) { area += cross(cross_points[k] - cross_points[0], cross_points[k + 1] - cross_points[0]); } return fabs(area) / 2.0; } inline float iou_bev(const float *box_a, const float *box_b) { // params: box_a (7) [x, y, z, dx, dy, dz, heading] // params: box_b (7) [x, y, z, dx, dy, dz, heading] float sa = box_a[3] * box_a[4]; float sb = box_b[3] * box_b[4]; float s_overlap = box_overlap(box_a, box_b); return s_overlap / fmaxf(sa + sb - s_overlap, EPS); } std::vector<paddle::Tensor> boxes_iou_bev_cpu( const paddle::Tensor &boxes_a_tensor, const paddle::Tensor &boxes_b_tensor) { // params boxes_a_tensor: (N, 7) [x, y, z, dx, dy, dz, heading] // params boxes_b_tensor: (M, 7) [x, y, z, dx, dy, dz, heading] // params ans_iou_tensor: (N, M) 
int num_boxes_a = boxes_a_tensor.shape()[0]; int num_boxes_b = boxes_b_tensor.shape()[0]; const float *boxes_a = boxes_a_tensor.data<float>(); const float *boxes_b = boxes_b_tensor.data<float>(); auto ans_iou_tensor = paddle::empty({num_boxes_a, num_boxes_b}, paddle::DataType::FLOAT32, paddle::CPUPlace()); float *ans_iou = ans_iou_tensor.data<float>(); for (int i = 0; i < num_boxes_a; i++) { for (int j = 0; j < num_boxes_b; j++) { ans_iou[i * num_boxes_b + j] = iou_bev(boxes_a + i * 7, boxes_b + j * 7); } } return {ans_iou_tensor}; }
0
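A worked rotated case for the box_overlap/iou_bev pair above, useful as a regression check. With box_a = 4 x 2 at the origin, heading 0, and box_b = 2 x 4 at the origin, heading pi/2, box_b's footprint rotates onto box_a's exactly, so the overlap is the full 8 m^2 and the IoU should come out near 1. The snippet is a hypothetical test harness, not part of the repo; it goes through the public boxes_iou_bev_cpu entry point since iou_bev itself is file-local.

// Hypothetical rotated-IoU check via boxes_iou_bev_cpu; not part of the repo.
#include <algorithm>
#include <cstdio>
#include "iou3d_cpu.h"

int main() {
  const float kHalfPi = 1.5707963f;  // pi/2
  // box_a: dx=4, dy=2, heading 0; box_b: dx=2, dy=4, heading pi/2,
  // which rotates onto exactly the same BEV footprint as box_a.
  float a[7] = {0.f, 0.f, 0.f, 4.f, 2.f, 1.5f, 0.f};
  float b[7] = {0.f, 0.f, 0.f, 2.f, 4.f, 1.5f, kHalfPi};

  auto ta = paddle::empty({1, 7}, paddle::DataType::FLOAT32, paddle::CPUPlace());
  auto tb = paddle::empty({1, 7}, paddle::DataType::FLOAT32, paddle::CPUPlace());
  std::copy(a, a + 7, ta.data<float>());
  std::copy(b, b + 7, tb.data<float>());

  float iou = boxes_iou_bev_cpu(ta, tb)[0].data<float>()[0];
  std::printf("rotated IoU = %.4f\n", iou);  // expected ~1.0
  return 0;
}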
apollo_public_repos/apollo-model-centerpoint/deploy/pointpillars/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/pointpillars/cpp/custom_ops/iou3d_nms.cpp
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*
3D IoU Calculation and Rotated NMS (modified from 2D NMS written by others)
Written by Shaoshuai Shi
All Rights Reserved 2019-2020.
*/

#include "iou3d_nms.h"

#include <cuda.h>
#include <cuda_runtime_api.h>

#include <cstdio>
#include <vector>

#include "paddle/include/experimental/ext_all.h"

#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))

const int THREADS_PER_BLOCK_NMS = sizeof(int64_t) * 8;

void BoxesOverlapLauncher(const cudaStream_t &stream, const int num_a,
                          const float *boxes_a, const int num_b,
                          const float *boxes_b, float *ans_overlap);
void BoxesIouBevLauncher(const cudaStream_t &stream, const int num_a,
                         const float *boxes_a, const int num_b,
                         const float *boxes_b, float *ans_iou);
void NmsLauncher(const cudaStream_t &stream, const float *boxes, int64_t *mask,
                 int boxes_num, float nms_overlap_thresh);
void NmsNormalLauncher(const cudaStream_t &stream, const float *boxes,
                       int64_t *mask, int boxes_num, float nms_overlap_thresh);

std::vector<paddle::Tensor> boxes_overlap_bev_gpu(
    const paddle::Tensor &boxes_a, const paddle::Tensor &boxes_b) {
  // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
  // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]
  // params ans_overlap: (N, M)
  int num_a = boxes_a.shape()[0];
  int num_b = boxes_b.shape()[0];

  const float *boxes_a_data = boxes_a.data<float>();
  const float *boxes_b_data = boxes_b.data<float>();
  auto ans_overlap = paddle::empty({num_a, num_b}, paddle::DataType::FLOAT32,
                                   paddle::GPUPlace());
  float *ans_overlap_data = ans_overlap.data<float>();

  BoxesOverlapLauncher(boxes_a.stream(), num_a, boxes_a_data, num_b,
                       boxes_b_data, ans_overlap_data);

  return {ans_overlap};
}

std::vector<paddle::Tensor> boxes_iou_bev_gpu(
    const paddle::Tensor &boxes_a_tensor,
    const paddle::Tensor &boxes_b_tensor) {
  // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
  // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]
  // params ans_iou: (N, M)
  int num_a = boxes_a_tensor.shape()[0];
  int num_b = boxes_b_tensor.shape()[0];

  const float *boxes_a_data = boxes_a_tensor.data<float>();
  const float *boxes_b_data = boxes_b_tensor.data<float>();
  auto ans_iou_tensor = paddle::empty(
      {num_a, num_b}, paddle::DataType::FLOAT32, paddle::GPUPlace());
  float *ans_iou_data = ans_iou_tensor.data<float>();

  BoxesIouBevLauncher(boxes_a_tensor.stream(), num_a, boxes_a_data, num_b,
                      boxes_b_data, ans_iou_data);

  return {ans_iou_tensor};
}

std::vector<paddle::Tensor> nms_gpu(const paddle::Tensor &boxes,
                                    float nms_overlap_thresh) {
  // params boxes: (N, 7) [x, y, z, dx, dy, dz, heading]
  auto keep = paddle::empty({boxes.shape()[0]}, paddle::DataType::INT32,
                            paddle::CPUPlace());
  auto num_to_keep_tensor =
      paddle::empty({1}, paddle::DataType::INT32, paddle::CPUPlace());
  int *num_to_keep_data = num_to_keep_tensor.data<int>();

  int boxes_num = boxes.shape()[0];
  const float *boxes_data = boxes.data<float>();
  int *keep_data = keep.data<int>();

  const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);

  // one 64-bit suppression word per (box, column block) pair
  auto mask = paddle::empty({boxes_num * col_blocks}, paddle::DataType::INT64,
                            paddle::GPUPlace());
  int64_t *mask_data = mask.data<int64_t>();
  NmsLauncher(boxes.stream(), boxes_data, mask_data, boxes_num,
              nms_overlap_thresh);

  const paddle::Tensor mask_cpu_tensor = mask.copy_to(paddle::CPUPlace(), true);
  const int64_t *mask_cpu = mask_cpu_tensor.data<int64_t>();

  // accumulated suppression bitmap on the host (one bit per box)
  std::vector<int64_t> remv_cpu(col_blocks, 0);

  int num_to_keep = 0;
  for (int i = 0; i < boxes_num; i++) {
    int nblock = i / THREADS_PER_BLOCK_NMS;
    int inblock = i % THREADS_PER_BLOCK_NMS;

    if (!(remv_cpu[nblock] & (1ULL << inblock))) {
      keep_data[num_to_keep++] = i;
      const int64_t *p = &mask_cpu[0] + i * col_blocks;
      for (int j = nblock; j < col_blocks; j++) {
        remv_cpu[j] |= p[j];
      }
    }
  }
  num_to_keep_data[0] = num_to_keep;

  if (cudaSuccess != cudaGetLastError()) {
    printf("Error: CUDA kernel launch failed in nms_gpu!\n");
  }

  return {keep, num_to_keep_tensor};
}

std::vector<paddle::Tensor> nms_normal_gpu(const paddle::Tensor &boxes,
                                           float nms_overlap_thresh) {
  // params boxes: (N, 7) [x, y, z, dx, dy, dz, heading]
  // params keep: (N)
  auto keep = paddle::empty({boxes.shape()[0]}, paddle::DataType::INT32,
                            paddle::CPUPlace());
  auto num_to_keep_tensor =
      paddle::empty({1}, paddle::DataType::INT32, paddle::CPUPlace());
  int *num_to_keep_data = num_to_keep_tensor.data<int>();

  int boxes_num = boxes.shape()[0];
  const float *boxes_data = boxes.data<float>();
  int *keep_data = keep.data<int>();

  const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);

  auto mask = paddle::empty({boxes_num * col_blocks}, paddle::DataType::INT64,
                            paddle::GPUPlace());
  int64_t *mask_data = mask.data<int64_t>();
  NmsNormalLauncher(boxes.stream(), boxes_data, mask_data, boxes_num,
                    nms_overlap_thresh);

  const paddle::Tensor mask_cpu_tensor = mask.copy_to(paddle::CPUPlace(), true);
  const int64_t *mask_cpu = mask_cpu_tensor.data<int64_t>();

  // accumulated suppression bitmap on the host (one bit per box)
  std::vector<int64_t> remv_cpu(col_blocks, 0);

  int num_to_keep = 0;
  for (int i = 0; i < boxes_num; i++) {
    int nblock = i / THREADS_PER_BLOCK_NMS;
    int inblock = i % THREADS_PER_BLOCK_NMS;

    if (!(remv_cpu[nblock] & (1ULL << inblock))) {
      keep_data[num_to_keep++] = i;
      const int64_t *p = &mask_cpu[0] + i * col_blocks;
      for (int j = nblock; j < col_blocks; j++) {
        remv_cpu[j] |= p[j];
      }
    }
  }
  num_to_keep_data[0] = num_to_keep;

  if (cudaSuccess != cudaGetLastError()) {
    printf("Error: CUDA kernel launch failed in nms_normal_gpu!\n");
  }

  return {keep, num_to_keep_tensor};
}
0
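The host-side loop at the end of nms_gpu above is the subtle part: for each box i, the GPU writes col_blocks 64-bit words, where bit j of word k records whether box i suppresses box 64*k + j. The following host-only C++ sketch reproduces just that decoding step with a hand-built mask; the three-box input and the set bits are hypothetical, chosen only to illustrate the bit layout, and the snippet is not part of the repository.

// Host-only sketch of the bitmask decoding used by nms_gpu.
// Assumes boxes are already sorted by descending score, as the caller
// of the custom op is expected to guarantee.
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  const int kThreads = 64;  // bits per mask word (sizeof(int64_t) * 8)
  const int boxes_num = 3;
  const int col_blocks = (boxes_num + kThreads - 1) / kThreads;  // = 1

  // Hypothetical GPU output: box 0 overlaps box 1 above the threshold
  // (bit 1 set in box 0's word); box 2 overlaps nothing.
  std::vector<int64_t> mask(boxes_num * col_blocks, 0);
  mask[0] = 1LL << 1;

  std::vector<int64_t> remv(col_blocks, 0);  // accumulated suppression bits
  std::vector<int> keep;
  for (int i = 0; i < boxes_num; ++i) {
    int nblock = i / kThreads, inblock = i % kThreads;
    if (!(remv[nblock] & (1ULL << inblock))) {
      keep.push_back(i);  // box i survives; fold its suppression row in
      const int64_t *p = mask.data() + i * col_blocks;
      for (int j = nblock; j < col_blocks; ++j) remv[j] |= p[j];
    }
  }
  for (int idx : keep) printf("keep %d\n", idx);  // prints 0 and 2
  return 0;
}

Because suppression bits are only ever folded in from boxes that themselves survived, a box suppressed by an already-suppressed box is still kept, which matches the semantics of sequential greedy NMS.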
apollo_public_repos/apollo-model-centerpoint/deploy/pointpillars/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/pointpillars/cpp/custom_ops/iou3d_nms_kernel.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*
3D IoU Calculation and Rotated NMS (modified from 2D NMS written by others)
Written by Shaoshuai Shi
All Rights Reserved 2019-2020.
*/

#include <stdio.h>

#define THREADS_PER_BLOCK 16
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))

// #define DEBUG
const int THREADS_PER_BLOCK_NMS = sizeof(int64_t) * 8;
const float EPS = 1e-8;

struct Point {
  float x, y;
  __device__ Point() {}
  __device__ Point(double _x, double _y) { x = _x, y = _y; }

  __device__ void set(float _x, float _y) {
    x = _x;
    y = _y;
  }

  __device__ Point operator+(const Point &b) const {
    return Point(x + b.x, y + b.y);
  }

  __device__ Point operator-(const Point &b) const {
    return Point(x - b.x, y - b.y);
  }
};

__device__ inline float cross(const Point &a, const Point &b) {
  return a.x * b.y - a.y * b.x;
}

__device__ inline float cross(const Point &p1, const Point &p2,
                              const Point &p0) {
  return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y);
}

__device__ int check_rect_cross(const Point &p1, const Point &p2,
                                const Point &q1, const Point &q2) {
  int ret = min(p1.x, p2.x) <= max(q1.x, q2.x) &&
            min(q1.x, q2.x) <= max(p1.x, p2.x) &&
            min(p1.y, p2.y) <= max(q1.y, q2.y) &&
            min(q1.y, q2.y) <= max(p1.y, p2.y);
  return ret;
}

__device__ inline int check_in_box2d(const float *box, const Point &p) {
  // params: box (7) [x, y, z, dx, dy, dz, heading]
  const float MARGIN = 1e-2;

  float center_x = box[0], center_y = box[1];
  // rotate the point in the opposite direction of the box
  float angle_cos = cos(-box[6]), angle_sin = sin(-box[6]);
  float rot_x = (p.x - center_x) * angle_cos + (p.y - center_y) * (-angle_sin);
  float rot_y = (p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos;

  return (fabs(rot_x) < box[3] / 2 + MARGIN &&
          fabs(rot_y) < box[4] / 2 + MARGIN);
}

__device__ inline int intersection(const Point &p1, const Point &p0,
                                   const Point &q1, const Point &q0,
                                   Point &ans) {
  // fast exclusion: bounding rectangles must overlap
  if (check_rect_cross(p0, p1, q0, q1) == 0) return 0;

  // check cross standing
  float s1 = cross(q0, p1, p0);
  float s2 = cross(p1, q1, p0);
  float s3 = cross(p0, q1, q0);
  float s4 = cross(q1, p1, q0);

  if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0;

  // calculate intersection of the two lines
  float s5 = cross(q1, p1, p0);
  if (fabs(s5 - s1) > EPS) {
    ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1);
    ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1);
  } else {
    float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y;
    float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y;
    float D = a0 * b1 - a1 * b0;

    ans.x = (b0 * c1 - b1 * c0) / D;
    ans.y = (a1 * c0 - a0 * c1) / D;
  }

  return 1;
}

__device__ inline void rotate_around_center(const Point &center,
                                            const float angle_cos,
                                            const float angle_sin, Point &p) {
  float new_x =
      (p.x - center.x) * angle_cos + (p.y - center.y) * (-angle_sin) + center.x;
  float new_y =
      (p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y;
  p.set(new_x, new_y);
}

__device__ inline int point_cmp(const Point &a, const Point &b,
                                const Point &center) {
  return atan2(a.y - center.y, a.x - center.x) >
         atan2(b.y - center.y, b.x - center.x);
}

__device__ inline float box_overlap(const float *box_a, const float *box_b) {
  // params box_a: [x, y, z, dx, dy, dz, heading]
  // params box_b: [x, y, z, dx, dy, dz, heading]
  float a_angle = box_a[6], b_angle = box_b[6];
  float a_dx_half = box_a[3] / 2, b_dx_half = box_b[3] / 2,
        a_dy_half = box_a[4] / 2, b_dy_half = box_b[4] / 2;
  float a_x1 = box_a[0] - a_dx_half, a_y1 = box_a[1] - a_dy_half;
  float a_x2 = box_a[0] + a_dx_half, a_y2 = box_a[1] + a_dy_half;
  float b_x1 = box_b[0] - b_dx_half, b_y1 = box_b[1] - b_dy_half;
  float b_x2 = box_b[0] + b_dx_half, b_y2 = box_b[1] + b_dy_half;

  Point center_a(box_a[0], box_a[1]);
  Point center_b(box_b[0], box_b[1]);

#ifdef DEBUG
  printf(
      "a: (%.3f, %.3f, %.3f, %.3f, %.3f), b: (%.3f, %.3f, %.3f, %.3f, %.3f)\n",
      a_x1, a_y1, a_x2, a_y2, a_angle, b_x1, b_y1, b_x2, b_y2, b_angle);
  printf("center a: (%.3f, %.3f), b: (%.3f, %.3f)\n", center_a.x, center_a.y,
         center_b.x, center_b.y);
#endif

  Point box_a_corners[5];
  box_a_corners[0].set(a_x1, a_y1);
  box_a_corners[1].set(a_x2, a_y1);
  box_a_corners[2].set(a_x2, a_y2);
  box_a_corners[3].set(a_x1, a_y2);

  Point box_b_corners[5];
  box_b_corners[0].set(b_x1, b_y1);
  box_b_corners[1].set(b_x2, b_y1);
  box_b_corners[2].set(b_x2, b_y2);
  box_b_corners[3].set(b_x1, b_y2);

  // get oriented corners
  float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle);
  float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle);

  for (int k = 0; k < 4; k++) {
#ifdef DEBUG
    printf("before corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k,
           box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x,
           box_b_corners[k].y);
#endif
    rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]);
    rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]);
#ifdef DEBUG
    printf("corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x,
           box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y);
#endif
  }

  box_a_corners[4] = box_a_corners[0];
  box_b_corners[4] = box_b_corners[0];

  // get intersections of the box edges
  Point cross_points[16];
  Point poly_center;
  int cnt = 0, flag = 0;

  poly_center.set(0, 0);
  for (int i = 0; i < 4; i++) {
    for (int j = 0; j < 4; j++) {
      flag = intersection(box_a_corners[i + 1], box_a_corners[i],
                          box_b_corners[j + 1], box_b_corners[j],
                          cross_points[cnt]);
      if (flag) {
        poly_center = poly_center + cross_points[cnt];
        cnt++;
#ifdef DEBUG
        printf(
            "Cross points (%.3f, %.3f): a(%.3f, %.3f)->(%.3f, %.3f), b(%.3f, "
            "%.3f)->(%.3f, %.3f) \n",
            cross_points[cnt - 1].x, cross_points[cnt - 1].y,
            box_a_corners[i].x, box_a_corners[i].y, box_a_corners[i + 1].x,
            box_a_corners[i + 1].y, box_b_corners[j].x, box_b_corners[j].y,
            box_b_corners[j + 1].x, box_b_corners[j + 1].y);
#endif
      }
    }
  }

  // corners of one box lying inside the other are also polygon vertices
  for (int k = 0; k < 4; k++) {
    if (check_in_box2d(box_a, box_b_corners[k])) {
      poly_center = poly_center + box_b_corners[k];
      cross_points[cnt] = box_b_corners[k];
      cnt++;
#ifdef DEBUG
      printf("b corners in a: corner_b(%.3f, %.3f)", cross_points[cnt - 1].x,
             cross_points[cnt - 1].y);
#endif
    }
    if (check_in_box2d(box_b, box_a_corners[k])) {
      poly_center = poly_center + box_a_corners[k];
      cross_points[cnt] = box_a_corners[k];
      cnt++;
#ifdef DEBUG
      printf("a corners in b: corner_a(%.3f, %.3f)", cross_points[cnt - 1].x,
             cross_points[cnt - 1].y);
#endif
    }
  }

  poly_center.x /= cnt;
  poly_center.y /= cnt;

  // sort the polygon vertices by polar angle around the center (bubble sort)
  Point temp;
  for (int j = 0; j < cnt - 1; j++) {
    for (int i = 0; i < cnt - j - 1; i++) {
      if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)) {
        temp = cross_points[i];
        cross_points[i] = cross_points[i + 1];
        cross_points[i + 1] = temp;
      }
    }
  }

#ifdef DEBUG
  printf("cnt=%d\n", cnt);
  for (int i = 0; i < cnt; i++) {
    printf("All cross point %d: (%.3f, %.3f)\n", i, cross_points[i].x,
           cross_points[i].y);
  }
#endif

  // get the overlap area (shoelace formula over the sorted polygon)
  float area = 0;
  for (int k = 0; k < cnt - 1; k++) {
    area += cross(cross_points[k] - cross_points[0],
                  cross_points[k + 1] - cross_points[0]);
  }

  return fabs(area) / 2.0;
}

__device__ inline float iou_bev(const float *box_a, const float *box_b) {
  // params box_a: [x, y, z, dx, dy, dz, heading]
  // params box_b: [x, y, z, dx, dy, dz, heading]
  float sa = box_a[3] * box_a[4];
  float sb = box_b[3] * box_b[4];
  float s_overlap = box_overlap(box_a, box_b);
  return s_overlap / fmaxf(sa + sb - s_overlap, EPS);
}

__global__ void boxes_overlap_kernel(const int num_a, const float *boxes_a,
                                     const int num_b, const float *boxes_b,
                                     float *ans_overlap) {
  // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
  // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]
  const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y;
  const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;

  if (a_idx >= num_a || b_idx >= num_b) {
    return;
  }
  const float *cur_box_a = boxes_a + a_idx * 7;
  const float *cur_box_b = boxes_b + b_idx * 7;
  float s_overlap = box_overlap(cur_box_a, cur_box_b);
  ans_overlap[a_idx * num_b + b_idx] = s_overlap;
}

__global__ void boxes_iou_bev_kernel(const int num_a, const float *boxes_a,
                                     const int num_b, const float *boxes_b,
                                     float *ans_iou) {
  // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
  // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]
  const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y;
  const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;

  if (a_idx >= num_a || b_idx >= num_b) {
    return;
  }

  const float *cur_box_a = boxes_a + a_idx * 7;
  const float *cur_box_b = boxes_b + b_idx * 7;
  float cur_iou_bev = iou_bev(cur_box_a, cur_box_b);
  ans_iou[a_idx * num_b + b_idx] = cur_iou_bev;
}

__global__ void nms_kernel(const int boxes_num, const float nms_overlap_thresh,
                           const float *boxes, int64_t *mask) {
  // params: boxes (N, 7) [x, y, z, dx, dy, dz, heading]
  // params: mask (N, N/THREADS_PER_BLOCK_NMS)
  const int row_start = blockIdx.y;
  const int col_start = blockIdx.x;

  // if (row_start > col_start) return;

  const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS,
                             THREADS_PER_BLOCK_NMS);
  const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS,
                             THREADS_PER_BLOCK_NMS);

  // stage this column block's boxes in shared memory
  __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7];

  if (threadIdx.x < col_size) {
    block_boxes[threadIdx.x * 7 + 0] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 0];
    block_boxes[threadIdx.x * 7 + 1] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 1];
    block_boxes[threadIdx.x * 7 + 2] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 2];
    block_boxes[threadIdx.x * 7 + 3] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 3];
    block_boxes[threadIdx.x * 7 + 4] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 4];
    block_boxes[threadIdx.x * 7 + 5] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 5];
    block_boxes[threadIdx.x * 7 + 6] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 6];
  }
  __syncthreads();

  if (threadIdx.x < row_size) {
    const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x;
    const float *cur_box = boxes + cur_box_idx * 7;

    int i = 0;
    int64_t t = 0;
    int start = 0;
    if (row_start == col_start) {
      start = threadIdx.x + 1;
    }
    for (i = start; i < col_size; i++) {
      if (iou_bev(cur_box, block_boxes + i * 7) > nms_overlap_thresh) {
        t |= 1ULL << i;
      }
    }
    const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
    mask[cur_box_idx * col_blocks + col_start] = t;
  }
}

__device__ inline float iou_normal(float const *const a,
                                   float const *const b) {
  // params: a: [x, y, z, dx, dy, dz, heading]
  // params: b: [x, y, z, dx, dy, dz, heading]
  float left = fmaxf(a[0] - a[3] / 2, b[0] - b[3] / 2),
        right = fminf(a[0] + a[3] / 2, b[0] + b[3] / 2);
  float top = fmaxf(a[1] - a[4] / 2, b[1] - b[4] / 2),
        bottom = fminf(a[1] + a[4] / 2, b[1] + b[4] / 2);
  float width = fmaxf(right - left, 0.f), height = fmaxf(bottom - top, 0.f);
  float interS = width * height;
  float Sa = a[3] * a[4];
  float Sb = b[3] * b[4];
  return interS / fmaxf(Sa + Sb - interS, EPS);
}

__global__ void nms_normal_kernel(const int boxes_num,
                                  const float nms_overlap_thresh,
                                  const float *boxes, int64_t *mask) {
  // params: boxes (N, 7) [x, y, z, dx, dy, dz, heading]
  // params: mask (N, N/THREADS_PER_BLOCK_NMS)
  const int row_start = blockIdx.y;
  const int col_start = blockIdx.x;

  // if (row_start > col_start) return;

  const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS,
                             THREADS_PER_BLOCK_NMS);
  const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS,
                             THREADS_PER_BLOCK_NMS);

  __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7];

  if (threadIdx.x < col_size) {
    block_boxes[threadIdx.x * 7 + 0] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 0];
    block_boxes[threadIdx.x * 7 + 1] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 1];
    block_boxes[threadIdx.x * 7 + 2] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 2];
    block_boxes[threadIdx.x * 7 + 3] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 3];
    block_boxes[threadIdx.x * 7 + 4] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 4];
    block_boxes[threadIdx.x * 7 + 5] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 5];
    block_boxes[threadIdx.x * 7 + 6] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 6];
  }
  __syncthreads();

  if (threadIdx.x < row_size) {
    const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x;
    const float *cur_box = boxes + cur_box_idx * 7;

    int i = 0;
    int64_t t = 0;
    int start = 0;
    if (row_start == col_start) {
      start = threadIdx.x + 1;
    }
    for (i = start; i < col_size; i++) {
      if (iou_normal(cur_box, block_boxes + i * 7) > nms_overlap_thresh) {
        t |= 1ULL << i;
      }
    }
    const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
    mask[cur_box_idx * col_blocks + col_start] = t;
  }
}

void BoxesOverlapLauncher(const cudaStream_t &stream, const int num_a,
                          const float *boxes_a, const int num_b,
                          const float *boxes_b, float *ans_overlap) {
  dim3 blocks(
      DIVUP(num_b, THREADS_PER_BLOCK),
      DIVUP(num_a, THREADS_PER_BLOCK));  // blockIdx.x(col), blockIdx.y(row)
  dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK);

  boxes_overlap_kernel<<<blocks, threads, 0, stream>>>(num_a, boxes_a, num_b,
                                                       boxes_b, ans_overlap);
#ifdef DEBUG
  cudaDeviceSynchronize();  // for using printf in kernel function
#endif
}

void BoxesIouBevLauncher(const cudaStream_t &stream, const int num_a,
                         const float *boxes_a, const int num_b,
                         const float *boxes_b, float *ans_iou) {
  dim3 blocks(
      DIVUP(num_b, THREADS_PER_BLOCK),
      DIVUP(num_a, THREADS_PER_BLOCK));  // blockIdx.x(col), blockIdx.y(row)
  dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK);

  boxes_iou_bev_kernel<<<blocks, threads, 0, stream>>>(num_a, boxes_a, num_b,
                                                       boxes_b, ans_iou);
#ifdef DEBUG
  cudaDeviceSynchronize();  // for using printf in kernel function
#endif
}

void NmsLauncher(const cudaStream_t &stream, const float *boxes, int64_t *mask,
                 int boxes_num, float nms_overlap_thresh) {
  dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS),
              DIVUP(boxes_num, THREADS_PER_BLOCK_NMS));
  dim3 threads(THREADS_PER_BLOCK_NMS);
  nms_kernel<<<blocks, threads, 0, stream>>>(boxes_num, nms_overlap_thresh,
                                             boxes, mask);
}

void NmsNormalLauncher(const cudaStream_t &stream, const float *boxes,
                       int64_t *mask, int boxes_num, float nms_overlap_thresh) {
  dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS),
              DIVUP(boxes_num, THREADS_PER_BLOCK_NMS));
  dim3 threads(THREADS_PER_BLOCK_NMS);
  nms_normal_kernel<<<blocks, threads, 0, stream>>>(
      boxes_num, nms_overlap_thresh, boxes, mask);
}
0
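Among the device functions above, iou_normal is the easiest to sanity-check, because for axis-aligned boxes the rotated-polygon machinery reduces to simple interval overlap. The host-only C++ mirror below is a test sketch, not repository code, and the box values are made up: for two unit squares offset by 0.5 in x, the intersection is 0.5, the union 1.5, so the expected IoU is 1/3.

// Host-side mirror of the device function iou_normal, for checking the
// BEV IoU definition on axis-aligned boxes.
#include <algorithm>
#include <cstdio>

float iou_normal_host(const float *a, const float *b) {
  float left = std::max(a[0] - a[3] / 2, b[0] - b[3] / 2);
  float right = std::min(a[0] + a[3] / 2, b[0] + b[3] / 2);
  float top = std::max(a[1] - a[4] / 2, b[1] - b[4] / 2);
  float bottom = std::min(a[1] + a[4] / 2, b[1] + b[4] / 2);
  float inter = std::max(right - left, 0.f) * std::max(bottom - top, 0.f);
  float uni = a[3] * a[4] + b[3] * b[4] - inter;
  return inter / std::max(uni, 1e-8f);  // EPS guard, as in the kernel
}

int main() {
  // [x, y, z, dx, dy, dz, heading]; z extent and heading are ignored in BEV.
  float a[7] = {0.f, 0.f, 0.f, 1.f, 1.f, 1.f, 0.f};
  float b[7] = {0.5f, 0.f, 0.f, 1.f, 1.f, 1.f, 0.f};
  printf("IoU = %.4f\n", iou_normal_host(a, b));  // expected 0.3333
  return 0;
}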
apollo_public_repos/apollo-model-centerpoint/deploy/pointpillars/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/pointpillars/cpp/custom_ops/iou3d_nms.h
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef IOU3D_NMS_H
#define IOU3D_NMS_H

#include <cuda.h>
#include <cuda_runtime_api.h>

#include <vector>

#include "paddle/include/experimental/ext_all.h"

std::vector<paddle::Tensor> boxes_overlap_bev_gpu(
    const paddle::Tensor &boxes_a, const paddle::Tensor &boxes_b);
std::vector<paddle::Tensor> boxes_iou_bev_gpu(
    const paddle::Tensor &boxes_a_tensor,
    const paddle::Tensor &boxes_b_tensor);
std::vector<paddle::Tensor> nms_gpu(const paddle::Tensor &boxes,
                                    float nms_overlap_thresh);
std::vector<paddle::Tensor> nms_normal_gpu(const paddle::Tensor &boxes,
                                           float nms_overlap_thresh);

#endif  // IOU3D_NMS_H
0
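The header above only declares the host entry points; to call them from Python they still have to be registered as Paddle custom operators. The repository's actual registration file is not shown in this listing, so the sketch below is hypothetical: it illustrates how nms_gpu could be bound with Paddle's PD_BUILD_OP macros, and the infer-shape/infer-dtype helper names are illustrative.

// Hypothetical registration sketch for the nms_gpu entry point.
#include "iou3d_nms.h"

std::vector<std::vector<int64_t>> NmsInferShape(
    std::vector<int64_t> boxes_shape) {
  // keep indices: at most one per input box; num_to_keep: a scalar
  return {{boxes_shape[0]}, {1}};
}

std::vector<paddle::DataType> NmsInferDtype(paddle::DataType boxes_dtype) {
  return {paddle::DataType::INT32, paddle::DataType::INT32};
}

PD_BUILD_OP(nms_gpu)
    .Inputs({"boxes"})
    .Outputs({"keep", "num_to_keep"})
    .Attrs({"nms_overlap_thresh: float"})
    .SetKernelFn(PD_KERNEL(nms_gpu))
    .SetInferShapeFn(PD_INFER_SHAPE(NmsInferShape))
    .SetInferDtypeFn(PD_INFER_DTYPE(NmsInferDtype));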
apollo_public_repos/apollo-model-centerpoint/deploy/pointpillars/cpp
apollo_public_repos/apollo-model-centerpoint/deploy/pointpillars/cpp/custom_ops/iou3d_cpu.h
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef IOU3D_CPU_H
#define IOU3D_CPU_H

#include <cuda.h>
#include <cuda_runtime_api.h>

#include <vector>

#include "paddle/include/experimental/ext_all.h"

std::vector<paddle::Tensor> boxes_iou_bev_cpu(
    const paddle::Tensor& boxes_a_tensor, const paddle::Tensor& boxes_b_tensor);

#endif  // IOU3D_CPU_H
0
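A natural use of the CPU reference declared above is to cross-check the GPU BEV-IoU kernel. The helper below is a hypothetical consistency-check sketch, not part of the repository's test suite; it only relies on calls already used in these files (copy_to, data<float>, and the two boxes_iou_bev entry points) plus Tensor::numel from the same extension API.

// Hypothetical consistency check between boxes_iou_bev_cpu and the GPU path.
#include <cmath>

#include "iou3d_cpu.h"
#include "iou3d_nms.h"

bool ious_agree(const paddle::Tensor &boxes_a, const paddle::Tensor &boxes_b,
                float tol = 1e-4f) {
  // The CPU reference expects CPU tensors; the GPU path expects GPU tensors.
  auto cpu_iou = boxes_iou_bev_cpu(boxes_a, boxes_b)[0];
  auto gpu_iou = boxes_iou_bev_gpu(boxes_a.copy_to(paddle::GPUPlace(), true),
                                   boxes_b.copy_to(paddle::GPUPlace(), true))[0]
                     .copy_to(paddle::CPUPlace(), true);
  const float *c = cpu_iou.data<float>();
  const float *g = gpu_iou.data<float>();
  for (int64_t i = 0; i < cpu_iou.numel(); ++i) {
    if (std::fabs(c[i] - g[i]) > tol) return false;  // element-wise mismatch
  }
  return true;
}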