I am working on a project with the PointPillars model. I am trying to add a mask to the model so that, within a specified xyz range, the mask will iterate through all the possible values and output the filtered point cloud. I am adding the mask to test.py, and before I added it everything worked just fine. I am using nested for loops to iterate through all the possible mask values with a step of 0.16. Here is the code after I added the mask:
import argparse
import cv2
import numpy as np
import os
import torch
import pdb
from utils import setup_seed, read_points, read_calib, read_label, \
    keep_bbox_from_image_range, keep_bbox_from_lidar_range, vis_pc, \
    vis_img_3d, bbox3d2corners_camera, points_camera2image, \
    bbox_camera2lidar
from model import PointPillars

def point_range_filter(pts, x_min, x_max, yl, yr, z_max):
    '''
    Applies the XYZ mask to filter points within the given ranges.
    '''
    mask_x = (pts[:, 0] > x_min) & (pts[:, 0] < x_max)
    mask_y_left = pts[:, 1] > yl
    mask_y_right = pts[:, 1] < yr
    mask_z = pts[:, 2] < z_max
    mask = mask_x & mask_y_left & mask_y_right & mask_z
    pts = pts[mask]
    return pts

def main(args):
    CLASSES = {
        'Pedestrian': 0,
        'Cyclist': 1,
        'Car': 2
    }
    LABEL2CLASSES = {v: k for k, v in CLASSES.items()}
    pcd_limit_range = np.array([0, -40, -3, 70.4, 40, 0.0], dtype=np.float32)

    if not args.no_cuda:
        model = PointPillars(nclasses=len(CLASSES)).cuda()
        model.load_state_dict(torch.load(args.ckpt))
    else:
        model = PointPillars(nclasses=len(CLASSES))
        model.load_state_dict(
            torch.load(args.ckpt, map_location=torch.device('cpu')))

    if not os.path.exists(args.pc_path):
        raise FileNotFoundError
    pc = read_points(args.pc_path)

    # Bounds and step size for the sliding mask.
    x_min, x_max = 0, 69.12
    y_min, y_max = -39.68, 39.68
    z_min, z_max = -3, 1
    step = 0.16

    for x in np.arange(x_min, x_max, step):
        for yl in np.arange(y_min, y_max, step):
            for yr in np.arange(yl + step, y_max, step):
                for z in np.arange(z_min, z_max, step):
                    # Apply the mask to the point cloud
                    filtered_pc = point_range_filter(pc, x, x_max, yl, yr, z)
                    # pc, x, x_max, yl, yr, z (for dynamic mask)
                    # pc, x_min=0, x_max=70.4, yl=-39.68, yr=39.68, z_max=1 (for fixed mask)
                    if filtered_pc.size == 0:
                        continue
                    pc_torch = torch.from_numpy(filtered_pc)

                    if os.path.exists(args.calib_path):
                        calib_info = read_calib(args.calib_path)
                    else:
                        calib_info = None

                    if os.path.exists(args.gt_path):
                        gt_label = read_label(args.gt_path)
                    else:
                        gt_label = None

                    if os.path.exists(args.img_path):
                        img = cv2.imread(args.img_path, 1)
                    else:
                        img = None

                    # Run inference on the filtered point cloud.
                    model.eval()
                    with torch.no_grad():
                        if not args.no_cuda:
                            pc_torch = pc_torch.cuda()
                        result_filter = model(batched_pts=[pc_torch],
                                              mode='test')[0]

                    if calib_info is not None and img is not None:
                        tr_velo_to_cam = calib_info['Tr_velo_to_cam'].astype(np.float32)
                        r0_rect = calib_info['R0_rect'].astype(np.float32)
                        P2 = calib_info['P2'].astype(np.float32)
                        image_shape = img.shape[:2]
                        result_filter = keep_bbox_from_image_range(result_filter, tr_velo_to_cam, r0_rect, P2, image_shape)

                    result_filter = keep_bbox_from_lidar_range(result_filter, pcd_limit_range)
                    if not result_filter:
                        print("Empty result_filter, skipped")
                        return
                    lidar_bboxes = result_filter['lidar_bboxes']
                    labels, scores = result_filter['labels'], result_filter['scores']
                    vis_pc(filtered_pc, bboxes=lidar_bboxes, labels=labels)

                    if calib_info is not None and img is not None:
                        bboxes2d, camera_bboxes = result_filter['bboxes2d'], result_filter['camera_bboxes']
                        bboxes_corners = bbox3d2corners_camera(camera_bboxes)
                        image_points = points_camera2image(bboxes_corners, P2)
                        img = vis_img_3d(img, image_points, labels, rt=True)

                    if calib_info is not None and gt_label is not None:
                        tr_velo_to_cam = calib_info['Tr_velo_to_cam'].astype(np.float32)
                        r0_rect = calib_info['R0_rect'].astype(np.float32)
                        dimensions = gt_label['dimensions']
                        location = gt_label['location']
                        rotation_y = gt_label['rotation_y']
                        gt_labels = np.array([CLASSES.get(item, -1) for item in gt_label['name']])
                        sel = gt_labels != -1
                        gt_labels = gt_labels[sel]
                        bboxes_camera = np.concatenate([location, dimensions, rotation_y[:, None]], axis=-1)
                        gt_lidar_bboxes = bbox_camera2lidar(bboxes_camera, tr_velo_to_cam, r0_rect)
                        bboxes_camera = bboxes_camera[sel]
                        gt_lidar_bboxes = gt_lidar_bboxes[sel]

                        gt_labels = [-1] * len(gt_label['name'])  # to distinguish between the ground truth and the predictions

                        pred_gt_lidar_bboxes = np.concatenate([lidar_bboxes, gt_lidar_bboxes], axis=0)
                        pred_gt_labels = np.concatenate([labels, gt_labels])
                        vis_pc(pc, pred_gt_lidar_bboxes, labels=pred_gt_labels)

                        if img is not None:
                            bboxes_corners = bbox3d2corners_camera(bboxes_camera)
                            image_points = points_camera2image(bboxes_corners, P2)
                            gt_labels = [-1] * len(gt_label['name'])
                            img = vis_img_3d(img, image_points, gt_labels, rt=True)

                    if calib_info is not None and img is not None:
                        cv2.imshow(f'{os.path.basename(args.img_path)}-3d bbox', img)
                        cv2.waitKey(0)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Configuration Parameters')
    parser.add_argument('--ckpt', default='pretrained/epoch_160.pth', help='your checkpoint for kitti')
    parser.add_argument('--pc_path', help='your point cloud path')
    parser.add_argument('--calib_path', default='', help='your calib file path')
    parser.add_argument('--gt_path', default='', help='your ground truth path')
    parser.add_argument('--img_path', default='', help='your image path')
    parser.add_argument('--no_cuda', action='store_true',
                        help='whether to use cuda')

    args = parser.parse_args()
    main(args)
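For reference, the only functional change from the version that worked is which arguments get passed to point_range_filter: fixed bounds before, the loop variables now. Based on the commented-out calls inside the loop, the two variants look like this (a sketch that assumes the loop variables x, yl, yr, z and the array pc from the code above):

# Fixed mask (old behaviour): constant bounds, applied once.
filtered_pc = point_range_filter(pc, x_min=0, x_max=70.4, yl=-39.68, yr=39.68, z_max=1)

# Dynamic mask (new behaviour): bounds taken from the current loop iteration.
filtered_pc = point_range_filter(pc, x, x_max, yl, yr, z)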
When I try to run the version with the dynamic mask, I get the following error:
Traceback (most recent call last):
  File "test.py", line 158, in <module>
    main(args)
  File "test.py", line 98, in main
    result_filter = keep_bbox_from_image_range(result_filter, tr_velo_to_cam, r0_rect, P2, image_shape)
  File "C:\Users\1016b\PointPillars\utils\process.py", line 561, in keep_bbox_from_image_range
    lidar_bboxes = result['lidar_bboxes']
TypeError: tuple indices must be integers or slices, not str
I know that lidar_bboxes = result['lidar_bboxes'] itself is not the issue, because that line comes from the original code and I did not edit it at all. Can someone help me figure out what the issue might be?
Here is the original PointPillars GitHub repository: PointPillars
I figured that the filter might sometimes produce an empty result that then gets passed on to the lidar_bboxes lookup, so I added a condition to check whether result_filter is empty, but that did not work; I still get the same error.
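To be concrete, this is roughly what I mean: the emptiness check I added, plus a debug print (a sketch only, not in my actual script) that would show what type result_filter actually has right before the call that crashes:

# Sketch: inspect the raw model output before any post-processing.
result_filter = model(batched_pts=[pc_torch], mode='test')[0]
print(type(result_filter))  # the post-processing indexes it with string keys, so a dict is expected;
                            # a tuple here would match the TypeError in the traceback
if isinstance(result_filter, dict):
    print(result_filter.keys())

# The emptiness check I added (same idea as the `if not result_filter:` block above).
if not result_filter:
    print("Empty result_filter, skipped")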