Thanks for the tip! Could you help me figure out what I am doing wrong here? The code below takes the first annotation of the first sample, finds a camera in which it is visible, lifts the box's camera-frame yaw back into the global frame, and compares it against the ground-truth global yaw.
from pyquaternion import Quaternion
import numpy as np
from nuscenes.nuscenes import NuScenes
from nuscenes.utils.geometry_utils import BoxVisibility
def quaternion_yaw(q: Quaternion, in_image_frame: bool = True) -> float:
    """Extract yaw by rotating the x-axis and measuring its heading."""
    if in_image_frame:
        # Camera frame: z forward, x right; yaw is measured in the x-z plane.
        v = np.dot(q.rotation_matrix, np.array([1, 0, 0]))
        yaw = -np.arctan2(v[2], v[0])
    else:
        # Global/ego frame: z up, x forward; yaw is measured in the x-y plane.
        v = np.dot(q.rotation_matrix, np.array([1, 0, 0]))
        yaw = np.arctan2(v[1], v[0])
    return yaw
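
# Sanity check: a pure rotation about z should round-trip through
# quaternion_yaw when in_image_frame=False.
q_check = Quaternion(axis=(0, 0, 1), radians=0.5)
assert abs(quaternion_yaw(q_check, in_image_frame=False) - 0.5) < 1e-9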
nusc = NuScenes(version='v1.0-mini', dataroot='/data/sets/nuscenes')  # adjust dataroot to your setup
scene = nusc.scene[0]
sample = nusc.get('sample', scene['first_sample_token'])

# Ground-truth yaw of the first annotation, read directly from the global-frame record.
annotation_token = sample['anns'][0]
box_global = nusc.get('sample_annotation', annotation_token)  # in global frame
gt_yaw_global = quaternion_yaw(Quaternion(box_global['rotation']), False)
# Find a camera in which this annotation is visible; get_sample_data returns
# the box already transformed into that camera's frame.
boxes = []
cams = [key for key in sample['data'].keys() if 'CAM' in key]
for cam in cams:
    _, boxes, _ = nusc.get_sample_data(sample['data'][cam], box_vis_level=BoxVisibility.ANY,
                                       selected_anntokens=[annotation_token])
    if len(boxes) > 0:
        break  # found an image match
# Chain of rotations: camera -> ego vehicle -> global.
cam_data = nusc.get('sample_data', sample['data'][cam])
cam_pose = nusc.get('calibrated_sensor', cam_data['calibrated_sensor_token'])
car_from_cam = Quaternion(cam_pose['rotation'])
ego_pose_cam = nusc.get('ego_pose', cam_data['ego_pose_token'])
global_from_car = Quaternion(ego_pose_cam['rotation'])

# Yaw of the box in the camera frame, re-expressed as a rotation about z
# and rotated back into the global frame.
yaw_cam = quaternion_yaw(boxes[0].orientation)
q_yaw_cam = Quaternion(axis=(0, 0, 1), radians=yaw_cam)
q_yaw_global = global_from_car * (car_from_cam * q_yaw_cam)
yaw_global = quaternion_yaw(q_yaw_global, False)
print(yaw_cam, gt_yaw_global)  # camera-frame yaw vs. ground-truth global yaw
For the mini dataset this prints -3.120828115749234 -0.36810739253486857.