diff --git a/source/applications/advanced/get_checkerboard_pose_from_zdf.py b/source/applications/advanced/get_checkerboard_pose_from_zdf.py
index 67c4a44d..f979d6e5 100644
--- a/source/applications/advanced/get_checkerboard_pose_from_zdf.py
+++ b/source/applications/advanced/get_checkerboard_pose_from_zdf.py
@@ -50,7 +50,7 @@ def _visualize_checkerboard_point_cloud_with_coordinate_system(
 
     Args:
         point_cloud_open3d: An Open3d point cloud of a checkerboard
-        transform: Transformation matrix
+        transform: Transformation matrix (4x4)
 
     """
     coord_system_mesh = o3d.geometry.TriangleMesh.create_coordinate_frame(size=30)
@@ -72,18 +72,18 @@ def _main() -> None:
    point_cloud = frame.point_cloud()
 
    print("Detecting checkerboard and estimating its pose in camera frame")
-    transform_camera_to_checkerboard = zivid.calibration.detect_calibration_board(frame).pose().to_matrix()
-    print(f"Camera pose in checkerboard frame:\n{transform_camera_to_checkerboard}")
+    camera_to_checkerboard_transform = zivid.calibration.detect_calibration_board(frame).pose().to_matrix()
+    print(f"Camera pose in checkerboard frame:\n{camera_to_checkerboard_transform}")
 
    transform_file_name = "CameraToCheckerboardTransform.yaml"
    print(f"Saving detected checkerboard pose to YAML file: {transform_file_name}")
    transform_file_path = Path(__file__).parent / transform_file_name
-    assert_affine_matrix_and_save(transform_camera_to_checkerboard, transform_file_path)
+    assert_affine_matrix_and_save(camera_to_checkerboard_transform, transform_file_path)
 
    print("Visualizing checkerboard with coordinate system")
    checkerboard_point_cloud = _create_open3d_point_cloud(point_cloud)
    _visualize_checkerboard_point_cloud_with_coordinate_system(
-        checkerboard_point_cloud, transform_camera_to_checkerboard
+        checkerboard_point_cloud, camera_to_checkerboard_transform
    )
 
diff --git a/source/applications/advanced/hand_eye_calibration/robodk_hand_eye_calibration/robodk_hand_eye_calibration.py b/source/applications/advanced/hand_eye_calibration/robodk_hand_eye_calibration/robodk_hand_eye_calibration.py
index 60e416ba..45b3072c 100644
--- a/source/applications/advanced/hand_eye_calibration/robodk_hand_eye_calibration/robodk_hand_eye_calibration.py
+++ b/source/applications/advanced/hand_eye_calibration/robodk_hand_eye_calibration/robodk_hand_eye_calibration.py
@@ -57,7 +57,7 @@ def _get_frame_and_transform(robot: Item, camera: zivid.Camera) -> Tuple[zivid.F
 
     Returns:
         Zivid frame
-        4x4 transformation matrix
+        Transformation matrix (4x4)
 
     """
     frame = _assisted_capture(camera)
@@ -77,7 +77,7 @@ def _save_point_cloud_and_pose(
         save_directory: Path to where data will be saved
         image_and_pose_iterator: Image number
         frame: Point cloud stored as ZDF
-        transform: 4x4 transformation matrix
+        transform: Transformation matrix (4x4)
 
     """
     frame.save(save_directory / f"img{image_and_pose_iterator:02d}.zdf")
@@ -143,7 +143,7 @@ def _save_hand_eye_results(save_directory: Path, transform: np.ndarray, residual
 
     Args:
         save_directory: Path to where data will be saved
-        transform: 4x4 transformation matrix
+        transform: Transformation matrix (4x4)
         residuals: List of residuals
 
     """
@@ -232,7 +232,7 @@ def perform_hand_eye_calibration(
         user_options: Input arguments
 
     Returns:
-        transform: 4x4 transformation matrix
+        transform: Transformation matrix (4x4)
         residuals: List of residuals
 
     Raises:
diff --git a/source/applications/advanced/hand_eye_calibration/ur_hand_eye_calibration/universal_robots_perform_hand_eye_calibration.py b/source/applications/advanced/hand_eye_calibration/ur_hand_eye_calibration/universal_robots_perform_hand_eye_calibration.py
index 6add2c60..4a3ea52e 100644
--- a/source/applications/advanced/hand_eye_calibration/ur_hand_eye_calibration/universal_robots_perform_hand_eye_calibration.py
+++ b/source/applications/advanced/hand_eye_calibration/ur_hand_eye_calibration/universal_robots_perform_hand_eye_calibration.py
@@ -127,7 +127,7 @@ def _save_zdf_and_pose(save_dir: Path, image_num: int, frame: zivid.Frame, trans
         save_dir: Directory to save data
         image_num: Image number
         frame: Point cloud stored as ZDF
-        transform: 4x4 transformation matrix
+        transform: Transformation matrix (4x4)
 
     """
     frame.save(save_dir / f"img{image_num:02d}.zdf")
@@ -164,7 +164,7 @@ def _get_frame_and_transform_matrix(
 
     Returns:
         frame: Zivid frame
-        transform: 4x4 transformation matrix
+        transform: Transformation matrix (4x4)
 
     """
     frame = camera.capture(settings)
@@ -222,7 +222,7 @@ def _save_hand_eye_results(save_dir: Path, transform: np.ndarray, residuals: Lis
 
     Args:
         save_dir: Path to where data will be saved
-        transform: 4x4 transformation matrix
+        transform: Transformation matrix (4x4)
         residuals: List of residuals
 
     """
@@ -392,7 +392,7 @@ def perform_hand_eye_calibration(
         data_dir: Path to dataset
 
     Returns:
-        transform: 4x4 transformation matrix
+        transform: Transformation matrix (4x4)
         residuals: List of residuals
 
     Raises:
diff --git a/source/applications/advanced/hand_eye_calibration/utilize_hand_eye_calibration.py b/source/applications/advanced/hand_eye_calibration/utilize_hand_eye_calibration.py
index 6ef85913..d7092c12 100644
--- a/source/applications/advanced/hand_eye_calibration/utilize_hand_eye_calibration.py
+++ b/source/applications/advanced/hand_eye_calibration/utilize_hand_eye_calibration.py
@@ -53,7 +53,7 @@ def _main() -> None:
            eye_to_hand_transform_file_path = get_sample_data_path() / "EyeToHandTransform.yaml"
 
            print("Reading camera pose in robot base reference frame (result of eye-to-hand calibration)")
-            transform_base_to_camera = load_and_assert_affine_matrix(eye_to_hand_transform_file_path)
+            base_to_camera_transform = load_and_assert_affine_matrix(eye_to_hand_transform_file_path)
 
            break
 
@@ -71,13 +71,13 @@ def _main() -> None:
            print(
                "Reading camera pose in flange (end-effector) reference frame (result of eye-in-hand calibration)"
            )
-            transform_flange_to_camera = load_and_assert_affine_matrix(eye_in_hand_transform_file_path)
+            flange_to_camera_transform = load_and_assert_affine_matrix(eye_in_hand_transform_file_path)
 
            print("Reading flange (end-effector) pose in robot base reference frame")
-            transform_base_to_flange = load_and_assert_affine_matrix(robot_transform_file_path)
+            base_to_flange_transform = load_and_assert_affine_matrix(robot_transform_file_path)
 
            print("Computing camera pose in robot base reference frame")
-            transform_base_to_camera = np.matmul(transform_base_to_flange, transform_flange_to_camera)
+            base_to_camera_transform = np.matmul(base_to_flange_transform, flange_to_camera_transform)
 
            break
 
@@ -107,7 +107,7 @@ def _main() -> None:
    print(f"Point coordinates in camera reference frame: {point_in_camera_frame[0:3]}")
 
    print("Transforming (picking) point from camera to robot base reference frame")
-    point_in_base_frame = np.matmul(transform_base_to_camera, point_in_camera_frame)
+    point_in_base_frame = np.matmul(base_to_camera_transform, point_in_camera_frame)
 
    print(f"Point coordinates in robot base reference frame: {point_in_base_frame[0:3]}")
 
@@ -116,7 +116,7 @@ def _main() -> None:
        if command.lower() == "p":
            print("Transforming point cloud")
-            point_cloud.transform(transform_base_to_camera)
+            point_cloud.transform(base_to_camera_transform)
 
            save_file = "ZividGemTransformed.zdf"
            print(f"Saving point cloud to file: {save_file}")
diff --git a/source/applications/advanced/reproject_points.py b/source/applications/advanced/reproject_points.py
index 21e19249..35895e73 100644
--- a/source/applications/advanced/reproject_points.py
+++ b/source/applications/advanced/reproject_points.py
@@ -36,23 +36,23 @@ def _checkerboard_grid() -> List[np.ndarray]:
 
     return list(points)
 
 
-def _transform_grid_to_calibration_board(
-    grid: List[np.ndarray], transform_camera_to_checkerboard: np.ndarray
+def _transform_grid_to_camera_frame(
+    grid: List[np.ndarray], camera_to_checkerboard_transform: np.ndarray
 ) -> List[np.ndarray]:
     """Transform a list of grid points to the camera frame.
 
     Args:
         grid: List of 4D points (X,Y,Z,W) for each corner in the checkerboard, in the checkerboard frame
-        transform_camera_to_checkerboard: 4x4 transformation matrix
+        camera_to_checkerboard_transform: 4x4 transformation matrix
 
     Returns:
         List of 3D grid points in the camera frame
 
     """
     points_in_camera_frame = []
-    for point in grid:
-        transformed_point = transform_camera_to_checkerboard @ point
-        points_in_camera_frame.append(transformed_point[:3])
+    for point_in_checkerboard_frame in grid:
+        point_in_camera_frame = camera_to_checkerboard_transform @ point_in_checkerboard_frame
+        points_in_camera_frame.append(point_in_camera_frame[:3])
 
     return points_in_camera_frame
@@ -87,17 +87,19 @@ def _main() -> None:
        raise RuntimeError("Calibration board not detected!")
 
    print("Estimating checkerboard pose")
-    transform_camera_to_checkerboard = detection_result.pose().to_matrix()
-    print(transform_camera_to_checkerboard)
+    camera_to_checkerboard_transform = detection_result.pose().to_matrix()
+    print(camera_to_checkerboard_transform)
 
    print("Creating a grid of 7 x 6 points (3D) with 30 mm spacing to match checkerboard corners")
-    grid = _checkerboard_grid()
+    grid_points_in_checkerboard_frame = _checkerboard_grid()
 
    print("Transforming the grid to the camera frame")
-    points_in_camera_frame = _transform_grid_to_calibration_board(grid, transform_camera_to_checkerboard)
+    grid_points_in_camera_frame = _transform_grid_to_camera_frame(
+        grid_points_in_checkerboard_frame, camera_to_checkerboard_transform
+    )
 
    print("Getting projector pixels (2D) corresponding to points (3D) in the camera frame")
-    projector_pixels = zivid.projection.pixels_from_3d_points(camera, points_in_camera_frame)
+    projector_pixels = zivid.projection.pixels_from_3d_points(camera, grid_points_in_camera_frame)
 
    print("Retrieving the projector resolution that the camera supports")
    projector_resolution = zivid.projection.projector_resolution(camera)
diff --git a/source/applications/advanced/robot_guidance/robodk_robot_guidance.py b/source/applications/advanced/robot_guidance/robodk_robot_guidance.py
index e5e53c36..9b329fdc 100644
--- a/source/applications/advanced/robot_guidance/robodk_robot_guidance.py
+++ b/source/applications/advanced/robot_guidance/robodk_robot_guidance.py
@@ -44,7 +44,7 @@ def _options() -> argparse.Namespace:
    parser.add_argument(
        "--tool-yaml",
        required=True,
-        help="Path to YAML file that contains the tool tip to robot flange transformation matrix",
+        help="Path to YAML file that contains the tool tip transformation matrix (tool tip in robot flange frame)",
    )
    parser.add_argument(
        "--hand-eye-yaml",
@@ -78,7 +78,7 @@ def _transform_points(points: List[np.ndarray], transform: np.ndarray) -> List[n
 
     Args:
         points: List of 3D points to be transformed
-        transform: Homogenous transform (4x4)
+        transform: Transformation matrix (4x4)
 
     Returns:
         List of transformed 3D points
@@ -127,13 +127,13 @@ def _zivid_logo_from_grid() -> List[int]:
 
 
-def _generate_tool_poses_from_checkerboard(
-    camera: zivid.Camera, camera_to_base_transform: np.ndarray
+def _generate_tool_poses_from_checkerboard(
+    camera: zivid.Camera, base_to_camera_transform: np.ndarray
 ) -> List[np.ndarray]:
     """Generate a tool path as a list of poses in the camera frame using the checkerboard.
 
     Args:
         camera: Zivid camera
-        camera_to_base_transform: Homogenous transform (4x4) from the camera frame to the robot base
+        base_to_camera_transform: Camera pose in robot base frame (4x4)
 
     Raises:
         RuntimeError: If the calibration board is not detected
@@ -146,12 +146,12 @@ def _generate_tool_poses_from_checkerboard(
    if not detection_result.valid():
        raise RuntimeError("Calibration board not detected!")
 
-    checkerboard_to_camera_transform = detection_result.pose().to_matrix()
-    checkerboard_to_base_transform = camera_to_base_transform @ checkerboard_to_camera_transform
+    camera_to_checkerboard_transform = detection_result.pose().to_matrix()
+    base_to_checkerboard_transform = base_to_camera_transform @ camera_to_checkerboard_transform
 
-    grid_points = _checkerboard_grid()
-    grid_points_in_base_frame = _transform_points(grid_points, checkerboard_to_base_transform)
-    grid_poses_in_base_frame = _points_to_poses(grid_points_in_base_frame, checkerboard_to_base_transform[:3, :3])
+    grid_points_in_checkerboard_frame = _checkerboard_grid()
+    grid_points_in_base_frame = _transform_points(grid_points_in_checkerboard_frame, base_to_checkerboard_transform)
+    grid_poses_in_base_frame = _points_to_poses(grid_points_in_base_frame, base_to_checkerboard_transform[:3, :3])
 
    tool_poses_in_base_frame = [grid_poses_in_base_frame[idx] for idx in _zivid_logo_from_grid()]
 
@@ -242,25 +242,25 @@ def _main() -> None:
    camera = app.connect_camera()
 
    print("Generating tool path from the checkerboard")
-    tool_to_flange_transform = load_and_assert_affine_matrix(user_options.tool_yaml)
-    flange_to_base_transform = np.array(robot.Pose()).T
+    flange_to_tcp_transform = load_and_assert_affine_matrix(user_options.tool_yaml)
+    base_to_flange_transform = np.array(robot.Pose()).T
 
    if user_options.eih:
-        camera_to_flange_transform = load_and_assert_affine_matrix(user_options.hand_eye_yaml)
-        camera_to_base_transform = flange_to_base_transform @ camera_to_flange_transform
+        flange_to_camera_transform = load_and_assert_affine_matrix(user_options.hand_eye_yaml)
+        base_to_camera_transform = base_to_flange_transform @ flange_to_camera_transform
    else:
-        camera_to_base_transform = load_and_assert_affine_matrix(user_options.hand_eye_yaml)
+        base_to_camera_transform = load_and_assert_affine_matrix(user_options.hand_eye_yaml)
 
-    tool_poses_in_robot_base = _generate_tool_poses_from_checkerboard(camera, camera_to_base_transform)
-    flange_poses_in_robot_base = [
-        tool_pose_in_robot_base @ np.linalg.inv(tool_to_flange_transform)
-        for tool_pose_in_robot_base in tool_poses_in_robot_base
+    tool_poses_in_base_frame = _generate_tool_poses_from_checkerboard(camera, base_to_camera_transform)
+    flange_poses_in_base_frame = [
+        tool_pose_in_base_frame @ np.linalg.inv(flange_to_tcp_transform)
+        for tool_pose_in_base_frame in tool_poses_in_base_frame
    ]
 
    print("Displaying the tool path on the checkerboard")
    tool_poses_in_camera_frame = [
-        np.linalg.inv(camera_to_base_transform) @ tool_pose_in_robot_base
-        for tool_pose_in_robot_base in tool_poses_in_robot_base
+        np.linalg.inv(base_to_camera_transform) @ tool_pose_in_base_frame
+        for tool_pose_in_base_frame in tool_poses_in_base_frame
    ]
 
    projector_image = _projected_tool_path(camera, tool_poses_in_camera_frame)
@@ -270,9 +270,9 @@ def _main() -> None:
    if user_options.eih:
        projected_image_handle.stop()
 
-    _approach_target_pose(robot, flange_poses_in_robot_base[0], offset=np.array([0, 0, -100]))
+    _approach_target_pose(robot, flange_poses_in_base_frame[0], offset=np.array([0, 0, -100]))
 
-    _follow_linear_path(robot, flange_poses_in_robot_base)
+    _follow_linear_path(robot, flange_poses_in_base_frame)
 
    robot.MoveJ(robot.JointsHome())
diff --git a/source/applications/advanced/roi_box_via_aruco_marker.py b/source/applications/advanced/roi_box_via_aruco_marker.py
index da20958a..bccf4198 100644
--- a/source/applications/advanced/roi_box_via_aruco_marker.py
+++ b/source/applications/advanced/roi_box_via_aruco_marker.py
@@ -18,7 +18,7 @@ def _transform_points(points: List[np.ndarray], transform: np.ndarray) -> List[n
 
     Args:
         points: list of 3D points to be transformed
-        transform: homogenous transform (4x4)
+        transform: homogeneous transformation matrix (4x4)
 
     Returns:
         transformed_points: list of transformed 3D points
@@ -88,12 +88,12 @@ def _main() -> None:
        raise RuntimeError("No ArUco markers detected")
 
    print("Estimating pose of detected ArUco marker")
-    transform_camera_to_marker = detection_result.detected_markers()[0].pose.to_matrix()
+    camera_to_marker_transform = detection_result.detected_markers()[0].pose.to_matrix()
 
    print("Transforming the ROI base frame points to the camera frame")
    roi_points_in_camera_frame = _transform_points(
        [point_o_in_aruco_frame, point_a_in_aruco_frame, point_b_in_aruco_frame],
-        transform_camera_to_marker,
+        camera_to_marker_transform,
    )
 
    print("Setting the ROI")
diff --git a/source/applications/advanced/roi_box_via_checkerboard.py b/source/applications/advanced/roi_box_via_checkerboard.py
index 1c4f2dab..5b9f0028 100644
--- a/source/applications/advanced/roi_box_via_checkerboard.py
+++ b/source/applications/advanced/roi_box_via_checkerboard.py
@@ -18,7 +18,7 @@ def _transform_points(points: List[np.ndarray], transform: np.ndarray) -> List[n
 
     Args:
         points: list of 3D points to be transformed
-        transform: homogenous transform (4x4)
+        transform: homogeneous transformation matrix (4x4)
 
     Returns:
         transformed_points: list of transformed 3D points
@@ -79,12 +79,12 @@ def _main() -> None:
 
    print("Detecting and estimating pose of the Zivid checkerboard in the camera frame")
    detection_result = zivid.calibration.detect_feature_points(original_frame)
-    transform_camera_to_checkerboard = detection_result.pose().to_matrix()
+    camera_to_checkerboard_transform = detection_result.pose().to_matrix()
 
    print("Transforming the ROI base frame points to the camera frame")
    roi_points_in_camera_frame = _transform_points(
        [point_o_in_checkerboard_frame, point_a_in_checkerboard_frame, point_b_in_checkerboard_frame],
-        transform_camera_to_checkerboard,
+        camera_to_checkerboard_transform,
    )
 
    print("Setting the ROI")
diff --git a/source/applications/advanced/transform_point_cloud_via_checkerboard.py b/source/applications/advanced/transform_point_cloud_via_checkerboard.py
index b561740a..5e374599 100644
--- a/source/applications/advanced/transform_point_cloud_via_checkerboard.py
+++ b/source/applications/advanced/transform_point_cloud_via_checkerboard.py
@@ -125,7 +125,7 @@ def _get_coordinate_system_points(
    }
 
 
-def _draw_coordinae_system(frame: zivid.Frame, checkerboard_pose: np.ndarray, bgra_image: np.ndarray) -> None:
+def _draw_coordinate_system(frame: zivid.Frame, checkerboard_pose: np.ndarray, bgra_image: np.ndarray) -> None:
     """Draw a coordinate system on a BGRA image.
 
     Args:
@@ -164,24 +164,24 @@ def _main() -> None:
    print("Detecting and estimating pose of the Zivid checkerboard in the camera frame")
    detection_result = zivid.calibration.detect_calibration_board(frame)
-    transform_camera_to_checkerboard = detection_result.pose().to_matrix()
-    print(transform_camera_to_checkerboard)
+    camera_to_checkerboard_transform = detection_result.pose().to_matrix()
+    print(camera_to_checkerboard_transform)
 
    print("Camera pose in checkerboard frame:")
-    transform_checkerboard_to_camera = np.linalg.inv(transform_camera_to_checkerboard)
-    print(transform_checkerboard_to_camera)
+    checkerboard_to_camera_transform = np.linalg.inv(camera_to_checkerboard_transform)
+    print(checkerboard_to_camera_transform)
 
    transform_file = Path("CheckerboardToCameraTransform.yaml")
    print("Saving a YAML file with Inverted checkerboard pose to file: ")
-    assert_affine_matrix_and_save(transform_checkerboard_to_camera, transform_file)
+    assert_affine_matrix_and_save(checkerboard_to_camera_transform, transform_file)
 
    print("Transforming point cloud from camera frame to Checkerboard frame")
-    point_cloud.transform(transform_checkerboard_to_camera)
+    point_cloud.transform(checkerboard_to_camera_transform)
 
    print("Converting to OpenCV image format")
    bgra_image = point_cloud.copy_data("bgra")
 
    print("Visualizing checkerboard with coordinate system")
-    _draw_coordinae_system(frame, transform_camera_to_checkerboard, bgra_image)
+    _draw_coordinate_system(frame, camera_to_checkerboard_transform, bgra_image)
    display_bgr(bgra_image, "Checkerboard transformation frame")
 
    checkerboard_transformed_file = "CalibrationBoardInCheckerboardOrigin.zdf"
diff --git a/source/applications/advanced/verify_hand_eye_calibration/robodk_verify_hand_eye_calibration.py b/source/applications/advanced/verify_hand_eye_calibration/robodk_verify_hand_eye_calibration.py
index cc8fa3e6..928a0d26 100644
--- a/source/applications/advanced/verify_hand_eye_calibration/robodk_verify_hand_eye_calibration.py
+++ b/source/applications/advanced/verify_hand_eye_calibration/robodk_verify_hand_eye_calibration.py
@@ -5,8 +5,8 @@
 The robot touches the Zivid calibration object to verify hand-eye calibration.
 
 The sample requires as follows:
 - Type of calibration used (eye-in-hand or eye-to-hand)
-- YAML file with Hand-Eye transformation
-- YAML file with Pointed Hand-Eye Verification Tool transformation
+- YAML file with Hand-Eye transformation matrix
+- YAML file with Pointed Hand-Eye Verification Tool transformation matrix
 - Capture pose target name used in RoboDK
 - Calibration object used (Zivid calibration board or ArUco marker)
@@ -84,20 +84,20 @@ def _estimate_calibration_object_pose(frame: zivid.Frame, user_options: argparse
 
    return calibration_object_pose
 
 
-def _get_robot_base_to_calibration_object_transform(
+def _get_base_to_calibration_object_transform(
    user_options: argparse.Namespace,
    camera_to_calibration_object_transform: np.ndarray,
    robot: Item,
 ) -> np.ndarray:
-    """Calculating the robot base to the calibration object transform matrix.
+    """Calculating the robot base to the calibration object transformation matrix.
 
     Args:
-        user_options: Arguments from user that contain the type of hand-eye calibration done and the path to the matrix that resulted from that calibration
+        user_options: Arguments from user that contain the type of hand-eye calibration done and the path to the transformation matrix that resulted from that calibration
         camera_to_calibration_object_transform: A 4x4 numpy array containing the calibration object pose in the camera frame
         robot: Robot item in open RoboDK rdk file
 
     Returns:
-        robot_base_to_calibration_object_transform: A 4x4 numpy array containing the calibration object pose in the robot base frame
+        base_to_calibration_object_transform: A 4x4 numpy array containing the calibration object pose in the robot base frame
 
     Raises:
         ValueError: If an invalid calibration type is selected
@@ -105,22 +105,20 @@ def _get_robot_base_to_calibration_object_transform(
     """
    if user_options.eih:
        print("Loading current robot pose")
-        robot_base_to_flange_transform = np.array(robot.Pose()).T
+        base_to_flange_transform = np.array(robot.Pose()).T
 
        flange_to_camera_transform = load_and_assert_affine_matrix(user_options.hand_eye_yaml)
 
-        robot_base_to_calibration_object_transform = (
-            robot_base_to_flange_transform @ flange_to_camera_transform @ camera_to_calibration_object_transform
+        base_to_calibration_object_transform = (
+            base_to_flange_transform @ flange_to_camera_transform @ camera_to_calibration_object_transform
        )
    elif user_options.eth:
-        robot_base_to_camera_transform = load_and_assert_affine_matrix(user_options.hand_eye_yaml)
+        base_to_camera_transform = load_and_assert_affine_matrix(user_options.hand_eye_yaml)
 
-        robot_base_to_calibration_object_transform = (
-            robot_base_to_camera_transform @ camera_to_calibration_object_transform
-        )
+        base_to_calibration_object_transform = base_to_camera_transform @ camera_to_calibration_object_transform
    else:
        raise ValueError("Invalid calibration type. Please choose either eye-in-hand or eye-to-hand.")
 
-    return robot_base_to_calibration_object_transform
+    return base_to_calibration_object_transform
 
 
 def _yes_no_prompt(question: str) -> str:
@@ -342,15 +340,15 @@ def _main() -> None:
    set_robot_speed_and_acceleration(robot, speed=100, joint_speed=100, acceleration=50, joint_acceleration=50)
 
    print("Loading the Pointed Hand-Eye Verification Tool transformation matrix from a YAML file")
-    pointed_hand_eye_verification_tool_matrix = load_and_assert_affine_matrix(user_options.tool_yaml)
+    tool_base_to_tool_tip_transform = load_and_assert_affine_matrix(user_options.tool_yaml)
 
    if user_options.mounts_yaml:
        print("Loading the on-arm mounts transformation matrix from a YAML file")
-        flange_to_on_arm_mounts_transform = load_and_assert_affine_matrix(user_options.mounts_yaml)
+        flange_to_tool_base_transform = load_and_assert_affine_matrix(user_options.mounts_yaml)
 
-        tcp = flange_to_on_arm_mounts_transform @ pointed_hand_eye_verification_tool_matrix
+        flange_to_tcp_transform = flange_to_tool_base_transform @ tool_base_to_tool_tip_transform
    else:
-        tcp = pointed_hand_eye_verification_tool_matrix
+        flange_to_tcp_transform = tool_base_to_tool_tip_transform
 
    capture_pose = get_robot_targets(rdk, target_keyword=user_options.target_keyword)
    if not capture_pose:
@@ -368,14 +366,14 @@ def _main() -> None:
    camera_to_calibration_object_transform = _estimate_calibration_object_pose(frame, user_options)
 
    print("Calculating the calibration object pose in robot base frame")
-    robot_base_to_calibration_object_transform = _get_robot_base_to_calibration_object_transform(
+    base_to_calibration_object_transform = _get_base_to_calibration_object_transform(
        user_options,
        camera_to_calibration_object_transform,
        robot,
    )
 
    print("Calculating pose for robot to touch the calibration object")
-    touch_pose = robot_base_to_calibration_object_transform @ np.linalg.inv(tcp)
+    touch_pose = base_to_calibration_object_transform @ np.linalg.inv(flange_to_tcp_transform)
 
    print("Calculating pose for the robot to approach the calibration object")
    touch_pose_offset = np.identity(4)
diff --git a/source/applications/point_cloud_tutorial.md b/source/applications/point_cloud_tutorial.md
index 55bdd945..941ba4d4 100644
--- a/source/applications/point_cloud_tutorial.md
+++ b/source/applications/point_cloud_tutorial.md
@@ -159,7 +159,7 @@ m](https://support.zivid.com/latest//academy/applications/transform/transform-mi
 source](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/hand_eye_calibration/utilize_hand_eye_calibration.py#L119))
 
 ``` sourceCode python
-point_cloud.transform(transform_base_to_camera)
+point_cloud.transform(base_to_camera_transform)
 ```
 
 ## Downsample
diff --git a/source/sample_utils/save_load_matrix.py b/source/sample_utils/save_load_matrix.py
index b73a5c41..66e2a4c7 100644
--- a/source/sample_utils/save_load_matrix.py
+++ b/source/sample_utils/save_load_matrix.py
@@ -14,7 +14,7 @@ def assert_affine(matrix: Union[np.ndarray, zivid.Matrix4x4]) -> None:
     """Ensures that the matrix is affine.
 
     Args:
-        matrix: 4x4 transformation matrix, np.ndarray or zivid.Matrix4x4
+        matrix: Transformation matrix (4x4), np.ndarray or zivid.Matrix4x4
 
     Raises:
         RuntimeError: If matrix is not affine
@@ -30,7 +30,7 @@ def assert_affine_matrix_and_save(matrix: Union[np.ndarray, zivid.Matrix4x4], ya
     """Save transformation matrix to YAML.
 
     Args:
-        matrix: 4x4 transformation matrix, np.ndarray or zivid.Matrix4x4
+        matrix: Transformation matrix (4x4), np.ndarray or zivid.Matrix4x4
         yaml_path: Path to the YAML file
 
     """
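
For anyone reviewing this rename sweep: every hunk applies the same rule. Variables move from `transform_<a>_to_<b>` to `<a>_to_<b>_transform`, where `a_to_b_transform` holds the pose of frame `b` expressed in frame `a`. Written that way, compositions read left to right and adjacent frame names must cancel (`base_to_flange_transform @ flange_to_camera_transform` yields `base_to_camera_transform`), `np.linalg.inv` swaps the two frame names, and `a_to_b_transform @ point_in_b_frame` gives `point_in_a_frame`. The sketch below is a minimal illustration of that convention using only numpy; the pose values and the `make_transform` helper are hypothetical stand-ins, not code from these samples.

``` sourceCode python
import numpy as np


def make_transform(rotation: np.ndarray, translation: np.ndarray) -> np.ndarray:
    """Assemble a 4x4 homogeneous transformation matrix (hypothetical helper)."""
    transform = np.eye(4)
    transform[:3, :3] = rotation
    transform[:3, 3] = translation
    return transform


# Hypothetical eye-in-hand setup: flange pose in the base frame,
# camera pose in the flange frame (translations in mm, identity rotations).
base_to_flange_transform = make_transform(np.eye(3), np.array([400.0, 0.0, 500.0]))
flange_to_camera_transform = make_transform(np.eye(3), np.array([0.0, 50.0, 100.0]))

# Composition: the inner frame (flange) cancels, leaving the camera pose in the base frame.
base_to_camera_transform = base_to_flange_transform @ flange_to_camera_transform

# Inversion swaps the frame names: the base pose in the camera frame.
camera_to_base_transform = np.linalg.inv(base_to_camera_transform)

# a_to_b_transform maps homogeneous points from frame b into frame a.
point_in_camera_frame = np.array([10.0, 20.0, 300.0, 1.0])  # (X, Y, Z, 1)
point_in_base_frame = base_to_camera_transform @ point_in_camera_frame
print(f"Point in base frame: {point_in_base_frame[:3]}")
```

The payoff is that a composition whose inner frame names do not match (for example `camera_to_base_transform @ flange_to_camera_transform`) now looks wrong before it is ever run, which is exactly what the chained products in `robodk_robot_guidance.py` and `robodk_verify_hand_eye_calibration.py` rely on.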