diff --git a/README.md b/README.md index 2407322a..700fa89f 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Python samples -This repository contains python code samples for Zivid SDK v2.12.0. For +This repository contains python code samples for Zivid SDK v2.13.1. For tested compatibility with earlier SDK versions, please check out [accompanying releases](https://github.com/zivid/zivid-python-samples/tree/master/../../releases). @@ -60,6 +60,9 @@ from the camera can be used. - [capture\_hdr\_print\_normals](https://github.com/zivid/zivid-python-samples/tree/master/source/camera/advanced/capture_hdr_print_normals.py) - Capture Zivid point clouds, compute normals and print a subset. - **info\_util\_other** + - [automatic\_network\_configuration\_for\_cameras](https://github.com/zivid/zivid-python-samples/tree/master/source/camera/info_util_other/automatic_network_configuration_for_cameras.py) - Automatically set the IP addresses of any number of + cameras to be in the same subnet as the provided IP address + of the network interface. - [camera\_info](https://github.com/zivid/zivid-python-samples/tree/master/source/camera/info_util_other/camera_info.py) - Print version information for Python, zivid-python and Zivid SDK, then list cameras and print camera info and state for each connected camera. @@ -69,6 +72,8 @@ from the camera can be used. - [firmware\_updater](https://github.com/zivid/zivid-python-samples/tree/master/source/camera/info_util_other/firmware_updater.py) - Update firmware on the Zivid camera. - [get\_camera\_intrinsics](https://github.com/zivid/zivid-python-samples/tree/master/source/camera/info_util_other/get_camera_intrinsics.py) - Read intrinsic parameters from the Zivid camera (OpenCV model) or estimate them from the point cloud. + - [network\_configuration](https://github.com/zivid/zivid-python-samples/tree/master/source/camera/info_util_other/network_configuration.py) - Uses Zivid API to change the IP address of the Zivid + camera. - [warmup](https://github.com/zivid/zivid-python-samples/tree/master/source/camera/info_util_other/warmup.py) - A basic warm-up method for a Zivid camera with specified time and capture cycle. - **maintenance** @@ -114,8 +119,16 @@ from the camera can be used. images to find the marker coordinates (2D and 3D). - [reproject\_points](https://github.com/zivid/zivid-python-samples/tree/master/source/applications/advanced/reproject_points.py) - Illuminate checkerboard (Zivid Calibration Board) corners by getting checkerboard pose + - [roi\_box\_via\_aruco\_marker](https://github.com/zivid/zivid-python-samples/tree/master/source/applications/advanced/roi_box_via_aruco_marker.py) - Filter the point cloud based on a ROI box given relative + to the ArUco marker on a Zivid Calibration Board. - [roi\_box\_via\_checkerboard](https://github.com/zivid/zivid-python-samples/tree/master/source/applications/advanced/roi_box_via_checkerboard.py) - Filter the point cloud based on a ROI box given relative to the Zivid Calibration Board. + - [transform\_point\_cloud\_via\_aruco\_marker](https://github.com/zivid/zivid-python-samples/tree/master/source/applications/advanced/transform_point_cloud_via_aruco_marker.py) - Transform a point cloud from camera to ArUco marker + coordinate frame by estimating the marker's pose from the + point cloud. 
+ - [transform\_point\_cloud\_via\_checkerboard](https://github.com/zivid/zivid-python-samples/tree/master/source/applications/advanced/transform_point_cloud_via_checkerboard.py) - Transform a point cloud from camera to checkerboard (Zivid + Calibration Board) coordinate frame by getting checkerboard + pose from the API. - **hand\_eye\_calibration** - [pose\_conversions](https://github.com/zivid/zivid-python-samples/tree/master/source/applications/advanced/hand_eye_calibration/pose_conversions.py) - Convert to/from Transformation Matrix (Rotation Matrix + Translation Vector). @@ -134,7 +147,8 @@ from the camera can be used. - [display](https://github.com/zivid/zivid-python-samples/tree/master/source/sample_utils/display.py) - Display relevant data for Zivid Samples. - [paths](https://github.com/zivid/zivid-python-samples/tree/master/source/sample_utils/paths.py) - Get relevant paths for Zivid Samples. - [robodk\_tools](https://github.com/zivid/zivid-python-samples/tree/master/source/sample_utils/robodk_tools.py) - Robot Control Module - - [save\_load\_matrix](https://github.com/zivid/zivid-python-samples/tree/master/source/sample_utils/save_load_matrix.py) - try: + - [save\_load\_matrix](https://github.com/zivid/zivid-python-samples/tree/master/source/sample_utils/save_load_matrix.py) - Save and load Zivid 4x4 transformation matrices from and to + YAML files. - [white\_balance\_calibration](https://github.com/zivid/zivid-python-samples/tree/master/source/sample_utils/white_balance_calibration.py) - Balance color for 2D capture using white surface as reference. - **applications** - **advanced** diff --git a/continuous-integration/setup.sh b/continuous-integration/setup.sh index 95512490..2713fc6f 100755 --- a/continuous-integration/setup.sh +++ b/continuous-integration/setup.sh @@ -28,7 +28,7 @@ function install_www_deb { rm -r $TMP_DIR || exit } -install_www_deb "https://downloads.zivid.com/sdk/releases/2.12.0+6afd4961-1/u${VERSION_ID:0:2}/zivid_2.12.0+6afd4961-1_amd64.deb" || exit +install_www_deb "https://downloads.zivid.com/sdk/releases/2.13.1+18e79e79-1/u${VERSION_ID:0:2}/zivid_2.13.1+18e79e79-1_amd64.deb" || exit python3 -m pip install --upgrade pip || exit python3 -m pip install --requirement "$ROOT_DIR/requirements.txt" || exit diff --git a/source/applications/advanced/auto_2d_settings.py b/source/applications/advanced/auto_2d_settings.py index b90db00f..652cc946 100644 --- a/source/applications/advanced/auto_2d_settings.py +++ b/source/applications/advanced/auto_2d_settings.py @@ -16,8 +16,6 @@ first. If you want to use your own white reference (white wall, piece of paper, etc.) instead of using the calibration board, you can provide your own mask in _main(). Then you will have to specify the lower limit for f-number yourself. -Note: This example uses experimental SDK features, which may be modified, moved, or deleted in the future without notice. 
- """ import argparse @@ -29,7 +27,6 @@ import matplotlib.pyplot as plt import numpy as np import zivid -import zivid.experimental.calibration from sample_utils.calibration_board_utils import find_white_mask_from_checkerboard from sample_utils.white_balance_calibration import compute_mean_rgb_from_mask, white_balance_calibration @@ -152,7 +149,7 @@ def _find_white_mask_and_distance_to_checkerboard(camera: zivid.Camera) -> Tuple settings = _capture_assistant_settings(camera) frame = camera.capture(settings) - checkerboard_pose = zivid.experimental.calibration.detect_feature_points(frame).pose().to_matrix() + checkerboard_pose = zivid.calibration.detect_calibration_board(frame).pose().to_matrix() distance_to_checkerboard = checkerboard_pose[2, 3] rgb = frame.point_cloud().copy_data("rgba")[:, :, :3] @@ -520,10 +517,10 @@ def _print_poor_pixel_distribution(rgb: np.ndarray) -> None: black_and = np.sum(np.logical_and(np.logical_and(rgb[:, :, 0] == 0, rgb[:, :, 1] == 0), rgb[:, :, 2] == 0)) print("Distribution of saturated (255) and black (0) pixels with final settings:") - print(f"Saturated pixels (at least one channel): {saturated_or}\t ({100*saturated_or/total_num_pixels:.2f}%)") - print(f"Saturated pixels (all channels):\t {saturated_and}\t ({100*saturated_and/total_num_pixels:.2f}%)") - print(f"Black pixels (at least one channel):\t {black_or}\t ({100*black_or/total_num_pixels:.2f}%)") - print(f"Black pixels (all channels):\t\t {black_and}\t ({100*black_and/total_num_pixels:.2f}%)") + print(f"Saturated pixels (at least one channel): {saturated_or}\t ({100 * saturated_or / total_num_pixels:.2f}%)") + print(f"Saturated pixels (all channels):\t {saturated_and}\t ({100 * saturated_and / total_num_pixels:.2f}%)") + print(f"Black pixels (at least one channel):\t {black_or}\t ({100 * black_or / total_num_pixels:.2f}%)") + print(f"Black pixels (all channels):\t\t {black_and}\t ({100 * black_and / total_num_pixels:.2f}%)") def _plot_image_with_histogram(rgb: np.ndarray, settings_2d: zivid.Settings2D) -> None: diff --git a/source/applications/advanced/get_checkerboard_pose_from_zdf.py b/source/applications/advanced/get_checkerboard_pose_from_zdf.py index e4fce1a5..67c4a44d 100644 --- a/source/applications/advanced/get_checkerboard_pose_from_zdf.py +++ b/source/applications/advanced/get_checkerboard_pose_from_zdf.py @@ -5,8 +5,6 @@ The checkerboard point cloud is also visualized with a coordinate system. The ZDF file for this sample can be found under the main instructions for Zivid samples. -Note: This example uses experimental SDK features, which may be modified, moved, or deleted in the future without notice. 
- """ from pathlib import Path @@ -14,7 +12,6 @@ import numpy as np import open3d as o3d import zivid -import zivid.experimental.calibration from sample_utils.paths import get_sample_data_path from sample_utils.save_load_matrix import assert_affine_matrix_and_save @@ -35,8 +32,8 @@ def _create_open3d_point_cloud(point_cloud: zivid.PointCloud) -> o3d.geometry.Po xyz = np.nan_to_num(xyz).reshape(-1, 3) rgb = rgba[:, :, 0:3].reshape(-1, 3) - point_cloud_open3d = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(xyz)) - point_cloud_open3d.colors = o3d.utility.Vector3dVector(rgb / 255) + point_cloud_open3d = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(xyz.astype(np.float64))) + point_cloud_open3d.colors = o3d.utility.Vector3dVector(rgb.astype(np.float64) / 255) refined_point_cloud_open3d = o3d.geometry.PointCloud.remove_non_finite_points( point_cloud_open3d, remove_nan=True, remove_infinite=True @@ -75,9 +72,7 @@ def _main() -> None: point_cloud = frame.point_cloud() print("Detecting checkerboard and estimating its pose in camera frame") - transform_camera_to_checkerboard = ( - zivid.experimental.calibration.detect_feature_points(frame).pose().to_matrix() - ) + transform_camera_to_checkerboard = zivid.calibration.detect_calibration_board(frame).pose().to_matrix() print(f"Camera pose in checkerboard frame:\n{transform_camera_to_checkerboard}") transform_file_name = "CameraToCheckerboardTransform.yaml" diff --git a/source/applications/advanced/hand_eye_calibration/hand_eye_calibration.py b/source/applications/advanced/hand_eye_calibration/hand_eye_calibration.py index 3c3c5db3..a6e35655 100644 --- a/source/applications/advanced/hand_eye_calibration/hand_eye_calibration.py +++ b/source/applications/advanced/hand_eye_calibration/hand_eye_calibration.py @@ -1,17 +1,14 @@ """ Perform Hand-Eye calibration. -Note: This example uses experimental SDK features, which may be modified, moved, or deleted in the future without notice. 
- """ import datetime from pathlib import Path -from typing import List +from typing import List, Tuple import numpy as np import zivid -import zivid.experimental.calibration from sample_utils.save_load_matrix import assert_affine_matrix_and_save @@ -26,7 +23,7 @@ def _enter_robot_pose(index: int) -> zivid.calibration.Pose: """ inputted = input( - f"Enter pose with id={index} (a line with 16 space separated values describing 4x4 row-major matrix):" + f"Enter pose with id={index} (a line with 16 space separated values describing 4x4 row-major matrix): " ) elements = inputted.split(maxsplit=15) data = np.array(elements, dtype=np.float64).reshape((4, 4)) @@ -46,14 +43,14 @@ def _perform_calibration(hand_eye_input: List[zivid.calibration.HandEyeInput]) - """ while True: - calibration_type = input("Enter type of calibration, eth (for eye-to-hand) or eih (for eye-in-hand):").strip() + calibration_type = input("Enter type of calibration, eth (for eye-to-hand) or eih (for eye-in-hand): ").strip() if calibration_type.lower() == "eth": - print("Performing eye-to-hand calibration") + print(f"Performing eye-to-hand calibration with {len(hand_eye_input)} dataset pairs") print("The resulting transform is the camera pose in robot base frame") hand_eye_output = zivid.calibration.calibrate_eye_to_hand(hand_eye_input) return hand_eye_output if calibration_type.lower() == "eih": - print("Performing eye-in-hand calibration") + print(f"Performing eye-in-hand calibration with {len(hand_eye_input)} dataset pairs") print("The resulting transform is the camera pose in flange (end-effector) frame") hand_eye_output = zivid.calibration.calibrate_eye_in_hand(hand_eye_input) return hand_eye_output @@ -78,6 +75,58 @@ def _assisted_capture(camera: zivid.Camera) -> zivid.Frame: return camera.capture(settings) +def _handle_add_pose( + current_pose_id: int, hand_eye_input: List, camera: zivid.Camera, calibration_object: str +) -> Tuple[int, List]: + """Acquire frame with capture assistant. 
+ + Args: + current_pose_id: Counter of the current pose in the hand-eye calibration dataset + hand_eye_input: List of hand-eye calibration dataset pairs (poses and point clouds) + camera: Zivid camera + calibration_object: m (for ArUco marker(s)) or c (for Zivid checkerboard) + + Returns: + Tuple[int, List]: Updated current_pose_id and hand_eye_input + + """ + + robot_pose = _enter_robot_pose(current_pose_id) + + print("Detecting calibration object in point cloud") + + if calibration_object == "c": + + frame = zivid.calibration.capture_calibration_board(camera) + detection_result = zivid.calibration.detect_calibration_board(frame) + + if detection_result.valid(): + print("Calibration board detected") + hand_eye_input.append(zivid.calibration.HandEyeInput(robot_pose, detection_result)) + current_pose_id += 1 + else: + print("Failed to detect calibration board, ensure that the entire board is in the view of the camera") + elif calibration_object == "m": + + frame = _assisted_capture(camera) + + marker_dictionary = zivid.calibration.MarkerDictionary.aruco4x4_50 + marker_ids = [1, 2, 3] + + print(f"Detecting arUco marker IDs {marker_ids} from the dictionary {marker_dictionary}") + detection_result = zivid.calibration.detect_markers(frame, marker_ids, marker_dictionary) + + if detection_result.valid(): + print(f"ArUco marker(s) detected: {len(detection_result.detected_markers())}") + hand_eye_input.append(zivid.calibration.HandEyeInput(robot_pose, detection_result)) + current_pose_id += 1 + else: + print( + "Failed to detect any ArUco markers, ensure that at least one ArUco marker is in the view of the camera" + ) + return current_pose_id, hand_eye_input + + def _main() -> None: app = zivid.Application() @@ -88,6 +137,13 @@ def _main() -> None: hand_eye_input = [] calibrate = False + while True: + calibration_object = input( + "Enter calibration object you are using, m (for ArUco marker(s)) or c (for Zivid checkerboard): " + ).strip() + if calibration_object.lower() == "m" or calibration_object.lower() == "c": + break + print( "Zivid primarily operates with a (4x4) transformation matrix. 
To convert\n" "from axis-angle, rotation vector, roll-pitch-yaw, or quaternion, check out\n" @@ -95,24 +151,12 @@ def _main() -> None: ) while not calibrate: - command = input("Enter command, p (to add robot pose) or c (to perform calibration):").strip() + command = input("Enter command, p (to add robot pose) or c (to perform calibration): ").strip() if command == "p": try: - robot_pose = _enter_robot_pose(current_pose_id) - - frame = _assisted_capture(camera) - - print("Detecting checkerboard in point cloud") - detection_result = zivid.experimental.calibration.detect_feature_points(frame) - - if detection_result.valid(): - print("Calibration board detected") - hand_eye_input.append(zivid.calibration.HandEyeInput(robot_pose, detection_result)) - current_pose_id += 1 - else: - print( - "Failed to detect calibration board, ensure that the entire board is in the view of the camera" - ) + current_pose_id, hand_eye_input = _handle_add_pose( + current_pose_id, hand_eye_input, camera, calibration_object + ) except ValueError as ex: print(ex) elif command == "c": diff --git a/source/applications/advanced/hand_eye_calibration/robodk_hand_eye_calibration/robodk_hand_eye_calibration.py b/source/applications/advanced/hand_eye_calibration/robodk_hand_eye_calibration/robodk_hand_eye_calibration.py index 60ab2d9f..60e416ba 100644 --- a/source/applications/advanced/hand_eye_calibration/robodk_hand_eye_calibration/robodk_hand_eye_calibration.py +++ b/source/applications/advanced/hand_eye_calibration/robodk_hand_eye_calibration/robodk_hand_eye_calibration.py @@ -14,8 +14,6 @@ https://support.zivid.com/latest/academy/applications/hand-eye/hand-eye-calibration-process.html Make sure to launch your RDK file and connect to robot through Robodk before running this script. -Note: This example uses experimental SDK features, which may be modified, moved, or deleted in the future without notice. - """ import argparse @@ -27,7 +25,6 @@ import cv2 import numpy as np import zivid -import zivid.experimental.calibration from robodk.robolink import Item from sample_utils.robodk_tools import connect_to_robot, get_robot_targets, set_robot_speed_and_acceleration from sample_utils.save_load_matrix import assert_affine_matrix_and_save, load_and_assert_affine_matrix @@ -63,7 +60,7 @@ def _get_frame_and_transform(robot: Item, camera: zivid.Camera) -> Tuple[zivid.F 4x4 transformation matrix """ - frame = assisted_capture(camera) + frame = _assisted_capture(camera) transform = np.array(robot.Pose()).T return frame, transform @@ -88,19 +85,25 @@ def _save_point_cloud_and_pose( assert_affine_matrix_and_save(transform, save_directory / f"pos{image_and_pose_iterator:02d}.yaml") -def _verify_good_capture(frame: zivid.Frame) -> None: - """Verify that checkerboard feature points are detected in the frame. +def _verify_good_capture(frame: zivid.Frame, user_options: argparse.Namespace) -> None: + """Verify that calibration object feature points are detected in the frame. 
Args: frame: Zivid frame containing point cloud + user_options: Input arguments Raises: RuntimeError: If no feature points are detected in frame """ - detected_features = zivid.experimental.calibration.detect_feature_points(frame) - if not detected_features.valid(): - raise RuntimeError("Failed to detect feature points from captured frame.") + if user_options.calibration_object == "checkerboard": + detected_features = zivid.calibration.detect_calibration_board(frame) + if not detected_features.valid(): + raise RuntimeError("Failed to detect Zivid checkerboard from captured frame.") + if user_options.calibration_object == "marker": + detected_features = zivid.calibration.detect_markers(frame, user_options.ids, user_options.dictionary) + if not detected_features.valid(): + raise RuntimeError("Failed to detect any ArUco markers from captured frame.") def _capture_one_frame_and_robot_pose( @@ -109,6 +112,7 @@ def _capture_one_frame_and_robot_pose( save_directory: Path, image_and_pose_iterator: int, next_target: Item, + user_options: argparse.Namespace, ) -> None: """Captures and saves point cloud at a given robot pose, saves robot pose, then signals the robot to move to the next pose. @@ -119,6 +123,7 @@ def _capture_one_frame_and_robot_pose( save_directory: Path to where data will be saved image_and_pose_iterator: Counter for point cloud and pose acquisition sequence next_target: Next pose the robot should move to in sequence + user_options: Input arguments """ robot.WaitMove() @@ -129,11 +134,11 @@ def _capture_one_frame_and_robot_pose( # Verifying capture from previous pose while moving to new pose if next_target is not None: robot.MoveJ(next_target, blocking=False) - _verify_good_capture(frame) + _verify_good_capture(frame, user_options) print(f"Image and pose #{image_and_pose_iterator} saved") -def save_hand_eye_results(save_directory: Path, transform: np.ndarray, residuals: List) -> None: +def _save_hand_eye_results(save_directory: Path, transform: np.ndarray, residuals: List) -> None: """Save transformation and residuals to directory. Args: @@ -157,7 +162,9 @@ def save_hand_eye_results(save_directory: Path, transform: np.ndarray, residuals file_storage_residuals.release() -def generate_hand_eye_dataset(app: zivid.application, robot: Item, targets: List) -> Path: +def generate_hand_eye_dataset( + app: zivid.application, robot: Item, targets: List, user_options: argparse.Namespace +) -> Path: """Generate dataset of pairs of robot poses and point clouds containing calibration target. This dataset is composed of pairs of YML and ZDF files respectively. 
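The new `_handle_add_pose` helper above lets the user feed either the Zivid checkerboard or ArUco markers into hand-eye calibration. A condensed sketch of the marker path, assuming a captured `frame` and a 4x4 robot pose matrix are already available; the marker IDs and dictionary are illustrative and must match the printed markers, and the final calibration call mirrors `_perform_calibration` above.

```python
from typing import List

import numpy as np
import zivid


def add_marker_detection(frame: zivid.Frame, robot_pose_matrix: np.ndarray, hand_eye_input: List) -> None:
    # Illustrative marker setup; use the dictionary and IDs of the markers you actually printed
    marker_dictionary = zivid.calibration.MarkerDictionary.aruco4x4_50
    marker_ids = [1, 2, 3]

    detection_result = zivid.calibration.detect_markers(frame, marker_ids, marker_dictionary)
    if not detection_result.valid():
        raise RuntimeError("No ArUco markers detected in this capture")

    # Pair the robot pose with the detection result to form one dataset entry
    robot_pose = zivid.calibration.Pose(robot_pose_matrix)
    hand_eye_input.append(zivid.calibration.HandEyeInput(robot_pose, detection_result))


# After collecting enough pose/detection pairs:
# hand_eye_output = zivid.calibration.calibrate_eye_in_hand(hand_eye_input)
```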
@@ -165,6 +172,7 @@ def generate_hand_eye_dataset(app: zivid.application, robot: Item, targets: List app: Zivid application instance robot: Robot item in open RoboDK rdk file targets: List of roboDK targets (poses) + user_options: Input arguments Returns: Path: Save_directory for where data will be saved @@ -176,13 +184,14 @@ def generate_hand_eye_dataset(app: zivid.application, robot: Item, targets: List save_directory = _generate_directory() image_and_pose_iterator = 1 while not image_and_pose_iterator > num_targets: - print(f"Capturing calibration object at robot pose {num_targets-len(targets)}") + print(f"Capturing calibration object at robot pose {num_targets - len(targets)}") _capture_one_frame_and_robot_pose( robot, camera, save_directory, image_and_pose_iterator, targets.pop(0) if targets else None, + user_options, ) image_and_pose_iterator += 1 @@ -191,7 +200,7 @@ def generate_hand_eye_dataset(app: zivid.application, robot: Item, targets: List return save_directory -def assisted_capture(camera: zivid.Camera, max_time_milliseconds: int = 800) -> zivid.Frame: +def _assisted_capture(camera: zivid.Camera, max_time_milliseconds: int = 800) -> zivid.Frame: """Capturing image with Zivid camera using assisted capture settings. Args: @@ -213,12 +222,14 @@ def assisted_capture(camera: zivid.Camera, max_time_milliseconds: int = 800) -> def perform_hand_eye_calibration( calibration_type: str, dataset_directory: Path, + user_options: argparse.Namespace, ) -> Tuple[np.ndarray, List[zivid.calibration.HandEyeResidual]]: """Perform hand-eye calibration based on calibration type. Args: calibration_type: Calibration type, eye-in-hand or eye-to-hand dataset_directory: Path to dataset + user_options: Input arguments Returns: transform: 4x4 transformation matrix @@ -238,7 +249,10 @@ def perform_hand_eye_calibration( if frame_file_path.is_file() and pose_file_path.is_file(): print(f"Detect feature points from img{pose_and_image_iterator:02d}.zdf") frame = zivid.Frame(frame_file_path) - detected_features = zivid.experimental.calibration.detect_feature_points(frame) + if user_options.calibration_object == "checkerboard": + detected_features = zivid.calibration.detect_calibration_board(frame) + else: + detected_features = zivid.calibration.detect_markers(frame, user_options.ids, user_options.dictionary) print(f"Read robot pose from pos{pose_and_image_iterator:02d}.yaml") transform = load_and_assert_affine_matrix(pose_file_path) @@ -279,6 +293,9 @@ def options() -> argparse.Namespace: eih or eth: eye-in-hand or eye-to-hand ip: IP address of the robot controller target_keyword: The common name of the targets (poses) in RoboDK station that will be used for the hand-eye dataset + calibration:object: marker (for ArUco marker(s)) or checkerboard (for Zivid checkerboard) + dictionary: ArUco marker dictionary + ids: List of ArUco marker IDs """ parser = argparse.ArgumentParser(description=__doc__) @@ -286,13 +303,23 @@ def options() -> argparse.Namespace: type_group.add_argument("--eih", "--eye-in-hand", action="store_true", help="eye-in-hand calibration") type_group.add_argument("--eth", "--eye-to-hand", action="store_true", help="eye-to-hand calibration") - parser.add_argument("--ip", required=True, help="IP address of the robot controller") parser.add_argument( "--target-keyword", required=True, help='This is the keyword shared for naming all poses used for hand-eye dataset, i.e. if we have: "Target 1", "Target 2", ... , "Target N". 
Then we should use "Target"', ) + subparsers = parser.add_subparsers(dest="calibration_object", required=True, help="Calibration object type") + subparsers.add_parser("checkerboard", help="Use checkerboard for calibration") + marker_parser = subparsers.add_parser("marker", help="Use marker for calibration") + marker_parser.add_argument( + "--dictionary", + required=True, + choices=list(zivid.calibration.MarkerDictionary.valid_values()), + help="Dictionary used for marker calibration", + ) + marker_parser.add_argument("--ids", nargs="+", required=True, type=int, help="IDs used for marker calibration") + return parser.parse_args() @@ -306,17 +333,16 @@ def _main() -> None: targets = get_robot_targets(rdk, user_options.target_keyword) # NOTE! Verify safe operation speeds and accelerations for your robot - robot_speed_accel_limits = [100, 100, 50, 50] - set_robot_speed_and_acceleration(robot, *robot_speed_accel_limits) + set_robot_speed_and_acceleration(robot, speed=100, joint_speed=100, acceleration=50, joint_acceleration=50) - dataset_dir = generate_hand_eye_dataset(app, robot, targets) + dataset_dir = generate_hand_eye_dataset(app, robot, targets, user_options) if user_options.eih: - transform, residuals = perform_hand_eye_calibration("eye-in-hand", dataset_dir) + transform, residuals = perform_hand_eye_calibration("eye-in-hand", dataset_dir, user_options) else: - transform, residuals = perform_hand_eye_calibration("eye-to-hand", dataset_dir) + transform, residuals = perform_hand_eye_calibration("eye-to-hand", dataset_dir, user_options) - save_hand_eye_results(dataset_dir, transform, residuals) + _save_hand_eye_results(dataset_dir, transform, residuals) if __name__ == "__main__": diff --git a/source/applications/advanced/hand_eye_calibration/ur_hand_eye_calibration/universal_robots_perform_hand_eye_calibration.py b/source/applications/advanced/hand_eye_calibration/ur_hand_eye_calibration/universal_robots_perform_hand_eye_calibration.py index 45529212..6add2c60 100644 --- a/source/applications/advanced/hand_eye_calibration/ur_hand_eye_calibration/universal_robots_perform_hand_eye_calibration.py +++ b/source/applications/advanced/hand_eye_calibration/ur_hand_eye_calibration/universal_robots_perform_hand_eye_calibration.py @@ -15,8 +15,6 @@ Further explanation of this sample is found in our knowledge base: https://support.zivid.com/latest/academy/applications/hand-eye/ur5-robot-%2B-python-generate-dataset-and-perform-hand-eye-calibration.html -Note: This example uses experimental SDK features, which may be modified, moved, or deleted in the future without notice. 
- """ import argparse @@ -28,7 +26,6 @@ import cv2 import numpy as np import zivid -import zivid.experimental.calibration from rtde import rtde, rtde_config from sample_utils.save_load_matrix import assert_affine_matrix_and_save, load_and_assert_affine_matrix from scipy.spatial.transform import Rotation @@ -47,6 +44,17 @@ def _options() -> argparse.Namespace: mode_group.add_argument("--eth", "--eye-to-hand", action="store_true", help="eye-to-hand calibration") parser.add_argument("--ip", required=True, help="IP address to robot") + subparsers = parser.add_subparsers(dest="calibration_object", required=True, help="Calibration object type") + subparsers.add_parser("checkerboard", help="Use checkerboard for calibration") + marker_parser = subparsers.add_parser("marker", help="Use marker for calibration") + marker_parser.add_argument( + "--dictionary", + required=True, + choices=list(zivid.calibration.MarkerDictionary.valid_values()), + help="Dictionary used for marker calibration", + ) + marker_parser.add_argument("--ids", nargs="+", required=True, type=int, help="IDs used for marker calibration") + return parser.parse_args() @@ -259,30 +267,35 @@ def _ready_for_capture(robot_state: rtde.serialize.DataObject) -> bool: return robot_state.output_bit_register_64 -def _verify_good_capture(frame: zivid.Frame) -> None: - """Verify that checkerboard feature-points are detected in the frame. +def _verify_good_capture(frame: zivid.Frame, user_options: argparse.Namespace) -> None: + """Verify that calibration object feature points are detected in the frame. Args: frame: Zivid frame containing point cloud + user_options: Input arguments Raises: RuntimeError: If no feature points are detected in frame """ - detection_result = zivid.experimental.calibration.detect_feature_points(frame) - - if not detection_result.valid(): - raise RuntimeError("Failed to detect feature points from captured frame.") + if user_options.calibration_object == "checkerboard": + detected_features = zivid.calibration.detect_calibration_board(frame) + if not detected_features.valid(): + raise RuntimeError("Failed to detect Zivid checkerboard from captured frame.") + if user_options.calibration_object == "marker": + detected_features = zivid.calibration.detect_markers(frame, user_options.ids, user_options.dictionary) + if not detected_features.valid(): + raise RuntimeError("Failed to detect feature any ArUco markers from captured frame.") def _capture_one_frame_and_robot_pose( con: rtde.RTDE, camera: zivid.Camera, - settings: zivid.Settings, save_dir: Path, input_data: rtde.serialize.DataObject, image_num: int, ready_to_capture: bool, + user_options: argparse.Namespace, ) -> None: """Capture 3D image and robot pose for a given robot posture, then signals robot to move to next posture. @@ -290,15 +303,16 @@ def _capture_one_frame_and_robot_pose( Args: con: Connection between computer and robot camera: Zivid camera - settings: Zivid settings save_dir: Path to where data will be saved input_data: Input package containing the specific input data registers image_num: Image number ready_to_capture: Boolean value to robot_state that camera is ready to capture images + user_options: Input arguments """ + settings = _camera_settings(camera) frame, transform = _get_frame_and_transform_matrix(con, camera, settings) - _verify_good_capture(frame) + _verify_good_capture(frame, user_options) # Signal robot to move to next position, then set signal to low again. 
_write_robot_state(con, input_data, finish_capture=True, camera_ready=ready_to_capture) @@ -308,20 +322,22 @@ def _capture_one_frame_and_robot_pose( print("Image and pose saved") -def _generate_dataset(app: zivid.Application, con: rtde.RTDE, input_data: rtde.serialize.DataObject) -> Path: +def _generate_dataset( + app: zivid.Application, con: rtde.RTDE, input_data: rtde.serialize.DataObject, user_options: argparse.Namespace +) -> Path: """Generate dataset based on predefined robot poses. Args: app: Zivid application instance con: Connection between computer and robot input_data: Input package containing the specific input data registers + user_options: Input arguments Returns: Path: Save_dir to where dataset is saved """ with app.connect_camera() as camera: - settings = _camera_settings(camera) save_dir = _generate_folder() # Signal robot that camera is ready @@ -345,11 +361,11 @@ def _generate_dataset(app: zivid.Application, con: rtde.RTDE, input_data: rtde.s _capture_one_frame_and_robot_pose( con, camera, - settings, save_dir, input_data, images_captured, ready_to_capture, + user_options, ) images_captured += 1 @@ -393,7 +409,7 @@ def perform_hand_eye_calibration( if frame_file_path.is_file() and pose_file_path.is_file(): print(f"Detect feature points from img{idata:02d}.zdf") frame = zivid.Frame(frame_file_path) - detection_result = zivid.experimental.calibration.detect_feature_points(frame) + detection_result = zivid.calibration.detect_calibration_board(frame) if not detection_result.valid(): raise RuntimeError(f"Failed to detect feature points from frame {frame_file_path}") @@ -440,7 +456,7 @@ def _main() -> None: con, input_data = _initialize_robot_sync(robot_ip_address) con.send_start() - dataset_dir = _generate_dataset(app, con, input_data) + dataset_dir = _generate_dataset(app, con, input_data, user_options) if user_options.eih: transform, residuals = perform_hand_eye_calibration("eye-in-hand", dataset_dir) diff --git a/source/applications/advanced/hand_eye_calibration/verify_hand_eye_with_visualization.py b/source/applications/advanced/hand_eye_calibration/verify_hand_eye_with_visualization.py index 8c65f121..47f472e6 100644 --- a/source/applications/advanced/hand_eye_calibration/verify_hand_eye_with_visualization.py +++ b/source/applications/advanced/hand_eye_calibration/verify_hand_eye_with_visualization.py @@ -10,15 +10,13 @@ - eye-to-hand: The point clouds are transformed to the robot end-effector frame - eye-in-hand: The point clouds are transformed to the robot base frame -The Hand-Eye calibration is good if the visualized checkerboards overlap +The Hand-Eye calibration is good if the visualized calibration objects overlap accurately. Tip: This sample saves the point clouds in PLY format in the same directory where this sample is stored. You can open the PLY point clouds in MeshLab for visual inspection. -Note: This example uses experimental SDK features, which may be modified, moved, or deleted in the future without notice. - """ import argparse @@ -28,23 +26,37 @@ import numpy as np import open3d as o3d import zivid -import zivid.experimental.calibration from sample_utils.save_load_matrix import load_and_assert_affine_matrix -def _filter_checkerboard_roi(xyz: np.ndarray, centroid: np.ndarray) -> np.ndarray: - """Filters out the data outside the region of interest defined by the checkerboard centroid. 
+def _filter_calibration_object_roi(frame: zivid.Frame, args: argparse.Namespace) -> np.ndarray: + """Filters out the data outside the region of interest defined by the calibration_object centroid. Args: - xyz: A numpy array of X, Y and Z point cloud coordinates - centroid: A numpy array of X, Y and Z checkerboard centroid coordinates + frame: Zivid frame + args: Input arguments Returns: xyz: A numpy array of X, Y and Z point cloud coordinates within the region of interest """ - # the longest distance from the checkerboard centroid to the calibration board corner is < 245 mm - radius_threshold = 245 + + xyz = frame.point_cloud().copy_data("xyz") + + if args.calibration_object == "checkerboard": + # Finding Cartesian coordinates of the checkerboard center point + detection_result = zivid.calibration.detect_calibration_board(frame) + centroid = detection_result.centroid() + # the longest distance from the checkerboard centroid to the calibration board corner is < 245 mm + radius_threshold = 245 + else: + # Finding Cartesian coordinates of the ArUco marker center point + detection_result = zivid.calibration.detect_markers(frame, args.id, args.dictionary) + pose = detection_result.detected_markers()[0].pose.to_matrix() + translation = pose[:3, 3] + centroid = translation.flatten() + radius_threshold = args.size * np.sqrt(2) / 2 + radius = np.linalg.norm(centroid - xyz, axis=2) xyz[radius > radius_threshold] = np.NaN @@ -65,6 +77,21 @@ def _options() -> argparse.Namespace: required=True, help="Path to the Hand-Eye dataset", ) + + subparsers = parser.add_subparsers(dest="calibration_object", required=True, help="Calibration object type") + subparsers.add_parser("checkerboard", help="Verify using Zivid calibration board") + marker_parser = subparsers.add_parser("marker", help="Verify using ArUco marker") + marker_parser.add_argument( + "--dictionary", + required=True, + choices=list(zivid.calibration.MarkerDictionary.valid_values()), + help="Dictionary of the targeted ArUco marker", + ) + marker_parser.add_argument( + "--id", nargs=1, required=True, type=int, help="ID of ArUco marker to be used for verification" + ) + marker_parser.add_argument("--size", required=True, type=float, help="ArUco marker size in mm") + return parser.parse_args() @@ -83,8 +110,8 @@ def _create_open3d_point_cloud(rgba: np.ndarray, xyz: np.ndarray) -> o3d.geometr xyz = xyz.reshape(-1, 3) rgb = rgba[:, :, 0:3].reshape(-1, 3) - point_cloud_open3d = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(xyz)) - point_cloud_open3d.colors = o3d.utility.Vector3dVector(rgb / 255) + point_cloud_open3d = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(xyz.astype(np.float64))) + point_cloud_open3d.colors = o3d.utility.Vector3dVector(rgb.astype(np.float64) / 255) refined_point_cloud_open3d = o3d.geometry.PointCloud.remove_non_finite_points( point_cloud_open3d, remove_nan=True, remove_infinite=True @@ -120,7 +147,7 @@ def _path_list_creator( file_path = path / f"{file_prefix_name}{str(num).zfill(number_of_digits_zfill)}{file_suffix_name}" list_of_paths.append(file_path) - next_file_path = path / f"{file_prefix_name}{str(num+1).zfill(number_of_digits_zfill)}{file_suffix_name}" + next_file_path = path / f"{file_prefix_name}{str(num + 1).zfill(number_of_digits_zfill)}{file_suffix_name}" if not next_file_path.exists(): return list_of_paths @@ -130,6 +157,8 @@ def _path_list_creator( def _main() -> None: with zivid.Application(): + args = _options() + while True: robot_camera_configuration = input( "Enter type of calibration, eth (for 
eye-to-hand) or eih (for eye-in-hand):" @@ -138,11 +167,8 @@ def _main() -> None: break print("Entered unknown Hand-Eye calibration type") - args = _options() path = args.input_path - list_of_paths_to_hand_eye_dataset_point_clouds = _path_list_creator(path, "img", 2, ".zdf") - list_of_paths_to_hand_eye_dataset_robot_poses = _path_list_creator(path, "pos", 2, ".yaml") if len(list_of_paths_to_hand_eye_dataset_robot_poses) != len(list_of_paths_to_hand_eye_dataset_point_clouds): @@ -162,7 +188,9 @@ def _main() -> None: list_of_open_3d_point_clouds = [] for data_pair_id in range(number_of_dataset_pairs): # Updating the user about the process status through the terminal - print(f"{data_pair_id} / {number_of_dataset_pairs} - {100*data_pair_id / number_of_dataset_pairs}%") + print( + f"{data_pair_id} / {number_of_dataset_pairs} - {(100 * data_pair_id / number_of_dataset_pairs):.2f}%" + ) # Reading point cloud from file frame = zivid.Frame(list_of_paths_to_hand_eye_dataset_point_clouds[data_pair_id]) @@ -178,24 +206,17 @@ def _main() -> None: if robot_camera_configuration.lower() == "eih": frame.point_cloud().transform(np.matmul(robot_pose, hand_eye_transform)) - xyz = frame.point_cloud().copy_data("xyz") - rgba = frame.point_cloud().copy_data("rgba") - - # Finding Cartesian coordinates of the checkerboard center point - detection_result = zivid.experimental.calibration.detect_feature_points(frame) - - if detection_result.valid(): - # Extracting the points within the ROI (checkerboard) - xyz_filtered = _filter_checkerboard_roi(xyz, detection_result.centroid()) + # Extracting the points within the ROI (calibration object) + xyz_filtered = _filter_calibration_object_roi(frame, args) - # Converting from NumPy array to Open3D format - point_cloud_open3d = _create_open3d_point_cloud(rgba, xyz_filtered) + # Converting from NumPy array to Open3D format + point_cloud_open3d = _create_open3d_point_cloud(frame.point_cloud().copy_data("rgba"), xyz_filtered) - # Saving point cloud to PLY file - o3d.io.write_point_cloud(f"img{data_pair_id + 1}.ply", point_cloud_open3d) + # Saving point cloud to PLY file + o3d.io.write_point_cloud(f"img{data_pair_id + 1}.ply", point_cloud_open3d) - # Appending the Open3D point cloud to a list for visualization - list_of_open_3d_point_clouds.append(point_cloud_open3d) + # Appending the Open3D point cloud to a list for visualization + list_of_open_3d_point_clouds.append(point_cloud_open3d) print(f"{number_of_dataset_pairs} / {number_of_dataset_pairs} - 100.0%") print("\nAll done!\n") diff --git a/source/applications/advanced/reproject_points.py b/source/applications/advanced/reproject_points.py index 56643e05..21e19249 100644 --- a/source/applications/advanced/reproject_points.py +++ b/source/applications/advanced/reproject_points.py @@ -7,8 +7,6 @@ The projector pixel coordinates are then used to draw markers at the correct locations before displaying the image using the projector. -Note: This example uses experimental SDK features, which may be modified, moved, or deleted in the future without notice. 
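The rewritten `_filter_calibration_object_roi` above keeps only the points within a fixed radius of the calibration object's centroid: 245 mm for the Zivid board, or half the diagonal (`size * sqrt(2) / 2`) for a square ArUco marker. A minimal NumPy-only sketch of that masking step, assuming `xyz` is the organized (H, W, 3) array from `copy_data("xyz")` and `centroid` is a 3-vector in the same millimeter camera frame.

```python
import numpy as np


def filter_by_radius(xyz: np.ndarray, centroid: np.ndarray, radius_threshold_mm: float) -> np.ndarray:
    """Set points farther than radius_threshold_mm from centroid to NaN, keeping the (H, W, 3) shape."""
    xyz_filtered = xyz.copy()
    # Per-pixel Euclidean distance to the centroid, broadcast over the H x W grid
    distance = np.linalg.norm(xyz_filtered - centroid, axis=2)
    xyz_filtered[distance > radius_threshold_mm] = np.nan
    return xyz_filtered


# For a 30 mm ArUco marker, half the marker diagonal bounds the marker itself:
# roi_xyz = filter_by_radius(xyz, marker_centroid, 30 * np.sqrt(2) / 2)
```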
- """ from datetime import timedelta @@ -17,7 +15,6 @@ import cv2 import numpy as np import zivid -import zivid.experimental.calibration def _checkerboard_grid() -> List[np.ndarray]: @@ -85,7 +82,7 @@ def _main() -> None: camera = app.connect_camera() print("Capturing and estimating pose of the Zivid checkerboard in the camera frame") - detection_result = zivid.experimental.calibration.detect_feature_points(camera) + detection_result = zivid.calibration.detect_calibration_board(camera) if not detection_result.valid(): raise RuntimeError("Calibration board not detected!") diff --git a/source/applications/advanced/robot_guidance/robodk_robot_guidance.py b/source/applications/advanced/robot_guidance/robodk_robot_guidance.py index 212a4afd..e5e53c36 100644 --- a/source/applications/advanced/robot_guidance/robodk_robot_guidance.py +++ b/source/applications/advanced/robot_guidance/robodk_robot_guidance.py @@ -12,8 +12,6 @@ Note: Make sure to launch RoboDK and connect to the robot before running this sample. -Note: This example uses experimental SDK features, which may be modified, moved, or deleted in the future without notice. - """ import argparse @@ -23,7 +21,6 @@ import numpy as np import robodk import zivid -import zivid.experimental.calibration from sample_utils.robodk_tools import connect_to_robot, set_robot_speed_and_acceleration from sample_utils.save_load_matrix import load_and_assert_affine_matrix @@ -145,7 +142,7 @@ def _generate_tool_poses_from_checkerboard( List of poses (4x4) for the tool path in the robot base """ - detection_result = zivid.experimental.calibration.detect_feature_points(camera) + detection_result = zivid.calibration.detect_calibration_board(camera) if not detection_result.valid(): raise RuntimeError("Calibration board not detected!") diff --git a/source/applications/advanced/roi_box_via_aruco_marker.py b/source/applications/advanced/roi_box_via_aruco_marker.py new file mode 100644 index 00000000..da20958a --- /dev/null +++ b/source/applications/advanced/roi_box_via_aruco_marker.py @@ -0,0 +1,116 @@ +""" +Filter the point cloud based on a ROI box given relative to the ArUco marker on a Zivid Calibration Board. + +The ZFC file for this sample can be downloaded from https://support.zivid.com/en/latest/api-reference/samples/sample-data.html. + +""" + +from typing import List + +import numpy as np +import zivid +from sample_utils.display import display_depthmap, display_pointcloud +from sample_utils.paths import get_sample_data_path + + +def _transform_points(points: List[np.ndarray], transform: np.ndarray) -> List[np.ndarray]: + """Perform a homogenous transformation to every point in 'points' and return the transformed points. 
+ + Args: + points: list of 3D points to be transformed + transform: homogenous transform (4x4) + + Returns: + transformed_points: list of transformed 3D points + + """ + rotation_matrix = transform[:3, :3] + translation_vector = transform[:3, 3] + + transformed_points = [] + for point in points: + transformed_points.append(rotation_matrix @ point + translation_vector) + + return transformed_points + + +def _main() -> None: + with zivid.Application() as app: + + file_camera = get_sample_data_path() / "BinWithCalibrationBoard.zfc" + + print(f"Creating virtual camera using file: {file_camera}") + camera = app.create_file_camera(file_camera) + + settings = zivid.Settings([zivid.Settings.Acquisition()]) + + original_frame = camera.capture(settings) + point_cloud = original_frame.point_cloud() + + print("Displaying the original point cloud") + display_pointcloud(point_cloud.copy_data("xyz"), point_cloud.copy_data("rgba")[:, :, :3]) + + print("Configuring ROI box based on bin size and checkerboard placement") + roi_box_length = 545 + roi_box_width = 345 + roi_box_height = 150 + + # Coordinates are relative to the ArUco marker origin which lies in the center of the ArUco marker. + # Positive x-axis is "East", y-axis is "South" and z-axis is "Down". + roi_box_lower_right_corner_in_aruco_frame = np.array([240, 30, 5]) + roi_box_upper_right_corner_in_aruco_frame = np.array( + [ + roi_box_lower_right_corner_in_aruco_frame[0], + roi_box_lower_right_corner_in_aruco_frame[1] - roi_box_width, + roi_box_lower_right_corner_in_aruco_frame[2], + ] + ) + roi_box_lower_left_corner_in_aruco_frame = np.array( + [ + roi_box_lower_right_corner_in_aruco_frame[0] - roi_box_length, + roi_box_lower_right_corner_in_aruco_frame[1], + roi_box_lower_right_corner_in_aruco_frame[2], + ] + ) + + point_o_in_aruco_frame = roi_box_lower_right_corner_in_aruco_frame + point_a_in_aruco_frame = roi_box_upper_right_corner_in_aruco_frame + point_b_in_aruco_frame = roi_box_lower_left_corner_in_aruco_frame + + print("Configuring ArUco marker") + marker_dictionary = zivid.calibration.MarkerDictionary.aruco4x4_50 + marker_id = [1] + + print("Detecting ArUco marker") + detection_result = zivid.calibration.detect_markers(original_frame, marker_id, marker_dictionary) + + if not detection_result.valid(): + raise RuntimeError("No ArUco markers detected") + + print("Estimating pose of detected ArUco marker") + transform_camera_to_marker = detection_result.detected_markers()[0].pose.to_matrix() + + print("Transforming the ROI base frame points to the camera frame") + roi_points_in_camera_frame = _transform_points( + [point_o_in_aruco_frame, point_a_in_aruco_frame, point_b_in_aruco_frame], + transform_camera_to_marker, + ) + + print("Setting the ROI") + settings.region_of_interest.box.enabled = True + settings.region_of_interest.box.point_o = roi_points_in_camera_frame[0] + settings.region_of_interest.box.point_a = roi_points_in_camera_frame[1] + settings.region_of_interest.box.point_b = roi_points_in_camera_frame[2] + settings.region_of_interest.box.extents = (-10, roi_box_height) + + roi_point_cloud = camera.capture(settings).point_cloud() + + print("Displaying the ROI-filtered point cloud") + display_pointcloud(roi_point_cloud.copy_data("xyz"), roi_point_cloud.copy_data("rgba")[:, :, :3]) + + print("Displaying depth map of the ROI-filtered point cloud") + display_depthmap(roi_point_cloud.copy_data("xyz")) + + +if __name__ == "__main__": + _main() diff --git a/source/applications/advanced/roi_box_via_checkerboard.py 
b/source/applications/advanced/roi_box_via_checkerboard.py index 5703ff76..1c4f2dab 100644 --- a/source/applications/advanced/roi_box_via_checkerboard.py +++ b/source/applications/advanced/roi_box_via_checkerboard.py @@ -3,15 +3,12 @@ The ZFC file for this sample can be downloaded from https://support.zivid.com/en/latest/api-reference/samples/sample-data.html. -Note: This example uses experimental SDK features, which may be modified, moved, or deleted in the future without notice. - """ from typing import List import numpy as np import zivid -import zivid.experimental.calibration from sample_utils.display import display_depthmap, display_pointcloud from sample_utils.paths import get_sample_data_path @@ -38,68 +35,72 @@ def _transform_points(points: List[np.ndarray], transform: np.ndarray) -> List[n def _main() -> None: - app = zivid.Application() - - file_camera = get_sample_data_path() / "BinWithCalibrationBoard.zfc" - - print(f"Creating virtual camera using file: {file_camera}") - camera = app.create_file_camera(file_camera) - - settings = zivid.Settings([zivid.Settings.Acquisition()]) - - original_frame = camera.capture(settings) - original_point_cloud = original_frame.point_cloud() - - print("Displaying the original point cloud") - display_pointcloud(original_point_cloud.copy_data("xyz"), original_point_cloud.copy_data("rgba")[:, :, :3]) - - print("Configuring ROI box based on bin size and checkerboard placement") - roi_box_length = 545 - roi_box_width = 345 - roi_box_height = 150 - - # Coordinates are relative to the checkerboard origin which lies in the intersection between the four checkers - # in the top-left corner of the checkerboard: Positive x-axis is "East", y-axis is "South" and z-axis is "Down" - roi_box_lower_right_corner = np.array([240, 260, 0.5]) - roi_box_upper_right_corner = np.array( - [ - roi_box_lower_right_corner[0], - roi_box_lower_right_corner[1] - roi_box_width, - roi_box_lower_right_corner[2], - ] - ) - roi_box_lower_left_corner = np.array( - [roi_box_lower_right_corner[0] - roi_box_length, roi_box_lower_right_corner[1], roi_box_lower_right_corner[2]] - ) - - point_o_in_checkerboard_frame = roi_box_lower_right_corner - point_a_in_checkerboard_frame = roi_box_upper_right_corner - point_b_in_checkerboard_frame = roi_box_lower_left_corner - - print("Detecting and estimating pose of the Zivid checkerboard in the camera frame") - detection_result = zivid.experimental.calibration.detect_feature_points(original_frame) - transform_checkerboard_to_camera = detection_result.pose().to_matrix() - - print("Transforming the ROI base frame points to the camera frame") - roi_points_in_camera_frame = _transform_points( - [point_o_in_checkerboard_frame, point_a_in_checkerboard_frame, point_b_in_checkerboard_frame], - transform_checkerboard_to_camera, - ) - - print("Setting the ROI") - settings.region_of_interest.box.enabled = True - settings.region_of_interest.box.point_o = roi_points_in_camera_frame[0] - settings.region_of_interest.box.point_a = roi_points_in_camera_frame[1] - settings.region_of_interest.box.point_b = roi_points_in_camera_frame[2] - settings.region_of_interest.box.extents = (-10, roi_box_height) - - roi_point_cloud = camera.capture(settings).point_cloud() - - print("Displaying the ROI-filtered point cloud") - display_pointcloud(roi_point_cloud.copy_data("xyz"), roi_point_cloud.copy_data("rgba")[:, :, :3]) - - print("Displaying depth map of the ROI-filtered point cloud") - display_depthmap(roi_point_cloud.copy_data("xyz")) + with zivid.Application() as app: + + 
file_camera = get_sample_data_path() / "BinWithCalibrationBoard.zfc" + + print(f"Creating virtual camera using file: {file_camera}") + camera = app.create_file_camera(file_camera) + + settings = zivid.Settings([zivid.Settings.Acquisition()]) + + original_frame = camera.capture(settings) + point_cloud = original_frame.point_cloud() + + print("Displaying the original point cloud") + display_pointcloud(point_cloud.copy_data("xyz"), point_cloud.copy_data("rgba")[:, :, :3]) + + print("Configuring ROI box based on bin size and checkerboard placement") + roi_box_length = 545 + roi_box_width = 345 + roi_box_height = 150 + + # Coordinates are relative to the checkerboard origin which lies in the intersection between the four checkers + # in the top-left corner of the checkerboard: Positive x-axis is "East", y-axis is "South" and z-axis is "Down" + roi_box_lower_right_corner = np.array([240, 260, 0.5]) + roi_box_upper_right_corner = np.array( + [ + roi_box_lower_right_corner[0], + roi_box_lower_right_corner[1] - roi_box_width, + roi_box_lower_right_corner[2], + ] + ) + roi_box_lower_left_corner = np.array( + [ + roi_box_lower_right_corner[0] - roi_box_length, + roi_box_lower_right_corner[1], + roi_box_lower_right_corner[2], + ] + ) + + point_o_in_checkerboard_frame = roi_box_lower_right_corner + point_a_in_checkerboard_frame = roi_box_upper_right_corner + point_b_in_checkerboard_frame = roi_box_lower_left_corner + + print("Detecting and estimating pose of the Zivid checkerboard in the camera frame") + detection_result = zivid.calibration.detect_feature_points(original_frame) + transform_camera_to_checkerboard = detection_result.pose().to_matrix() + + print("Transforming the ROI base frame points to the camera frame") + roi_points_in_camera_frame = _transform_points( + [point_o_in_checkerboard_frame, point_a_in_checkerboard_frame, point_b_in_checkerboard_frame], + transform_camera_to_checkerboard, + ) + + print("Setting the ROI") + settings.region_of_interest.box.enabled = True + settings.region_of_interest.box.point_o = roi_points_in_camera_frame[0] + settings.region_of_interest.box.point_a = roi_points_in_camera_frame[1] + settings.region_of_interest.box.point_b = roi_points_in_camera_frame[2] + settings.region_of_interest.box.extents = (-10, roi_box_height) + + roi_point_cloud = camera.capture(settings).point_cloud() + + print("Displaying the ROI-filtered point cloud") + display_pointcloud(roi_point_cloud.copy_data("xyz"), roi_point_cloud.copy_data("rgba")[:, :, :3]) + + print("Displaying depth map of the ROI-filtered point cloud") + display_depthmap(roi_point_cloud.copy_data("xyz")) if __name__ == "__main__": diff --git a/source/applications/advanced/transform_point_cloud_via_aruco_marker.py b/source/applications/advanced/transform_point_cloud_via_aruco_marker.py new file mode 100644 index 00000000..9227c403 --- /dev/null +++ b/source/applications/advanced/transform_point_cloud_via_aruco_marker.py @@ -0,0 +1,90 @@ +""" +Transform a point cloud from camera to ArUco marker coordinate frame by estimating the marker's pose from the point cloud. + +The ZDF file for this sample can be found under the main instructions for Zivid samples. 
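Before moving on to the transform samples: the two roi_box samples above share the same region-of-interest recipe, spanning the box base from three corner points and extruding it by the bin height. A short sketch of that settings pattern, assuming the corner points are already expressed in the camera frame (points defined relative to the checkerboard or ArUco marker are first mapped through the detected pose, as in `_transform_points` above); the helper name is illustrative and the extents interpretation follows the samples' usage.

```python
import numpy as np
import zivid


def set_roi_box(
    settings: zivid.Settings,
    point_o: np.ndarray,
    point_a: np.ndarray,
    point_b: np.ndarray,
    height_mm: float,
) -> None:
    """Enable a capture-time ROI box with base corners O, A and B given in the camera frame (mm)."""
    settings.region_of_interest.box.enabled = True
    settings.region_of_interest.box.point_o = point_o  # shared corner of the base
    settings.region_of_interest.box.point_a = point_a  # corner defining the first base edge (O -> A)
    settings.region_of_interest.box.point_b = point_b  # corner defining the second base edge (O -> B)
    # Extents along the base-plane normal: a small margin below the base, up to the bin height
    settings.region_of_interest.box.extents = (-10, height_mm)
```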
+ +""" + +from pathlib import Path + +import cv2 +import numpy as np +import zivid +from sample_utils.display import display_bgr +from sample_utils.paths import get_sample_data_path +from sample_utils.save_load_matrix import assert_affine_matrix_and_save + + +def _draw_detected_marker(bgra_image: np.ndarray, detection_result: zivid.calibration.DetectionResult) -> np.ndarray: + """Draw detected ArUco marker on the BGRA image based on Zivid ArUco marker detection results. + + Args: + bgra_image: The input BGRA image. + detection_result: The result object containing detected ArUco markers with their corners. + + Returns: + bgra_image: The BGR image with ArUco detected marker drawn on it. + """ + + bgr = bgra_image[:, :, 0:3].copy() + marker_corners = detection_result.detected_markers()[0].corners_in_pixel_coordinates + + for i, corner in enumerate(marker_corners): + start_point = tuple(map(int, corner)) + end_point = tuple(map(int, marker_corners[(i + 1) % len(marker_corners)])) + cv2.line(bgr, start_point, end_point, (0, 255, 0), 2) + + return bgr + + +def _main() -> None: + with zivid.Application(): + + data_file = get_sample_data_path() / "CalibrationBoardInCameraOrigin.zdf" + print(f"Reading ZDF frame from file: {data_file}") + frame = zivid.Frame(data_file) + point_cloud = frame.point_cloud() + + print("Configuring ArUco marker") + marker_dictionary = zivid.calibration.MarkerDictionary.aruco4x4_50 + marker_id = [1] + + print("Detecting ArUco marker") + detection_result = zivid.calibration.detect_markers(frame, marker_id, marker_dictionary) + + if not detection_result.valid(): + raise RuntimeError("No ArUco markers detected") + + print("Converting to OpenCV image format") + bgra_image = point_cloud.copy_data("bgra") + + print("Displaying detected ArUco marker") + bgr = _draw_detected_marker(bgra_image, detection_result) + display_bgr(bgr, "ArucoMarkerDetected") + + bgr_image_file = "ArucoMarkerDetected.png" + print(f"Saving 2D color image with detected ArUco marker to file: {bgr_image_file}") + cv2.imwrite(bgr_image_file, bgr) + + print("Estimating pose of detected ArUco marker") + transform_camera_to_marker = detection_result.detected_markers()[0].pose.to_matrix() + print("ArUco marker pose in camera frame:") + print(transform_camera_to_marker) + print("Camera pose in ArUco marker frame:") + transform_marker_to_camera = np.linalg.inv(transform_camera_to_marker) + print(transform_marker_to_camera) + + transform_file = Path("ArUcoMarkerToCameraTransform.yaml") + print("Saving a YAML file with Inverted ArUco marker pose to file: ") + assert_affine_matrix_and_save(transform_marker_to_camera, transform_file) + + print("Transforming point cloud from camera frame to ArUco marker frame") + point_cloud.transform(transform_marker_to_camera) + + aruco_marker_transformed_file = "CalibrationBoardInArucoMarkerOrigin.zdf" + print(f"Saving transformed point cloud to file: {aruco_marker_transformed_file}") + frame.save(aruco_marker_transformed_file) + + +if __name__ == "__main__": + _main() diff --git a/source/applications/advanced/transform_point_cloud_via_checkerboard.py b/source/applications/advanced/transform_point_cloud_via_checkerboard.py new file mode 100644 index 00000000..b561740a --- /dev/null +++ b/source/applications/advanced/transform_point_cloud_via_checkerboard.py @@ -0,0 +1,193 @@ +""" +Transform a point cloud from camera to checkerboard (Zivid Calibration Board) coordinate frame by getting checkerboard pose from the API. 
+ +The ZDF file for this sample can be found under the main instructions for Zivid samples. + +Note: This example uses experimental SDK features, which may be modified, moved, or deleted in the future without notice. + +""" + +from pathlib import Path +from typing import Dict, Tuple + +import cv2 +import numpy as np +import zivid +import zivid.experimental.calibration +from sample_utils.display import display_bgr +from sample_utils.paths import get_sample_data_path +from sample_utils.save_load_matrix import assert_affine_matrix_and_save + + +def _coordinate_system_line( + bgra_image: np.ndarray, + first_point: Tuple[int, int], + second_point: Tuple[int, int], + line_color: Tuple[int, int, int], +) -> None: + """Draw a line on a BGRA image. + + Args: + bgra_image: BGRA image. + first_point: Pixel coordinates of the first end point. + second_point: Pixel coordinates of the second end point. + line_color: Line color. + """ + + line_thickness = 4 + line_type = cv2.LINE_8 + cv2.line(bgra_image, first_point, second_point, line_color, line_thickness, line_type) + + +def _zivid_camera_matrix_to_opencv_camera_matrix(camera_matrix: zivid.CameraIntrinsics.CameraMatrix) -> np.ndarray: + """Convert camera matrix from Zivid to OpenCV. + + Args: + camera_matrix: Camera matrix in Zivid format. + + Returns: + camera_matrix_opencv: Camera matrix in OpenCV format. + """ + + return np.array( + [[camera_matrix.fx, 0.0, camera_matrix.cx], [0.0, camera_matrix.fy, camera_matrix.cy], [0.0, 0.0, 1.0]] + ) + + +def _zivid_distortion_coefficients_to_opencv_distortion_coefficients( + distortion_coeffs: zivid.CameraIntrinsics.Distortion, +) -> np.ndarray: + """Convert distortion coefficients from Zivid to OpenCV. + + Args: + distortion_coeffs: Camera distortion coefficients in Zivid format. + + Returns: + distortion_coeffs_opencv: Camera distortion coefficients in OpenCV format. + """ + + return np.array( + [distortion_coeffs.k1, distortion_coeffs.k2, distortion_coeffs.p1, distortion_coeffs.p2, distortion_coeffs.k3] + ) + + +def _move_point( + origin_in_camera_frame: np.ndarray, offset_in_board_frame: np.ndarray, checkerboard_pose: np.ndarray +) -> np.ndarray: + """Move a coordinate system origin point given a direction and an offset to create a coordinate system axis point. + + Args: + origin_in_camera_frame: 3D coordinates of the coordinate system origin point. + offset_in_board_frame: 3D coordinates of the offset to move the coordinate system origin point to. + checkerboard_pose: Transformation matrix (checkerboard in camera frame). + + Returns: + translated point: 3D coordinates of coordinate system axis point. + """ + + rotation_matrix = checkerboard_pose[:3, :3] + offset_rotated = np.dot(rotation_matrix, offset_in_board_frame) + return origin_in_camera_frame + offset_rotated + + +def _get_coordinate_system_points( + frame: zivid.Frame, checkerboard_pose: np.ndarray, size_of_axis: float +) -> Dict[str, Tuple[int, int]]: + """Get pixel coordinates of the coordinate system origin and axes. + + Args: + frame: Zivid frame containing point cloud. + checkerboard_pose: Transformation matrix (checkerboard in camera frame). + size_of_axis: Coordinate system axis length in mm. + + Returns: + frame_points: Pixel coordinates of the coordinate system origin and axes. 
+    """
+
+    intrinsics = zivid.experimental.calibration.estimate_intrinsics(frame)
+    cv_camera_matrix = _zivid_camera_matrix_to_opencv_camera_matrix(intrinsics.camera_matrix)
+    cv_dist_coeffs = _zivid_distortion_coefficients_to_opencv_distortion_coefficients(intrinsics.distortion)
+
+    origin_position = np.array([checkerboard_pose[0, 3], checkerboard_pose[1, 3], checkerboard_pose[2, 3]])
+    x_axis_direction = _move_point(origin_position, np.array([size_of_axis, 0.0, 0.0]), checkerboard_pose)
+    y_axis_direction = _move_point(origin_position, np.array([0.0, size_of_axis, 0.0]), checkerboard_pose)
+    z_axis_direction = _move_point(origin_position, np.array([0.0, 0.0, size_of_axis]), checkerboard_pose)
+
+    points_to_project = np.array([origin_position, x_axis_direction, y_axis_direction, z_axis_direction])
+    projected_points = cv2.projectPoints(points_to_project, np.zeros(3), np.zeros(3), cv_camera_matrix, cv_dist_coeffs)[0]
+
+    projected_points = projected_points.reshape(-1, 2)
+    return {
+        "origin_point": (int(projected_points[0][0]), int(projected_points[0][1])),
+        "x_axis_point": (int(projected_points[1][0]), int(projected_points[1][1])),
+        "y_axis_point": (int(projected_points[2][0]), int(projected_points[2][1])),
+        "z_axis_point": (int(projected_points[3][0]), int(projected_points[3][1])),
+    }
+
+
+def _draw_coordinate_system(frame: zivid.Frame, checkerboard_pose: np.ndarray, bgra_image: np.ndarray) -> None:
+    """Draw a coordinate system on a BGRA image.
+
+    Args:
+        frame: Zivid frame containing point cloud.
+        checkerboard_pose: Transformation matrix (checkerboard in camera frame).
+        bgra_image: BGRA image.
+    """
+
+    size_of_axis = 30.0  # each axis has 30 mm of length
+
+    print("Acquiring frame points")
+    frame_points = _get_coordinate_system_points(frame, checkerboard_pose, size_of_axis)
+
+    origin_point = frame_points["origin_point"]
+    z = frame_points["z_axis_point"]
+    y = frame_points["y_axis_point"]
+    x = frame_points["x_axis_point"]
+
+    print("Drawing Z axis")
+    _coordinate_system_line(bgra_image, origin_point, z, (255, 0, 0))
+
+    print("Drawing Y axis")
+    _coordinate_system_line(bgra_image, origin_point, y, (0, 255, 0))
+
+    print("Drawing X axis")
+    _coordinate_system_line(bgra_image, origin_point, x, (0, 0, 255))
+
+
+def _main() -> None:
+    with zivid.Application():
+
+        data_file = get_sample_data_path() / "CalibrationBoardInCameraOrigin.zdf"
+        print(f"Reading ZDF frame from file: {data_file}")
+        frame = zivid.Frame(data_file)
+        point_cloud = frame.point_cloud()
+
+        print("Detecting and estimating pose of the Zivid checkerboard in the camera frame")
+        detection_result = zivid.calibration.detect_calibration_board(frame)
+        transform_camera_to_checkerboard = detection_result.pose().to_matrix()
+        print(transform_camera_to_checkerboard)
+        print("Camera pose in checkerboard frame:")
+        transform_checkerboard_to_camera = np.linalg.inv(transform_camera_to_checkerboard)
+        print(transform_checkerboard_to_camera)
+
+        transform_file = Path("CheckerboardToCameraTransform.yaml")
+        print(f"Saving a YAML file with the inverted checkerboard pose to file: {transform_file}")
+        assert_affine_matrix_and_save(transform_checkerboard_to_camera, transform_file)
+
+        print("Transforming point cloud from camera frame to checkerboard frame")
+        point_cloud.transform(transform_checkerboard_to_camera)
+
+        print("Converting to OpenCV image format")
+        bgra_image = point_cloud.copy_data("bgra")
+
+        print("Visualizing checkerboard with coordinate system")
+        _draw_coordinate_system(frame, transform_camera_to_checkerboard, bgra_image)
+
+        display_bgr(bgra_image, "Checkerboard transformation frame")
+
+        checkerboard_transformed_file = "CalibrationBoardInCheckerboardOrigin.zdf"
+        print(f"Saving transformed point cloud to file: {checkerboard_transformed_file}")
+        frame.save(checkerboard_transformed_file)
+
+
+if __name__ == "__main__":
+    _main()
diff --git a/source/applications/advanced/verify_hand_eye_calibration/robodk_verify_hand_eye_calibration.py b/source/applications/advanced/verify_hand_eye_calibration/robodk_verify_hand_eye_calibration.py
index 26df4648..cc8fa3e6 100644
--- a/source/applications/advanced/verify_hand_eye_calibration/robodk_verify_hand_eye_calibration.py
+++ b/source/applications/advanced/verify_hand_eye_calibration/robodk_verify_hand_eye_calibration.py
@@ -2,12 +2,13 @@
 Perform a touch test with a robot to verify Hand-Eye Calibration using the RoboDK interface.
 
 The touch test is performed by a robot equipped with the Pointed Hand-Eye Verification Tool.
-The robot touches a corner of the checkerboard on the Zivid calibration board to verify hand-eye calibration.
+The robot touches the Zivid calibration object to verify hand-eye calibration.
 The sample requires as follows:
 - Type of calibration used (eye-in-hand or eye-to-hand)
 - YAML file with Hand-Eye transformation
 - YAML file with Pointed Hand-Eye Verification Tool transformation
 - Capture pose target name used in RoboDK
+- Calibration object used (Zivid calibration board or ArUco marker)
 
 Note: Make sure to launch your RDK file and connect to the robot through RoboDK before running this script.
 
@@ -19,23 +20,28 @@
 import argparse
 import datetime
+from typing import Dict, Tuple
 
+import cv2
 import numpy as np
 import zivid
+import zivid.experimental.calibration
 from robodk import Mat
 from robodk.robolink import Item
+from sample_utils.display import display_rgb
 from sample_utils.robodk_tools import connect_to_robot, get_robot_targets, set_robot_speed_and_acceleration
 from sample_utils.save_load_matrix import load_and_assert_affine_matrix
 
 
-def _capture_and_estimate_calibration_board_pose(camera: zivid.Camera) -> np.ndarray:
-    """Capture an image with the Zivid camera using capture assistant, detecting, and estimating the pose of the calibration board.
+def _assisted_capture(camera: zivid.Camera) -> Tuple[zivid.Frame, np.ndarray]:
+    """Acquire frame and BGRA image with capture assistant.
 
     Args:
        camera: Zivid camera
 
     Returns:
-        calibration_board_pose: A 4x4 numpy array containing the calibration board pose
+        frame: Zivid frame
+        bgra_image: BGRA image
 
     """
     suggest_settings_parameters = zivid.capture_assistant.SuggestSettingsParameters(
@@ -46,25 +52,55 @@ def _capture_and_estimate_calibration_board_pose(camera: zivid.Camera) -> np.nda
     settings_list = zivid.capture_assistant.suggest_settings(camera, suggest_settings_parameters)
     frame = camera.capture(settings_list)
 
-    calibration_board_pose = zivid.calibration.detect_feature_points(frame).pose().to_matrix()
+    bgra_image = frame.point_cloud().copy_data("bgra")
 
-    return calibration_board_pose
+    return frame, bgra_image
 
 
-def _get_robot_base_to_calibration_board_transform(
+def _estimate_calibration_object_pose(frame: zivid.Frame, user_options: argparse.Namespace) -> np.ndarray:
+    """Detect and estimate the pose of the calibration object.
+
+    Args:
+        frame: Zivid frame
+        user_options: Input arguments
+
+    Returns:
+        calibration_object_pose: A 4x4 numpy array containing the calibration object pose
+
+    """
+
+    if user_options.calibration_object == "checkerboard":
+        print("Detecting the Zivid calibration board pose (upper left checkerboard corner)")
+        calibration_object_pose = zivid.calibration.detect_calibration_board(frame).pose().to_matrix()
+    else:
+        print("Detecting the ArUco marker pose (center of the marker)")
+
+        calibration_object_pose = (
+            zivid.calibration.detect_markers(frame, user_options.id, user_options.dictionary)
+            .detected_markers()[0]
+            .pose.to_matrix()
+        )
+
+    return calibration_object_pose
+
+
+def _get_robot_base_to_calibration_object_transform(
     user_options: argparse.Namespace,
-    camera_to_calibration_board_transform: np.ndarray,
+    camera_to_calibration_object_transform: np.ndarray,
     robot: Item,
 ) -> np.ndarray:
-    """Calculating the robot base to the calibration board transform matrix.
+    """Calculating the robot base to the calibration object transform matrix.
 
     Args:
        user_options: Arguments from user that contain the type of hand-eye calibration done and the path to the
            matrix that resulted from that calibration
-        camera_to_calibration_board_transform: A 4x4 numpy array containing the calibration board pose in the camera frame
+        camera_to_calibration_object_transform: A 4x4 numpy array containing the calibration object pose in the camera frame
        robot: Robot item in open RoboDK rdk file
 
     Returns:
-        robot_base_to_calibration_board_transform: A 4x4 numpy array containing the calibration board pose in the robot base frame
+        robot_base_to_calibration_object_transform: A 4x4 numpy array containing the calibration object pose in the robot base frame
+
+    Raises:
+        ValueError: If an invalid calibration type is selected
 
     """
     if user_options.eih:
@@ -72,17 +108,19 @@ def _get_robot_base_to_calibration_board_transform(
         robot_base_to_flange_transform = np.array(robot.Pose()).T
         flange_to_camera_transform = load_and_assert_affine_matrix(user_options.hand_eye_yaml)
 
-        robot_base_to_calibration_board_transform = (
-            robot_base_to_flange_transform @ flange_to_camera_transform @ camera_to_calibration_board_transform
+        robot_base_to_calibration_object_transform = (
+            robot_base_to_flange_transform @ flange_to_camera_transform @ camera_to_calibration_object_transform
         )
-    if user_options.eth:
+    elif user_options.eth:
         robot_base_to_camera_transform = load_and_assert_affine_matrix(user_options.hand_eye_yaml)
 
-        robot_base_to_calibration_board_transform = (
-            robot_base_to_camera_transform @ camera_to_calibration_board_transform
+        robot_base_to_calibration_object_transform = (
+            robot_base_to_camera_transform @ camera_to_calibration_object_transform
         )
+    else:
+        raise ValueError("Invalid calibration type. Please choose either eye-in-hand or eye-to-hand.")
 
-    return robot_base_to_calibration_board_transform
+    return robot_base_to_calibration_object_transform
 
 
 def _yes_no_prompt(question: str) -> str:
@@ -102,6 +140,148 @@ def _yes_no_prompt(question: str) -> str:
         print("Invalid response. Please respond with either 'y' or 'n'.")
 
 
+def _coordinate_system_line(
+    bgra_image: np.ndarray,
+    first_point: Tuple[int, int],
+    second_point: Tuple[int, int],
+    line_color: Tuple[int, int, int],
+) -> None:
+    """Draw a line on a BGRA image.
+
+    Args:
+        bgra_image: BGRA image.
+        first_point: Pixel coordinates of the first end point.
+        second_point: Pixel coordinates of the second end point.
+        line_color: Line color.
+ """ + + line_thickness = 4 + line_type = cv2.LINE_8 + cv2.line(bgra_image, first_point, second_point, line_color, line_thickness, line_type) + + +def _zivid_camera_matrix_to_opencv_camera_matrix(camera_matrix: zivid.CameraIntrinsics.CameraMatrix) -> np.ndarray: + """Convert camera matrix from Zivid to OpenCV. + + Args: + camera_matrix: Camera matrix in Zivid format. + + Returns: + camera_matrix_opencv: Camera matrix in OpenCV format. + """ + + return np.array( + [[camera_matrix.fx, 0.0, camera_matrix.cx], [0.0, camera_matrix.fy, camera_matrix.cy], [0.0, 0.0, 1.0]] + ) + + +def _zivid_distortion_coefficients_to_opencv_distortion_coefficients( + distortion_coeffs: zivid.CameraIntrinsics.Distortion, +) -> np.ndarray: + """Convert distortion coefficients from Zivid to OpenCV. + + Args: + distortion_coeffs: Camera distortion coefficients in Zivid format. + + Returns: + distortion_coeffs_opencv: Camera distortion coefficients in OpenCV format. + """ + + return np.array( + [distortion_coeffs.k1, distortion_coeffs.k2, distortion_coeffs.p1, distortion_coeffs.p2, distortion_coeffs.k3] + ) + + +def _move_point( + origin_in_camera_frame: np.ndarray, offset_in_object_frame: np.ndarray, calibration_object_pose: np.ndarray +) -> np.ndarray: + """Move a coordinate system origin point given a direction and an offset to create a coordinate system axis point. + + Args: + origin_in_camera_frame: 3D coordinates of the coordinate system origin point. + offset_in_object_frame: 3D coordinates of the offset to move the coordinate system origin point to. + calibration_object_pose: Transformation matrix (calibration object in camera frame). + + Returns: + translated_point: 3D coordinates of coordinate system axis point. + """ + + rotation_matrix = calibration_object_pose[:3, :3] + offset_rotated = np.dot(rotation_matrix, offset_in_object_frame) + return origin_in_camera_frame + offset_rotated + + +def _get_coordinate_system_points( + frame: zivid.Frame, calibration_object_pose: np.ndarray, size_of_axis: float +) -> Dict[str, Tuple[int, int]]: + """Get pixel coordinates of the coordinate system origin and axes. + + Args: + frame: Zivid frame containing point cloud. + calibration_object_pose: Transformation matrix (calibration object in camera frame). + size_of_axis: Coordinate system axis length in mm. + + Returns: + frame_points: Pixel coordinates of the coordinate system origin and axes. 
+ """ + + intrinsics = zivid.experimental.calibration.estimate_intrinsics(frame) + cv_camera_matrix = _zivid_camera_matrix_to_opencv_camera_matrix(intrinsics.camera_matrix) + cv_dist_coeffs = _zivid_distortion_coefficients_to_opencv_distortion_coefficients(intrinsics.distortion) + + origin_position = np.array( + [calibration_object_pose[0, 3], calibration_object_pose[1, 3], calibration_object_pose[2, 3]] + ) + x_axis_direction = _move_point(origin_position, np.array([size_of_axis, 0.0, 0.0]), calibration_object_pose) + y_axis_direction = _move_point(origin_position, np.array([0.0, size_of_axis, 0.0]), calibration_object_pose) + z_axis_direction = _move_point(origin_position, np.array([0.0, 0.0, size_of_axis]), calibration_object_pose) + + points_to_project = np.array([origin_position, x_axis_direction, y_axis_direction, z_axis_direction]) + projected_points = cv2.projectPoints(points_to_project, np.zeros(3), np.zeros(3), cv_camera_matrix, cv_dist_coeffs)[ + 0 + ] + + projected_points = projected_points.reshape(-1, 2) + return { + "origin_point": (int(projected_points[0][0]), int(projected_points[0][1])), + "x_axis_point": (int(projected_points[1][0]), int(projected_points[1][1])), + "y_axis_point": (int(projected_points[2][0]), int(projected_points[2][1])), + "z_axis_point": (int(projected_points[3][0]), int(projected_points[3][1])), + } + + +def _draw_coordinate_system(frame: zivid.Frame, calibration_object_pose: np.ndarray, bgra_image: np.ndarray) -> None: + """Draw a coordinate system on a BGRA image. + + Args: + frame: Zivid frame containing point cloud. + calibration_object_pose: Transformation matrix (calibration object in camera frame). + bgra_image: BGRA image. + + """ + + size_of_axis = 30.0 # each axis has 30 mm of length + + print("Acquiring frame points") + frame_points = _get_coordinate_system_points(frame, calibration_object_pose, size_of_axis) + + origin_point = frame_points["origin_point"] + z = frame_points["z_axis_point"] + y = frame_points["y_axis_point"] + x = frame_points["x_axis_point"] + + print("Drawing Z axis") + _coordinate_system_line(bgra_image, origin_point, z, (255, 0, 0)) + + print("Drawing Y axis") + _coordinate_system_line(bgra_image, origin_point, y, (0, 255, 0)) + + print("Drawing X axis") + _coordinate_system_line(bgra_image, origin_point, x, (0, 0, 255)) + + display_rgb(rgb=cv2.cvtColor(bgra_image, cv2.COLOR_BGRA2RGB)) + + def _options() -> argparse.Namespace: """Function to read user arguments. 
@@ -112,14 +292,13 @@ def _options() -> argparse.Namespace: parser = argparse.ArgumentParser(description=__doc__) type_group = parser.add_mutually_exclusive_group(required=True) - type_group.add_argument("--eih", "--eye-in-hand", action="store_true", help="eye-in-hand calibration") - type_group.add_argument("--eth", "--eye-to-hand", action="store_true", help="eye-to-hand calibration") - + type_group.add_argument("--eih", "--eye-in-hand", action="store_true", help="eye-in-hand configuration") + type_group.add_argument("--eth", "--eye-to-hand", action="store_true", help="eye-to-hand configuration") parser.add_argument("--ip", required=True, help="IP address of the robot controller") parser.add_argument( "--target-keyword", required=True, - help="RoboDK target name representing a robot pose at which the calibration board is in the field of view of the camera", + help="RoboDK target name representing a robot pose at which the calibration object is in the field of view of the camera", ) parser.add_argument( "--tool-yaml", @@ -136,17 +315,31 @@ def _options() -> argparse.Namespace: required=True, help="Path to the YAML file that contains the Hand-Eye transformation matrix", ) + subparsers = parser.add_subparsers(dest="calibration_object", required=True, help="Calibration object type") + subparsers.add_parser("checkerboard", help="Verify using Zivid calibration board") + marker_parser = subparsers.add_parser("marker", help="Verify using ArUco marker") + marker_parser.add_argument( + "--dictionary", + required=True, + choices=list(zivid.calibration.MarkerDictionary.valid_values()), + help="Dictionary of the targeted ArUco marker", + ) + marker_parser.add_argument( + "--id", nargs=1, required=True, type=int, help="ID of ArUco marker to be used for verification" + ) return parser.parse_args() -def main() -> None: +def _main() -> None: + app = zivid.Application() camera = app.connect_camera() user_options = _options() rdk, robot = connect_to_robot(user_options.ip) - set_robot_speed_and_acceleration(robot, speed=120, joint_speed=120, acceleration=120, joint_acceleration=120) + # NOTE! Verify safe operation speeds and accelerations for your robot! 
+    set_robot_speed_and_acceleration(robot, speed=100, joint_speed=100, acceleration=50, joint_acceleration=50)
 
     print("Loading the Pointed Hand-Eye Verification Tool transformation matrix from a YAML file")
     pointed_hand_eye_verification_tool_matrix = load_and_assert_affine_matrix(user_options.tool_yaml)
@@ -166,46 +359,49 @@ def main() -> None:
     )
     robot.MoveJ(capture_pose[0])
 
-    print("\nPlace the calibration board in the FOV of the camera.")
+    print("\nPlace the calibration object in the FOV of the camera.")
     input("Press enter to start.")
 
     while True:
         try:
-            print("Detecting the calibration board pose (upper left checkerboard corner)")
-            camera_to_calibration_board_transform = _capture_and_estimate_calibration_board_pose(camera)
+            frame, bgra_image = _assisted_capture(camera)
+            camera_to_calibration_object_transform = _estimate_calibration_object_pose(frame, user_options)
 
-            print("Calculating the calibration board pose in robot base frame")
-            robot_base_to_calibration_board_transform = _get_robot_base_to_calibration_board_transform(
+            print("Calculating the calibration object pose in robot base frame")
+            robot_base_to_calibration_object_transform = _get_robot_base_to_calibration_object_transform(
                 user_options,
-                camera_to_calibration_board_transform,
+                camera_to_calibration_object_transform,
                 robot,
             )
 
-            print("Calculating pose for robot to touch the calibration board")
-            touch_pose = robot_base_to_calibration_board_transform @ np.linalg.inv(tcp)
+            print("Calculating pose for robot to touch the calibration object")
+            touch_pose = robot_base_to_calibration_object_transform @ np.linalg.inv(tcp)
 
-            print("Calculating pose for the robot to approach the calibration board")
+            print("Calculating pose for the robot to approach the calibration object")
             touch_pose_offset = np.identity(4)
             touch_pose_offset[2, 3] = -140
             approach_pose = touch_pose @ touch_pose_offset
 
-            print("Touching calibration board (upper left checkerboard corner)")
+            _draw_coordinate_system(frame, camera_to_calibration_object_transform, bgra_image)
+            input("\nClose the window and press enter to touch the calibration object...")
+
+            print("Touching calibration object")
             robot.MoveJ(Mat(approach_pose.tolist()))
             robot.MoveL(Mat(touch_pose.tolist()))
 
             input("\nPress enter to pull back and return to the capture pose...")
             robot.MoveL(Mat(approach_pose.tolist()))
             robot.MoveJ(capture_pose[0])
 
-            print("\nThe board can be moved at this time.")
+            print("\nThe calibration object can be moved at this time.")
             answer = _yes_no_prompt("Perform another touch?")
             if answer == "n":
                 break
 
         except RuntimeError as ex:
             print(ex)
-            print("Please make sure calibration board is in FOV.")
+            print("Please make sure calibration object is in FOV.")
             input("Press enter to continue.")
 
 
 if __name__ == "__main__":
-    main()
+    _main()
diff --git a/source/applications/point_cloud_tutorial.md b/source/applications/point_cloud_tutorial.md
index 829ad763..55bdd945 100644
--- a/source/applications/point_cloud_tutorial.md
+++ b/source/applications/point_cloud_tutorial.md
@@ -12,7 +12,6 @@ tutorial see:
 [**Introduction**](#Introduction) |
 [**Frame**](#Frame) |
 [**Point**](#Point-Cloud) |
-[**Transform**](#Transform) |
 [**Downsample**](#Downsample) |
 [**Normals**](#Normals) |
 [**Visualize**](#Visualize) |
@@ -148,7 +147,7 @@ In terms of memory allocation, there are two ways to copy data:
 - A user can pass a pointer to a pre-allocated memory buffer, and the
   Zivid SDK will copy the data to the pre-allocated memory buffer.
 
-## Transform +----- You may want to [transform](https://support.zivid.com/latest//academy/applications/transform.html) @@ -230,10 +229,10 @@ The size of normals is equal to the size of the input point cloud. Having the frame allows you to visualize the point cloud. -No source available for {language\_name} You can visualize the point +No source available for {language\_name}You can visualize the point cloud from the point cloud object as well. -No source available for {language\_name} For more information, check out +No source available for {language\_name}For more information, check out [Visualization Tutorial](https://support.zivid.com/latest/academy/applications/visualization-tutorial.html), where we cover point cloud, color image, depth map, and normals diff --git a/source/camera/basic/capture_tutorial.md b/source/camera/basic/capture_tutorial.md index c0f761d5..2fbf9c0c 100644 --- a/source/camera/basic/capture_tutorial.md +++ b/source/camera/basic/capture_tutorial.md @@ -433,8 +433,7 @@ frame.save(data_file_ply) We can get 2D color image from a 3D capture. -No source available for {language\_name} 2D captures also produce 2D -color images. +2D captures also produce 2D color images. ([go to source](https://github.com/zivid/zivid-python-samples/tree/master//source/camera/basic/capture_2d.py#L33)) diff --git a/source/camera/info_util_other/automatic_network_configuration_for_cameras.py b/source/camera/info_util_other/automatic_network_configuration_for_cameras.py new file mode 100644 index 00000000..b3826a20 --- /dev/null +++ b/source/camera/info_util_other/automatic_network_configuration_for_cameras.py @@ -0,0 +1,77 @@ +""" +Automatically set the IP addresses of any number of cameras to be in the same subnet as the provided IP address of the network interface. +""" + +import argparse +import ipaddress + +import zivid + + +def _options() -> argparse.Namespace: + """Function to read user arguments. + + Returns: + Arguments from user + + """ + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument("--interface-ipv4", required=True, help="IP address of the PC network interface") + parser.add_argument("--subnet-mask", required=False, default="255.255.255.0", help="Network subnet mask") + + return parser.parse_args() + + +def _assert_user_input(ip_address: str, subnet_mask: str) -> None: + """Validates the IP address and the subnet mask. + + Args: + ip_address: IP address + subnet_mask: Subnet mask + + """ + zivid.NetworkConfiguration.IPV4(address=ip_address, subnet_mask=subnet_mask) + + +def _main() -> None: + try: + + user_input = _options() + _assert_user_input(user_input.interface_ipv4, user_input.subnet_mask) + user_ip_address = ipaddress.ip_address(user_input.interface_ipv4) + + # defines the last octet of the ip address of the first Zivid camera. 
Eg.: x.x.x.2 + next_ip_address_last_octet = 2 + new_ip_address = ipaddress.IPv4Address(user_ip_address.packed[:-1] + bytes([next_ip_address_last_octet])) + + app = zivid.Application() + cameras = app.cameras() + + if len(cameras) == 0: + raise RuntimeError("No cameras connected") + + for camera in cameras: + + if new_ip_address == user_ip_address: + new_ip_address = new_ip_address + 1 + + new_config = zivid.NetworkConfiguration( + ipv4=zivid.NetworkConfiguration.IPV4( + mode=zivid.NetworkConfiguration.IPV4.Mode.manual, + address=str(new_ip_address), + subnet_mask=user_input.subnet_mask, + ) + ) + + new_ip_address = new_ip_address + 1 + + print(f"Applying network configuration to camera {camera.info.serial_number}") + camera.apply_network_configuration(new_config) + print(f"New {camera.network_configuration}\n") + + except RuntimeError as ex: + print(ex) + + +if __name__ == "__main__": + _main() diff --git a/source/camera/info_util_other/firmware_updater.py b/source/camera/info_util_other/firmware_updater.py index d5204e94..838ecd5f 100644 --- a/source/camera/info_util_other/firmware_updater.py +++ b/source/camera/info_util_other/firmware_updater.py @@ -23,7 +23,7 @@ def _main() -> None: zivid.firmware.update( camera, progress_callback=lambda progress, description: print( - f'{progress}% : {description}{("","...") [progress < 100]}' + f'{progress}% : {description}{("","...")[progress < 100]}' ), ) else: diff --git a/source/camera/info_util_other/network_configuration.py b/source/camera/info_util_other/network_configuration.py new file mode 100644 index 00000000..edec9b7f --- /dev/null +++ b/source/camera/info_util_other/network_configuration.py @@ -0,0 +1,75 @@ +""" +Uses Zivid API to change the IP address of the Zivid camera. +""" + +import zivid + + +def _confirm(message: str) -> bool: + while True: + answer = input(f"{message} [Y/n]: ") + if answer.lower() == "y" or answer.lower() == "yes": + return True + if answer.lower() == "n" or answer.lower() == "no": + return False + print("Invalid input. Please enter 'Y' or 'n'.") + + +def _main() -> None: + app = zivid.Application() + cameras = app.cameras() + + if len(cameras) == 0: + raise RuntimeError("Failed to connect to camera. 
No cameras found.") + + camera = cameras[0] + + original_config = camera.network_configuration + + print(f"Current network configuration of camera {camera.info.serial_number}:") + print(f"{original_config}\n") + + mode = zivid.NetworkConfiguration.IPV4.Mode.manual + address = original_config.ipv4.address + subnet_mask = original_config.ipv4.subnet_mask + + if _confirm("Do you want to use DHCP?"): + mode = zivid.NetworkConfiguration.IPV4.Mode.dhcp + else: + + input_address = input(f"Enter IPv4 Address [{original_config.ipv4.address}]: ") + if input_address: + address = input_address + else: + address = original_config.ipv4.address + + input_subnet_mask = input(f"Enter new Subnet mask [{original_config.ipv4.subnet_mask}]: ") + if input_subnet_mask: + subnet_mask = input_subnet_mask + else: + subnet_mask = original_config.ipv4.subnet_mask + + new_config = zivid.NetworkConfiguration( + ipv4=zivid.NetworkConfiguration.IPV4( + mode=mode, + address=address, + subnet_mask=subnet_mask, + ) + ) + + print("\nNew network configuration:") + print(new_config) + if not _confirm(f"Do you want to apply the new network configuration to camera {camera.info.serial_number}?"): + return + + print("Applying network configuration...") + camera.apply_network_configuration(new_config) + + print(f"Updated network configuration of camera {camera.info.serial_number}:") + print(f"{camera.network_configuration}\n") + + print(f"Camera status is '{camera.state.status}'") + + +if __name__ == "__main__": + _main() diff --git a/source/camera/maintenance/correct_camera_in_field.py b/source/camera/maintenance/correct_camera_in_field.py index 9d9520a0..b13e2b38 100644 --- a/source/camera/maintenance/correct_camera_in_field.py +++ b/source/camera/maintenance/correct_camera_in_field.py @@ -56,7 +56,7 @@ def _collect_dataset(camera: zivid.Camera) -> List[zivid.experimental.calibratio print(print_line) if _yes_no_prompt("Capture (y) or finish (n)? 
"): print("Capturing calibration board") - detection_result = zivid.experimental.calibration.detect_feature_points(camera) + detection_result = zivid.calibration.detect_calibration_board(camera) infield_input = zivid.experimental.calibration.InfieldCorrectionInput(detection_result) if infield_input.valid(): @@ -93,7 +93,7 @@ def _main() -> None: print( "If written to the camera, this correction can be expected to yield a dimension accuracy error of", - f"{accuracy_estimate.dimension_accuracy()*100:.3f}% or better in the range of z=[{accuracy_estimate.z_min():.3f}, {accuracy_estimate.z_max():.3f}] across the full FOV.", + f"{accuracy_estimate.dimension_accuracy() * 100:.3f}% or better in the range of z=[{accuracy_estimate.z_min():.3f}, {accuracy_estimate.z_max():.3f}] across the full FOV.", "Accuracy close to where the correction data was collected is likely better.", ) diff --git a/source/camera/maintenance/verify_camera_in_field.py b/source/camera/maintenance/verify_camera_in_field.py index 7d2fc3e3..cfd05514 100644 --- a/source/camera/maintenance/verify_camera_in_field.py +++ b/source/camera/maintenance/verify_camera_in_field.py @@ -29,7 +29,7 @@ def _main() -> None: # Gather data print("Capturing calibration board") - detection_result = zivid.experimental.calibration.detect_feature_points(camera) + detection_result = zivid.calibration.detect_calibration_board(camera) # Prepare data and check that it is appropriate for infield verification infield_input = zivid.experimental.calibration.InfieldCorrectionInput(detection_result) @@ -42,7 +42,7 @@ def _main() -> None: print(f"Successful measurement at {detection_result.centroid()}") camera_verification = zivid.experimental.calibration.verify_camera(infield_input) print( - f"Estimated dimension trueness error at measured position: {camera_verification.local_dimension_trueness()*100:.3f}%" + f"Estimated dimension trueness error at measured position: {camera_verification.local_dimension_trueness() * 100:.3f}%" ) diff --git a/source/camera/maintenance/verify_camera_in_field_from_zdf.py b/source/camera/maintenance/verify_camera_in_field_from_zdf.py index 786697e9..4bcbedf8 100644 --- a/source/camera/maintenance/verify_camera_in_field_from_zdf.py +++ b/source/camera/maintenance/verify_camera_in_field_from_zdf.py @@ -33,7 +33,7 @@ def _main() -> None: # offline infield verification print("Capturing calibration board") - with zivid.experimental.calibration.capture_calibration_board(camera) as frame: + with zivid.calibration.capture_calibration_board(camera) as frame: data_file = "FrameWithCalibrationBoard.zdf" print(f"Saving frame to file: {data_file}, for later use in offline infield verification") frame.save(data_file) @@ -44,7 +44,7 @@ def _main() -> None: print(f"Reading frame from file: {data_file}, for offline infield verification") with zivid.Frame(data_file) as frame: print("Detecting calibration board") - detection_result = zivid.experimental.calibration.detect_feature_points(frame) + detection_result = zivid.calibration.detect_calibration_board(frame) infield_input = zivid.experimental.calibration.InfieldCorrectionInput(detection_result) if not infield_input.valid(): @@ -55,7 +55,7 @@ def _main() -> None: print(f"Successful measurement at {detection_result.centroid()}") camera_verification = zivid.experimental.calibration.verify_camera(infield_input) print( - f"Estimated dimension trueness error at measured position: {camera_verification.local_dimension_trueness()*100:.3f}%" + f"Estimated dimension trueness error at measured position: 
{camera_verification.local_dimension_trueness() * 100:.3f}%" ) diff --git a/source/sample_utils/save_load_matrix.py b/source/sample_utils/save_load_matrix.py index e86b3f20..b73a5c41 100644 --- a/source/sample_utils/save_load_matrix.py +++ b/source/sample_utils/save_load_matrix.py @@ -1,3 +1,8 @@ +""" +Save and load Zivid 4x4 transformation matrices from and to YAML files. + +""" + from pathlib import Path from typing import Union
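
For reference, the `save_load_matrix` helpers whose new module docstring is shown above are used the same way throughout these samples. The sketch below is not part of the diff; it is a minimal, illustrative round trip that assumes only the two functions the samples already import (`assert_affine_matrix_and_save` and `load_and_assert_affine_matrix`) plus numpy and pathlib.

```python
from pathlib import Path

import numpy as np
from sample_utils.save_load_matrix import assert_affine_matrix_and_save, load_and_assert_affine_matrix

# A placeholder 4x4 affine transformation (identity: no rotation, no translation).
transform = np.eye(4)

# Check that the matrix is a valid 4x4 affine transformation and write it to YAML,
# in the same way the transform and hand-eye samples above store their poses.
transform_file = Path("ExampleTransform.yaml")
assert_affine_matrix_and_save(transform, transform_file)

# Read the matrix back with the loader used by the hand-eye samples.
loaded_transform = load_and_assert_affine_matrix(transform_file)
print(loaded_transform)
```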