diff --git a/.pylintrc b/.pylintrc index c7ec6e43..ac716666 100644 --- a/.pylintrc +++ b/.pylintrc @@ -3,7 +3,9 @@ disable=bad-continuation, ######## Disabled because of conflicts with bl consider-using-enumerate, ######## Temporary disabled ######## import-error, missing-docstring, - duplicate-code + no-member, + duplicate-code, + [TYPE CHECK] generated-members=cv2.* diff --git a/README.md b/README.md index 0436f402..a3aa87e5 100644 --- a/README.md +++ b/README.md @@ -60,6 +60,7 @@ from the camera can be used. - [camera\_user\_data](https://github.com/zivid/zivid-python-samples/tree/master//source/camera/info_util_other/camera_user_data.py) - Store user data on the Zivid camera. - [capture\_with\_diagnostics](https://github.com/zivid/zivid-python-samples/tree/master//source/camera/info_util_other/capture_with_diagnostics.py) - Capture point clouds, with color, from the Zivid camera, with settings from YML file and diagnostics enabled. + - [firmware\_updater](https://github.com/zivid/zivid-python-samples/tree/master//source/camera/info_util_other/firmware_updater.py) - Update firmware on the Zivid camera. - [get\_camera\_intrinsics](https://github.com/zivid/zivid-python-samples/tree/master//source/camera/info_util_other/get_camera_intrinsics.py) - Read intrinsic parameters from the Zivid camera (OpenCV model). - [print\_version\_info](https://github.com/zivid/zivid-python-samples/tree/master//source/camera/info_util_other/print_version_info.py) - Print version information for Python, zivid-python and @@ -97,9 +98,13 @@ from the camera can be used. - [hand\_eye\_calibration](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/hand_eye_calibration/hand_eye_calibration.py) - Perform Hand-Eye calibration. - [mask\_point\_cloud](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/mask_point_cloud.py) - Read point cloud data from a ZDF file, apply a binary mask, and visualize it. + - [roi\_box\_via\_checkerboard](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/roi_box_via_checkerboard.py) - Filter the point cloud based on a ROI box given relative + to the Zivid Calibration Board. - **hand\_eye\_calibration** - [pose\_conversions](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/hand_eye_calibration/pose_conversions.py) - Convert to/from Transformation Matrix (Rotation Matrix + Translation Vector). + - [robodk\_hand\_eye\_calibration](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/hand_eye_calibration/robodk_hand_eye_calibration/robodk_hand_eye_calibration.py) - Generate a dataset and perform hand-eye calibration + using the Robodk interface. - [utilize\_hand\_eye\_calibration](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/hand_eye_calibration/utilize_hand_eye_calibration.py) - Transform single data point or entire point cloud from camera frame to robot base frame using Hand-Eye calibration @@ -111,43 +116,44 @@ from the camera can be used. - **sample\_utils** - [display](https://github.com/zivid/zivid-python-samples/tree/master//source/sample_utils/display.py) - Display relevant data for Zivid Samples. - [paths](https://github.com/zivid/zivid-python-samples/tree/master//source/sample_utils/paths.py) - Get relevant paths for Zivid Samples. 
+ - **applications** + - **advanced** + - **hand\_eye\_calibration** + - **robodk\_hand\_eye\_calibration** + - [robot\_tools](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/hand_eye_calibration/robodk_hand_eye_calibration/robot_tools.py) - Robot Control Module ## Installation 1. [Install Zivid - Software](https://support.zivid.com/latest//getting-started/software-installation.html) + Software](https://support.zivid.com/latest//getting-started/software-installation.html). -2. [Install Zivid Python](https://github.com/zivid/zivid-python). Note: - The recommended Python version for these samples is 3.8. +2. [Install Zivid + Python](https://github.com/zivid/zivid-python#installation). 3. [Download Zivid Sample - Data](https://support.zivid.com/latest//api-reference/samples/sample-data.html) - -4. \[Optional\] Launch the Python IDE of your choice. Read our - instructions on [setting up - Python](https://support.zivid.com/latest//api-reference/samples/python/setting-up-python.html). + Data](https://support.zivid.com/latest//api-reference/samples/sample-data.html). -5. Install the runtime requirements using IDE or command line: +4. Install the runtime requirements using IDE or command line: ``` sourceCode pip install -r requirements.txt ``` -6. Add the directory source to PYTHONPATH. Navigate to the root of the +5. Add the directory source to PYTHONPATH. Navigate to the root of the repository and run: > - PowerShell: `$env:PYTHONPATH=$env:PYTHONPATH + ";$PWD\source"` > - cmd: `set PYTHONPATH="$PYTHONPATH;$PWD\source"` > - bash: `export PYTHONPATH="$PYTHONPATH:$PWD/source"` -7. Open and run one of the samples. +6. Open and run one of the samples. ## Support For more information about the Zivid cameras, please visit our [Knowledge Base](https://support.zivid.com/latest). If you run into any issues please check out -[Troubleshooting](https://support.zivid.com/latest/rst/support/troubleshooting.html). +[Troubleshooting](https://support.zivid.com/latest/support/troubleshooting.html). ## License diff --git a/source/.gitattributes b/source/.gitattributes new file mode 100644 index 00000000..8be188e1 --- /dev/null +++ b/source/.gitattributes @@ -0,0 +1 @@ +*.rdk filter=lfs diff=lfs merge=lfs -text diff --git a/source/applications/advanced/color_balance.py b/source/applications/advanced/color_balance.py index 0d26a0dc..7833142e 100644 --- a/source/applications/advanced/color_balance.py +++ b/source/applications/advanced/color_balance.py @@ -5,9 +5,9 @@ import datetime from dataclasses import dataclass -import matplotlib.pyplot as plt import numpy as np import zivid +from sample_utils.display import display_rgb @dataclass @@ -27,22 +27,6 @@ class MeanColor: blue: np.float64 -def _display_rgb(rgb, title): - """Display RGB image. - - Args: - rgb: RGB image (HxWx3 darray) - title: Image title - - Returns None - - """ - plt.figure() - plt.imshow(rgb) - plt.title(title) - plt.show(block=False) - - def _compute_mean_rgb(rgb, pixels): """Compute mean RGB values. 
@@ -221,7 +205,7 @@ def _main(): settings_2d = _auto_settings_configuration(camera) rgba = camera.capture(settings_2d).image_rgba().copy_data() - _display_rgb(rgba[:, :, 0:3], "RGB image before color balance") + display_rgb(rgba[:, :, 0:3], title="RGB image before color balance", block=False) [red_balance, green_balance, blue_balance] = _color_balance_calibration(camera, settings_2d) @@ -231,8 +215,7 @@ def _main(): settings_2d.processing.color.balance.blue = blue_balance rgba_balanced = camera.capture(settings_2d).image_rgba().copy_data() - _display_rgb(rgba_balanced[:, :, 0:3], "RGB image after color balance") - input("Press Enter to close...") + display_rgb(rgba_balanced[:, :, 0:3], title="RGB image after color balance", block=True) if __name__ == "__main__": diff --git a/source/applications/advanced/downsample.py b/source/applications/advanced/downsample.py index a94ab2b8..3a32f77f 100644 --- a/source/applications/advanced/downsample.py +++ b/source/applications/advanced/downsample.py @@ -45,8 +45,6 @@ def _main(): display_pointcloud(xyz_donwsampled, rgba_downsampled[:, :, 0:3]) - input("Press Enter to close...") - if __name__ == "__main__": # If running the script from Spyder IDE, first run '%gui qt' diff --git a/source/applications/advanced/get_checkerboard_pose_from_zdf.py b/source/applications/advanced/get_checkerboard_pose_from_zdf.py index cf77803c..37e62b07 100644 --- a/source/applications/advanced/get_checkerboard_pose_from_zdf.py +++ b/source/applications/advanced/get_checkerboard_pose_from_zdf.py @@ -65,7 +65,7 @@ def _visualize_checkerboard_point_cloud_with_coordinate_system(point_cloud_open3 coord_system_mesh = o3d.geometry.TriangleMesh.create_coordinate_frame(size=30) coord_system_mesh.transform(transform) - visualizer = o3d.visualization.Visualizer() # pylint: disable=no-member + visualizer = o3d.visualization.Visualizer() visualizer.create_window() visualizer.add_geometry(point_cloud_open3d) visualizer.add_geometry(coord_system_mesh) @@ -83,9 +83,7 @@ def _main(): point_cloud = frame.point_cloud() print("Detecting checkerboard and estimating its pose in camera frame") - transform_camera_to_checkerboard = ( - zivid.calibration.detect_feature_points(point_cloud).pose().to_matrix() - ) # pylint: disable=no-member + transform_camera_to_checkerboard = zivid.calibration.detect_feature_points(point_cloud).pose().to_matrix() print(f"Camera pose in checkerboard frame:\n{transform_camera_to_checkerboard}") transform_file = "CameraToCheckerboardTransform.yaml" diff --git a/source/applications/advanced/hand_eye_calibration/README.md b/source/applications/advanced/hand_eye_calibration/README.md index ef30cbb4..1842df22 100644 --- a/source/applications/advanced/hand_eye_calibration/README.md +++ b/source/applications/advanced/hand_eye_calibration/README.md @@ -3,9 +3,9 @@ To fully understand Hand-Eye Calibration, please see the [tutorial][Tutorial-url] in our Knowledge Base. ----------------- -The following applications creates a **Transformation Matrix** from data provided by a user +The following applications create a **Transformation Matrix** from data provided by a user -[HandEyeCalibration][HandEyeCalibration-url]: +[**HandEyeCalibration**][HandEyeCalibration-url] * Application which walks through the collection of calibration poses 1. Provide robot pose to application (manual entry) @@ -14,9 +14,26 @@ The following applications creates a **Transformation Matrix** from data provide 4. Repeat i.-iii. until 10-20 pose pairs are collected 5. 
Enter command to perform calibration and return a **Transformation Matrix** -[ZividHandEyeCalibration][ZividHandEyeCalibration-url] +[**ZividHandEyeCalibration**][ZividHandEyeCalibration-url] -* [CLI application][CLI application-url] which takes a collection of pose pairs (e.g. output of steps i.-iii. in [HandEyeCalibration][HandEyeCalibration-url]) and returns a **Transformation Matrix**. This application comes with the Windows installer and is part of the tools deb for Ubuntu. +* [CLI application][CLI application-url] which takes a collection of pose pairs (e.g. output of steps 1-3 in [HandEyeCalibration][HandEyeCalibration-url]) and returns a **Transformation Matrix**. This application comes with the Windows installer and is part of the tools deb for Ubuntu. + +----------------- + +This repository contains two samples that show how to acquire the hand-eye calibration dataset. +Both samples acquire the pose and point cloud pairs and then process them to return the resulting hand-eye **Transformation Matrix**. + +[**UniversalRobotsPerformHandEyeCalibration**][URhandeyecalibration-url] + +* This sample is created to work specifically with the UR5e robot. +* To follow the tutorial for this sample go to [**UR5e + Python Hand Eye Tutorial**][URHandEyeTutorial-url]. + +[**RoboDKHandEyeCalibration**][RobodkHandEyeCalibration-url] + +The second sample uses RoboDK for robot control and can be used with any robot that the software supports. +The list of supported robots can be found [**here**][robodk-robot-library-url]. +Poses must be added by the user to their personal rdk file. +For best practice on selecting poses, follow the instructions provided in the Zivid Knowledge Base for the [hand-eye calibration process][ZividHandEyeCalibration-url]. ----------------- The following applications assume that a **Transformation Matrix** has been found @@ -31,22 +48,22 @@ The following applications assume that a **Transformation Matrix** has been foun [**PoseConversions**][PoseConversions-url]: -* Zivid primarily operate with a (4x4) Transformation Matrix (Rotation Matrix + Translation Vector). This example shows how to use Eigen to convert to and from: +* Zivid primarily operates with a (4x4) **Transformation Matrix** (Rotation Matrix + Translation Vector). This example shows how to use Eigen to convert to and from: * AxisAngle, Rotation Vector, Roll-Pitch-Yaw, Quaternion [**VerifyHandEyeWithVisualization**][VerifyHandEyeWithVisualization-url]: Visually demonstrates the hand-eye calibration accuracy by overlapping transformed points clouds. + * The application asks the user for the hand-eye calibration type (manual entry). -* After loading the hand-eye dataset (point clouds and robot poses) and the hand-eye output (transformation matrix), the application repeats the following process for all data pairs: +* After loading the hand-eye dataset (point clouds and robot poses) and the hand-eye output (**transformation matrix**), the application repeats the following process for all data pairs: 1. Transforms the point cloud 2. Finds cartesian coordinates of the checkerboard centroid 3. Creates a region of interest around the checkerboard and filters out points outside the region of interest 4. Saves the point cloud to a PLY file 5. Appends the point cloud to a list (overlapped point clouds) -This application ends by displaying all point clouds from the list. - +This application ends by displaying all point clouds from the list.
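As a side note to the PoseConversions entry above: below is a minimal numpy sketch (not part of the samples, which use Zivid/Eigen utilities) of the (4x4) Transformation Matrix layout it describes, i.e. a rotation matrix in the upper-left 3x3 block and a translation vector in the last column, applied to one point in homogeneous coordinates. The rotation and translation values are placeholders.

```python
import numpy as np

rotation = np.eye(3)                           # placeholder rotation matrix (assumed values)
translation = np.array([10.0, 20.0, 300.0])    # placeholder translation vector in mm (assumed values)

# Assemble the 4x4 transformation matrix: [R | t; 0 0 0 1]
transform = np.eye(4)
transform[:3, :3] = rotation
transform[:3, 3] = translation

# Transform a single point given in homogeneous coordinates (e.g. camera frame -> robot base frame)
point_camera = np.array([1.0, 2.0, 500.0, 1.0])
point_base = transform @ point_camera
print(point_base[:3])
```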
[HandEyeCalibration-url]: hand_eye_calibration.py [UtilizeHandEyeCalibration-url]: utilize_hand_eye_calibration.py @@ -54,4 +71,8 @@ This application ends by displaying all point clouds from the list. [ZividHandEyeCalibration-url]: https://support.zivid.com/latest/academy/applications/hand-eye/hand-eye-calibration-process.html [Tutorial-url]: https://support.zivid.com/latest/academy/applications/hand-eye.html [PoseConversions-url]: pose_conversions.py -[CLI application-url]: https://support.zivid.com/latest/academy/applications/hand-eye/zivid_CLI_tool_for_hand_eye_calibration.html \ No newline at end of file +[CLI application-url]: https://support.zivid.com/latest/academy/applications/hand-eye/zivid_CLI_tool_for_hand_eye_calibration.html +[URhandeyecalibration-url]: ur_hand_eye_calibration/universal_robots_perform_hand_eye_calibration.py +[URHandEyeTutorial-url]: https://support.zivid.com/en/latest/academy/applications/hand-eye/ur5-robot-+-python-generate-dataset-and-perform-hand-eye-calibration.html +[RobodkHandEyeCalibration-url]: robodk_hand_eye_calibration/robodk_hand_eye_calibration.py +[robodk-robot-library-url]: https://robodk.com/supported-robots \ No newline at end of file diff --git a/source/applications/advanced/hand_eye_calibration/robodk_hand_eye_calibration/README.md b/source/applications/advanced/hand_eye_calibration/robodk_hand_eye_calibration/README.md new file mode 100644 index 00000000..bd236942 --- /dev/null +++ b/source/applications/advanced/hand_eye_calibration/robodk_hand_eye_calibration/README.md @@ -0,0 +1,27 @@ +# Hand-Eye Calibration with RoboDK + +Hand-eye calibration is necessary for any picking scenario that involves a camera and a robot. +This sample offers an easy and adaptable way to perform hand-eye calibration with any of the robots supported in RoboDK. + +For more on Hand-Eye Calibration, please see the [tutorial](https://support.zivid.com/latest/academy/applications/hand-eye.html) in our Knowledge Base. + +If you need help with the sample, visit our Knowledge Base article [here](help.zivid.com) (To be updated) + +This sample is made and modeled with a Universal Robots UR5e robot. +You must create your own poses to suit your environment. +If you use a robot other than a UR5e, you will need to load the corresponding robot into your rdk file. + +## Installation + +1. [Install Zivid Software](https://support.zivid.com/latest//getting-started/software-installation.html) + +2. [Install Zivid Python](https://github.com/zivid/zivid-python). +Note: The recommended Python version for these samples is 3.8. + +3. [Install RoboDK](https://robodk.com/download) + +4.
[Install RoboDK python](https://pypi.org/project/robodk/) + +Other requirements can be installed using the following command: + + pip install -r requirements.txt diff --git a/source/applications/advanced/hand_eye_calibration/robodk_hand_eye_calibration/handeye_poses.rdk b/source/applications/advanced/hand_eye_calibration/robodk_hand_eye_calibration/handeye_poses.rdk new file mode 100644 index 00000000..5fe15466 --- /dev/null +++ b/source/applications/advanced/hand_eye_calibration/robodk_hand_eye_calibration/handeye_poses.rdk @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0486fba0e401503c3bdb4214b63d6f0e3c8d1ce0bcc276532f7a75bff18b73e7 +size 4571581 diff --git a/source/applications/advanced/hand_eye_calibration/robodk_hand_eye_calibration/requirements.txt b/source/applications/advanced/hand_eye_calibration/robodk_hand_eye_calibration/requirements.txt new file mode 100644 index 00000000..017ad075 --- /dev/null +++ b/source/applications/advanced/hand_eye_calibration/robodk_hand_eye_calibration/requirements.txt @@ -0,0 +1,2 @@ +robodk +typing \ No newline at end of file diff --git a/source/applications/advanced/hand_eye_calibration/robodk_hand_eye_calibration/robodk_hand_eye_calibration.py b/source/applications/advanced/hand_eye_calibration/robodk_hand_eye_calibration/robodk_hand_eye_calibration.py new file mode 100644 index 00000000..78d02f69 --- /dev/null +++ b/source/applications/advanced/hand_eye_calibration/robodk_hand_eye_calibration/robodk_hand_eye_calibration.py @@ -0,0 +1,335 @@ +""" +Generate a dataset and perform hand-eye calibration using the Robodk interface. + +You must modify each robot pose to your scene using the RoboDK GUI interface. + +More information about RoboDK: +https://robodk.com/doc/en/Getting-Started.html + +The sample comes with a .rdk sample environment file using a Universal Robots UR5e robot. + +To use the sample with your robot an rdk file needs to be created using a model of your robot. +Each pose will need to be created or modified to fit your scene using the RoboDK GUI. +For finding the best poses for hand-eye check out: +https://support.zivid.com/latest/academy/applications/hand-eye/hand-eye-calibration-process.html +Make sure to launch your RDK file and connect to robot through Robodk before running this script. 
+""" + +import argparse +import datetime +import time +from pathlib import Path +from typing import List, Tuple + +import cv2 +import numpy as np +import zivid +from robolink import Item +from robot_tools import connect_to_robot, get_robot_targets, set_robot_speed_and_acceleration +from zivid.capture_assistant import SuggestSettingsParameters + + +def _generate_directory() -> Path: + """Generate directory where dataset will be stored + + Returns: + Directory to where data will be saved + + """ + name = input("Input desired name of dataset directory: (enter for default: /datasets/handeye) ") + directory_name = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") if name == "" else name + directory_path = Path(__file__).absolute().parent / "datasets/handeye" / directory_name + print(f"Directory generated at {directory_path}") + if not directory_path.is_dir(): + directory_path.mkdir(parents=True) + + return directory_path + + +def _get_frame_and_transform(robot: Item, camera: zivid.Camera) -> Tuple[zivid.Frame, np.ndarray]: + """Capture image with Zivid camera and read robot pose + + Args: + robot: robot item in open RoboDK rdk file + camera: Zivid camera + + Returns: + Zivid frame + 4x4 transformation matrix + """ + frame = assisted_capture(camera) + transform = np.array(robot.Pose()).T + return frame, transform + + +def _save_point_cloud_and_pose( + save_directory: Path, + image_and_pose_iterator: int, + frame: zivid.Frame, + transform: np.ndarray, +) -> None: + """Save data to directory. + + Args: + save_directory: Path to where data will be saved + image_and_pose_iterator: Image number + frame: Point cloud stored as .zdf + transform: 4x4 transformation matrix + + """ + frame.save(save_directory / f"img{image_and_pose_iterator:02d}.zdf") + + file_storage = cv2.FileStorage( + str(save_directory / f"pos{image_and_pose_iterator:02d}.yaml"), + cv2.FILE_STORAGE_WRITE, + ) + file_storage.write("PoseState", transform) + file_storage.release() + + +def _verify_good_capture(frame: zivid.Frame) -> None: + """Verify that checkerboard feature points are detected in the frame. + + Args: + frame: Zivid frame containing point cloud + + Raises: + RuntimeError: If no feature points are detected in frame + + """ + detected_features = zivid.calibration.detect_feature_points(frame.point_cloud()) + if not detected_features.valid(): + raise RuntimeError("Failed to detect feature points from captured frame.") + + +def _capture_one_frame_and_robot_pose( + robot: Item, + camera: zivid.Camera, + save_directory: Path, + image_and_pose_iterator: int, + next_target: Item, +) -> None: + """Captures and saves point cloud at a given robot pose, saves robot pose, + then signals the robot to move to the next pose. 
+ + Args: + robot: robot item in open RoboDK rdk file + camera: Zivid camera + save_directory: Path to where data will be saved + image_and_pose_iterator: Counter for point cloud and pose acquisition sequence + next_target: next pose the robot should move to in sequence + + """ + robot.WaitMove() + # Make sure the robot is completely stopped and no miniscule movement is occurring + time.sleep(0.2) + frame, transform = _get_frame_and_transform(robot, camera) + _save_point_cloud_and_pose(save_directory, image_and_pose_iterator, frame, transform) + # Verifying capture from previous pose while moving to new pose + if next_target is not None: + robot.MoveJ(next_target, blocking=False) + _verify_good_capture(frame) + print(f"Image and pose #{image_and_pose_iterator} saved") + + +def _read_transform(transform_file: Path) -> zivid.calibration.Pose: + """Read transformation matrix from a YAML file. + + Args: + transform_file: Path to the YAML file + + Returns: + transform: Transformation matrix + + """ + file_storage = cv2.FileStorage(str(transform_file), cv2.FILE_STORAGE_READ) + transform = file_storage.getNode("PoseState").mat() + file_storage.release() + return transform + + +def save_hand_eye_results(save_directory: Path, transform: np.ndarray, residuals: list) -> None: + """Save transformation and residuals to directory. + + Args: + save_directory: Path to where data will be saved + transform: 4x4 transformation matrix + residuals: List of residuals + + """ + file_storage_transform = cv2.FileStorage(str(save_directory / "hand_eye_transform.yaml"), cv2.FILE_STORAGE_WRITE) + file_storage_transform.write("PoseState", transform) + file_storage_transform.release() + + file_storage_residuals = cv2.FileStorage(str(save_directory / "residuals.yaml"), cv2.FILE_STORAGE_WRITE) + residual_list = [] + for res in residuals: + tmp = list([res.rotation(), res.translation()]) + residual_list.append(tmp) + + file_storage_residuals.write( + "Per pose residuals for rotation in deg and translation in mm", + np.array(residual_list), + ) + file_storage_residuals.release() + + +def generate_hand_eye_dataset(app: zivid.application, robot: Item, targets: List) -> Path: + """Generate dataset of pairs of robot poses and point clouds containing calibration target. + This dataset is composed of pairs of yml and zdf files respectively. + + Args: + app: Zivid application instance + robot: robot item in open RoboDK rdk file + targets: list of roboDK targets (poses) + + Returns: + Path: save_directory for where data will be saved + + """ + num_targets = len(targets) + robot.MoveJ(targets.pop(0)) + with app.connect_camera() as camera: + save_directory = _generate_directory() + image_and_pose_iterator = 1 + while not image_and_pose_iterator > num_targets: + print(f"Capturing calibration object at robot pose {num_targets-len(targets)}") + _capture_one_frame_and_robot_pose( + robot, + camera, + save_directory, + image_and_pose_iterator, + targets.pop(0) if targets else None, + ) + image_and_pose_iterator += 1 + + print(f"\n Data saved to: {save_directory}") + + return save_directory + + +def assisted_capture(camera: zivid.Camera, max_time_milliseconds=800) -> zivid.frame: + """Capturing image with Zivid camera using assisted capture settings. 
+ + Args: + camera: Zivid camera + max_time_milliseconds: Maximum capture time allowed in milliseconds + + Returns: + Zivid frame + + """ + suggest_settings_parameters = SuggestSettingsParameters( + max_capture_time=datetime.timedelta(milliseconds=max_time_milliseconds), + ambient_light_frequency=SuggestSettingsParameters.AmbientLightFrequency.none, + ) + settings = zivid.capture_assistant.suggest_settings(camera, suggest_settings_parameters) + return camera.capture(settings) + + +def perform_hand_eye_calibration(calibration_type: str, dataset_directory: Path) -> Tuple[List, List]: + """Perform hand-eye calibration based on calibration type. + + Args: + calibration_type: Calibration type, eye-in-hand or eye-to-hand + dataset_directory: Path to dataset + + Returns: + Tuple: - 4x4 transformation matrix + - List of residuals + + """ + with zivid.Application(): + hand_eye_input = [] + pose_and_image_iterator = 1 + + while True: + frame_file = dataset_directory / f"img{pose_and_image_iterator:02d}.zdf" + pose_file = dataset_directory / f"pos{pose_and_image_iterator:02d}.yaml" + + if frame_file.is_file() and pose_file.is_file(): + print(f"Detect feature points from img{pose_and_image_iterator:02d}.zdf") + point_cloud = zivid.Frame(frame_file).point_cloud() + detected_features = zivid.calibration.detect_feature_points(point_cloud) + print(f"Read robot pose from pos{pose_and_image_iterator:02d}.yaml") + transform = _read_transform(pose_file) + + detection_result = zivid.calibration.HandEyeInput(zivid.calibration.Pose(transform), detected_features) + hand_eye_input.append(detection_result) + else: + break + + pose_and_image_iterator += 1 + + print(f"\nPerforming {calibration_type} calibration") + + if calibration_type == "eye-in-hand": + calibration_result = zivid.calibration.calibrate_eye_in_hand(hand_eye_input) + elif calibration_type == "eye-to-hand": + calibration_result = zivid.calibration.calibrate_eye_to_hand(hand_eye_input) + else: + raise ValueError(f"Invalid calibration type: {calibration_type}") + + transform = calibration_result.transform() + residuals = calibration_result.residuals() + + print("\n\nTransform: \n") + np.set_printoptions(precision=5, suppress=True) + print(transform) + + print("\n\nResiduals: \n") + for residual in residuals: + print(f"Rotation: {residual.rotation():.6f} Translation: {residual.translation():.6f}") + + return transform, residuals + + +def options() -> argparse.Namespace: + """Function for taking in arguments from user. + + Returns: + eih or eth: eye-in-hand or eye-to-hand + ip: IP address of the robot controller + target_keyword: the common name of the targets (poses) in RoboDK station that will be used for the hand-eye dataset + + """ + parser = argparse.ArgumentParser(description=__doc__) + type_group = parser.add_mutually_exclusive_group(required=True) + + type_group.add_argument("--eih", "--eye-in-hand", action="store_true", help="eye-in-hand calibration") + type_group.add_argument("--eth", "--eye-to-hand", action="store_true", help="eye-to-hand calibration") + + parser.add_argument("--ip", required=True, help="IP address of the robot controller") + parser.add_argument( + "--target-keyword", + required=True, + help="This is the keyword shared for naming all poses used for hand-eye dataset, i.e. 
pose in 'pose 1, pose 2, pose 3", + ) + + return parser.parse_args() + + +def _main(): + + app = zivid.Application() + + user_options = options() + + rdk, robot = connect_to_robot(user_options.ip) + targets = get_robot_targets(rdk, user_options.target_keyword) + # NOTE! Verify safe operation speeds and accelerations for your robot + robot_speed_accel_limits = [100, 100, 50, 50] + set_robot_speed_and_acceleration(robot, *robot_speed_accel_limits) + + dataset_dir = generate_hand_eye_dataset(app, robot, targets) + + if user_options.eih: + transform, residuals = perform_hand_eye_calibration("eye-in-hand", dataset_dir) + else: + transform, residuals = perform_hand_eye_calibration("eye-to-hand", dataset_dir) + + save_hand_eye_results(dataset_dir, transform, residuals) + + +if __name__ == "__main__": + _main() diff --git a/source/applications/advanced/hand_eye_calibration/robodk_hand_eye_calibration/robot_tools.py b/source/applications/advanced/hand_eye_calibration/robodk_hand_eye_calibration/robot_tools.py new file mode 100644 index 00000000..b9858f8d --- /dev/null +++ b/source/applications/advanced/hand_eye_calibration/robodk_hand_eye_calibration/robot_tools.py @@ -0,0 +1,75 @@ +""" +Robot Control Module +Module interfaces with the python API for RoboDK and the RoboDK software. +It can be used to connect to the specified robot, get a list of targets and set robot speeds. +""" +from typing import Any, List, Tuple + +from robolink import ITEM_TYPE_ROBOT, RUNMODE_RUN_ROBOT, Item, Robolink + + +def connect_to_robot(robot_ip: str) -> Tuple[Any, Any]: + """Sets up the connection to the robot and sets the mode to run, + such that the robot will accept movement commands + + Args: + robot_ip : unique IP address associated with robot being used + + Returns: + rdk : Robolink - the link between RoboDK and Python + robot : robot item in open RoboDK rdk file + + """ + rdk = Robolink() + robot = rdk.ItemUserPick("", ITEM_TYPE_ROBOT) + robot.ConnectSafe( + robot_ip=robot_ip, + max_attempts=5, + wait_connection=4, + callback_abort=None, + ) + rdk.setRunMode(RUNMODE_RUN_ROBOT) + print("Connection with the robot established") + return rdk, robot + + +def set_robot_speed_and_acceleration( + robot: Item, speed: int, joint_speed: int, acceleration: int, joint_acceleration: int +) -> None: + """Sets the speed and acceleration of the Robot + + Args: + robot: robot item in open RoboDK rdk file + speed: Set max linear speed for robot (mm/s) + joint_speed: set max joint speed of robot (deg/s) + acceleration: total linear acceleration of robot (mm/s²) + joint_acceleration: set max joint acceleration allowed for robot (deg/s²) + + """ + robot.setSpeed(speed) + robot.setSpeedJoints(joint_speed) + robot.setAcceleration(acceleration) + robot.setAccelerationJoints(joint_acceleration) + + +def get_robot_targets(rdk: Robolink, target_keyword: str) -> List: + """Extract a set of targets (poses) from roboDK using a keyword. + If targets in the RoboDK station are named, e.g., 'target 1', 'pose 1', 'target 2', 'target 3', 'pose 2', + and the keyword used is 'target', then only 'target 1', 'target 2', and 'target 3' are added to the + list from the .rdk station file. 
+ + Args: + rdk: Robolink - the link between RoboDK and Python + target_keyword: the common name of the targets (poses) in RoboDK station that will be used for the hand-eye dataset + + Returns: + List: Array of target items + + """ + list_items = rdk.ItemList() + targets = [] + + for item in list_items: + if target_keyword in item.Name(): + targets.append(item) + return targets diff --git a/source/applications/advanced/hand_eye_calibration/verify_hand_eye_with_visualization.py b/source/applications/advanced/hand_eye_calibration/verify_hand_eye_with_visualization.py index 1dd40146..00849b8d 100644 --- a/source/applications/advanced/hand_eye_calibration/verify_hand_eye_with_visualization.py +++ b/source/applications/advanced/hand_eye_calibration/verify_hand_eye_with_visualization.py @@ -216,7 +216,7 @@ def _main(): if data_pair_id > 1: print("Visualizing transformed point clouds\n") - o3d.visualization.draw_geometries(list_of_open_3d_point_clouds) # pylint: disable=no-member + o3d.visualization.draw_geometries(list_of_open_3d_point_clouds) else: raise Exception("Not enought data!") diff --git a/source/applications/advanced/mask_point_cloud.py b/source/applications/advanced/mask_point_cloud.py index 204304c2..f27f90ec 100644 --- a/source/applications/advanced/mask_point_cloud.py +++ b/source/applications/advanced/mask_point_cloud.py @@ -35,11 +35,10 @@ def _main(): w_max = int((width + pixels_to_display) / 2) mask[h_min:h_max, w_min:w_max] = 1 - display_rgb(rgba[:, :, 0:3], "RGB image") + display_rgb(rgba[:, :, 0:3], title="RGB image") display_depthmap(xyz) display_pointcloud(xyz, rgba[:, :, 0:3]) - input("Press Enter to continue...") print("Masking point cloud") xyz_masked = xyz.copy() @@ -47,7 +46,6 @@ def _main(): display_depthmap(xyz_masked) display_pointcloud(xyz_masked, rgba[:, :, 0:3]) - input("Press Enter to close...") if __name__ == "__main__": diff --git a/source/applications/advanced/roi_box_via_checkerboard.py b/source/applications/advanced/roi_box_via_checkerboard.py new file mode 100644 index 00000000..7a6b2232 --- /dev/null +++ b/source/applications/advanced/roi_box_via_checkerboard.py @@ -0,0 +1,130 @@ +""" +Filter the point cloud based on a ROI box given relative to the Zivid Calibration Board. + +The ZDF file for this sample can be found under the main instructions for Zivid samples. +""" + +from pathlib import Path +from typing import Tuple + +import numpy as np +import zivid +from sample_utils.display import display_depthmap, display_pointcloud +from sample_utils.paths import get_sample_data_path +from zivid.point_cloud import PointCloud + + +def roi_box_point_cloud( + point_cloud: PointCloud, + roi_box_bottom_left_corner_x: float, + roi_box_bottom_left_corner_y: float, + roi_box_bottom_left_corner_z: float, + box_dimension_in_axis_x: float, + box_dimension_in_axis_y: float, + box_dimension_in_axis_z: float, +) -> Tuple[np.ndarray, np.ndarray]: + """Filter point cloud based on ROI box by providing box location and dimensions. + + This function assumes that the point cloud is transformed to the checkerboard frame. 
+ + Args: + point_cloud: Zivid point cloud + roi_box_bottom_left_corner_x: Distance in X coordinate from the board origin to the bottom left bin corner in checkerboard coordinate system + roi_box_bottom_left_corner_y: Distance in Y coordinate from the board origin to the bottom left bin corner in checkerboard coordinate system + roi_box_bottom_left_corner_z: Distance in Z coordinate from the board origin to the bottom left bin corner in checkerboard coordinate system + box_dimension_in_axis_x: Bin dimension in X axis of the checkerboard coordinate system + box_dimension_in_axis_y: Bin dimension in Y axis of the checkerboard coordinate system + box_dimension_in_axis_z: Bin dimension in Z axis of the checkerboard coordinate system + + Returns: + A tuple of masked numpy array of X, Y and Z point cloud coordinates and a masked RGB image (HxWx3 darray) + + """ + xyz = point_cloud.copy_data("xyz") + rgba = point_cloud.copy_data("rgba") + + masked_xyz = np.copy(xyz) + masked_rgba = np.copy(rgba) + + # Creating ROI box mask + margin_of_box_roi = 10 + mask_x = np.logical_and( + xyz[:, :, 0] > roi_box_bottom_left_corner_x - margin_of_box_roi, + xyz[:, :, 0] < roi_box_bottom_left_corner_x + box_dimension_in_axis_x + margin_of_box_roi, + ) + mask_y = np.logical_and( + xyz[:, :, 1] < roi_box_bottom_left_corner_y + margin_of_box_roi, + xyz[:, :, 1] > roi_box_bottom_left_corner_y - box_dimension_in_axis_y - margin_of_box_roi, + ) + mask_z = np.logical_and( + xyz[:, :, 2] < roi_box_bottom_left_corner_z + margin_of_box_roi, + xyz[:, :, 2] > roi_box_bottom_left_corner_z - box_dimension_in_axis_z - margin_of_box_roi, + ) + mask = np.logical_and(np.logical_and(mask_x, mask_y), mask_z) + + # Filtering out points outside the ROI box + masked_xyz[~mask] = np.nan + masked_rgba[~mask] = 0 + + return masked_xyz, masked_rgba + + +def _main(): + + with zivid.Application(): + + data_file = Path() / get_sample_data_path() / "BinWithCalibrationBoard.zdf" + print(f"Reading ZDF frame from file: {data_file}") + frame = zivid.Frame(data_file) + point_cloud = frame.point_cloud() + + print("Displaying the point cloud original point cloud") + display_pointcloud(point_cloud.copy_data("xyz"), point_cloud.copy_data("rgba")[:, :, 0:3]) + + print("Detecting and estimating pose of the Zivid checkerboard in the camera frame") + detection_result = zivid.calibration.detect_feature_points(point_cloud) + transform_camera_to_checkerboard = detection_result.pose().to_matrix() + + print("Camera pose in checkerboard frame:") + transform_checkerboard_to_camera = np.linalg.inv(transform_camera_to_checkerboard) + print(transform_checkerboard_to_camera) + + print("Transforming point cloud from camera frame to Checkerboard frame") + point_cloud.transform(transform_checkerboard_to_camera) + + print("Bottom-Left ROI Box corner:") + roi_box_bottom_left_corner_x = -80 # Positive is "East" + roi_box_bottom_left_corner_y = 280 # Positive is "South" + roi_box_bottom_left_corner_z = 5 # Positive is "Down" + print(f"X: {roi_box_bottom_left_corner_x}") + print(f"Y: {roi_box_bottom_left_corner_y}") + print(f"Z: {roi_box_bottom_left_corner_z}") + + print("ROI Box size:") + roi_box_length = 600 + roi_box_width = 400 + roi_box_height = 80 + print(f"Length: {roi_box_length}") + print(f"Width: {roi_box_width}") + print(f"Height: {roi_box_height}") + + print("Filtering the point cloud based on ROI Box") + filtered_xyz, filtered_rgba = roi_box_point_cloud( + point_cloud, + roi_box_bottom_left_corner_x, + roi_box_bottom_left_corner_y, + 
roi_box_bottom_left_corner_z, + roi_box_length, + roi_box_width, + roi_box_height, + ) + + print("Displaying transformed point cloud after ROI Box filtering") + display_pointcloud(filtered_xyz, filtered_rgba[:, :, 0:3]) + + print("Displaying depth map of the transformed point cloud after ROI Box filtering") + display_depthmap(filtered_xyz) + + +if __name__ == "__main__": + _main() diff --git a/source/applications/basic/visualization/capture_hdr_vis_normals.py b/source/applications/basic/visualization/capture_hdr_vis_normals.py index 9b637d48..27399741 100644 --- a/source/applications/basic/visualization/capture_hdr_vis_normals.py +++ b/source/applications/basic/visualization/capture_hdr_vis_normals.py @@ -25,16 +25,12 @@ def _main(): normals_colormap = 0.5 * (1 - normals) print("Visualizing normals in 2D") - display_rgb(rgb=rgba[:, :, :3], title="RGB image") - display_rgb(rgb=normals_colormap, title="Colormapped normals") - - input("Press any key to continue...") + display_rgb(rgb=rgba[:, :, :3], title="RGB image", block=False) + display_rgb(rgb=normals_colormap, title="Colormapped normals", block=True) print("Visualizing normals in 3D") display_pointcloud_with_downsampled_normals(point_cloud, zivid.PointCloud.Downsampling.by4x4) - input("Press Enter to close...") - if __name__ == "__main__": _main() diff --git a/source/applications/basic/visualization/capture_vis_3d.py b/source/applications/basic/visualization/capture_vis_3d.py index 3b579682..36c8dee7 100644 --- a/source/applications/basic/visualization/capture_vis_3d.py +++ b/source/applications/basic/visualization/capture_vis_3d.py @@ -27,8 +27,6 @@ def _main(): print("Visualizing point cloud") display_pointcloud(xyz, rgba[:, :, 0:3]) - input("Press Enter to close...") - if __name__ == "__main__": _main() diff --git a/source/applications/basic/visualization/read_zdf_vis_3d.py b/source/applications/basic/visualization/read_zdf_vis_3d.py index 8224a477..acdb1e9c 100644 --- a/source/applications/basic/visualization/read_zdf_vis_3d.py +++ b/source/applications/basic/visualization/read_zdf_vis_3d.py @@ -24,14 +24,12 @@ def _main(): xyz = point_cloud.copy_data("xyz") rgba = point_cloud.copy_data("rgba") - display_rgb(rgba[:, :, 0:3]) + display_rgb(rgba[:, :, 0:3], block=False) - display_depthmap(xyz) + display_depthmap(xyz, block=True) display_pointcloud(xyz, rgba[:, :, 0:3]) - input("Press Enter to close...") - if __name__ == "__main__": _main() diff --git a/source/applications/point_cloud_tutorial.md b/source/applications/point_cloud_tutorial.md index 9c814f31..24fe58d9 100644 --- a/source/applications/point_cloud_tutorial.md +++ b/source/applications/point_cloud_tutorial.md @@ -2,7 +2,7 @@ Note\! This tutorial has been generated for use on Github. For original tutorial see: -[point\_cloud\_tutorial](https://support.zivid.com/latest/rst/academy/applications/point-cloud-tutorial.html) +[point\_cloud\_tutorial](https://support.zivid.com/latest/academy/applications/point-cloud-tutorial.html) @@ -106,7 +106,7 @@ processing on the GPU is finished. Any calls to data-copy functions proceeding with the requested copy operation. For detailed explanation, see [Point Cloud Capture -Process](https://support.zivid.com/latest/rst/academy/camera/point-cloud-capture-process.html). +Process](https://support.zivid.com/latest/academy/camera/point-cloud-capture-process.html). ---- ### Copy from GPU to CPU memory @@ -216,7 +216,7 @@ cloud from the point cloud object as well. 
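The tutorial excerpt above describes copying point cloud data from GPU to CPU memory before working with it. Below is a minimal sketch of that copy-and-display flow, following the pattern used by `read_zdf_vis_3d.py` in this patch; the `Zivid3D.zdf` file name is an assumption (any ZDF file from the Zivid Sample Data works), and `source` is assumed to be on `PYTHONPATH` so `sample_utils` can be imported.

```python
from pathlib import Path

import zivid
from sample_utils.display import display_depthmap, display_pointcloud, display_rgb
from sample_utils.paths import get_sample_data_path

app = zivid.Application()  # keep the application alive while reading the frame

# Assumed sample file name from the Zivid Sample Data
data_file = Path() / get_sample_data_path() / "Zivid3D.zdf"
frame = zivid.Frame(data_file)
point_cloud = frame.point_cloud()

xyz = point_cloud.copy_data("xyz")    # HxWx3 float array, NaN where there is no data
rgba = point_cloud.copy_data("rgba")  # HxWx4 uint8 array

display_rgb(rgba[:, :, 0:3], block=False)
display_depthmap(xyz, block=True)
display_pointcloud(xyz, rgba[:, :, 0:3])
```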
No source available for {language\_name} For more information, check out [Visualization -Tutorial](https://support.zivid.com/latest/rst/academy/applications/visualization-tutorial.html), +Tutorial](https://support.zivid.com/latest/academy/applications/visualization-tutorial.html), where we cover point cloud, color image, depth map, and normals visualization, with implementations using third party libraries. diff --git a/source/camera/basic/capture_tutorial.md b/source/camera/basic/capture_tutorial.md index b6a4eeb4..0967861e 100644 --- a/source/camera/basic/capture_tutorial.md +++ b/source/camera/basic/capture_tutorial.md @@ -2,7 +2,7 @@ Note\! This tutorial has been generated for use on Github. For original tutorial see: -[capture\_tutorial](https://support.zivid.com/latest/rst/academy/camera/capture-tutorial.html) +[capture\_tutorial](https://support.zivid.com/latest/academy/camera/capture-tutorial.html) @@ -172,7 +172,7 @@ There are only two parameters to configure with Capture Assistant: Another option is to configure settings manually. For more information about what each settings does, please see [Camera -Settings](https://support.zivid.com/latest/rst/reference-articles/camera-settings.html). +Settings](https://support.zivid.com/latest/reference-articles/camera-settings.html). Note that Zivid Two has a set of [standard settings](https://support.zivid.com/latest//reference-articles/standard-acquisition-settings-zivid-two.html). diff --git a/source/camera/basic/quick_capture_tutorial.md b/source/camera/basic/quick_capture_tutorial.md index 11509b14..492716f0 100644 --- a/source/camera/basic/quick_capture_tutorial.md +++ b/source/camera/basic/quick_capture_tutorial.md @@ -2,7 +2,7 @@ Note\! This tutorial has been generated for use on Github. For original tutorial see: -[quick\_capture\_tutorial](https://support.zivid.com/latest/rst/getting-started/quick-capture-tutorial.html) +[quick\_capture\_tutorial](https://support.zivid.com/latest/getting-started/quick-capture-tutorial.html) diff --git a/source/camera/info_util_other/camera_user_data.py b/source/camera/info_util_other/camera_user_data.py index ce5a1553..e4ceb575 100644 --- a/source/camera/info_util_other/camera_user_data.py +++ b/source/camera/info_util_other/camera_user_data.py @@ -24,7 +24,7 @@ def _args() -> argparse.Namespace: help="write", ) parser_write.add_argument( - "user-data", + "--user-data", type=str, help="User data to be stored on the Zivid camera", ) @@ -38,7 +38,10 @@ def _check_user_data_support(camera): def _write(camera: zivid.Camera, string: str): - camera.write_user_data(str.encode(string)) + try: + camera.write_user_data(str.encode(string)) + except RuntimeError as ex: + raise RuntimeError("Camera must be rebooted to allow another write operation!") from ex def _clear(camera: zivid.Camera): @@ -51,33 +54,28 @@ def _read(camera: zivid.Camera): def _main(): - try: - - args = _args() - mode = args.mode - - app = zivid.Application() + args = _args() + mode = args.mode - print("Connecting to camera") - camera = app.connect_camera() - _check_user_data_support(camera) + app = zivid.Application() - if mode == "read": - print("Reading user data from camera") - print(f"Done. User data: '{_read(camera)}'") + print("Connecting to camera") + camera = app.connect_camera() + _check_user_data_support(camera) - if mode == "write": - print(f"Writing '{args.user_data}' to the camera") - _write(camera, args.user_data) - print("Done. Note! 
Camera must be rebooted to allow another write operation") + if mode == "read": + print("Reading user data from camera") + print(f"Done. User data: '{_read(camera)}'") - if mode == "clear": - print("Clearing user data from camera") - _clear(camera) - print("Done. Note! Camera must be rebooted to allow another clear operation") + if mode == "write": + print(f"Writing '{args.user_data}' to the camera") + _write(camera, args.user_data) + print("Done. Note! Camera must be rebooted to allow another write operation") - except ValueError as ex: - print(f"Error: {ex}") + if mode == "clear": + print("Clearing user data from camera") + _clear(camera) + print("Done. Note! Camera must be rebooted to allow another clear operation") if __name__ == "__main__": diff --git a/source/camera/info_util_other/firmware_updater.py b/source/camera/info_util_other/firmware_updater.py new file mode 100644 index 00000000..ab0aa65a --- /dev/null +++ b/source/camera/info_util_other/firmware_updater.py @@ -0,0 +1,35 @@ +""" +Update firmware on the Zivid camera. +""" + +import zivid + + +def _main(): + app = zivid.Application() + + cameras = app.cameras() + if len(cameras) == 0: + print("No camera found.") + + print(f"Found {len(cameras)} camera(s)") + for camera in cameras: + if not zivid.firmware.is_up_to_date(camera): + print("Firmware update required") + print( + f"Updating firmware on camera {camera.info.serial_number}, model name: {camera.info.model_name}, firmware version: {camera.info.firmware_version}" + ) + zivid.firmware.update( + camera, + progress_callback=lambda progress, description: print( + f'{progress}% : {description}{("","...") [progress < 100]}' + ), + ) + else: + print( + f"Skipping update of camera {camera.info.serial_number}, model name: {camera.info.model_name}, firmware version: {camera.info.firmware_version}" + ) + + +if __name__ == "__main__": + _main() diff --git a/source/sample_utils/display.py b/source/sample_utils/display.py index 723ad8c8..a2d4b9ee 100644 --- a/source/sample_utils/display.py +++ b/source/sample_utils/display.py @@ -8,12 +8,13 @@ from zivid import PointCloud -def display_rgb(rgb, title="RGB image"): +def display_rgb(rgb, title="RGB image", block=True): """Display RGB image. Args: rgb: RGB image (HxWx3 darray) title: Image title + block: Whether to wait for the figure to be closed before returning Returns None @@ -21,14 +22,15 @@ def display_rgb(rgb, title="RGB image"): plt.figure() plt.imshow(rgb) plt.title(title) - plt.show(block=False) + plt.show(block=block) -def display_depthmap(xyz): +def display_depthmap(xyz, block=True): """Create and display depthmap. Args: xyz: X, Y and Z images (point cloud co-ordinates) + block: Whether to wait for the figure to be closed before returning Returns None @@ -42,7 +44,7 @@ def display_depthmap(xyz): ) plt.colorbar() plt.title("Depth map") - plt.show(block=False) + plt.show(block=block) def display_pointcloud(xyz, rgb, normals=None):
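The `display.py` changes above add a `block` keyword so callers decide whether a figure blocks until it is closed. A short sketch of how a sample is expected to chain the helpers after this patch, mirroring `read_zdf_vis_3d.py`; the `rgba` and `xyz` arrays are assumed to come from a captured or loaded point cloud.

```python
from sample_utils.display import display_depthmap, display_rgb

def show_capture(rgba, xyz):
    # Show the first figure without blocking, then block on the last one so both windows
    # stay open until the user closes them (replaces the old input("Press Enter to close...") pattern).
    display_rgb(rgba[:, :, 0:3], title="RGB image", block=False)
    display_depthmap(xyz, block=True)
```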