Merge pull request #183 from zivid/2024-08-06-update-python-samples
Nail down transformation matrix convention
SatjaSivcev authored Aug 7, 2024
2 parents 9794841 + b10c9d7 commit 3e129ea
Showing 12 changed files with 89 additions and 89 deletions.
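Throughout the diff, variables are renamed to one convention: a_to_b_transform is the 4x4 pose of frame B expressed in frame A (see, e.g., the new base_to_camera_transform docstring below), so it maps homogeneous points from frame B into frame A and composes through a shared middle frame. A minimal numpy sketch of that reading — the identity matrices are placeholders, not values from the samples:

import numpy as np

# Placeholder poses illustrating the naming convention only.
base_to_flange_transform = np.eye(4)    # flange (end-effector) pose in robot base frame
flange_to_camera_transform = np.eye(4)  # camera pose in flange frame (eye-in-hand result)

# Composition chains through the shared middle frame, as in the eye-in-hand branches below.
base_to_camera_transform = base_to_flange_transform @ flange_to_camera_transform

# A point expressed in the camera frame is mapped into the robot base frame by left-multiplication.
point_in_camera_frame = np.array([100.0, 0.0, 500.0, 1.0])  # homogeneous [x, y, z, 1]
point_in_base_frame = base_to_camera_transform @ point_in_camera_frame
print(point_in_base_frame[0:3])
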
10 changes: 5 additions & 5 deletions source/applications/advanced/get_checkerboard_pose_from_zdf.py
@@ -50,7 +50,7 @@ def _visualize_checkerboard_point_cloud_with_coordinate_system(
Args:
point_cloud_open3d: An Open3d point cloud of a checkerboard
- transform: Transformation matrix
+ transform: Transformation matrix (4x4)
"""
coord_system_mesh = o3d.geometry.TriangleMesh.create_coordinate_frame(size=30)
@@ -72,18 +72,18 @@ def _main() -> None:
point_cloud = frame.point_cloud()

print("Detecting checkerboard and estimating its pose in camera frame")
- transform_camera_to_checkerboard = zivid.calibration.detect_calibration_board(frame).pose().to_matrix()
- print(f"Camera pose in checkerboard frame:\n{transform_camera_to_checkerboard}")
+ camera_to_checkerboard_transform = zivid.calibration.detect_calibration_board(frame).pose().to_matrix()
+ print(f"Camera pose in checkerboard frame:\n{camera_to_checkerboard_transform}")

transform_file_name = "CameraToCheckerboardTransform.yaml"
print(f"Saving detected checkerboard pose to YAML file: {transform_file_name}")
transform_file_path = Path(__file__).parent / transform_file_name
- assert_affine_matrix_and_save(transform_camera_to_checkerboard, transform_file_path)
+ assert_affine_matrix_and_save(camera_to_checkerboard_transform, transform_file_path)

print("Visualizing checkerboard with coordinate system")
checkerboard_point_cloud = _create_open3d_point_cloud(point_cloud)
_visualize_checkerboard_point_cloud_with_coordinate_system(
- checkerboard_point_cloud, transform_camera_to_checkerboard
+ checkerboard_point_cloud, camera_to_checkerboard_transform
)


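assert_affine_matrix_and_save above (and its counterpart load_and_assert_affine_matrix used in later files) is not defined in this diff; it presumably lives in the samples' shared utilities. A rough sketch of the kind of validity check such a helper might perform on the 4x4 matrix, under that assumption and without guessing the file layout:

import numpy as np

def _assert_affine(matrix: np.ndarray) -> None:
    """Sketch of a rigid-transform sanity check; not the repository helper."""
    assert matrix.shape == (4, 4), "Expected a 4x4 matrix"
    assert np.allclose(matrix[3, :], [0.0, 0.0, 0.0, 1.0]), "Bottom row must be [0, 0, 0, 1]"
    rotation = matrix[:3, :3]
    assert np.allclose(rotation @ rotation.T, np.eye(3), atol=1e-6), "Rotation must be orthonormal"
    assert np.isclose(np.linalg.det(rotation), 1.0, atol=1e-6), "Rotation must be proper (det = +1)"
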
@@ -57,7 +57,7 @@ def _get_frame_and_transform(robot: Item, camera: zivid.Camera) -> Tuple[zivid.F
Returns:
Zivid frame
- 4x4 transformation matrix
+ Transformation matrix (4x4)
"""
frame = _assisted_capture(camera)
@@ -77,7 +77,7 @@ def _save_point_cloud_and_pose(
save_directory: Path to where data will be saved
image_and_pose_iterator: Image number
frame: Point cloud stored as ZDF
- transform: 4x4 transformation matrix
+ transform: Transformation matrix (4x4)
"""
frame.save(save_directory / f"img{image_and_pose_iterator:02d}.zdf")
@@ -143,7 +143,7 @@ def _save_hand_eye_results(save_directory: Path, transform: np.ndarray, residual
Args:
save_directory: Path to where data will be saved
- transform: 4x4 transformation matrix
+ transform: Transformation matrix (4x4)
residuals: List of residuals
"""
@@ -232,7 +232,7 @@ def perform_hand_eye_calibration(
user_options: Input arguments
Returns:
- transform: 4x4 transformation matrix
+ transform: Transformation matrix (4x4)
residuals: List of residuals
Raises:
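perform_hand_eye_calibration above returns the hand-eye transform and a list of residuals; the calibration call itself lies outside the changed lines. A hedged sketch of how those outputs are typically produced with zivid-python — the HandEyeInput / Pose / calibrate_eye_in_hand names are assumptions to verify against the installed SDK version:

import numpy as np
import zivid

def _calibrate_eye_in_hand_sketch(frames, base_to_flange_transforms):
    """Sketch only: pair each capture with the robot pose at capture time, then calibrate."""
    calibration_inputs = []
    for frame, base_to_flange_transform in zip(frames, base_to_flange_transforms):
        detection_result = zivid.calibration.detect_calibration_board(frame)
        robot_pose = zivid.calibration.Pose(base_to_flange_transform)
        calibration_inputs.append(zivid.calibration.HandEyeInput(robot_pose, detection_result))
    result = zivid.calibration.calibrate_eye_in_hand(calibration_inputs)
    flange_to_camera_transform = np.array(result.transform())  # camera pose in flange frame
    return flange_to_camera_transform, result.residuals()
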
@@ -127,7 +127,7 @@ def _save_zdf_and_pose(save_dir: Path, image_num: int, frame: zivid.Frame, trans
save_dir: Directory to save data
image_num: Image number
frame: Point cloud stored as ZDF
- transform: 4x4 transformation matrix
+ transform: Transformation matrix (4x4)
"""
frame.save(save_dir / f"img{image_num:02d}.zdf")
@@ -164,7 +164,7 @@ def _get_frame_and_transform_matrix(
Returns:
frame: Zivid frame
- transform: 4x4 transformation matrix
+ transform: Transformation matrix (4x4)
"""
frame = camera.capture(settings)
@@ -222,7 +222,7 @@ def _save_hand_eye_results(save_dir: Path, transform: np.ndarray, residuals: Lis
Args:
save_dir: Path to where data will be saved
- transform: 4x4 transformation matrix
+ transform: Transformation matrix (4x4)
residuals: List of residuals
"""
@@ -392,7 +392,7 @@ def perform_hand_eye_calibration(
data_dir: Path to dataset
Returns:
- transform: 4x4 transformation matrix
+ transform: Transformation matrix (4x4)
residuals: List of residuals
Raises:
@@ -53,7 +53,7 @@ def _main() -> None:
eye_to_hand_transform_file_path = get_sample_data_path() / "EyeToHandTransform.yaml"

print("Reading camera pose in robot base reference frame (result of eye-to-hand calibration)")
- transform_base_to_camera = load_and_assert_affine_matrix(eye_to_hand_transform_file_path)
+ base_to_camera_transform = load_and_assert_affine_matrix(eye_to_hand_transform_file_path)

break

@@ -71,13 +71,13 @@ def _main() -> None:
print(
"Reading camera pose in flange (end-effector) reference frame (result of eye-in-hand calibration)"
)
- transform_flange_to_camera = load_and_assert_affine_matrix(eye_in_hand_transform_file_path)
+ flange_to_camera_transform = load_and_assert_affine_matrix(eye_in_hand_transform_file_path)

print("Reading flange (end-effector) pose in robot base reference frame")
- transform_base_to_flange = load_and_assert_affine_matrix(robot_transform_file_path)
+ base_to_flange_transform = load_and_assert_affine_matrix(robot_transform_file_path)

print("Computing camera pose in robot base reference frame")
- transform_base_to_camera = np.matmul(transform_base_to_flange, transform_flange_to_camera)
+ base_to_camera_transform = np.matmul(base_to_flange_transform, flange_to_camera_transform)

break

@@ -107,7 +107,7 @@ def _main() -> None:
print(f"Point coordinates in camera reference frame: {point_in_camera_frame[0:3]}")

print("Transforming (picking) point from camera to robot base reference frame")
- point_in_base_frame = np.matmul(transform_base_to_camera, point_in_camera_frame)
+ point_in_base_frame = np.matmul(base_to_camera_transform, point_in_camera_frame)

print(f"Point coordinates in robot base reference frame: {point_in_base_frame[0:3]}")

@@ -116,7 +116,7 @@ def _main() -> None:
if command.lower() == "p":
print("Transforming point cloud")

- point_cloud.transform(transform_base_to_camera)
+ point_cloud.transform(base_to_camera_transform)

save_file = "ZividGemTransformed.zdf"
print(f"Saving point cloud to file: {save_file}")
24 changes: 13 additions & 11 deletions source/applications/advanced/reproject_points.py
@@ -36,23 +36,23 @@ def _checkerboard_grid() -> List[np.ndarray]:
return list(points)


- def _transform_grid_to_calibration_board(
- grid: List[np.ndarray], transform_camera_to_checkerboard: np.ndarray
+ def _transform_grid_to_camera_frame(
+ grid: List[np.ndarray], camera_to_checkerboard_transform: np.ndarray
) -> List[np.ndarray]:
"""Transform a list of grid points to the camera frame.
Args:
grid: List of 4D points (X,Y,Z,W) for each corner in the checkerboard, in the checkerboard frame
- transform_camera_to_checkerboard: 4x4 transformation matrix
+ camera_to_checkerboard_transform: 4x4 transformation matrix
Returns:
List of 3D grid points in the camera frame
"""
points_in_camera_frame = []
- for point in grid:
- transformed_point = transform_camera_to_checkerboard @ point
- points_in_camera_frame.append(transformed_point[:3])
+ for point_in_checkerboard_frame in grid:
+ point_in_camera_frame = camera_to_checkerboard_transform @ point_in_checkerboard_frame
+ points_in_camera_frame.append(point_in_camera_frame[:3])

return points_in_camera_frame

@@ -87,17 +87,19 @@ def _main() -> None:
raise RuntimeError("Calibration board not detected!")

print("Estimating checkerboard pose")
- transform_camera_to_checkerboard = detection_result.pose().to_matrix()
- print(transform_camera_to_checkerboard)
+ camera_to_checkerboard_transform = detection_result.pose().to_matrix()
+ print(camera_to_checkerboard_transform)

print("Creating a grid of 7 x 6 points (3D) with 30 mm spacing to match checkerboard corners")
- grid = _checkerboard_grid()
+ grid_points_in_checkerboard_frame = _checkerboard_grid()

print("Transforming the grid to the camera frame")
- points_in_camera_frame = _transform_grid_to_calibration_board(grid, transform_camera_to_checkerboard)
+ grid_points_in_camera_frame = _transform_grid_to_camera_frame(
+ grid_points_in_checkerboard_frame, camera_to_checkerboard_transform
+ )

print("Getting projector pixels (2D) corresponding to points (3D) in the camera frame")
- projector_pixels = zivid.projection.pixels_from_3d_points(camera, points_in_camera_frame)
+ projector_pixels = zivid.projection.pixels_from_3d_points(camera, grid_points_in_camera_frame)

print("Retrieving the projector resolution that the camera supports")
projector_resolution = zivid.projection.projector_resolution(camera)
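_checkerboard_grid itself is unchanged by this commit. For context, a sketch of the grid the print statement describes — 7 x 6 corner points with 30 mm spacing, expressed as homogeneous (X, Y, Z, W) points in the checkerboard frame; the actual helper may differ in origin placement:

import numpy as np
from typing import List

def _checkerboard_grid_sketch() -> List[np.ndarray]:
    """Sketch: 7 x 6 grid of homogeneous corner points, 30 mm pitch, Z = 0 in the checkerboard frame."""
    points = []
    for row in range(6):
        for col in range(7):
            points.append(np.array([col * 30.0, row * 30.0, 0.0, 1.0]))
    return points
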
@@ -44,7 +44,7 @@ def _options() -> argparse.Namespace:
parser.add_argument(
"--tool-yaml",
required=True,
- help="Path to YAML file that contains the tool tip to robot flange transformation matrix",
+ help="Path to YAML file that contains the tool tip transformation matrix (tool tip in robot flange frame)",
)
parser.add_argument(
"--hand-eye-yaml",
@@ -78,7 +78,7 @@ def _transform_points(points: List[np.ndarray], transform: np.ndarray) -> List[n
Args:
points: List of 3D points to be transformed
- transform: Homogenous transform (4x4)
+ transform: Transformation matrix (4x4)
Returns:
List of transformed 3D points
@@ -127,13 +127,13 @@ def _zivid_logo_from_grid() -> List[int]:


def _generate_tool_poses_from_checkerboard(
- camera: zivid.Camera, camera_to_base_transform: np.ndarray
+ camera: zivid.Camera, base_to_camera_transform: np.ndarray
) -> List[np.ndarray]:
"""Generate a tool path as a list of poses in the camera frame using the checkerboard.
Args:
camera: Zivid camera
- camera_to_base_transform: Homogenous transform (4x4) from the camera frame to the robot base
+ base_to_camera_transform: Camera pose in robot base frame (4x4)
Raises:
RuntimeError: If the calibration board is not detected
@@ -146,12 +146,12 @@ def _generate_tool_poses_from_checkerboard(
if not detection_result.valid():
raise RuntimeError("Calibration board not detected!")

- checkerboard_to_camera_transform = detection_result.pose().to_matrix()
- checkerboard_to_base_transform = camera_to_base_transform @ checkerboard_to_camera_transform
+ camera_to_checkerboard_transform = detection_result.pose().to_matrix()
+ base_to_checkerboard_transform = base_to_camera_transform @ camera_to_checkerboard_transform

- grid_points = _checkerboard_grid()
- grid_points_in_base_frame = _transform_points(grid_points, checkerboard_to_base_transform)
- grid_poses_in_base_frame = _points_to_poses(grid_points_in_base_frame, checkerboard_to_base_transform[:3, :3])
+ grid_points_in_checkerboard_frame = _checkerboard_grid()
+ grid_points_in_base_frame = _transform_points(grid_points_in_checkerboard_frame, base_to_checkerboard_transform)
+ grid_poses_in_base_frame = _points_to_poses(grid_points_in_base_frame, base_to_checkerboard_transform[:3, :3])

tool_poses_in_base_frame = [grid_poses_in_base_frame[idx] for idx in _zivid_logo_from_grid()]

@@ -242,25 +242,25 @@ def _main() -> None:
camera = app.connect_camera()

print("Generating tool path from the checkerboard")
- tool_to_flange_transform = load_and_assert_affine_matrix(user_options.tool_yaml)
- flange_to_base_transform = np.array(robot.Pose()).T
+ flange_to_tcp_transform = load_and_assert_affine_matrix(user_options.tool_yaml)
+ base_to_flange_transform = np.array(robot.Pose()).T

if user_options.eih:
- camera_to_flange_transform = load_and_assert_affine_matrix(user_options.hand_eye_yaml)
- camera_to_base_transform = flange_to_base_transform @ camera_to_flange_transform
+ flange_to_camera_transform = load_and_assert_affine_matrix(user_options.hand_eye_yaml)
+ base_to_camera_transform = base_to_flange_transform @ flange_to_camera_transform
else:
- camera_to_base_transform = load_and_assert_affine_matrix(user_options.hand_eye_yaml)
+ base_to_camera_transform = load_and_assert_affine_matrix(user_options.hand_eye_yaml)

- tool_poses_in_robot_base = _generate_tool_poses_from_checkerboard(camera, camera_to_base_transform)
- flange_poses_in_robot_base = [
- tool_pose_in_robot_base @ np.linalg.inv(tool_to_flange_transform)
- for tool_pose_in_robot_base in tool_poses_in_robot_base
+ tool_poses_in_base_frame = _generate_tool_poses_from_checkerboard(camera, base_to_camera_transform)
+ flange_poses_in_base_frame = [
+ tool_pose_in_base_frame @ np.linalg.inv(flange_to_tcp_transform)
+ for tool_pose_in_base_frame in tool_poses_in_base_frame
]

print("Displaying the tool path on the checkerboard")
tool_poses_in_camera_frame = [
- np.linalg.inv(camera_to_base_transform) @ tool_pose_in_robot_base
- for tool_pose_in_robot_base in tool_poses_in_robot_base
+ np.linalg.inv(base_to_camera_transform) @ tool_pose_in_base_frame
+ for tool_pose_in_base_frame in tool_poses_in_base_frame
]

projector_image = _projected_tool_path(camera, tool_poses_in_camera_frame)
@@ -270,9 +270,9 @@ def _main() -> None:
if user_options.eih:
projected_image_handle.stop()

- _approach_target_pose(robot, flange_poses_in_robot_base[0], offset=np.array([0, 0, -100]))
+ _approach_target_pose(robot, flange_poses_in_base_frame[0], offset=np.array([0, 0, -100]))

- _follow_linear_path(robot, flange_poses_in_robot_base)
+ _follow_linear_path(robot, flange_poses_in_base_frame)

robot.MoveJ(robot.JointsHome())

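_points_to_poses is referenced but not shown in the diff: it receives the grid points in the base frame plus the 3x3 rotation of base_to_checkerboard_transform and returns full 4x4 poses. A sketch of the construction that implies, followed by the flange-pose step used above — both are assumptions consistent with the calls in the hunks, not the repository code:

import numpy as np
from typing import List

def _points_to_poses_sketch(points: List[np.ndarray], rotation: np.ndarray) -> List[np.ndarray]:
    """Sketch: poses sharing one orientation, translated to each 3D point."""
    poses = []
    for point in points:
        pose = np.eye(4)
        pose[:3, :3] = rotation
        pose[:3, 3] = point
        poses.append(pose)
    return poses

# The robot is then commanded to flange poses derived from each tool (TCP) pose, as in the hunk above:
# flange_pose_in_base_frame = tool_pose_in_base_frame @ np.linalg.inv(flange_to_tcp_transform)
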
6 changes: 3 additions & 3 deletions source/applications/advanced/roi_box_via_aruco_marker.py
@@ -18,7 +18,7 @@ def _transform_points(points: List[np.ndarray], transform: np.ndarray) -> List[n
Args:
points: list of 3D points to be transformed
- transform: homogenous transform (4x4)
+ transform: homogenous transformation matrix (4x4)
Returns:
transformed_points: list of transformed 3D points
@@ -88,12 +88,12 @@ def _main() -> None:
raise RuntimeError("No ArUco markers detected")

print("Estimating pose of detected ArUco marker")
- transform_camera_to_marker = detection_result.detected_markers()[0].pose.to_matrix()
+ camera_to_marker_transform = detection_result.detected_markers()[0].pose.to_matrix()

print("Transforming the ROI base frame points to the camera frame")
roi_points_in_camera_frame = _transform_points(
[point_o_in_aruco_frame, point_a_in_aruco_frame, point_b_in_aruco_frame],
- transform_camera_to_marker,
+ camera_to_marker_transform,
)

print("Setting the ROI")
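Only the docstring of _transform_points changes here; its body is not shown in the diff. A sketch consistent with that docstring (append W = 1, multiply by the 4x4 transform, drop W) — an assumption, not the repository implementation:

import numpy as np
from typing import List

def _transform_points_sketch(points: List[np.ndarray], transform: np.ndarray) -> List[np.ndarray]:
    """Sketch: map 3D points through a 4x4 homogeneous transform."""
    transformed_points = []
    for point in points:
        point_homogeneous = np.append(point, 1.0)
        transformed_points.append((transform @ point_homogeneous)[:3])
    return transformed_points
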
6 changes: 3 additions & 3 deletions source/applications/advanced/roi_box_via_checkerboard.py
@@ -18,7 +18,7 @@ def _transform_points(points: List[np.ndarray], transform: np.ndarray) -> List[n
Args:
points: list of 3D points to be transformed
- transform: homogenous transform (4x4)
+ transform: homogenous transformation matrix (4x4)
Returns:
transformed_points: list of transformed 3D points
@@ -79,12 +79,12 @@ def _main() -> None:

print("Detecting and estimating pose of the Zivid checkerboard in the camera frame")
detection_result = zivid.calibration.detect_feature_points(original_frame)
- transform_camera_to_checkerboard = detection_result.pose().to_matrix()
+ camera_to_checkerboard_transform = detection_result.pose().to_matrix()

print("Transforming the ROI base frame points to the camera frame")
roi_points_in_camera_frame = _transform_points(
[point_o_in_checkerboard_frame, point_a_in_checkerboard_frame, point_b_in_checkerboard_frame],
- transform_camera_to_checkerboard,
+ camera_to_checkerboard_transform,
)

print("Setting the ROI")
@@ -125,7 +125,7 @@ def _get_coordinate_system_points(
}


- def _draw_coordinae_system(frame: zivid.Frame, checkerboard_pose: np.ndarray, bgra_image: np.ndarray) -> None:
+ def _draw_coordinate_system(frame: zivid.Frame, checkerboard_pose: np.ndarray, bgra_image: np.ndarray) -> None:
"""Draw a coordinate system on a BGRA image.
Args:
@@ -164,24 +164,24 @@ def _main() -> None:

print("Detecting and estimating pose of the Zivid checkerboard in the camera frame")
detection_result = zivid.calibration.detect_calibration_board(frame)
- transform_camera_to_checkerboard = detection_result.pose().to_matrix()
- print(transform_camera_to_checkerboard)
+ camera_to_checkerboard_transform = detection_result.pose().to_matrix()
+ print(camera_to_checkerboard_transform)
print("Camera pose in checkerboard frame:")
- transform_checkerboard_to_camera = np.linalg.inv(transform_camera_to_checkerboard)
- print(transform_checkerboard_to_camera)
+ checkerboard_to_camera_transform = np.linalg.inv(camera_to_checkerboard_transform)
+ print(checkerboard_to_camera_transform)

transform_file = Path("CheckerboardToCameraTransform.yaml")
print("Saving a YAML file with Inverted checkerboard pose to file: ")
- assert_affine_matrix_and_save(transform_checkerboard_to_camera, transform_file)
+ assert_affine_matrix_and_save(checkerboard_to_camera_transform, transform_file)

print("Transforming point cloud from camera frame to Checkerboard frame")
- point_cloud.transform(transform_checkerboard_to_camera)
+ point_cloud.transform(checkerboard_to_camera_transform)

print("Converting to OpenCV image format")
bgra_image = point_cloud.copy_data("bgra")

print("Visualizing checkerboard with coordinate system")
- _draw_coordinae_system(frame, transform_camera_to_checkerboard, bgra_image)
+ _draw_coordinate_system(frame, camera_to_checkerboard_transform, bgra_image)
display_bgr(bgra_image, "Checkerboard transformation frame")

checkerboard_transformed_file = "CalibrationBoardInCheckerboardOrigin.zdf"
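The inversion above is the pivotal step in this file: camera_to_checkerboard_transform maps points from the checkerboard frame into the camera frame, so its inverse (the camera pose in the checkerboard frame) is what the camera-frame point cloud must be transformed by to land in checkerboard coordinates. A compact numpy restatement with placeholder data:

import numpy as np

camera_to_checkerboard_transform = np.eye(4)  # placeholder; normally detection_result.pose().to_matrix()
checkerboard_to_camera_transform = np.linalg.inv(camera_to_checkerboard_transform)

point_in_checkerboard_frame = np.array([30.0, 60.0, 0.0, 1.0])
point_in_camera_frame = camera_to_checkerboard_transform @ point_in_checkerboard_frame

# Round trip back to the checkerboard frame recovers the original point.
assert np.allclose(checkerboard_to_camera_transform @ point_in_camera_frame, point_in_checkerboard_frame)
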