Significantly improved convergence for mesh and cuboid collision; new ESDF collision checking.

Balakumar Sundaralingam
2024-03-18 11:19:48 -07:00
parent 286b3820a5
commit b1f63e8778
100 changed files with 7587 additions and 2589 deletions


@@ -32,7 +32,6 @@ from curobo.types.robot import RobotConfig
from curobo.types.state import JointState
from curobo.util_file import get_robot_configs_path, get_world_configs_path, join_path, load_yaml
from curobo.wrap.model.robot_segmenter import RobotSegmenter
-from curobo.wrap.model.robot_world import RobotWorld, RobotWorldConfig
torch.manual_seed(30)
@@ -42,7 +41,13 @@ torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
-def create_render_dataset(robot_file, save_debug_data: bool = False):
+def create_render_dataset(
+    robot_file,
+    save_debug_data: bool = False,
+    fov_deg: float = 60,
+    n_frames: int = 20,
+    retract_delta: float = 0.0,
+):
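+    # fov_deg sets the rendering camera's field of view, n_frames the number of
+    # rendered views, and retract_delta an offset added to the retract
+    # configuration (parameter descriptions inferred from their usage below).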
    # load robot:
    robot_dict = load_yaml(join_path(get_robot_configs_path(), robot_file))
    robot_dict["robot_cfg"]["kinematics"]["load_link_names_with_mesh"] = True
@@ -54,6 +59,8 @@ def create_render_dataset(robot_file, save_debug_data: bool = False):
    q = kin_model.retract_config
+    q += retract_delta
    meshes = kin_model.get_robot_as_mesh(q)
    world = WorldConfig(mesh=meshes[:])
@@ -71,10 +78,11 @@ def create_render_dataset(robot_file, save_debug_data: bool = False):
    mesh_dataset = MeshDataset(
        None,
-        n_frames=20,
-        image_size=480,
+        n_frames=n_frames,
+        image_size=640,
        save_data_dir=None,
        trimesh_mesh=robot_mesh,
+        fov_deg=fov_deg,
    )
    q_js = JointState(position=q, joint_names=kin_model.joint_names)
@@ -83,6 +91,7 @@ def create_render_dataset(robot_file, save_debug_data: bool = False):
def mask_image(robot_file="ur5e.yml"):
    save_debug_data = False
+    write_pointcloud = False
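+    # write_pointcloud gates the .stl debug dumps below (robot spheres,
+    # camera pointcloud, segmented robot/world meshes).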
    # create robot segmenter:
    tensor_args = TensorDeviceType()
@@ -90,7 +99,7 @@ def mask_image(robot_file="ur5e.yml"):
        robot_file, collision_sphere_buffer=0.01, distance_threshold=0.05, use_cuda_graph=True
    )
-    mesh_dataset, q_js = create_render_dataset(robot_file, save_debug_data)
+    mesh_dataset, q_js = create_render_dataset(robot_file, write_pointcloud, n_frames=20)
    if save_debug_data:
        visualize_scale = 10.0
@@ -113,14 +122,15 @@ def mask_image(robot_file="ur5e.yml"):
        # save robot spheres in current joint configuration
        robot_kinematics = curobo_segmenter._robot_world.kinematics
-        sph = robot_kinematics.get_robot_as_spheres(q_js.position)
-        WorldConfig(sphere=sph[0]).save_world_as_mesh("robot_spheres.stl")
+        if write_pointcloud:
+            sph = robot_kinematics.get_robot_as_spheres(q_js.position)
+            WorldConfig(sphere=sph[0]).save_world_as_mesh("robot_spheres.stl")
-        # save world pointcloud in robot origin
+            # save world pointcloud in robot origin
-        pc = cam_obs.get_pointcloud()
-        pc_obs = PointCloud("world", pose=cam_obs.pose.to_list(), points=pc)
-        pc_obs.save_as_mesh("camera_pointcloud.stl", transform_with_pose=True)
+            pc = cam_obs.get_pointcloud()
+            pc_obs = PointCloud("world", pose=cam_obs.pose.to_list(), points=pc)
+            pc_obs.save_as_mesh("camera_pointcloud.stl", transform_with_pose=True)
        # run segmentation:
        depth_mask, filtered_image = curobo_segmenter.get_robot_mask_from_active_js(
@@ -131,10 +141,12 @@ def mask_image(robot_file="ur5e.yml"):
        robot_mask = cam_obs.clone()
        robot_mask.depth_image[~depth_mask] = 0.0
-        robot_mesh = PointCloud(
-            "world", pose=robot_mask.pose.to_list(), points=robot_mask.get_pointcloud()
-        )
-        robot_mesh.save_as_mesh("robot_segmented.stl", transform_with_pose=True)
+        if write_pointcloud:
+            robot_mesh = PointCloud(
+                "world", pose=robot_mask.pose.to_list(), points=robot_mask.get_pointcloud()
+            )
+            robot_mesh.save_as_mesh("robot_segmented.stl", transform_with_pose=True)
        # save depth image
        imageio.imwrite(
            "robot_depth.png",
@@ -150,10 +162,11 @@ def mask_image(robot_file="ur5e.yml"):
        world_mask = cam_obs.clone()
        world_mask.depth_image[depth_mask] = 0.0
-        world_mesh = PointCloud(
-            "world", pose=world_mask.pose.to_list(), points=world_mask.get_pointcloud()
-        )
-        world_mesh.save_as_mesh("world_segmented.stl", transform_with_pose=True)
+        if write_pointcloud:
+            world_mesh = PointCloud(
+                "world", pose=world_mask.pose.to_list(), points=world_mask.get_pointcloud()
+            )
+            world_mesh.save_as_mesh("world_segmented.stl", transform_with_pose=True)
        imageio.imwrite(
            "world_depth.png",
@@ -190,6 +203,248 @@ def mask_image(robot_file="ur5e.yml"):
print("Segmentation Time (ms), (hz)", np.mean(dt_list[5:]) * 1000.0, 1.0 / np.mean(dt_list[5:]))
+def batch_mask_image(robot_file="ur5e.yml"):
+    """Mask images from different camera views using batched query.
+
+    Note: This only works for a single joint configuration across camera views.
+
+    Args:
+        robot_file: robot to use for example.
+    """
+    save_debug_data = True
+    # create robot segmenter:
+    tensor_args = TensorDeviceType()
+    curobo_segmenter = RobotSegmenter.from_robot_file(
+        robot_file, collision_sphere_buffer=0.01, distance_threshold=0.05, use_cuda_graph=True
+    )
+    mesh_dataset, q_js = create_render_dataset(robot_file, save_debug_data, fov_deg=60)
+    mesh_dataset_zoom, q_js = create_render_dataset(
+        robot_file, save_debug_data, fov_deg=40, n_frames=30
+    )
+    if save_debug_data:
+        visualize_scale = 10.0
+        data = mesh_dataset[0]
+        cam_obs = CameraObservation(
+            depth_image=tensor_args.to_device(data["depth"]).unsqueeze(0) * 1000,
+            intrinsics=data["intrinsics"],
+            pose=Pose.from_matrix(data["pose"].to(device=tensor_args.device)),
+        )
+        data_zoom = mesh_dataset_zoom[1]
+        cam_obs = CameraObservation(
+            depth_image=tensor_args.to_device(data["depth"]) * 1000,
+            intrinsics=data["intrinsics"],
+            pose=Pose.from_matrix(data["pose"].to(device=tensor_args.device)),
+        )
+        cam_obs_zoom = CameraObservation(
+            depth_image=tensor_args.to_device(data_zoom["depth"]) * 1000,
+            intrinsics=data_zoom["intrinsics"],
+            pose=Pose.from_matrix(data_zoom["pose"].to(device=tensor_args.device)),
+        )
+        cam_obs = cam_obs.stack(cam_obs_zoom)
+        for i in range(cam_obs.depth_image.shape[0]):
+            # save depth image
+            imageio.imwrite(
+                "camera_depth_" + str(i) + ".png",
+                (cam_obs.depth_image[i] * visualize_scale)
+                .squeeze()
+                .detach()
+                .cpu()
+                .numpy()
+                .astype(np.uint16),
+            )
+        # run segmentation:
+        depth_mask, filtered_image = curobo_segmenter.get_robot_mask_from_active_js(
+            cam_obs,
+            q_js,
+        )
+        # save robot points as mesh
+        robot_mask = cam_obs.clone()
+        robot_mask.depth_image[~depth_mask] = 0.0
+        for i in range(cam_obs.depth_image.shape[0]):
+            # save depth image
+            imageio.imwrite(
+                "robot_depth_" + str(i) + ".png",
+                (robot_mask.depth_image[i] * visualize_scale)
+                .detach()
+                .squeeze()
+                .cpu()
+                .numpy()
+                .astype(np.uint16),
+            )
+            # save world points as mesh
+            imageio.imwrite(
+                "world_depth_" + str(i) + ".png",
+                (filtered_image[i] * visualize_scale)
+                .detach()
+                .squeeze()
+                .cpu()
+                .numpy()
+                .astype(np.uint16),
+            )
+    dt_list = []
+    for i in range(len(mesh_dataset)):
+        data = mesh_dataset[i]
+        data_zoom = mesh_dataset_zoom[i + 1]
+        cam_obs = CameraObservation(
+            depth_image=tensor_args.to_device(data["depth"]) * 1000,
+            intrinsics=data["intrinsics"],
+            pose=Pose.from_matrix(data["pose"].to(device=tensor_args.device)),
+        )
+        cam_obs_zoom = CameraObservation(
+            depth_image=tensor_args.to_device(data_zoom["depth"]) * 1000,
+            intrinsics=data_zoom["intrinsics"],
+            pose=Pose.from_matrix(data_zoom["pose"].to(device=tensor_args.device)),
+        )
+        cam_obs = cam_obs.stack(cam_obs_zoom)
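+        # One-time projection setup: the segmenter caches the camera projection
+        # on first use (pattern as used in this example; `ready` is assumed to
+        # become True after update_camera_projection).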
+        if not curobo_segmenter.ready:
+            curobo_segmenter.update_camera_projection(cam_obs)
+        st_time = time.time()
+        depth_mask, filtered_image = curobo_segmenter.get_robot_mask_from_active_js(
+            cam_obs,
+            q_js,
+        )
+        torch.cuda.synchronize()
+        dt_list.append(time.time() - st_time)
+    print("Segmentation Time (ms), (hz)", np.mean(dt_list[5:]) * 1000.0, 1.0 / np.mean(dt_list[5:]))
+def batch_robot_mask_image(robot_file="ur5e.yml"):
+    """Mask images from different camera views using batched query.
+
+    Note: This example treats each image as having a different robot joint configuration.
+
+    Args:
+        robot_file: robot to use for example.
+    """
+    save_debug_data = True
+    # create robot segmenter:
+    tensor_args = TensorDeviceType()
+    curobo_segmenter = RobotSegmenter.from_robot_file(
+        robot_file, collision_sphere_buffer=0.01, distance_threshold=0.05, use_cuda_graph=True
+    )
+    mesh_dataset, q_js = create_render_dataset(robot_file, save_debug_data, fov_deg=60)
+    mesh_dataset_zoom, q_js_zoom = create_render_dataset(
+        robot_file, save_debug_data, fov_deg=60, retract_delta=-0.5
+    )
+    q_js = q_js.unsqueeze(0)
+    q_js = q_js.stack(q_js_zoom.unsqueeze(0))
+    if save_debug_data:
+        visualize_scale = 10.0
+        data = mesh_dataset[0]
+        cam_obs = CameraObservation(
+            depth_image=tensor_args.to_device(data["depth"]).unsqueeze(0) * 1000,
+            intrinsics=data["intrinsics"],
+            pose=Pose.from_matrix(data["pose"].to(device=tensor_args.device)),
+        )
+        data_zoom = mesh_dataset_zoom[0]
+        cam_obs = CameraObservation(
+            depth_image=tensor_args.to_device(data["depth"]) * 1000,
+            intrinsics=data["intrinsics"],
+            pose=Pose.from_matrix(data["pose"].to(device=tensor_args.device)),
+        )
+        cam_obs_zoom = CameraObservation(
+            depth_image=tensor_args.to_device(data_zoom["depth"]) * 1000,
+            intrinsics=data_zoom["intrinsics"],
+            pose=Pose.from_matrix(data_zoom["pose"].to(device=tensor_args.device)),
+        )
+        cam_obs = cam_obs.stack(cam_obs_zoom)
+        for i in range(cam_obs.depth_image.shape[0]):
+            # save depth image
+            imageio.imwrite(
+                "camera_depth_" + str(i) + ".png",
+                (cam_obs.depth_image[i] * visualize_scale)
+                .squeeze()
+                .detach()
+                .cpu()
+                .numpy()
+                .astype(np.uint16),
+            )
+        # run segmentation:
+        depth_mask, filtered_image = curobo_segmenter.get_robot_mask_from_active_js(
+            cam_obs,
+            q_js,
+        )
+        # save robot points as mesh
+        robot_mask = cam_obs.clone()
+        robot_mask.depth_image[~depth_mask] = 0.0
+        for i in range(cam_obs.depth_image.shape[0]):
+            # save depth image
+            imageio.imwrite(
+                "robot_depth_" + str(i) + ".png",
+                (robot_mask.depth_image[i] * visualize_scale)
+                .detach()
+                .squeeze()
+                .cpu()
+                .numpy()
+                .astype(np.uint16),
+            )
+            # save world points as mesh
+            imageio.imwrite(
+                "world_depth_" + str(i) + ".png",
+                (filtered_image[i] * visualize_scale)
+                .detach()
+                .squeeze()
+                .cpu()
+                .numpy()
+                .astype(np.uint16),
+            )
+    dt_list = []
+    for i in range(len(mesh_dataset)):
+        data = mesh_dataset[i]
+        data_zoom = mesh_dataset_zoom[i]
+        cam_obs = CameraObservation(
+            depth_image=tensor_args.to_device(data["depth"]) * 1000,
+            intrinsics=data["intrinsics"],
+            pose=Pose.from_matrix(data["pose"].to(device=tensor_args.device)),
+        )
+        cam_obs_zoom = CameraObservation(
+            depth_image=tensor_args.to_device(data_zoom["depth"]) * 1000,
+            intrinsics=data_zoom["intrinsics"],
+            pose=Pose.from_matrix(data_zoom["pose"].to(device=tensor_args.device)),
+        )
+        cam_obs = cam_obs.stack(cam_obs_zoom)
+        if not curobo_segmenter.ready:
+            curobo_segmenter.update_camera_projection(cam_obs)
+        st_time = time.time()
+        depth_mask, filtered_image = curobo_segmenter.get_robot_mask_from_active_js(
+            cam_obs,
+            q_js,
+        )
+        torch.cuda.synchronize()
+        dt_list.append(time.time() - st_time)
+    print("Segmentation Time (ms), (hz)", np.mean(dt_list[5:]) * 1000.0, 1.0 / np.mean(dt_list[5:]))
def profile_mask_image(robot_file="ur5e.yml"):
    # create robot segmenter:
    tensor_args = TensorDeviceType()
@@ -236,3 +491,5 @@ def profile_mask_image(robot_file="ur5e.yml"):
if __name__ == "__main__":
    mask_image()
    # profile_mask_image()
+    # batch_mask_image()
+    # batch_robot_mask_image()