Significantly improved convergence for mesh and cuboid, new ESDF collision.
@@ -31,7 +31,7 @@ torch.backends.cudnn.allow_tf32 = True
 def demo_basic_ik():
     tensor_args = TensorDeviceType()

-    config_file = load_yaml(join_path(get_robot_configs_path(), "franka.yml"))
+    config_file = load_yaml(join_path(get_robot_configs_path(), "ur10e.yml"))
     urdf_file = config_file["robot_cfg"]["kinematics"][
         "urdf_path"
     ]  # Send global path starting with "/"
@@ -48,13 +48,13 @@ def demo_basic_ik():
         self_collision_check=False,
         self_collision_opt=False,
         tensor_args=tensor_args,
-        use_cuda_graph=False,
+        use_cuda_graph=True,
     )
     ik_solver = IKSolver(ik_config)

     # print(kin_state)
     for _ in range(10):
-        q_sample = ik_solver.sample_configs(5000)
+        q_sample = ik_solver.sample_configs(100)
         kin_state = ik_solver.fk(q_sample)
         goal = Pose(kin_state.ee_position, kin_state.ee_quaternion)

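The two changes above interact: with `use_cuda_graph=True`, the first call to `solve_batch` captures a CUDA graph, so every later solve must reuse the same batch size, and the smaller `sample_configs(100)` batch keeps that fixed shape cheap. A minimal sketch of the resulting usage pattern, assuming cuRobo's public `IKSolverConfig.load_from_robot_config` API with no world model; the `num_seeds` value is illustrative, not from this diff:

    # Sketch: fixed-batch IK solves under CUDA graph capture (assumed setup).
    from curobo.types.base import TensorDeviceType
    from curobo.types.math import Pose
    from curobo.types.robot import RobotConfig
    from curobo.util_file import get_robot_configs_path, join_path, load_yaml
    from curobo.wrap.reacher.ik_solver import IKSolver, IKSolverConfig

    tensor_args = TensorDeviceType()
    cfg = load_yaml(join_path(get_robot_configs_path(), "ur10e.yml"))
    robot_cfg = RobotConfig.from_dict(cfg["robot_cfg"], tensor_args)
    ik_config = IKSolverConfig.load_from_robot_config(
        robot_cfg,
        None,  # no world model for this basic demo
        num_seeds=20,  # illustrative value
        self_collision_check=False,
        self_collision_opt=False,
        tensor_args=tensor_args,
        use_cuda_graph=True,  # graph is captured on first solve; batch size must stay fixed
    )
    ik_solver = IKSolver(ik_config)
    q_sample = ik_solver.sample_configs(100)  # keep this count constant across solves
    kin_state = ik_solver.fk(q_sample)
    result = ik_solver.solve_batch(Pose(kin_state.ee_position, kin_state.ee_quaternion))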
@@ -62,12 +62,12 @@ def demo_basic_ik():
         result = ik_solver.solve_batch(goal)
         torch.cuda.synchronize()
         print(
-            "Success, Solve Time(s), Total Time(s) ",
+            "Success, Solve Time(s), hz ",
             torch.count_nonzero(result.success).item() / len(q_sample),
             result.solve_time,
             q_sample.shape[0] / (time.time() - st_time),
-            torch.mean(result.position_error) * 100.0,
-            torch.mean(result.rotation_error) * 100.0,
+            torch.mean(result.position_error),
+            torch.mean(result.rotation_error),
         )


@@ -97,7 +97,7 @@ def demo_full_config_collision_free_ik():
     # print(kin_state)
     print("Running Single IK")
     for _ in range(10):
-        q_sample = ik_solver.sample_configs(5000)
+        q_sample = ik_solver.sample_configs(1)
         kin_state = ik_solver.fk(q_sample)
         goal = Pose(kin_state.ee_position, kin_state.ee_quaternion)

@@ -107,7 +107,7 @@ def demo_full_config_collision_free_ik():
         total_time = (time.time() - st_time) / q_sample.shape[0]
         print(
             "Success, Solve Time(s), Total Time(s)",
-            torch.count_nonzero(result.success).item() / len(q_sample),
+            torch.count_nonzero(result.success).item(),
             result.solve_time,
             total_time,
             1.0 / total_time,
@@ -100,7 +100,6 @@ def add_robot_to_scene(
         import_config,
         dest_path,
     )

     # prim_path = omni.usd.get_stage_next_free_path(
     #     my_world.scene.stage, str(my_world.scene.stage.GetDefaultPrim().GetPath()) + robot_path, False)
-    # print(prim_path)
@@ -112,13 +111,13 @@ def add_robot_to_scene(
         position=position,
     )
     if ISAAC_SIM_23:
-        robot_p.set_solver_velocity_iteration_count(4)
+        robot_p.set_solver_velocity_iteration_count(0)
         robot_p.set_solver_position_iteration_count(44)

         my_world._physics_context.set_solver_type("PGS")

     robot = my_world.scene.add(robot_p)

     robot_path = robot.prim_path
     return robot, robot_path

@@ -39,7 +39,7 @@ def demo_basic_robot():

     # compute forward kinematics:

-    q = torch.rand((10, kin_model.get_dof()), **vars(tensor_args))
+    q = torch.rand((10, kin_model.get_dof()), **(tensor_args.as_torch_dict()))
     out = kin_model.get_state(q)
     # here is the kinematics state:
     # print(out)
@@ -55,7 +55,7 @@ def demo_full_config_robot():
     kin_model = CudaRobotModel(robot_cfg.kinematics)

     # compute forward kinematics:
-    q = torch.rand((10, kin_model.get_dof()), **vars(tensor_args))
+    q = torch.rand((10, kin_model.get_dof()), **(tensor_args.as_torch_dict()))
     out = kin_model.get_state(q)
     # here is the kinematics state:
     # print(out)
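Context for the `**vars(tensor_args)` replacement in both hunks: `TensorDeviceType` is a dataclass that can carry more fields than the `device`/`dtype` keywords `torch.rand` accepts, so expanding all of its attributes can raise a `TypeError`; `as_torch_dict()` narrows the expansion to the torch-compatible keywords. A short sketch of the idiom, with a 7-DOF shape assumed purely for illustration:

    # Sketch: build a random joint batch on the configured device/dtype.
    import torch
    from curobo.types.base import TensorDeviceType

    tensor_args = TensorDeviceType()
    q = torch.rand((10, 7), **(tensor_args.as_torch_dict()))  # device/dtype only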
@@ -94,10 +94,10 @@ def demo_full_config_mpc():
     while not converged:
         st_time = time.time()
         # current_state.position += 0.1
-        print(current_state.position)
+        # print(current_state.position)
         result = mpc.step(current_state, 1)

-        print(mpc.get_visual_rollouts().shape)
+        # print(mpc.get_visual_rollouts().shape)
         # exit()
         torch.cuda.synchronize()
         if tstep > 5:
@@ -32,7 +32,6 @@ from curobo.types.robot import RobotConfig
 from curobo.types.state import JointState
 from curobo.util_file import get_robot_configs_path, get_world_configs_path, join_path, load_yaml
 from curobo.wrap.model.robot_segmenter import RobotSegmenter
-from curobo.wrap.model.robot_world import RobotWorld, RobotWorldConfig

 torch.manual_seed(30)

@@ -42,7 +41,13 @@ torch.backends.cuda.matmul.allow_tf32 = True
 torch.backends.cudnn.allow_tf32 = True


-def create_render_dataset(robot_file, save_debug_data: bool = False):
+def create_render_dataset(
+    robot_file,
+    save_debug_data: bool = False,
+    fov_deg: float = 60,
+    n_frames: int = 20,
+    retract_delta: float = 0.0,
+):
     # load robot:
     robot_dict = load_yaml(join_path(get_robot_configs_path(), robot_file))
     robot_dict["robot_cfg"]["kinematics"]["load_link_names_with_mesh"] = True
@@ -54,6 +59,8 @@ def create_render_dataset(robot_file, save_debug_data: bool = False):

     q = kin_model.retract_config

+    q += retract_delta
+
     meshes = kin_model.get_robot_as_mesh(q)

     world = WorldConfig(mesh=meshes[:])
@@ -71,10 +78,11 @@ def create_render_dataset(robot_file, save_debug_data: bool = False):

     mesh_dataset = MeshDataset(
         None,
-        n_frames=20,
-        image_size=480,
+        n_frames=n_frames,
+        image_size=640,
         save_data_dir=None,
         trimesh_mesh=robot_mesh,
+        fov_deg=fov_deg,
     )
     q_js = JointState(position=q, joint_names=kin_model.joint_names)

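With the widened signature, callers control the rendered camera's field of view, the number of frames, and an offset applied to the retract configuration before rendering. A usage sketch mirroring the calls added later in this commit:

    # Sketch: two datasets of the same robot, one zoomed in, one with a shifted pose.
    mesh_dataset, q_js = create_render_dataset("ur5e.yml", save_debug_data=False, fov_deg=60)
    mesh_dataset_zoom, q_js_zoom = create_render_dataset(
        "ur5e.yml", save_debug_data=False, fov_deg=40, n_frames=30, retract_delta=-0.5
    )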
@@ -83,6 +91,7 @@ def create_render_dataset(robot_file, save_debug_data: bool = False):

 def mask_image(robot_file="ur5e.yml"):
     save_debug_data = False
+    write_pointcloud = False
     # create robot segmenter:
     tensor_args = TensorDeviceType()

@@ -90,7 +99,7 @@ def mask_image(robot_file="ur5e.yml"):
         robot_file, collision_sphere_buffer=0.01, distance_threshold=0.05, use_cuda_graph=True
     )

-    mesh_dataset, q_js = create_render_dataset(robot_file, save_debug_data)
+    mesh_dataset, q_js = create_render_dataset(robot_file, write_pointcloud, n_frames=20)

     if save_debug_data:
         visualize_scale = 10.0
@@ -113,14 +122,15 @@ def mask_image(robot_file="ur5e.yml"):

         # save robot spheres in current joint configuration
         robot_kinematics = curobo_segmenter._robot_world.kinematics
-        sph = robot_kinematics.get_robot_as_spheres(q_js.position)
-        WorldConfig(sphere=sph[0]).save_world_as_mesh("robot_spheres.stl")
+        if write_pointcloud:
+            sph = robot_kinematics.get_robot_as_spheres(q_js.position)
+            WorldConfig(sphere=sph[0]).save_world_as_mesh("robot_spheres.stl")

-        # save world pointcloud in robot origin
+            # save world pointcloud in robot origin

-        pc = cam_obs.get_pointcloud()
-        pc_obs = PointCloud("world", pose=cam_obs.pose.to_list(), points=pc)
-        pc_obs.save_as_mesh("camera_pointcloud.stl", transform_with_pose=True)
+            pc = cam_obs.get_pointcloud()
+            pc_obs = PointCloud("world", pose=cam_obs.pose.to_list(), points=pc)
+            pc_obs.save_as_mesh("camera_pointcloud.stl", transform_with_pose=True)

     # run segmentation:
     depth_mask, filtered_image = curobo_segmenter.get_robot_mask_from_active_js(
@@ -131,10 +141,12 @@ def mask_image(robot_file="ur5e.yml"):

     robot_mask = cam_obs.clone()
     robot_mask.depth_image[~depth_mask] = 0.0
-    robot_mesh = PointCloud(
-        "world", pose=robot_mask.pose.to_list(), points=robot_mask.get_pointcloud()
-    )
-    robot_mesh.save_as_mesh("robot_segmented.stl", transform_with_pose=True)

+    if write_pointcloud:
+        robot_mesh = PointCloud(
+            "world", pose=robot_mask.pose.to_list(), points=robot_mask.get_pointcloud()
+        )
+        robot_mesh.save_as_mesh("robot_segmented.stl", transform_with_pose=True)
+
     # save depth image
     imageio.imwrite(
         "robot_depth.png",
@@ -150,10 +162,11 @@ def mask_image(robot_file="ur5e.yml"):

     world_mask = cam_obs.clone()
     world_mask.depth_image[depth_mask] = 0.0
-    world_mesh = PointCloud(
-        "world", pose=world_mask.pose.to_list(), points=world_mask.get_pointcloud()
-    )
-    world_mesh.save_as_mesh("world_segmented.stl", transform_with_pose=True)
+    if write_pointcloud:
+        world_mesh = PointCloud(
+            "world", pose=world_mask.pose.to_list(), points=world_mask.get_pointcloud()
+        )
+        world_mesh.save_as_mesh("world_segmented.stl", transform_with_pose=True)

     imageio.imwrite(
         "world_depth.png",
@@ -190,6 +203,248 @@ def mask_image(robot_file="ur5e.yml"):
     print("Segmentation Time (ms), (hz)", np.mean(dt_list[5:]) * 1000.0, 1.0 / np.mean(dt_list[5:]))


+def batch_mask_image(robot_file="ur5e.yml"):
+    """Mask images from different camera views using batched query.
+
+    Note: This only works for a single joint configuration across camera views.
+
+    Args:
+        robot_file: robot to use for example.
+    """
+    save_debug_data = True
+    # create robot segmenter:
+    tensor_args = TensorDeviceType()
+
+    curobo_segmenter = RobotSegmenter.from_robot_file(
+        robot_file, collision_sphere_buffer=0.01, distance_threshold=0.05, use_cuda_graph=True
+    )
+
+    mesh_dataset, q_js = create_render_dataset(robot_file, save_debug_data, fov_deg=60)
+
+    mesh_dataset_zoom, q_js = create_render_dataset(
+        robot_file, save_debug_data, fov_deg=40, n_frames=30
+    )
+
+    if save_debug_data:
+        visualize_scale = 10.0
+        data = mesh_dataset[0]
+        cam_obs = CameraObservation(
+            depth_image=tensor_args.to_device(data["depth"]).unsqueeze(0) * 1000,
+            intrinsics=data["intrinsics"],
+            pose=Pose.from_matrix(data["pose"].to(device=tensor_args.device)),
+        )
+        data_zoom = mesh_dataset_zoom[1]
+        cam_obs = CameraObservation(
+            depth_image=tensor_args.to_device(data["depth"]) * 1000,
+            intrinsics=data["intrinsics"],
+            pose=Pose.from_matrix(data["pose"].to(device=tensor_args.device)),
+        )
+        cam_obs_zoom = CameraObservation(
+            depth_image=tensor_args.to_device(data_zoom["depth"]) * 1000,
+            intrinsics=data_zoom["intrinsics"],
+            pose=Pose.from_matrix(data_zoom["pose"].to(device=tensor_args.device)),
+        )
+        cam_obs = cam_obs.stack(cam_obs_zoom)
+
+        for i in range(cam_obs.depth_image.shape[0]):
+            # save depth image
+            imageio.imwrite(
+                "camera_depth_" + str(i) + ".png",
+                (cam_obs.depth_image[i] * visualize_scale)
+                .squeeze()
+                .detach()
+                .cpu()
+                .numpy()
+                .astype(np.uint16),
+            )
+
+        # run segmentation:
+        depth_mask, filtered_image = curobo_segmenter.get_robot_mask_from_active_js(
+            cam_obs,
+            q_js,
+        )
+        # save robot points as mesh
+
+        robot_mask = cam_obs.clone()
+        robot_mask.depth_image[~depth_mask] = 0.0
+        for i in range(cam_obs.depth_image.shape[0]):
+            # save depth image
+            imageio.imwrite(
+                "robot_depth_" + str(i) + ".png",
+                (robot_mask.depth_image[i] * visualize_scale)
+                .detach()
+                .squeeze()
+                .cpu()
+                .numpy()
+                .astype(np.uint16),
+            )
+
+            # save world points as mesh
+
+            imageio.imwrite(
+                "world_depth_" + str(i) + ".png",
+                (filtered_image[i] * visualize_scale)
+                .detach()
+                .squeeze()
+                .cpu()
+                .numpy()
+                .astype(np.uint16),
+            )
+
+    dt_list = []
+
+    for i in range(len(mesh_dataset)):
+
+        data = mesh_dataset[i]
+        data_zoom = mesh_dataset_zoom[i + 1]
+        cam_obs = CameraObservation(
+            depth_image=tensor_args.to_device(data["depth"]) * 1000,
+            intrinsics=data["intrinsics"],
+            pose=Pose.from_matrix(data["pose"].to(device=tensor_args.device)),
+        )
+        cam_obs_zoom = CameraObservation(
+            depth_image=tensor_args.to_device(data_zoom["depth"]) * 1000,
+            intrinsics=data_zoom["intrinsics"],
+            pose=Pose.from_matrix(data_zoom["pose"].to(device=tensor_args.device)),
+        )
+        cam_obs = cam_obs.stack(cam_obs_zoom)
+        if not curobo_segmenter.ready:
+            curobo_segmenter.update_camera_projection(cam_obs)
+        st_time = time.time()
+
+        depth_mask, filtered_image = curobo_segmenter.get_robot_mask_from_active_js(
+            cam_obs,
+            q_js,
+        )
+
+        torch.cuda.synchronize()
+        dt_list.append(time.time() - st_time)
+
+    print("Segmentation Time (ms), (hz)", np.mean(dt_list[5:]) * 1000.0, 1.0 / np.mean(dt_list[5:]))
+
+
+def batch_robot_mask_image(robot_file="ur5e.yml"):
+    """Mask images from different camera views using batched query.
+
+    Note: This example treats each image as having a different robot joint configuration.
+
+    Args:
+        robot_file: robot to use for example.
+    """
+    save_debug_data = True
+    # create robot segmenter:
+    tensor_args = TensorDeviceType()
+
+    curobo_segmenter = RobotSegmenter.from_robot_file(
+        robot_file, collision_sphere_buffer=0.01, distance_threshold=0.05, use_cuda_graph=True
+    )
+
+    mesh_dataset, q_js = create_render_dataset(robot_file, save_debug_data, fov_deg=60)
+
+    mesh_dataset_zoom, q_js_zoom = create_render_dataset(
+        robot_file, save_debug_data, fov_deg=60, retract_delta=-0.5
+    )
+    q_js = q_js.unsqueeze(0)
+    q_js = q_js.stack(q_js_zoom.unsqueeze(0))
+
+    if save_debug_data:
+        visualize_scale = 10.0
+        data = mesh_dataset[0]
+        cam_obs = CameraObservation(
+            depth_image=tensor_args.to_device(data["depth"]).unsqueeze(0) * 1000,
+            intrinsics=data["intrinsics"],
+            pose=Pose.from_matrix(data["pose"].to(device=tensor_args.device)),
+        )
+        data_zoom = mesh_dataset_zoom[0]
+        cam_obs = CameraObservation(
+            depth_image=tensor_args.to_device(data["depth"]) * 1000,
+            intrinsics=data["intrinsics"],
+            pose=Pose.from_matrix(data["pose"].to(device=tensor_args.device)),
+        )
+        cam_obs_zoom = CameraObservation(
+            depth_image=tensor_args.to_device(data_zoom["depth"]) * 1000,
+            intrinsics=data_zoom["intrinsics"],
+            pose=Pose.from_matrix(data_zoom["pose"].to(device=tensor_args.device)),
+        )
+        cam_obs = cam_obs.stack(cam_obs_zoom)
+
+        for i in range(cam_obs.depth_image.shape[0]):
+            # save depth image
+            imageio.imwrite(
+                "camera_depth_" + str(i) + ".png",
+                (cam_obs.depth_image[i] * visualize_scale)
+                .squeeze()
+                .detach()
+                .cpu()
+                .numpy()
+                .astype(np.uint16),
+            )
+
+        # run segmentation:
+        depth_mask, filtered_image = curobo_segmenter.get_robot_mask_from_active_js(
+            cam_obs,
+            q_js,
+        )
+        # save robot points as mesh
+
+        robot_mask = cam_obs.clone()
+        robot_mask.depth_image[~depth_mask] = 0.0
+        for i in range(cam_obs.depth_image.shape[0]):
+            # save depth image
+            imageio.imwrite(
+                "robot_depth_" + str(i) + ".png",
+                (robot_mask.depth_image[i] * visualize_scale)
+                .detach()
+                .squeeze()
+                .cpu()
+                .numpy()
+                .astype(np.uint16),
+            )
+
+            # save world points as mesh
+
+            imageio.imwrite(
+                "world_depth_" + str(i) + ".png",
+                (filtered_image[i] * visualize_scale)
+                .detach()
+                .squeeze()
+                .cpu()
+                .numpy()
+                .astype(np.uint16),
+            )
+
+    dt_list = []
+
+    for i in range(len(mesh_dataset)):
+
+        data = mesh_dataset[i]
+        data_zoom = mesh_dataset_zoom[i]
+        cam_obs = CameraObservation(
+            depth_image=tensor_args.to_device(data["depth"]) * 1000,
+            intrinsics=data["intrinsics"],
+            pose=Pose.from_matrix(data["pose"].to(device=tensor_args.device)),
+        )
+        cam_obs_zoom = CameraObservation(
+            depth_image=tensor_args.to_device(data_zoom["depth"]) * 1000,
+            intrinsics=data_zoom["intrinsics"],
+            pose=Pose.from_matrix(data_zoom["pose"].to(device=tensor_args.device)),
+        )
+        cam_obs = cam_obs.stack(cam_obs_zoom)
+        if not curobo_segmenter.ready:
+            curobo_segmenter.update_camera_projection(cam_obs)
+        st_time = time.time()
+
+        depth_mask, filtered_image = curobo_segmenter.get_robot_mask_from_active_js(
+            cam_obs,
+            q_js,
+        )
+
+        torch.cuda.synchronize()
+        dt_list.append(time.time() - st_time)
+
+    print("Segmentation Time (ms), (hz)", np.mean(dt_list[5:]) * 1000.0, 1.0 / np.mean(dt_list[5:]))
+
+
 def profile_mask_image(robot_file="ur5e.yml"):
     # create robot segmenter:
     tensor_args = TensorDeviceType()
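Both batched examples reduce to the same stacking pattern: `CameraObservation.stack` batches the per-view depth, intrinsics, and pose, while `batch_robot_mask_image` additionally stacks one `JointState` per view so each image is masked against its own configuration. A condensed sketch using only calls that appear in this hunk; `cam_obs_a`/`cam_obs_b` and `q_js_a`/`q_js_b` stand in for the per-view data built above:

    # Sketch: batched robot masking across two views.
    cam_obs = cam_obs_a.stack(cam_obs_b)                    # batch of 2 camera views
    q_js = q_js_a.unsqueeze(0).stack(q_js_b.unsqueeze(0))   # one joint config per view
    if not curobo_segmenter.ready:
        curobo_segmenter.update_camera_projection(cam_obs)
    depth_mask, filtered_image = curobo_segmenter.get_robot_mask_from_active_js(cam_obs, q_js)
    # depth_mask[i] selects robot pixels in view i; filtered_image[i] is the world-only depth.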
@@ -236,3 +491,5 @@ def profile_mask_image(robot_file="ur5e.yml"):
 if __name__ == "__main__":
     mask_image()
     # profile_mask_image()
+    # batch_mask_image()
+    # batch_robot_mask_image()