Significantly improved convergence for mesh and cuboid collision checking; new ESDF collision checker.

Balakumar Sundaralingam
2024-03-18 11:19:48 -07:00
parent 286b3820a5
commit b1f63e8778
100 changed files with 7587 additions and 2589 deletions

View File

@@ -19,13 +19,16 @@ from torch.profiler import record_function
# CuRobo
from curobo.cuda_robot_model.cuda_robot_model import CudaRobotModel
from curobo.geom.cv import get_projection_rays, project_depth_using_rays
from curobo.geom.types import PointCloud
from curobo.types.base import TensorDeviceType
from curobo.types.camera import CameraObservation
from curobo.types.math import Pose
from curobo.types.robot import RobotConfig
from curobo.types.state import JointState
from curobo.util.logger import log_error
from curobo.util.torch_utils import (
get_torch_compile_options,
get_torch_jit_decorator,
is_torch_compile_available,
)
from curobo.util_file import get_robot_configs_path, join_path, load_yaml
from curobo.wrap.model.robot_world import RobotWorld, RobotWorldConfig
@@ -36,6 +39,7 @@ class RobotSegmenter:
robot_world: RobotWorld,
distance_threshold: float = 0.05,
use_cuda_graph: bool = True,
ops_dtype: torch.dtype = torch.float32,
):
self._robot_world = robot_world
self._projection_rays = None
@@ -48,11 +52,12 @@ class RobotSegmenter:
self._use_cuda_graph = use_cuda_graph
self.tensor_args = robot_world.tensor_args
self.distance_threshold = distance_threshold
self._ops_dtype = ops_dtype
@staticmethod
def from_robot_file(
robot_file: Union[str, Dict],
collision_sphere_buffer: Optional[float],
collision_sphere_buffer: Optional[float] = None,
distance_threshold: float = 0.05,
use_cuda_graph: bool = True,
tensor_args: TensorDeviceType = TensorDeviceType(),
@@ -78,7 +83,7 @@ class RobotSegmenter:
def get_pointcloud_from_depth(self, camera_obs: CameraObservation):
if self._projection_rays is None:
self.update_camera_projection(camera_obs)
depth_image = camera_obs.depth_image
depth_image = camera_obs.depth_image.to(dtype=self._ops_dtype)
if len(depth_image.shape) == 2:
depth_image = depth_image.unsqueeze(0)
points = project_depth_using_rays(depth_image, self._projection_rays)
@@ -91,7 +96,7 @@ class RobotSegmenter:
intrinsics = intrinsics.unsqueeze(0)
project_rays = get_projection_rays(
camera_obs.depth_image.shape[-2], camera_obs.depth_image.shape[-1], intrinsics
)
).to(dtype=self._ops_dtype)
if self._projection_rays is None:
self._projection_rays = project_rays
@@ -157,8 +162,12 @@ class RobotSegmenter:
def _mask_op(self, camera_obs, q):
if len(q.shape) == 1:
q = q.unsqueeze(0)
robot_spheres = self._robot_world.get_kinematics(q).link_spheres_tensor
points = self.get_pointcloud_from_depth(camera_obs)
camera_to_robot = camera_obs.pose
points = points.to(dtype=torch.float32)
if self._out_points_buffer is None:
self._out_points_buffer = points.clone()
@@ -181,9 +190,9 @@ class RobotSegmenter:
out_points = points_in_robot_frame
dist = self._robot_world.get_point_robot_distance(out_points, q)
mask, filtered_image = mask_image(camera_obs.depth_image, dist, self.distance_threshold)
mask, filtered_image = mask_spheres_image(
camera_obs.depth_image, robot_spheres, out_points, self.distance_threshold
)
return mask, filtered_image
@@ -200,7 +209,7 @@ class RobotSegmenter:
return self.kinematics.base_link
@torch.jit.script
@get_torch_jit_decorator()
def mask_image(
image: torch.Tensor, distance: torch.Tensor, distance_threshold: float
) -> Tuple[torch.Tensor, torch.Tensor]:
@@ -212,3 +221,36 @@ def mask_image(
mask = torch.logical_and((image > 0.0), (distance > -distance_threshold))
filtered_image = torch.where(mask, 0, image)
return mask, filtered_image
@get_torch_jit_decorator()
def mask_spheres_image(
image: torch.Tensor,
link_spheres_tensor: torch.Tensor,
points: torch.Tensor,
distance_threshold: float,
) -> Tuple[torch.Tensor, torch.Tensor]:
if link_spheres_tensor.shape[0] != 1:
assert link_spheres_tensor.shape[0] == points.shape[0]
if len(points.shape) == 2:
points = points.unsqueeze(0)
robot_spheres = link_spheres_tensor.view(link_spheres_tensor.shape[0], -1, 4).contiguous()
robot_spheres = robot_spheres.unsqueeze(-3)
robot_radius = robot_spheres[..., 3]
points = points.unsqueeze(-2)
sph_distance = -1 * (
torch.linalg.norm(points - robot_spheres[..., :3], dim=-1) - robot_radius
) # b, n_spheres
distance = torch.max(sph_distance, dim=-1)[0]
distance = distance.view(
image.shape[0],
image.shape[1],
image.shape[2],
)
mask = torch.logical_and((image > 0.0), (distance > -distance_threshold))
filtered_image = torch.where(mask, 0, image)
return mask, filtered_image
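
The new mask_spheres_image path drops the per-point robot-distance query from RobotWorld and tests pixels directly against the robot's collision spheres: every back-projected depth point gets a signed distance to its closest sphere (positive inside), and pixels within distance_threshold of the robot are zeroed out. A minimal self-contained sketch of that math, with toy tensors standing in for a real depth image and sphere set (all shapes and values here are hypothetical):

    # Toy demonstration of the sphere-based depth masking.
    import torch

    b, h, w = 1, 2, 3                                   # one camera, 2x3 depth image
    image = torch.rand(b, h, w) + 0.5                   # fake positive depths
    points = torch.rand(b, h * w, 3)                    # back-projected pixel points
    spheres = torch.tensor([[[0.5, 0.5, 0.5, 0.2]]])    # [b, n_spheres, 4] = (x, y, z, r)

    spheres = spheres.unsqueeze(-3)                     # [b, 1, n_spheres, 4]
    signed = -(
        torch.linalg.norm(points.unsqueeze(-2) - spheres[..., :3], dim=-1)
        - spheres[..., 3]
    )                                                   # >0 inside a sphere
    distance = torch.max(signed, dim=-1)[0].view(b, h, w)

    distance_threshold = 0.05
    mask = torch.logical_and(image > 0.0, distance > -distance_threshold)
    filtered = torch.where(mask, 0.0, image)            # zero out pixels near the robot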

View File

@@ -36,6 +36,11 @@ from curobo.types.robot import RobotConfig
from curobo.types.state import JointState
from curobo.util.logger import log_error
from curobo.util.sample_lib import HaltonGenerator
from curobo.util.torch_utils import (
get_torch_compile_options,
get_torch_jit_decorator,
is_torch_compile_available,
)
from curobo.util.warp import init_warp
from curobo.util_file import get_robot_configs_path, get_world_configs_path, join_path, load_yaml
@@ -192,6 +197,9 @@ class RobotWorld(RobotWorldConfig):
def update_world(self, world_config: WorldConfig):
self.world_model.load_collision_model(world_config)
def clear_world_cache(self):
self.world_model.clear_cache()
def get_collision_distance(
self, x_sph: torch.Tensor, env_query_idx: Optional[torch.Tensor] = None
) -> torch.Tensor:
@@ -364,16 +372,7 @@ class RobotWorld(RobotWorldConfig):
if len(q.shape) == 1:
log_error("q should be of shape [b, dof]")
kin_state = self.get_kinematics(q)
b, n = None, None
if len(points.shape) == 3:
b, n, _ = points.shape
points = points.view(b * n, 3)
pt_distance = point_robot_distance(kin_state.link_spheres_tensor, points)
if b is not None:
pt_distance = pt_distance.view(b, n)
return pt_distance
def get_active_js(self, full_js: JointState):
@@ -382,27 +381,50 @@ class RobotWorld(RobotWorldConfig):
return out_js
@torch.jit.script
@get_torch_jit_decorator()
def sum_mask(d1, d2, d3):
d_total = d1 + d2 + d3
d_mask = d_total == 0.0
return d_mask.view(-1)
@torch.jit.script
@get_torch_jit_decorator()
def mask(d1, d2, d3):
d_total = d1 + d2 + d3
d_mask = d_total == 0.0
return d_mask
@torch.jit.script
@get_torch_jit_decorator()
def point_robot_distance(link_spheres_tensor, points):
robot_spheres = link_spheres_tensor.view(1, -1, 4).contiguous()
robot_radius = robot_spheres[:, :, 3]
points = points.unsqueeze(1)
sph_distance = (
torch.linalg.norm(points - robot_spheres[:, :, :3], dim=-1) - robot_radius
"""Compute distance between robot and points
Args:
link_spheres_tensor: [batch_robot, n_robot_spheres, 4]
points: [batch_points, n_points, 3]
Returns:
distance: [batch_points, n_points]
"""
if link_spheres_tensor.shape[0] != 1:
assert link_spheres_tensor.shape[0] == points.shape[0]
squeeze_shape = False
n = 1
if len(points.shape) == 2:
squeeze_shape = True
n, _ = points.shape
points = points.unsqueeze(0)
robot_spheres = link_spheres_tensor.view(link_spheres_tensor.shape[0], -1, 4).contiguous()
robot_spheres = robot_spheres.unsqueeze(-3)
robot_radius = robot_spheres[..., 3]
points = points.unsqueeze(-2)
sph_distance = -1 * (
torch.linalg.norm(points - robot_spheres[..., :3], dim=-1) - robot_radius
) # b, n_spheres
pt_distance = torch.max(-1 * sph_distance, dim=-1)[0]
pt_distance = torch.max(sph_distance, dim=-1)[0]
if squeeze_shape:
pt_distance = pt_distance.view(n)
return pt_distance
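
point_robot_distance now accepts batched sphere sets ([batch, n_spheres, 4] against [batch, n_points, 3]) and keeps the sign convention "positive means inside the robot". A standalone check of that convention on a unit sphere (toy inputs, replicating the broadcasting above):

    # Sign-convention check: positive inside, negative outside.
    import torch

    link_spheres = torch.tensor([[[0.0, 0.0, 0.0, 1.0]]])  # one unit sphere at origin
    points = torch.tensor([[[0.0, 0.0, 0.5],               # inside the sphere
                            [0.0, 0.0, 2.0]]])             # outside the sphere

    spheres = link_spheres.view(1, -1, 4).unsqueeze(-3)    # [1, 1, n_spheres, 4]
    signed = -(
        torch.linalg.norm(points.unsqueeze(-2) - spheres[..., :3], dim=-1)
        - spheres[..., 3]
    )
    pt_distance = torch.max(signed, dim=-1)[0]
    print(pt_distance)  # tensor([[ 0.5000, -1.0000]])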

View File

@@ -20,6 +20,7 @@ import torch.autograd.profiler as profiler
# CuRobo
from curobo.types.robot import JointState
from curobo.types.tensor import T_DOF
from curobo.util.torch_utils import get_torch_jit_decorator
from curobo.util.trajectory import calculate_dt
@@ -32,7 +33,7 @@ class TrajEvaluatorConfig:
max_dt: float = 0.1
@torch.jit.script
@get_torch_jit_decorator()
def compute_path_length(vel, traj_dt, cspace_distance_weight):
pl = torch.sum(
torch.sum(torch.abs(vel) * traj_dt.unsqueeze(-1) * cspace_distance_weight, dim=-1), dim=-1
@@ -40,24 +41,25 @@ def compute_path_length(vel, traj_dt, cspace_distance_weight):
return pl
@torch.jit.script
@get_torch_jit_decorator()
def compute_path_length_cost(vel, cspace_distance_weight):
pl = torch.sum(torch.sum(torch.abs(vel) * cspace_distance_weight, dim=-1), dim=-1)
return pl
@torch.jit.script
@get_torch_jit_decorator()
def smooth_cost(abs_acc, abs_jerk, opt_dt):
# acc = torch.max(torch.max(abs_acc, dim=-1)[0], dim=-1)[0]
# jerk = torch.max(torch.max(abs_jerk, dim=-1)[0], dim=-1)[0]
jerk = torch.mean(torch.max(abs_jerk, dim=-1)[0], dim=-1)
mean_acc = torch.mean(torch.max(abs_acc, dim=-1)[0], dim=-1) # [0]
a = (jerk * 0.001) + 5.0 * opt_dt + (mean_acc * 0.01)
a = (jerk * 0.001) + 10.0 * opt_dt + (mean_acc * 0.01)
# a = (jerk * 0.001) + 50.0 * opt_dt + (mean_acc * 0.01)
return a
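
The behavioral change in smooth_cost is the opt_dt weight (5.0 to 10.0), which doubles the penalty on slow trajectories relative to jerk and acceleration. With hypothetical values jerk = 100, opt_dt = 0.5, mean_acc = 10, the score moves from 100*0.001 + 5.0*0.5 + 10*0.01 = 2.7 to 0.1 + 10.0*0.5 + 0.1 = 5.2, so the optimized timestep now dominates seed ranking more strongly.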
@torch.jit.script
@get_torch_jit_decorator()
def compute_smoothness(
vel: torch.Tensor,
acc: torch.Tensor,
@@ -104,7 +106,7 @@ def compute_smoothness(
return (acc_label, smooth_cost(abs_acc, abs_jerk, dt_score))
@torch.jit.script
@get_torch_jit_decorator()
def compute_smoothness_opt_dt(
vel, acc, jerk, max_vel: torch.Tensor, max_acc: float, max_jerk: float, opt_dt: torch.Tensor
):

View File

@@ -34,6 +34,7 @@ from curobo.types.robot import JointState, RobotConfig
from curobo.types.tensor import T_BDOF, T_BValue_bool, T_BValue_float
from curobo.util.logger import log_error, log_warn
from curobo.util.sample_lib import HaltonGenerator
from curobo.util.torch_utils import get_torch_jit_decorator
from curobo.util_file import (
get_robot_configs_path,
get_task_configs_path,
@@ -1010,7 +1011,7 @@ class IKSolver(IKSolverConfig):
]
@torch.jit.script
@get_torch_jit_decorator()
def get_success(
feasible,
position_error,
@@ -1028,7 +1029,7 @@ def get_success(
return success
@torch.jit.script
@get_torch_jit_decorator()
def get_result(
pose_error,
position_error,

View File

@@ -197,7 +197,7 @@ class MotionGenConfig:
smooth_weight: List[float] = None,
finetune_smooth_weight: Optional[List[float]] = None,
state_finite_difference_mode: Optional[str] = None,
finetune_dt_scale: float = 0.98,
finetune_dt_scale: float = 0.95,
maximum_trajectory_time: Optional[float] = None,
maximum_trajectory_dt: float = 0.1,
velocity_scale: Optional[Union[List[float], float]] = None,
@@ -2086,6 +2086,7 @@ class MotionGen(MotionGenConfig):
# self.trajopt_solver.compute_metrics(not og_evaluate, og_evaluate)
if self.store_debug_in_result:
result.debug_info["trajopt_result"] = traj_result
# run finetune
if plan_config.enable_finetune_trajopt and torch.count_nonzero(traj_result.success) > 0:
with profiler.record_function("motion_gen/finetune_trajopt"):
@@ -2113,6 +2114,9 @@ class MotionGen(MotionGenConfig):
traj_result.solve_time = og_solve_time
if self.store_debug_in_result:
result.debug_info["finetune_trajopt_result"] = traj_result
elif plan_config.enable_finetune_trajopt:
traj_result.success = traj_result.success[:, 0]
result.success = traj_result.success
result.interpolated_plan = traj_result.interpolated_solution
@@ -2190,7 +2194,7 @@ class MotionGen(MotionGenConfig):
# warm up js_trajopt:
goal_state = start_state.clone()
goal_state.position[..., warmup_joint_index] += warmup_joint_delta
for _ in range(2):
for _ in range(3):
self.plan_single_js(start_state, goal_state, MotionGenPlanConfig(max_attempts=1))
if enable_graph:
start_state = JointState.from_position(
@@ -2214,7 +2218,7 @@ class MotionGen(MotionGenConfig):
if n_goalset == -1:
retract_pose = Pose(state.ee_pos_seq, quaternion=state.ee_quat_seq)
start_state.position[..., warmup_joint_index] += warmup_joint_delta
for _ in range(2):
for _ in range(3):
self.plan_single(
start_state,
retract_pose,
@@ -2243,7 +2247,7 @@ class MotionGen(MotionGenConfig):
quaternion=state.ee_quat_seq.repeat(n_goalset, 1).view(1, n_goalset, 4),
)
start_state.position[..., warmup_joint_index] += warmup_joint_delta
for _ in range(2):
for _ in range(3):
self.plan_goalset(
start_state,
retract_pose,
@@ -2278,7 +2282,7 @@ class MotionGen(MotionGenConfig):
retract_pose = Pose(state.ee_pos_seq, quaternion=state.ee_quat_seq)
start_state.position[..., warmup_joint_index] += warmup_joint_delta
for _ in range(2):
for _ in range(3):
if batch_env_mode:
self.plan_batch_env(
start_state,
@@ -2307,7 +2311,7 @@ class MotionGen(MotionGenConfig):
.contiguous(),
)
start_state.position[..., warmup_joint_index] += warmup_joint_delta
for _ in range(2):
for _ in range(3):
if batch_env_mode:
self.plan_batch_env_goalset(
start_state,

View File

@@ -41,6 +41,7 @@ from curobo.types.robot import JointState, RobotConfig
from curobo.types.tensor import T_BDOF, T_DOF, T_BValue_bool, T_BValue_float
from curobo.util.helpers import list_idx_if_not_none
from curobo.util.logger import log_error, log_info, log_warn
from curobo.util.torch_utils import get_torch_jit_decorator, is_torch_compile_available
from curobo.util.trajectory import (
InterpolateType,
calculate_dt_no_clamp,
@@ -877,24 +878,37 @@ class TrajOptSolver(TrajOptSolverConfig):
result.metrics.goalset_index = metrics.goalset_index
st_time = time.time()
feasible = torch.all(result.metrics.feasible, dim=-1)
if result.metrics.cspace_error is None and result.metrics.position_error is None:
raise log_error("convergence check requires either goal_pose or goal_state")
if result.metrics.position_error is not None:
converge = torch.logical_and(
result.metrics.position_error[..., -1] <= self.position_threshold,
result.metrics.rotation_error[..., -1] <= self.rotation_threshold,
)
elif result.metrics.cspace_error is not None:
converge = result.metrics.cspace_error[..., -1] <= self.cspace_threshold
else:
raise ValueError("convergence check requires either goal_pose or goal_state")
success = jit_feasible_success(
result.metrics.feasible,
result.metrics.position_error,
result.metrics.rotation_error,
result.metrics.cspace_error,
self.position_threshold,
self.rotation_threshold,
self.cspace_threshold,
)
if False:
feasible = torch.all(result.metrics.feasible, dim=-1)
success = torch.logical_and(feasible, converge)
if result.metrics.position_error is not None:
converge = torch.logical_and(
result.metrics.position_error[..., -1] <= self.position_threshold,
result.metrics.rotation_error[..., -1] <= self.rotation_threshold,
)
elif result.metrics.cspace_error is not None:
converge = result.metrics.cspace_error[..., -1] <= self.cspace_threshold
else:
raise ValueError("convergence check requires either goal_pose or goal_state")
success = torch.logical_and(feasible, converge)
if return_all_solutions:
traj_result = TrajResult(
success=success,
goal=goal,
solution=result.action.scale(self.solver_dt / opt_dt.view(-1, 1, 1)),
solution=result.action.scale_by_dt(self.solver_dt_tensor, opt_dt.view(-1, 1, 1)),
seed=seed_traj,
position_error=result.metrics.position_error,
rotation_error=result.metrics.rotation_error,
@@ -928,49 +942,86 @@ class TrajOptSolver(TrajOptSolverConfig):
)
with profiler.record_function("trajopt/best_select"):
success[~smooth_label] = False
# get the best solution:
if result.metrics.pose_error is not None:
convergence_error = result.metrics.pose_error[..., -1]
elif result.metrics.cspace_error is not None:
convergence_error = result.metrics.cspace_error[..., -1]
if True: # not get_torch_jit_decorator() == torch.jit.script:
# This only works if torch compile is available:
(
idx,
position_error,
rotation_error,
cspace_error,
goalset_index,
opt_dt,
success,
) = jit_trajopt_best_select(
success,
smooth_label,
result.metrics.cspace_error,
result.metrics.pose_error,
result.metrics.position_error,
result.metrics.rotation_error,
result.metrics.goalset_index,
result.metrics.cost,
smooth_cost,
batch_mode,
goal.batch,
num_seeds,
self._col,
opt_dt,
)
if batch_mode:
last_tstep = [last_tstep[i] for i in idx]
else:
last_tstep = [last_tstep[idx.item()]]
best_act_seq = result.action[idx]
best_raw_action = result.raw_action[idx]
interpolated_traj = interpolated_trajs[idx]
else:
raise ValueError("convergence check requires either goal_pose or goal_state")
running_cost = torch.mean(result.metrics.cost, dim=-1) * 0.0001
error = convergence_error + smooth_cost + running_cost
error[~success] += 10000.0
if batch_mode:
idx = torch.argmin(error.view(goal.batch, num_seeds), dim=-1)
idx = idx + num_seeds * self._col
last_tstep = [last_tstep[i] for i in idx]
success = success[idx]
else:
idx = torch.argmin(error, dim=0)
success[~smooth_label] = False
# get the best solution:
if result.metrics.pose_error is not None:
convergence_error = result.metrics.pose_error[..., -1]
elif result.metrics.cspace_error is not None:
convergence_error = result.metrics.cspace_error[..., -1]
else:
raise ValueError(
"convergence check requires either goal_pose or goal_state"
)
running_cost = torch.mean(result.metrics.cost, dim=-1) * 0.0001
error = convergence_error + smooth_cost + running_cost
error[~success] += 10000.0
if batch_mode:
idx = torch.argmin(error.view(goal.batch, num_seeds), dim=-1)
idx = idx + num_seeds * self._col
last_tstep = [last_tstep[i] for i in idx]
success = success[idx]
else:
idx = torch.argmin(error, dim=0)
last_tstep = [last_tstep[idx.item()]]
success = success[idx : idx + 1]
last_tstep = [last_tstep[idx.item()]]
success = success[idx : idx + 1]
best_act_seq = result.action[idx]
best_raw_action = result.raw_action[idx]
interpolated_traj = interpolated_trajs[idx]
goalset_index = position_error = rotation_error = cspace_error = None
if result.metrics.position_error is not None:
position_error = result.metrics.position_error[idx, -1]
if result.metrics.rotation_error is not None:
rotation_error = result.metrics.rotation_error[idx, -1]
if result.metrics.cspace_error is not None:
cspace_error = result.metrics.cspace_error[idx, -1]
if result.metrics.goalset_index is not None:
goalset_index = result.metrics.goalset_index[idx, -1]
best_act_seq = result.action[idx]
best_raw_action = result.raw_action[idx]
interpolated_traj = interpolated_trajs[idx]
goalset_index = position_error = rotation_error = cspace_error = None
if result.metrics.position_error is not None:
position_error = result.metrics.position_error[idx, -1]
if result.metrics.rotation_error is not None:
rotation_error = result.metrics.rotation_error[idx, -1]
if result.metrics.cspace_error is not None:
cspace_error = result.metrics.cspace_error[idx, -1]
if result.metrics.goalset_index is not None:
goalset_index = result.metrics.goalset_index[idx, -1]
opt_dt = opt_dt[idx]
opt_dt = opt_dt[idx]
if self.sync_cuda_time:
torch.cuda.synchronize()
if len(best_act_seq.shape) == 3:
opt_dt_v = opt_dt.view(-1, 1, 1)
else:
opt_dt_v = opt_dt.view(1, 1)
opt_solution = best_act_seq.scale(self.solver_dt / opt_dt_v)
opt_solution = best_act_seq.scale_by_dt(self.solver_dt_tensor, opt_dt_v)
select_time = time.time() - st_time
debug_info = None
if self.store_debug_in_result:
@@ -1174,7 +1225,7 @@ class TrajOptSolver(TrajOptSolverConfig):
self._max_joint_vel,
self._max_joint_acc,
self._max_joint_jerk,
self.solver_dt,
self.solver_dt_tensor,
kind=self.interpolation_type,
tensor_args=self.tensor_args,
out_traj_state=self._interpolated_traj_buffer,
@@ -1224,7 +1275,12 @@ class TrajOptSolver(TrajOptSolverConfig):
@property
def solver_dt(self):
return self.solver.safety_rollout.dynamics_model.dt_traj_params.base_dt
return self.solver.safety_rollout.dynamics_model.traj_dt[0]
# return self.solver.safety_rollout.dynamics_model.dt_traj_params.base_dt
@property
def solver_dt_tensor(self):
return self.solver.safety_rollout.dynamics_model.traj_dt[0]
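
solver_dt now returns traj_dt[0], a device tensor, rather than the Python float base_dt, and the new solver_dt_tensor property names that explicitly; the scale_by_dt call sites above suggest the point is to keep trajectory retiming on-device instead of mixing in a host-side scalar.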
def update_solver_dt(
self,
@@ -1254,3 +1310,79 @@ class TrajOptSolver(TrajOptSolverConfig):
for rollout in rollouts
if isinstance(rollout, ArmReacher)
]
@get_torch_jit_decorator()
def jit_feasible_success(
feasible,
position_error: Union[torch.Tensor, None],
rotation_error: Union[torch.Tensor, None],
cspace_error: Union[torch.Tensor, None],
position_threshold: float,
rotation_threshold: float,
cspace_threshold: float,
):
feasible = torch.all(feasible, dim=-1)
converge = feasible
if position_error is not None and rotation_error is not None:
converge = torch.logical_and(
position_error[..., -1] <= position_threshold,
rotation_error[..., -1] <= rotation_threshold,
)
elif cspace_error is not None:
converge = cspace_error[..., -1] <= cspace_threshold
success = torch.logical_and(feasible, converge)
return success
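
jit_feasible_success folds the previous inline convergence branch into a single jittable function: feasibility is reduced over the horizon, then combined with the pose-error or cspace-error test, whichever goal type is present. A toy call, assuming the function as defined above and [batch, horizon] error tensors (numbers hypothetical):

    import torch

    feasible = torch.tensor([[True, True], [True, False]])
    position_error = torch.tensor([[0.020, 0.004], [0.020, 0.004]])
    rotation_error = torch.tensor([[0.100, 0.010], [0.100, 0.010]])

    success = jit_feasible_success(
        feasible, position_error, rotation_error, None,
        position_threshold=0.005, rotation_threshold=0.05, cspace_threshold=0.01,
    )
    print(success)  # tensor([ True, False]): batch 1 converges but is infeasible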
@get_torch_jit_decorator(only_valid_for_compile=True)
def jit_trajopt_best_select(
success,
smooth_label,
cspace_error: Union[torch.Tensor, None],
pose_error: Union[torch.Tensor, None],
position_error: Union[torch.Tensor, None],
rotation_error: Union[torch.Tensor, None],
goalset_index: Union[torch.Tensor, None],
cost,
smooth_cost,
batch_mode: bool,
batch: int,
num_seeds: int,
col,
opt_dt,
):
success[~smooth_label] = False
convergence_error = 0
# get the best solution:
if pose_error is not None:
convergence_error = pose_error[..., -1]
elif cspace_error is not None:
convergence_error = cspace_error[..., -1]
running_cost = torch.mean(cost, dim=-1) * 0.0001
error = convergence_error + smooth_cost + running_cost
error[~success] += 10000.0
if batch_mode:
idx = torch.argmin(error.view(batch, num_seeds), dim=-1)
idx = idx + num_seeds * col
success = success[idx]
else:
idx = torch.argmin(error, dim=0)
success = success[idx : idx + 1]
# goalset_index = position_error = rotation_error = cspace_error = None
if position_error is not None:
position_error = position_error[idx, -1]
if rotation_error is not None:
rotation_error = rotation_error[idx, -1]
if cspace_error is not None:
cspace_error = cspace_error[idx, -1]
if goalset_index is not None:
goalset_index = goalset_index[idx, -1]
opt_dt = opt_dt[idx]
return idx, position_error, rotation_error, cspace_error, goalset_index, opt_dt, success
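
jit_trajopt_best_select fuses best-seed selection into one jittable unit: each seed is scored by its final convergence error plus the smoothness cost plus a small running-cost tiebreaker, failed seeds take a +10000 penalty, and argmin picks the winner per problem (offset by num_seeds * col to index the flat seed dimension in batch mode). The scoring core, reduced to a standalone toy (all numbers hypothetical):

    import torch

    batch, num_seeds = 2, 3
    convergence_error = torch.tensor([[0.01, 0.30, 0.05],
                                      [0.20, 0.02, 0.40]])
    smooth_cost = torch.tensor([[1.0, 0.5, 2.0],
                                [0.5, 1.5, 0.5]])
    running_cost = torch.tensor([[3.0, 1.0, 2.0],
                                 [2.0, 4.0, 1.0]]) * 0.0001
    success = torch.tensor([[True, True, False],
                            [False, True, True]])

    error = convergence_error + smooth_cost + running_cost
    error[~success] += 10000.0                  # push failed seeds out of contention
    idx = torch.argmin(error.view(batch, num_seeds), dim=-1)
    col = torch.arange(batch)                   # stand-in for self._col
    flat_idx = idx + num_seeds * col
    print(idx, flat_idx)  # tensor([1, 2]) tensor([1, 5]): best seed per problem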