Improved precision, quality and js planner.

This commit is contained in:
Balakumar Sundaralingam
2024-04-11 13:19:01 -07:00
parent 774dcfd609
commit d6e600c88c
51 changed files with 2128 additions and 1054 deletions

View File

@@ -10,6 +10,19 @@
#
"""
cuRobo provides accelerated modules for robotics which can be used to build high-performance
robotics applications. The library has several modules for numerical optimization, robot kinematics,
geometry processing, collision checking, and graph search planning. cuRobo provides high-level APIs for
performing tasks like collision-free inverse kinematics, model predictive control, and motion
planning.
High-level APIs:
- Inverse Kinematics: :mod:`curobo.wrap.reacher.ik_solver`.
- Model Predictive Control: :mod:`curobo.wrap.reacher.mpc`.
- Motion Generation / Planning: :mod:`curobo.wrap.reacher.motion_gen`.
cuRobo package is split into several modules:
- :mod:`curobo.opt` contains optimization solvers.
@@ -18,7 +31,7 @@ cuRobo package is split into several modules:
- :mod:`curobo.geom` contains geometry processing, collision checking and frame transforms.
- :mod:`curobo.graph` contains geometric planning with graph search methods.
- :mod:`curobo.rollout` contains methods that map actions to costs. This class wraps instances of
:mod:`curobo.cuda_robot_model` and :mod:`geom` to compute costs given trajectory of actions.
:mod:`curobo.cuda_robot_model` and :mod:`curobo.geom` to compute costs given trajectory of actions.
- :mod:`curobo.util` contains utility methods.
- :mod:`curobo.wrap` adds the user-level api for task programming. Includes implementation of
collision-free reacher and batched robot world collision checking.

View File

@@ -29,7 +29,7 @@ robot_cfg:
collision_link_names: null # List[str]
collision_spheres: null #
collision_sphere_buffer: 0.005
collision_sphere_buffer: 0.005 # float or Dict[str, float]
extra_collision_spheres: {}
self_collision_ignore: {} # Dict[str, List[str]]
self_collision_buffer: {} # Dict[str, float]

View File

@@ -11,7 +11,7 @@
robot_cfg:
kinematics:
usd_path: "robot/ur_description/ur5e.usd"
usd_path: null
usd_robot_root: "/robot"
isaac_usd_path: ""
usd_flip_joints: {}
@@ -95,10 +95,11 @@ robot_cfg:
"radius": -0.01 #0.05
collision_sphere_buffer: 0.005
collision_sphere_buffer: 0.0
extra_collision_spheres: {}
self_collision_ignore: {
"upper_arm_link": ["forearm_link", "shoulder_link"],
"shoulder_link": ["tool0"],
"upper_arm_link": ["forearm_link", "shoulder_link", "tool0"],
"forearm_link": ["wrist_1_link", "wrist_2_link", "wrist_3_link"],
"wrist_1_link": ["wrist_2_link","wrist_3_link","tool0"],
"wrist_2_link": ["wrist_3_link", "tool0"],

View File

@@ -43,6 +43,7 @@ constraint:
classify: True
bound_cfg:
weight: [5000.0, 5000.0, 5000.0,5000.0]
activation_distance: [0.0,0.0,0.0,0.0] # for position, velocity, acceleration and jerk
@@ -54,14 +55,14 @@ convergence:
terminal: False
link_pose_cfg:
vec_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
weight: [1.0, 1.0]
weight: [0.1, 10.0]
vec_convergence: [0.0, 0.0] # orientation, position
terminal: False
cspace_cfg:
weight: 1.0
terminal: True
run_weight: 1.0
run_weight: 0.0
null_space_cfg:
weight: 1.0
terminal: True

View File

@@ -32,7 +32,7 @@ cost:
pose_cfg:
vec_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] # orientation, position for all timesteps
run_vec_weight: [0.00,0.00,0.00,0.0,0.0,0.0] # running weight orientation, position
weight: [2000,500000.0,30,50]
weight: [2000,500000.0,30,60]
vec_convergence: [0.0,0.0] # orientation, position, orientation metric activation, position metric activation
terminal: True
run_weight: 1.0
@@ -41,10 +41,10 @@ cost:
link_pose_cfg:
vec_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] # orientation, position for all timesteps
run_vec_weight: [0.00,0.00,0.00,0.0,0.0,0.0] # running weight orientation, position
weight: [2000,500000.0,30,50] #[150.0, 2000.0, 30, 40]
weight: [2000,500000.0,30,60] #[150.0, 2000.0, 30, 40]
vec_convergence: [0.0,0.0] # orientation, position, orientation metric activation, position metric activation
terminal: True
run_weight: 0.001
run_weight: 1.0
use_metric: True
@@ -54,8 +54,8 @@ cost:
run_weight: 0.00 #1
bound_cfg:
weight: [5000.0, 50000.0, 50000.0, 50000.0] # needs to be 3 values
smooth_weight: [0.0,5000.0, 50.0, 0.0] #[0.0,5000.0, 50.0, 0.0] # [vel, acc, jerk,]
weight: [10000.0, 500000.0, 500.0, 500.0]
smooth_weight: [0.0,10000.0, 5.0, 0.0]
run_weight_velocity: 0.0
run_weight_acceleration: 1.0
run_weight_jerk: 1.0
@@ -63,7 +63,7 @@ cost:
null_space_weight: [0.0]
primitive_collision_cfg:
weight: 1000000.0 #1000000.0 1000000
weight: 1000000
use_sweep: True
sweep_steps: 4
classify: False
@@ -79,11 +79,11 @@ cost:
lbfgs:
n_iters: 300 # 400
n_iters: 300
inner_iters: 25
cold_start_n_iters: null
min_iters: 25
line_search_scale: [0.01, 0.3, 0.7,1.0] # #
line_search_scale: [0.1, 0.3, 0.7,1.0] # #
fixed_iters: True
cost_convergence: 0.01
cost_delta_threshold: 1.0

View File

@@ -54,10 +54,8 @@ cost:
run_weight: 0.00 #1
bound_cfg:
weight: [5000.0, 50000.0, 50000.0, 50000.0] # needs to be 3 values
smooth_weight: [0.0,50000.0, 500.0, 0.0] # [vel, acc, jerk,]
weight: [10000.0, 100000.0, 500.0, 500.0]
smooth_weight: [0.0,10000.0, 50.0, 0.0]
run_weight_velocity: 0.0
run_weight_acceleration: 1.0
run_weight_jerk: 1.0

View File

@@ -28,24 +28,24 @@ model:
cost:
pose_cfg:
vec_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
weight: [200,4000,10,20]
#weight: [5000,500000,5,20]
weight: [2000,10000,20,40]
vec_convergence: [0.0, 0.00]
terminal: False
use_metric: True
run_weight: 1.0
link_pose_cfg:
vec_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
weight: [200,4000,10,20]
weight: [2000,10000,20,40]
vec_convergence: [0.00, 0.000]
terminal: False
use_metric: True
run_weight: 1.0
cspace_cfg:
weight: 0.000
bound_cfg:
weight: 100.0
weight: 500.0
activation_distance: [0.1]
null_space_weight: [1.0]
primitive_collision_cfg:
@@ -62,7 +62,7 @@ lbfgs:
n_iters: 100 #60
inner_iters: 25
cold_start_n_iters: null
min_iters: 25
min_iters: null
line_search_scale: [0.1, 0.3, 0.7, 1.0]
fixed_iters: True
cost_convergence: 0.001

View File

@@ -33,7 +33,6 @@ cost:
vec_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] # orientation, position for all timesteps
run_vec_weight: [0.00,0.00,0.00,0.0,0.0,0.0] # running weight orientation, position
weight: [2000,50000.0,30,50] #[150.0, 2000.0, 30, 40]
vec_convergence: [0.0,0.0] # orientation, position, orientation metric activation, position metric activation
terminal: True
run_weight: 1.0
@@ -45,17 +44,17 @@ cost:
weight: [2000,50000.0,30,50] #[150.0, 2000.0, 30, 40]
vec_convergence: [0.0,0.0] # orientation, position, orientation metric activation, position metric activation
terminal: True
run_weight: 0.001
run_weight: 1.0
use_metric: True
cspace_cfg:
weight: 10000.0
weight: 1000.0
terminal: True
run_weight: 0.00 #1
run_weight: 0.0
bound_cfg:
weight: [5000.0, 50000.0, 50000.0,50000.0] # needs to be 3 values
smooth_weight: [0.0,10000.0,10.0, 0.0] # [vel, acc, jerk, alpha_v-not-used]
weight: [50000.0, 50000.0, 50000.0,50000.0]
smooth_weight: [0.0,10000.0,5.0, 0.0]
run_weight_velocity: 0.00
run_weight_acceleration: 1.0
run_weight_jerk: 1.0
@@ -84,7 +83,7 @@ lbfgs:
inner_iters: 25
cold_start_n_iters: null
min_iters: 25
line_search_scale: [0.01,0.3,0.7,1.0]
line_search_scale: [0.1,0.3,0.7,1.0]
fixed_iters: True
cost_convergence: 0.01
cost_delta_threshold: 2000.0

View File

@@ -38,7 +38,7 @@ cost:
use_metric: True
cspace_cfg:
weight: 500.0
weight: 1000.0
terminal: True
run_weight: 1.0

View File

@@ -34,7 +34,7 @@ cost:
weight: [250.0, 5000.0, 20, 20]
vec_convergence: [0.0,0.0,1000.0,1000.0]
terminal: True
run_weight: 1.0
run_weight: 0.0
use_metric: True
link_pose_cfg:

View File

@@ -73,7 +73,7 @@ class CudaRobotGeneratorConfig:
collision_spheres: Union[None, str, Dict[str, Any]] = None
#: Radius buffer to add to collision spheres as padding.
collision_sphere_buffer: float = 0.0
collision_sphere_buffer: Union[float, Dict[str, float]] = 0.0
#: Compute jacobian of link poses. Currently not supported.
compute_jacobian: bool = False
@@ -436,7 +436,6 @@ class CudaRobotGenerator(CudaRobotGeneratorConfig):
if self._bodies[0].link_name in link_names:
store_link_map.append(chain_link_names.index(self._bodies[0].link_name))
ordered_link_names.append(self._bodies[0].link_name)
joint_offset_map.append(self._bodies[0].joint_offset)
# get joint types:
for i in range(1, len(self._bodies)):
body = self._bodies[i]
@@ -614,7 +613,6 @@ class CudaRobotGenerator(CudaRobotGeneratorConfig):
self.joint_names.remove(j)
self._n_dofs -= 1
self._active_joints.remove(i)
self._joint_map[self._joint_map < -1] = -1
self._joint_map = self._joint_map.to(device=self.tensor_args.device)
self._joint_map_type = self._joint_map_type.to(device=self.tensor_args.device)
@@ -628,7 +626,7 @@ class CudaRobotGenerator(CudaRobotGeneratorConfig):
self,
collision_spheres: Dict,
collision_link_names: List[str],
collision_sphere_buffer: float = 0.0,
collision_sphere_buffer: Union[float, Dict[str, float]] = 0.0,
):
"""
@@ -643,6 +641,7 @@ class CudaRobotGenerator(CudaRobotGeneratorConfig):
# we store as [n_link, 7]
link_sphere_idx_map = []
cpu_tensor_args = self.tensor_args.cpu()
self_collision_buffer = self.self_collision_buffer.copy()
with profiler.record_function("robot_generator/build_collision_spheres"):
for j_idx, j in enumerate(collision_link_names):
# print(j_idx)
@@ -652,11 +651,22 @@ class CudaRobotGenerator(CudaRobotGeneratorConfig):
)
# find link index in global map:
l_idx = self._name_to_idx_map[j]
offset_radius = 0.0
if isinstance(collision_sphere_buffer, float):
offset_radius = collision_sphere_buffer
elif j in collision_sphere_buffer:
offset_radius = collision_sphere_buffer[j]
if j in self_collision_buffer:
self_collision_buffer[j] -= offset_radius
else:
self_collision_buffer[j] = -offset_radius
for i in range(n_spheres):
padded_radius = collision_spheres[j][i]["radius"] + offset_radius
if padded_radius <= 0.0 and padded_radius > -1.0:
padded_radius = 0.001
link_spheres[i, :] = tensor_sphere(
collision_spheres[j][i]["center"],
collision_spheres[j][i]["radius"],
padded_radius,
tensor_args=cpu_tensor_args,
tensor=link_spheres[i, :],
)
@@ -665,10 +675,6 @@ class CudaRobotGenerator(CudaRobotGeneratorConfig):
self.total_spheres += n_spheres
self._link_spheres_tensor = torch.cat(coll_link_spheres, dim=0)
new_radius = self._link_spheres_tensor[..., 3] + collision_sphere_buffer
flag = torch.logical_and(new_radius > -1.0, new_radius <= 0.0)
new_radius[flag] = 0.001
self._link_spheres_tensor[:, 3] = new_radius
self._link_sphere_idx_map = torch.as_tensor(
link_sphere_idx_map, dtype=torch.int16, device=cpu_tensor_args.device
)
@@ -696,9 +702,9 @@ class CudaRobotGenerator(CudaRobotGeneratorConfig):
link1_spheres_idx = torch.nonzero(self._link_sphere_idx_map == link1_idx)
rad1 = self._link_spheres_tensor[link1_spheres_idx, 3]
if j not in self.self_collision_buffer.keys():
self.self_collision_buffer[j] = 0.0
c1 = self.self_collision_buffer[j]
if j not in self_collision_buffer.keys():
self_collision_buffer[j] = 0.0
c1 = self_collision_buffer[j]
self.self_collision_offset[link1_spheres_idx] = c1
for _, i_name in enumerate(collision_link_names):
if i_name == j or i_name in ignore_links:
@@ -706,9 +712,9 @@ class CudaRobotGenerator(CudaRobotGeneratorConfig):
if i_name not in collision_link_names:
log_error("Self Collision Link name not found in collision_link_names")
# find index of this link name:
if i_name not in self.self_collision_buffer.keys():
self.self_collision_buffer[i_name] = 0.0
c2 = self.self_collision_buffer[i_name]
if i_name not in self_collision_buffer.keys():
self_collision_buffer[i_name] = 0.0
c2 = self_collision_buffer[i_name]
link2_idx = self._name_to_idx_map[i_name]
# update collision distance between spheres from these two links:
link2_spheres_idx = torch.nonzero(self._link_sphere_idx_map == link2_idx)

View File

@@ -143,6 +143,10 @@ class CudaRobotModelConfig:
def cspace(self):
return self.kinematics_config.cspace
@property
def dof(self) -> int:
return self.kinematics_config.n_dof
class CudaRobotModel(CudaRobotModelConfig):
"""
@@ -368,6 +372,10 @@ class CudaRobotModel(CudaRobotModelConfig):
def get_dof(self) -> int:
return self.kinematics_config.n_dof
@property
def dof(self) -> int:
return self.kinematics_config.n_dof
@property
def joint_names(self) -> List[str]:
return self.kinematics_config.joint_names

View File

@@ -68,12 +68,12 @@ class UrdfKinematicsParser(KinematicsParser):
"velocity": joint.limit.velocity,
}
else:
log_warn("Converting continuous joint to revolute with limits[-10,10]")
log_warn("Converting continuous joint to revolute with limits[-6.28,6.28]")
joint_type = "revolute"
joint_limits = {
"effort": joint.limit.effort,
"lower": -10,
"upper": 10,
"lower": -3.14 * 2,
"upper": 3.14 * 2,
"velocity": joint.limit.velocity,
}
return joint_limits, joint_type
@@ -181,6 +181,7 @@ class UrdfKinematicsParser(KinematicsParser):
log_error("Joint type not supported")
if joint_axis[0] == -1 or joint_axis[1] == -1 or joint_axis[2] == -1:
joint_offset[0] = -1.0 * joint_offset[0]
joint_axis = [abs(x) for x in joint_axis]
body_params["joint_type"] = joint_type
body_params["joint_offset"] = joint_offset

View File

@@ -18,4 +18,12 @@
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
#define CHECK_FP8 defined(CUDA_VERSION) && CUDA_VERSION >= 11080 && TORCH_VERSION_MAJOR >= 2 && TORCH_VERSION_MINOR >= 2
#define CHECK_INPUT_GUARD(x) CHECK_INPUT(x); const at::cuda::OptionalCUDAGuard guard(x.device())
#define CHECK_INPUT_GUARD(x) CHECK_INPUT(x); const at::cuda::OptionalCUDAGuard guard(x.device())
#if CHECK_FP8
#define FP8_TYPE_MACRO torch::kFloat8_e4m3fn
//constexpr const auto fp8_type = torch::kFloat8_e4m3fn;
#else
#define FP8_TYPE_MACRO torch::kHalf
//const constexpr auto fp8_type = torch::kHalf;
#endif

View File

@@ -1341,13 +1341,14 @@ std::vector<torch::Tensor>kin_fused_backward_16t(
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
assert(sparsity_opt);
const bool parallel_write = true;
if (use_global_cumul)
{
if (n_joints < 16)
{
AT_DISPATCH_FLOATING_TYPES(
grad_spheres.scalar_type(), "kin_fused_backward_16t", ([&] {
kin_fused_backward_kernel3<scalar_t, double, true, true, 16, true>
kin_fused_backward_kernel3<scalar_t, double, true, true, 16, parallel_write>
<< < blocksPerGrid, threadsPerBlock, sharedMemSize, stream >> > (
grad_out.data_ptr<float>(),
grad_nlinks_pos.data_ptr<float>(),
@@ -1371,7 +1372,7 @@ std::vector<torch::Tensor>kin_fused_backward_16t(
{
AT_DISPATCH_FLOATING_TYPES(
grad_spheres.scalar_type(), "kin_fused_backward_16t", ([&] {
kin_fused_backward_kernel3<scalar_t, double, true, true, 64, true>
kin_fused_backward_kernel3<scalar_t, double, true, true, 64, parallel_write>
<< < blocksPerGrid, threadsPerBlock, sharedMemSize, stream >> > (
grad_out.data_ptr<float>(),
grad_nlinks_pos.data_ptr<float>(),
@@ -1395,7 +1396,7 @@ std::vector<torch::Tensor>kin_fused_backward_16t(
{
AT_DISPATCH_FLOATING_TYPES(
grad_spheres.scalar_type(), "kin_fused_backward_16t", ([&] {
kin_fused_backward_kernel3<scalar_t, double, true, true, 128, true>
kin_fused_backward_kernel3<scalar_t, double, true, true, 128, parallel_write>
<< < blocksPerGrid, threadsPerBlock, sharedMemSize, stream >> > (
grad_out.data_ptr<float>(),
grad_nlinks_pos.data_ptr<float>(),
@@ -1423,7 +1424,7 @@ std::vector<torch::Tensor>kin_fused_backward_16t(
//
AT_DISPATCH_FLOATING_TYPES(
grad_spheres.scalar_type(), "kin_fused_backward_16t", ([&] {
kin_fused_backward_kernel3<scalar_t, double, false, true, 128, true>
kin_fused_backward_kernel3<scalar_t, double, false, true, 128, parallel_write>
<< < blocksPerGrid, threadsPerBlock, sharedMemSize, stream >> > (
grad_out.data_ptr<float>(),
grad_nlinks_pos.data_ptr<float>(),

View File

@@ -481,7 +481,7 @@ namespace Curobo
}
delta = make_float3(pt.x - sphere.x, pt.y - sphere.y, pt.z - sphere.z);
@@ -2746,13 +2746,8 @@ sphere_obb_clpt(const torch::Tensor sphere_position, // batch_size, 3
else
{
#if CHECK_FP8
const auto fp8_type = torch::kFloat8_e4m3fn;
#else
const auto fp8_type = torch::kHalf;
#endif
// typename scalar_t, typename dist_scalar_t=float, bool BATCH_ENV_T=true, bool SCALE_METRIC=true, bool SUM_COLLISIONS=true, bool COMPUTE_ESDF=false
AT_DISPATCH_FLOATING_TYPES_AND2(torch::kBFloat16, fp8_type,
AT_DISPATCH_FLOATING_TYPES_AND2(torch::kBFloat16, FP8_TYPE_MACRO,
distance.scalar_type(), "SphereObb_clpt", ([&]{
auto distance_kernel = sphere_obb_distance_kernel<scalar_t, scalar_t, false, scale_metric, sum_collisions_,false>;
if (use_batch_env)
@@ -3211,13 +3206,8 @@ sphere_voxel_clpt(const torch::Tensor sphere_position, // batch_size, 3
int blocksPerGrid = (bnh_spheres + threadsPerBlock - 1) / threadsPerBlock;
#if CHECK_FP8
const auto fp8_type = torch::kFloat8_e4m3fn;
#else
const auto fp8_type = torch::kHalf;
#endif
AT_DISPATCH_FLOATING_TYPES_AND2(torch::kBFloat16, fp8_type,
AT_DISPATCH_FLOATING_TYPES_AND2(torch::kBFloat16, FP8_TYPE_MACRO,
grid_features.scalar_type(), "SphereVoxel_clpt", ([&]
{
@@ -3255,7 +3245,6 @@ sphere_voxel_clpt(const torch::Tensor sphere_position, // batch_size, 3
}
}
selected_kernel
<< < blocksPerGrid, threadsPerBlock, 0, stream >> > (
sphere_position.data_ptr<float>(),
@@ -3275,7 +3264,6 @@ sphere_voxel_clpt(const torch::Tensor sphere_position, // batch_size, 3
num_voxels,
batch_size,
horizon, n_spheres, transform_back);
}));
@@ -3346,13 +3334,7 @@ swept_sphere_voxel_clpt(const torch::Tensor sphere_position, // batch_size, 3
int blocksPerGrid = (bnh_spheres + threadsPerBlock - 1) / threadsPerBlock;
#if CHECK_FP8
const auto fp8_type = torch::kFloat8_e4m3fn;
#else
const auto fp8_type = torch::kHalf;
#endif
AT_DISPATCH_FLOATING_TYPES_AND2(torch::kBFloat16, fp8_type,
AT_DISPATCH_FLOATING_TYPES_AND2(torch::kBFloat16, FP8_TYPE_MACRO,
grid_features.scalar_type(), "SphereVoxel_clpt", ([&] {
auto collision_kernel_n = swept_sphere_voxel_distance_jump_kernel<scalar_t, float, float, false, scale_metric, true, false, 100>;

View File

@@ -525,7 +525,7 @@ class SdfMeshWarpPy(torch.autograd.Function):
if env_query_idx is None:
use_batch_env = False
env_query_idx = n_env_mesh
requires_grad = query_spheres.requires_grad
wp.launch(
kernel=get_closest_pt_batch_env,
dim=b * h * n,
@@ -541,7 +541,7 @@ class SdfMeshWarpPy(torch.autograd.Function):
wp.from_torch(mesh_enable.view(-1), dtype=wp.uint8),
wp.from_torch(n_env_mesh.view(-1), dtype=wp.int32),
wp.from_torch(max_dist, dtype=wp.float32),
query_spheres.requires_grad,
requires_grad,
b,
h,
n,
@@ -608,6 +608,7 @@ class SweptSdfMeshWarpPy(torch.autograd.Function):
if env_query_idx is None:
use_batch_env = False
env_query_idx = n_env_mesh
requires_grad = query_spheres.requires_grad
wp.launch(
kernel=get_swept_closest_pt_batch_env,
@@ -625,7 +626,7 @@ class SweptSdfMeshWarpPy(torch.autograd.Function):
wp.from_torch(mesh_enable.view(-1), dtype=wp.uint8),
wp.from_torch(n_env_mesh.view(-1), dtype=wp.int32),
wp.from_torch(max_dist, dtype=wp.float32),
query_spheres.requires_grad,
requires_grad,
b,
h,
n,

View File

@@ -42,6 +42,7 @@ class WorldVoxelCollision(WorldMeshCollision):
and self.cache["voxel"] not in [None, 0]
):
self._create_voxel_cache(self.cache["voxel"])
return super()._init_cache()
def _create_voxel_cache(self, voxel_cache: Dict[str, Any]):
n_layers = voxel_cache["layers"]
@@ -699,7 +700,6 @@ class WorldVoxelCollision(WorldMeshCollision):
- self.max_esdf_distance
).to(dtype=self._voxel_tensor_list[3].dtype)
self._env_n_voxels[:] = 0
print(self._voxel_tensor_list)
def get_voxel_grid_shape(self, env_idx: int = 0, obs_idx: int = 0):
return self._voxel_tensor_list[3][env_idx, obs_idx].shape

View File

@@ -355,6 +355,8 @@ class WarpBoundSmoothFunction(torch.autograd.Function):
wp_device = wp.device_from_torch(vel.device)
# assert smooth_weight.shape[0] == 7
b, h, dof = vel.shape
requires_grad = pos.requires_grad
wp.launch(
kernel=forward_bound_smooth_warp,
dim=b * h * dof,
@@ -383,7 +385,7 @@ class WarpBoundSmoothFunction(torch.autograd.Function):
wp.from_torch(out_gv.view(-1), dtype=wp.float32),
wp.from_torch(out_ga.view(-1), dtype=wp.float32),
wp.from_torch(out_gj.view(-1), dtype=wp.float32),
pos.requires_grad,
requires_grad,
b,
h,
dof,
@@ -471,6 +473,7 @@ class WarpBoundFunction(torch.autograd.Function):
):
wp_device = wp.device_from_torch(vel.device)
b, h, dof = vel.shape
requires_grad = pos.requires_grad
wp.launch(
kernel=forward_bound_warp,
dim=b * h * dof,
@@ -494,7 +497,7 @@ class WarpBoundFunction(torch.autograd.Function):
wp.from_torch(out_gv.view(-1), dtype=wp.float32),
wp.from_torch(out_ga.view(-1), dtype=wp.float32),
wp.from_torch(out_gj.view(-1), dtype=wp.float32),
pos.requires_grad,
requires_grad,
b,
h,
dof,
@@ -505,6 +508,7 @@ class WarpBoundFunction(torch.autograd.Function):
ctx.save_for_backward(out_gp, out_gv, out_ga, out_gj)
# out_c = out_cost
# out_c = torch.linalg.norm(out_cost, dim=-1)
out_c = torch.sum(out_cost, dim=-1)
return out_c
@@ -569,11 +573,11 @@ class WarpBoundPosFunction(torch.autograd.Function):
):
wp_device = wp.device_from_torch(pos.device)
b, h, dof = pos.shape
requires_grad = pos.requires_grad
wp.launch(
kernel=forward_bound_pos_warp,
dim=b * h * dof,
inputs=[
# wp.from_torch(pos.detach().view(-1).contiguous(), dtype=wp.float32),
wp.from_torch(pos.detach().view(-1), dtype=wp.float32),
wp.from_torch(retract_config.detach().view(-1), dtype=wp.float32),
wp.from_torch(retract_idx.detach().view(-1), dtype=wp.int32),
@@ -584,7 +588,7 @@ class WarpBoundPosFunction(torch.autograd.Function):
wp.from_torch(vec_weight.view(-1), dtype=wp.float32),
wp.from_torch(out_cost.view(-1), dtype=wp.float32),
wp.from_torch(out_gp.view(-1), dtype=wp.float32),
pos.requires_grad,
requires_grad,
b,
h,
dof,
@@ -685,23 +689,33 @@ def forward_bound_pos_warp(
c_total = n_w * error * error
g_p = 2.0 * n_w * error
# bound cost:
# if c_p < p_l:
# delta = p_l - c_p
# if (delta) > eta_p or eta_p == 0.0:
# c_total += w * (delta - 0.5 * eta_p)
# g_p += -w
# else:
# c_total += w * (0.5 / eta_p) * delta * delta
# g_p += -w * (1.0 / eta_p) * delta
# elif c_p > p_u:
# delta = c_p - p_u
# if (delta) > eta_p or eta_p == 0.0:
# c_total += w * (delta - 0.5 * eta_p)
# g_p += w
# else:
# c_total += w * (0.5 / eta_p) * delta * delta
# g_p += w * (1.0 / eta_p) * delta
# bound cost:
if c_p < p_l:
delta = p_l - c_p
if (delta) > eta_p or eta_p == 0.0:
c_total += w * (delta - 0.5 * eta_p)
g_p += -w
else:
c_total += w * (0.5 / eta_p) * delta * delta
g_p += -w * (1.0 / eta_p) * delta
delta = c_p - p_l
c_total += w * delta * delta
g_p += 2.0 * w * delta
elif c_p > p_u:
delta = c_p - p_u
if (delta) > eta_p or eta_p == 0.0:
c_total += w * (delta - 0.5 * eta_p)
g_p += w
else:
c_total += w * (0.5 / eta_p) * delta * delta
g_p += w * (1.0 / eta_p) * delta
c_total += w * delta * delta
g_p += 2.0 * w * delta
out_cost[b_addrs] = c_total
@@ -811,73 +825,43 @@ def forward_bound_warp(
g_p = 2.0 * n_w * error
# bound cost:
delta = 0.0
if c_p < p_l:
delta = p_l - c_p
if (delta) > eta_p or eta_p == 0.0:
c_total += w * (delta - 0.5 * eta_p)
g_p += -w
else:
c_total += w * (0.5 / eta_p) * delta * delta
g_p += -w * (1.0 / eta_p) * delta
delta = c_p - p_l
elif c_p > p_u:
delta = c_p - p_u
if (delta) > eta_p or eta_p == 0.0:
c_total += w * (delta - 0.5 * eta_p)
g_p += w
else:
c_total += w * (0.5 / eta_p) * delta * delta
g_p += w * (1.0 / eta_p) * delta
c_total += w * delta * delta
g_p += 2.0 * w * delta
# bound cost:
delta = 0.0
if c_v < v_l:
delta = v_l - c_v
if (delta) > eta_v or eta_v == 0.0:
c_total += b_wv * (delta - 0.5 * eta_v)
g_v = -b_wv
else:
c_total += b_wv * (0.5 / eta_v) * delta * delta
g_v = -b_wv * (1.0 / eta_v) * delta
delta = c_v - v_l
elif c_v > v_u:
delta = c_v - v_u
if (delta) > eta_v or eta_v == 0.0:
c_total += b_wv * (delta - 0.5 * eta_v)
g_v = b_wv
else:
c_total += b_wv * (0.5 / eta_v) * delta * delta
g_v = b_wv * (1.0 / eta_v) * delta
c_total += b_wv * delta * delta
g_v = 2.0 * b_wv * delta
delta = 0.0
if c_a < a_l:
delta = a_l - c_a
if (delta) > eta_a or eta_a == 0.0:
c_total += b_wa * (delta - 0.5 * eta_a)
g_a = -b_wa
else:
c_total += b_wa * (0.5 / eta_a) * delta * delta
g_a = -b_wa * (1.0 / eta_a) * delta
delta = c_a - a_l
elif c_a > a_u:
delta = c_a - a_u
if (delta) > eta_a or eta_a == 0.0:
c_total += b_wa * (delta - 0.5 * eta_a)
g_a = b_wa
else:
c_total += b_wa * (0.5 / eta_a) * delta * delta
g_a = b_wa * (1.0 / eta_a) * delta
c_total += b_wa * delta * delta
g_a = b_wa * 2.0 * delta
delta = 0.0
if c_j < j_l:
delta = j_l - c_j
if (delta) > eta_j or eta_j == 0.0:
c_total += b_wj * (delta - 0.5 * eta_j)
g_j = -b_wj
else:
c_total += b_wj * (0.5 / eta_j) * delta * delta
g_j = -b_wj * (1.0 / eta_j) * delta
delta = c_j - j_l
elif c_j > j_u:
delta = c_j - j_u
if (delta) > eta_j or eta_j == 0.0:
c_total += b_wj * (delta - 0.5 * eta_j)
g_j = b_wj
else:
c_total += b_wj * (0.5 / eta_j) * delta * delta
g_j = b_wj * (1.0 / eta_j) * delta
c_total += b_wj * delta * delta
g_j = b_wj * delta * 2.0
out_cost[b_addrs] = c_total
@@ -1031,75 +1015,45 @@ def forward_bound_smooth_warp(
g_p = 2.0 * n_w * error
# bound cost:
# bound cost:
delta = 0.0
if c_p < p_l:
delta = p_l - c_p
if (delta) > eta_p or eta_p == 0.0:
c_total += w * (delta - 0.5 * eta_p)
g_p += -w
else:
c_total += w * (0.5 / eta_p) * delta * delta
g_p += -w * (1.0 / eta_p) * delta
delta = c_p - p_l
elif c_p > p_u:
delta = c_p - p_u
if (delta) > eta_p or eta_p == 0.0:
c_total += w * (delta - 0.5 * eta_p)
g_p += w
else:
c_total += w * (0.5 / eta_p) * delta * delta
g_p += w * (1.0 / eta_p) * delta
c_total += w * delta * delta
g_p += 2.0 * w * delta
# bound cost:
delta = 0.0
if c_v < v_l:
delta = v_l - c_v
if (delta) > eta_v or eta_v == 0.0:
c_total += b_wv * (delta - 0.5 * eta_v)
g_v = -b_wv
else:
c_total += b_wv * (0.5 / eta_v) * delta * delta
g_v = -b_wv * (1.0 / eta_v) * delta
delta = c_v - v_l
elif c_v > v_u:
delta = c_v - v_u
if (delta) > eta_v or eta_v == 0.0:
c_total += b_wv * (delta - 0.5 * eta_v)
g_v = b_wv
else:
c_total += b_wv * (0.5 / eta_v) * delta * delta
g_v = b_wv * (1.0 / eta_v) * delta
c_total += b_wv * delta * delta
g_v = 2.0 * b_wv * delta
delta = 0.0
if c_a < a_l:
delta = a_l - c_a
if (delta) > eta_a or eta_a == 0.0:
c_total += b_wa * (delta - 0.5 * eta_a)
g_a = -b_wa
else:
c_total += b_wa * (0.5 / eta_a) * delta * delta
g_a = -b_wa * (1.0 / eta_a) * delta
delta = c_a - a_l
elif c_a > a_u:
delta = c_a - a_u
if (delta) > eta_a or eta_a == 0.0:
c_total += b_wa * (delta - 0.5 * eta_a)
g_a = b_wa
else:
c_total += b_wa * (0.5 / eta_a) * delta * delta
g_a = b_wa * (1.0 / eta_a) * delta
c_total += b_wa * delta * delta
g_a = b_wa * 2.0 * delta
delta = 0.0
if c_j < j_l:
delta = j_l - c_j
if (delta) > eta_j or eta_j == 0.0:
c_total += b_wj * (delta - 0.5 * eta_j)
g_j = -b_wj
else:
c_total += b_wj * (0.5 / eta_j) * delta * delta
g_j = -b_wj * (1.0 / eta_j) * delta
delta = c_j - j_l
elif c_j > j_u:
delta = c_j - j_u
if (delta) > eta_j or eta_j == 0.0:
c_total += b_wj * (delta - 0.5 * eta_j)
g_j = b_wj
else:
c_total += b_wj * (0.5 / eta_j) * delta * delta
g_j = b_wj * (1.0 / eta_j) * delta
# g_v = -1.0 * g_v
# g_a = -1.0 * g_a
# g_j = - 1.0
c_total += b_wj * delta * delta
g_j = b_wj * delta * 2.0
# do l2 regularization for velocity:
if r_wv < 1.0:
s_v = w_v * r_wv * c_v * c_v

View File

@@ -128,12 +128,12 @@ def forward_l2_warp(
target_p = target[target_id]
error = c_p - target_p
if r_w >= 1.0 and w > 100.0:
c_total = w * wp.log2(wp.cosh(50.0 * error))
g_p = w * 50.0 * wp.sinh(50.0 * error) / (wp.cosh(50.0 * error))
else:
c_total = w * error * error
g_p = 2.0 * w * error
# if r_w >= 1.0 and w > 100.0:
# c_total = w * wp.log2(wp.cosh(10.0 * error))
# g_p = w * 10.0 * wp.sinh(10.0 * error) / (wp.cosh(10.0 * error))
# else:
c_total = w * error * error
g_p = 2.0 * w * error
out_cost[b_addrs] = c_total
@@ -159,8 +159,7 @@ class L2DistFunction(torch.autograd.Function):
):
wp_device = wp.device_from_torch(pos.device)
b, h, dof = pos.shape
# print(target)
requires_grad = pos.requires_grad
wp.launch(
kernel=forward_l2_warp,
dim=b * h * dof,
@@ -173,7 +172,7 @@ class L2DistFunction(torch.autograd.Function):
wp.from_torch(vec_weight.view(-1), dtype=wp.float32),
wp.from_torch(out_cost_v.view(-1), dtype=wp.float32),
wp.from_torch(out_gp.view(-1), dtype=wp.float32),
pos.requires_grad,
requires_grad,
b,
h,
dof,
@@ -181,11 +180,8 @@ class L2DistFunction(torch.autograd.Function):
device=wp_device,
stream=wp.stream_from_torch(pos.device),
)
# cost = torch.linalg.norm(out_cost_v, dim=-1)
# if pos.requires_grad:
# out_gp = out_gp * torch.nan_to_num( 1.0/cost.unsqueeze(-1), 0.0)
cost = torch.sum(out_cost_v, dim=-1)
cost = torch.sum(out_cost_v, dim=-1)
ctx.save_for_backward(out_gp)
return cost
@@ -277,7 +273,11 @@ class DistCost(CostBase, DistCostConfig):
self._run_weight_vec[:, :-1] *= self.run_weight
cost = self._run_weight_vec * dist
if RETURN_GOAL_DIST:
return cost, dist / self.weight
dist_scale = torch.nan_to_num(
1.0 / torch.sqrt((self.weight * self._run_weight_vec)), 0.0
)
return cost, dist * dist_scale
return cost
def forward_target_idx(self, goal_vec, current_vec, goal_idx, RETURN_GOAL_DIST=False):
@@ -292,7 +292,6 @@ class DistCost(CostBase, DistCostConfig):
self._run_weight_vec[:, :-1] *= self.run_weight
else:
raise NotImplementedError("terminal flag needs to be set to true")
if self.dist_type == DistType.L2:
# print(goal_idx.shape, goal_vec.shape)
cost = L2DistFunction.apply(
@@ -306,11 +305,12 @@ class DistCost(CostBase, DistCostConfig):
self._out_cv_buffer,
self._out_g_buffer,
)
# cost = torch.linalg.norm(cost, dim=-1)
else:
raise NotImplementedError()
# print(cost.shape, cost[:,-1])
if RETURN_GOAL_DIST:
return cost, (cost / torch.sqrt((self.weight * self._run_weight_vec)))
dist_scale = torch.nan_to_num(
1.0 / torch.sqrt((self.weight * self._run_weight_vec)), 0.0
)
return cost, cost * dist_scale
return cost

View File

@@ -105,9 +105,9 @@ class PoseCostMetric:
@classmethod
def create_grasp_approach_metric(
cls,
offset_position: float = 0.5,
offset_position: float = 0.1,
linear_axis: int = 2,
tstep_fraction: float = 0.6,
tstep_fraction: float = 0.8,
tensor_args: TensorDeviceType = TensorDeviceType(),
) -> PoseCostMetric:
"""Enables moving to a pregrasp and then locked orientation movement to final grasp.
@@ -203,7 +203,6 @@ class PoseCost(CostBase, PoseCostConfig):
self.offset_waypoint[:3].copy_(offset_rotation)
self.offset_tstep_fraction[:] = offset_tstep_fraction
if self._horizon <= 0:
print(self.weight)
log_error(
"Updating offset waypoint is only possible after initializing motion gen"
+ " run motion_gen.warmup() before adding offset_waypoint"

View File

@@ -823,7 +823,6 @@ class UsdHelper:
config_file["robot_cfg"]["kinematics"], tensor_args=tensor_args
)
kin_model = CudaRobotModel(robot_cfg)
m = kin_model.get_robot_link_meshes()
offsets = [x.pose for x in m]
robot_mesh_model = WorldConfig(mesh=m)

View File

@@ -8,3 +8,4 @@
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
""" Module contains high-level APIs for robotics applications. """

View File

@@ -8,3 +8,4 @@
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
""" Module contains commonly used high-level APIs for motion generation. """

View File

@@ -8,33 +8,116 @@
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
""" This module contains heuristics for scoring trajectories. """
from __future__ import annotations
# Standard Library
from dataclasses import dataclass
from typing import Optional
from typing import Optional, Tuple, Union
# Third Party
import torch
import torch.autograd.profiler as profiler
# CuRobo
from curobo.types.base import TensorDeviceType
from curobo.types.robot import JointState
from curobo.types.tensor import T_DOF
from curobo.util.logger import log_error
from curobo.util.torch_utils import get_torch_jit_decorator
from curobo.util.trajectory import calculate_dt
@dataclass
class TrajEvaluatorConfig:
max_acc: float = 15.0
max_jerk: float = 500.0
"""Configurable Parameters for Trajectory Evaluator."""
#: Maximum acceleration for each joint.
max_acc: torch.Tensor
#: Maximum jerk for each joint.
max_jerk: torch.Tensor
#: Minimum allowed time step for trajectory. Trajectories with time step less than this
#: value will be rejected.
min_dt: torch.Tensor
#: Maximum allowed time step for trajectory. Trajectories with time step greater than this
#: value will be rejected.
max_dt: torch.Tensor
#: Weight to scale smoothness cost, total cost = path length + cost_weight * smoothness cost.
cost_weight: float = 0.01
min_dt: float = 0.001
max_dt: float = 0.1
def __post_init__(self):
"""Checks if values are of correct type and converts them if possible."""
if not isinstance(self.max_acc, torch.Tensor):
log_error(
"max_acc should be a torch.Tensor, this was changed recently, use "
+ "TrajEvaluatorConfig.from_basic() to create TrajEvaluatorConfig object"
)
if not isinstance(self.max_jerk, torch.Tensor):
log_error(
"max_jerk should be a torch.Tensor, this was changed recently, use "
+ "TrajEvaluatorConfig.from_basic() to create TrajEvaluatorConfig object"
)
if not isinstance(self.min_dt, torch.Tensor):
self.min_dt = torch.as_tensor(
self.min_dt, device=self.max_acc.device, dtype=self.max_acc.dtype
)
if not isinstance(self.max_dt, torch.Tensor):
self.max_dt = torch.as_tensor(
self.max_dt, device=self.max_acc.device, dtype=self.max_acc.dtype
)
@staticmethod
def from_basic(
dof: int,
max_acc: float = 15.0,
max_jerk: float = 500.0,
cost_weight: float = 0.01,
min_dt: float = 0.001,
max_dt: float = 0.1,
tensor_args: TensorDeviceType = TensorDeviceType(),
) -> TrajEvaluatorConfig:
"""Creates TrajEvaluatorConfig object from basic parameters.
Args:
dof: number of active joints in the robot.
max_acc: maximum acceleration for all joints. Treats this as same for all joints.
max_jerk: maximum jerk for all joints. Treats this as same for all joints.
cost_weight: weight to scale smoothness cost.
min_dt: minimum allowed time step between waypoints of a trajectory.
max_dt: maximum allowed time step between waypoints of a trajectory.
tensor_args: device and dtype for the tensors.
Returns:
TrajEvaluatorConfig: Configured Parameters for Trajectory Evaluator.
"""
return TrajEvaluatorConfig(
max_acc=torch.full((dof,), max_acc, device=tensor_args.device, dtype=tensor_args.dtype),
max_jerk=torch.full(
(dof,), max_jerk, device=tensor_args.device, dtype=tensor_args.dtype
),
cost_weight=cost_weight,
min_dt=torch.as_tensor(min_dt, device=tensor_args.device, dtype=tensor_args.dtype),
max_dt=torch.as_tensor(max_dt, device=tensor_args.device, dtype=tensor_args.dtype),
)
@get_torch_jit_decorator()
def compute_path_length(vel, traj_dt, cspace_distance_weight):
def compute_path_length(vel, traj_dt, cspace_distance_weight) -> torch.Tensor:
"""JIT compatible function to compute path length.
Args:
vel: joint space velocity tensor of shape (batch, horizon, dof).
traj_dt: dt of trajectory tensor of shape (batch, horizon) or (1, horizon) or (1, 1).
cspace_distance_weight: weight tensor of shape (dof).
Returns:
torch.Tensor: path length tensor of shape (batch).
"""
pl = torch.sum(
torch.sum(torch.abs(vel) * traj_dt.unsqueeze(-1) * cspace_distance_weight, dim=-1), dim=-1
)
@@ -42,19 +125,35 @@ def compute_path_length(vel, traj_dt, cspace_distance_weight):
@get_torch_jit_decorator()
def compute_path_length_cost(vel, cspace_distance_weight):
def compute_path_length_cost(vel, cspace_distance_weight) -> torch.Tensor:
"""JIT compatible function to compute path length cost without considering time step.
Args:
vel: joint space velocity tensor of shape (batch, horizon, dof).
cspace_distance_weight: weight tensor of shape (dof).
Returns:
torch.Tensor: path length cost tensor of shape (batch).
"""
pl = torch.sum(torch.sum(torch.abs(vel) * cspace_distance_weight, dim=-1), dim=-1)
return pl
@get_torch_jit_decorator()
def smooth_cost(abs_acc, abs_jerk, opt_dt):
# acc = torch.max(torch.max(abs_acc, dim=-1)[0], dim=-1)[0]
# jerk = torch.max(torch.max(abs_jerk, dim=-1)[0], dim=-1)[0]
def smooth_cost(abs_acc, abs_jerk, opt_dt) -> torch.Tensor:
"""JIT compatible function to compute smoothness cost.
Args:
abs_acc: absolute acceleration tensor of shape (batch, horizon, dof).
abs_jerk: absolute jerk tensor of shape (batch, horizon, dof).
opt_dt: optimal time step tensor of shape (batch).
Returns:
torch.Tensor: smoothness cost tensor of shape (batch).
"""
jerk = torch.mean(torch.max(abs_jerk, dim=-1)[0], dim=-1)
mean_acc = torch.mean(torch.max(abs_acc, dim=-1)[0], dim=-1) # [0]
a = (jerk * 0.001) + 10.0 * opt_dt + (mean_acc * 0.01)
# a = (jerk * 0.001) + 50.0 * opt_dt + (mean_acc * 0.01)
return a
@@ -65,24 +164,44 @@ def compute_smoothness(
acc: torch.Tensor,
jerk: torch.Tensor,
max_vel: torch.Tensor,
max_acc: float,
max_jerk: float,
max_acc: torch.Tensor,
max_jerk: torch.Tensor,
traj_dt: torch.Tensor,
min_dt: float,
max_dt: float,
):
) -> Tuple[torch.Tensor, torch.Tensor]:
"""JIT compatible function to compute smoothness.
Args:
vel: joint space velocity tensor of shape (batch, horizon, dof).
acc: joint space acceleration tensor of shape (batch, horizon, dof).
jerk: joint space jerk tensor of shape (batch, horizon, dof).
max_vel: maximum velocity limits, used to find scaling factor for dt that pushes at least
one joint to its limit, taking into account acceleration and jerk limits.
max_acc: maximum acceleration limits, used to find scaling factor for dt that pushes at least
one joint to its limit, taking into account velocity and jerk limits.
max_jerk: maximum jerk limits, used to find scaling factor for dt that pushes at least
one joint to its limit, taking into account velocity and acceleration limits.
traj_dt: dt of trajectory tensor of shape (batch, horizon) or (1, horizon)
min_dt: minimum delta time allowed between steps/waypoints in a trajectory.
max_dt: maximum delta time allowed between steps/waypoints in a trajectory.
Returns:
Tuple[torch.Tensor, torch.Tensor]: success label tensor of shape (batch) and
smoothness cost tensor of shape (batch)
"""
# compute scaled dt:
# h = int(vel.shape[-2] / 2)
max_v_arr = torch.max(torch.abs(vel), dim=-2)[0] # output is batch, dof
scale_dt = (max_v_arr) / (max_vel.view(1, max_v_arr.shape[-1])) # batch,dof
max_acc_arr = torch.max(torch.abs(acc), dim=-2)[0] # batch, dof
max_acc = max_acc.view(1, max_acc_arr.shape[-1])
scale_dt_acc = torch.sqrt(torch.max(max_acc_arr / max_acc, dim=-1)[0]) # batch, 1
max_jerk_arr = torch.max(torch.abs(jerk), dim=-2)[0] # batch, dof
max_jerk = max_jerk.view(1, max_jerk_arr.shape[-1])
scale_dt_jerk = torch.pow(torch.max(max_jerk_arr / max_jerk, dim=-1)[0], 1.0 / 3.0) # batch, 1
dt_score_vel = torch.max(scale_dt, dim=-1)[0] # batch, 1
@@ -102,25 +221,55 @@ def compute_smoothness(
# mean_jerk_val = torch.max(torch.mean(abs_jerk, dim=-1), dim=-1)[0]
max_jerk_val = torch.max(torch.max(abs_jerk, dim=-1)[0], dim=-1)[0]
acc_label = torch.logical_and(max_acc_val <= max_acc, max_jerk_val <= max_jerk)
# print(max_acc_val, max_jerk_val, dt_score)
return (acc_label, smooth_cost(abs_acc, abs_jerk, dt_score))
@get_torch_jit_decorator()
def compute_smoothness_opt_dt(
vel, acc, jerk, max_vel: torch.Tensor, max_acc: float, max_jerk: float, opt_dt: torch.Tensor
):
abs_acc = torch.abs(acc)
max_acc_val = torch.max(torch.max(abs_acc, dim=-1)[0], dim=-1)[0]
abs_jerk = torch.abs(jerk)
max_jerk_val = torch.max(torch.max(abs_jerk, dim=-1)[0], dim=-1)[0]
vel,
acc,
jerk,
max_vel: torch.Tensor,
max_acc: torch.Tensor,
max_jerk: torch.Tensor,
opt_dt: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""JIT compatible function to compute smoothness with pre-computed optimal time step.
acc_label = torch.logical_and(max_acc_val <= max_acc, max_jerk_val <= max_jerk)
Args:
vel: joint space velocity tensor of shape (batch, horizon, dof), not used in this
implementation.
acc: joint space acceleration tensor of shape (batch, horizon, dof).
jerk: joint space jerk tensor of shape (batch, horizon, dof).
max_vel: maximum velocity limit, not used in this implementation.
max_acc: maximum acceleration limit, used to reject trajectories.
max_jerk: maximum jerk limit, used to reject trajectories.
opt_dt: optimal time step tensor of shape (batch).
Returns:
Tuple[torch.Tensor, torch.Tensor]: success label tensor of shape (batch) and
smoothness cost tensor of shape (batch).
"""
abs_acc = torch.abs(acc)
abs_jerk = torch.abs(jerk)
delta_acc = torch.min(torch.min(max_acc - abs_acc, dim=-1)[0], dim=-1)[0]
max_jerk = max_jerk.view(1, abs_jerk.shape[-1])
delta_jerk = torch.min(torch.min(max_jerk - abs_jerk, dim=-1)[0], dim=-1)[0]
acc_label = torch.logical_and(delta_acc >= 0.0, delta_jerk >= 0.0)
return acc_label, smooth_cost(abs_acc, abs_jerk, opt_dt)
class TrajEvaluator(TrajEvaluatorConfig):
"""Trajectory Evaluator class that uses heuristics to score trajectories."""
def __init__(self, config: Optional[TrajEvaluatorConfig] = None):
"""Initializes the TrajEvaluator object.
Args:
config: Configurable parameters for Trajectory Evaluator.
"""
if config is None:
config = TrajEvaluatorConfig()
super().__init__(**vars(config))
@@ -128,17 +277,41 @@ class TrajEvaluator(TrajEvaluatorConfig):
def _compute_path_length(
self, js: JointState, traj_dt: torch.Tensor, cspace_distance_weight: T_DOF
):
"""Compute path length from joint velocities across trajectory and dt between them.
Args:
js: joint state object with velocity tensor.
traj_dt: time step tensor of shape (batch, horizon) or (1, horizon) or (1, 1).
cspace_distance_weight: weight tensor of shape (dof).
Returns:
torch.Tensor: path length tensor of shape (batch).
"""
path_length = compute_path_length(js.velocity, traj_dt, cspace_distance_weight)
return path_length
def _check_smoothness(self, js: JointState, traj_dt: torch.Tensor, max_vel: torch.Tensor):
def _check_smoothness(
self, js: JointState, traj_dt: torch.Tensor, max_vel: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Check smoothness of trajectory.
Args:
js: joint state object with velocity, acceleration and jerk tensors.
traj_dt: time step tensor of shape (batch, horizon) or (1, horizon) or (1, 1).
max_vel: maximum velocity limits, used to find scaling factor for dt that pushes at
least one joint to its limit, taking into account acceleration and jerk limits.
Returns:
Tuple[torch.Tensor, torch.Tensor]: success label tensor of shape (batch) and
smoothness cost tensor of shape (batch).
"""
if js.jerk is None:
js.jerk = (
(torch.roll(js.acceleration, -1, -2) - js.acceleration)
* (1 / traj_dt).unsqueeze(-1)
)[..., :-1, :]
acc_label, max_acc = compute_smoothness(
smooth_success_label, smooth_cost = compute_smoothness(
js.velocity,
js.acceleration,
js.jerk,
@@ -149,7 +322,7 @@ class TrajEvaluator(TrajEvaluatorConfig):
self.min_dt,
self.max_dt,
)
return acc_label, max_acc
return smooth_success_label, smooth_cost
@profiler.record_function("traj_evaluate_smoothness")
def evaluate(
@@ -158,7 +331,22 @@ class TrajEvaluator(TrajEvaluatorConfig):
traj_dt: torch.Tensor,
cspace_distance_weight: T_DOF,
max_vel: torch.Tensor,
):
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Evaluate trajectory based on smoothness and path length.
Args:
js: joint state object with velocity, acceleration and jerk tensors.
traj_dt: time step tensor of shape (batch, horizon) or (1, horizon) or (1, 1) or
(batch, 1).
cspace_distance_weight: weight tensor of shape (dof).
max_vel: maximum velocity limits, used to find scaling factor for dt that pushes at
least one joint to its limit, taking into account acceleration and jerk limits.
Returns:
Tuple[torch.Tensor, torch.Tensor]: success label tensor of shape (batch) and
total cost tensor of shape (batch) where total cost = (path length +
cost_weight * smoothness cost).
"""
label, cost = self._check_smoothness(js, traj_dt, max_vel)
pl_cost = self._compute_path_length(js, traj_dt, cspace_distance_weight)
return label, pl_cost + self.cost_weight * cost
@@ -170,7 +358,21 @@ class TrajEvaluator(TrajEvaluatorConfig):
opt_dt: torch.Tensor,
cspace_distance_weight: T_DOF,
max_vel: torch.Tensor,
):
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Evaluate trajectory based on smoothness and path length with pre-computed optimal dt.
Args:
js: joint state object with velocity, acceleration and jerk tensors.
opt_dt: optimal time step tensor of shape (batch).
cspace_distance_weight: weight tensor of shape (dof).
max_vel: maximum velocity limits, used to find scaling factor for dt that pushes at
least one joint to its limit, taking into account acceleration and jerk limits.
Returns:
Tuple[torch.Tensor, torch.Tensor]: success label tensor of shape (batch) and
total cost tensor of shape (batch) where total cost = (path length +
cost_weight * smoothness cost).
"""
label, cost = compute_smoothness_opt_dt(
js.velocity, js.acceleration, js.jerk, max_vel, self.max_acc, self.max_jerk, opt_dt
)
@@ -185,7 +387,22 @@ class TrajEvaluator(TrajEvaluatorConfig):
cspace_distance_weight: T_DOF,
max_vel: torch.Tensor,
skip_last_tstep: bool = False,
):
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Evaluate trajectory by first computing velocity, acceleration and jerk from position.
Args:
js: joint state object with position tensor.
traj_dt: time step tensor of shape (batch, 1) or (1, 1).
cspace_distance_weight: weight tensor of shape (dof).
max_vel: maximum velocity limits, used to find scaling factor for dt that pushes at
least one joint to its limit, taking into account acceleration and jerk limits.
skip_last_tstep: flag to skip last time step in trajectory.
Returns:
Tuple[torch.Tensor, torch.Tensor]: success label tensor of shape (batch) and
total cost tensor of shape (batch) where total cost = (path length +
cost_weight * smoothness cost).
"""
js = js.calculate_fd_from_position(traj_dt)
if skip_last_tstep:
js.position = js.position[..., :-1, :]
@@ -194,6 +411,3 @@ class TrajEvaluator(TrajEvaluatorConfig):
js.jerk = js.jerk[..., :-1, :]
return self.evaluate(js, traj_dt, cspace_distance_weight, max_vel)
def calculate_dt(self, js: JointState, max_vel: torch.Tensor, raw_dt: float):
return calculate_dt(js.velocity, max_vel, raw_dt)

File diff suppressed because it is too large Load Diff

View File

@@ -9,6 +9,28 @@
# its affiliates is strictly prohibited.
#
"""
This module contains :meth:`MotionGen` class that provides a high-level interface for motion
generation. Motion Generation can take goals either as joint configurations
:meth:`MotionGen.plan_single_js` or as Cartesian poses :meth:`MotionGen.plan_single`. When Cartesian
pose is given as target, inverse kinematics is first done to generate seeds for trajectory
optimization. Motion generation fallback to using a graph planner when linear interpolated
trajectory optimization seeds are not successful. Reaching one Cartesian pose in a goalset is also
supported using :meth:`MotionGen.plan_goalset`. Batched planning in the same world environment (
:meth:`MotionGen.plan_batch`) and different world environments (:meth:`MotionGen.plan_batch_env`)
is also provided.
A motion generation request can be configured using :meth:`MotionGenPlanConfig`. The result
of motion generation is returned as a :meth:`MotionGenResult`.
.. raw:: html
<p>
<video autoplay="True" loop="True" muted="True" preload="auto" width="100%"><source src="../videos/ur10_real_timer.mp4" type="video/mp4"></video>
</p>
"""
from __future__ import annotations
@@ -16,6 +38,7 @@ from __future__ import annotations
import math
import time
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, List, Optional, Union
# Third Party
@@ -129,16 +152,23 @@ class MotionGenConfig:
#: record compute ops as cuda graphs and replay recorded graphs for upto 10x faster execution.
use_cuda_graph: bool = True
#: After 100 iterations of trajectory optimization, a new dt is computed that pushes the
#: velocity, acceleration, or jerk limits to the maximum. This new dt is then used with a
#: reduction :attr:`MotionGenPlanConfig.finetune_dt_scale` to run 200+ iterations of trajectory
#: optimization. If trajectory optimization fails with the new dt, the new dt is increased and
#: tried again until :attr:`MotionGenPlanConfig.finetune_attempts`.
optimize_dt: bool = True
@staticmethod
@profiler.record_function("motion_gen_config/load_from_robot_config")
def load_from_robot_config(
robot_cfg: Union[Union[str, Dict], RobotConfig],
world_model: Optional[Union[Union[str, Dict], WorldConfig]] = None,
tensor_args: TensorDeviceType = TensorDeviceType(),
num_ik_seeds: int = 30,
num_ik_seeds: int = 32,
num_graph_seeds: int = 1,
num_trajopt_seeds: int = 12,
num_batch_ik_seeds: int = 30,
num_trajopt_seeds: int = 4,
num_batch_ik_seeds: int = 32,
num_batch_trajopt_seeds: int = 1,
num_trajopt_noisy_seeds: int = 1,
position_threshold: float = 0.005,
@@ -197,7 +227,7 @@ class MotionGenConfig:
smooth_weight: List[float] = None,
finetune_smooth_weight: Optional[List[float]] = None,
state_finite_difference_mode: Optional[str] = None,
finetune_dt_scale: float = 0.95,
finetune_dt_scale: float = 0.9,
maximum_trajectory_time: Optional[float] = None,
maximum_trajectory_dt: float = 0.1,
velocity_scale: Optional[Union[List[float], float]] = None,
@@ -207,87 +237,18 @@ class MotionGenConfig:
project_pose_to_goal_frame: bool = True,
ik_seed: int = 1531,
graph_seed: int = 1531,
high_precision: bool = False,
):
"""Helper function to create configuration from robot and world configuration.
Args:
robot_cfg: Robot configuration to use for motion generation.
world_model: World model to use for motion generation. Use None to disable world model.
tensor_args: Tensor device to use for motion generation. Use to change cuda device id.
num_ik_seeds: Number of seeds to use in inverse kinematics (IK) optimization.
num_graph_seeds: Number of graph paths to use as seed in trajectory optimization.
num_trajopt_seeds: Number of seeds to use in trajectory optimization.
num_batch_ik_seeds: Number of seeds to use in batch planning modes for IK.
num_batch_trajopt_seeds: Number of seeds to use in batch planning modes for trajopt.
num_trajopt_noisy_seeds: Number of seeds to use for trajopt.
position_threshold: _description_
rotation_threshold: _description_
cspace_threshold: _description_
world_coll_checker: _description_
base_cfg_file: _description_
particle_ik_file: _description_
gradient_ik_file: _description_
graph_file: _description_
particle_trajopt_file: _description_
gradient_trajopt_file: _description_
finetune_trajopt_file: _description_
trajopt_tsteps: _description_
interpolation_steps: _description_
interpolation_dt: _description_
interpolation_type: _description_
use_cuda_graph: _description_
self_collision_check: _description_
self_collision_opt: _description_
grad_trajopt_iters: _description_
trajopt_seed_ratio: _description_
ik_opt_iters: _description_
ik_particle_opt: _description_
collision_checker_type: _description_
sync_cuda_time: _description_
trajopt_particle_opt: _description_
traj_evaluator_config: _description_
traj_evaluator: _description_
minimize_jerk: _description_
filter_robot_command: _description_
use_gradient_descent: _description_
collision_cache: _description_
n_collision_envs: _description_
ee_link_name: _description_
use_es_ik: _description_
use_es_trajopt: _description_
es_ik_learning_rate: _description_
es_trajopt_learning_rate: _description_
use_ik_fixed_samples: _description_
use_trajopt_fixed_samples: _description_
evaluate_interpolated_trajectory: _description_
partial_ik_iters: _description_
fixed_iters_trajopt: _description_
store_ik_debug: _description_
store_trajopt_debug: _description_
graph_trajopt_iters: _description_
collision_max_outside_distance: _description_
collision_activation_distance: _description_
trajopt_dt: _description_
js_trajopt_dt: _description_
js_trajopt_tsteps: _description_
trim_steps: _description_
store_debug_in_result: _description_
finetune_trajopt_iters: _description_
smooth_weight: _description_
finetune_smooth_weight: _description_
state_finite_difference_mode: _description_
finetune_dt_scale: _description_
maximum_trajectory_time: _description_
maximum_trajectory_dt: _description_
velocity_scale: _description_
acceleration_scale: _description_
jerk_scale: _description_
optimize_dt: _description_
Returns:
_description_
"""
if position_threshold <= 0.001:
high_precision = True
if high_precision:
finetune_trajopt_iters = (
300 if finetune_trajopt_iters is None else max(300, finetune_trajopt_iters)
)
grad_trajopt_iters = 200 if grad_trajopt_iters is None else max(200, grad_trajopt_iters)
position_threshold = min(position_threshold, 0.001)
rotation_threshold = min(rotation_threshold, 0.025)
cspace_threshold = min(cspace_threshold, 0.01)
init_warp(tensor_args=tensor_args)
if js_trajopt_tsteps is not None:
log_warn("js_trajopt_tsteps is deprecated, use trajopt_tsteps instead.")
@@ -317,14 +278,6 @@ class MotionGenConfig:
):
maximum_trajectory_dt = (1.0 / min(velocity_scale)) * maximum_trajectory_dt
if traj_evaluator_config is None:
if maximum_trajectory_dt is not None:
max_dt = maximum_trajectory_dt
if maximum_trajectory_time is not None:
max_dt = maximum_trajectory_time / trajopt_tsteps
if acceleration_scale is not None:
max_dt = max_dt * (1.0 / np.sqrt(min(acceleration_scale)))
traj_evaluator_config = TrajEvaluatorConfig(min_dt=interpolation_dt, max_dt=max_dt)
if maximum_trajectory_dt is not None:
if trajopt_dt is None:
trajopt_dt = maximum_trajectory_dt
@@ -385,10 +338,20 @@ class MotionGenConfig:
robot_cfg["kinematics"]["cspace"]["velocity_scale"] = velocity_scale
robot_cfg = RobotConfig.from_dict(robot_cfg, tensor_args)
traj_evaluator_config.max_acc = (
robot_cfg.kinematics.get_joint_limits().acceleration[1, 0].item()
)
traj_evaluator_config.max_jerk = robot_cfg.kinematics.get_joint_limits().jerk[1, 0].item()
if traj_evaluator_config is None:
if maximum_trajectory_dt is not None:
max_dt = maximum_trajectory_dt
if maximum_trajectory_time is not None:
max_dt = maximum_trajectory_time / trajopt_tsteps
if acceleration_scale is not None:
max_dt = max_dt * (1.0 / np.sqrt(min(acceleration_scale)))
traj_evaluator_config = TrajEvaluatorConfig.from_basic(
min_dt=interpolation_dt, max_dt=max_dt, dof=robot_cfg.kinematics.dof
)
traj_evaluator_config.max_acc = robot_cfg.kinematics.get_joint_limits().acceleration[1]
traj_evaluator_config.max_jerk = robot_cfg.kinematics.get_joint_limits().jerk[1]
if isinstance(world_model, str):
world_model = load_yaml(join_path(get_world_configs_path(), world_model))
@@ -439,6 +402,7 @@ class MotionGenConfig:
collision_activation_distance=collision_activation_distance,
project_pose_to_goal_frame=project_pose_to_goal_frame,
seed=ik_seed,
high_precision=high_precision,
)
ik_solver = IKSolver(ik_solver_cfg)
@@ -612,15 +576,175 @@ class MotionGenConfig:
interpolation_dt=interpolation_dt,
finetune_dt_scale=finetune_dt_scale,
use_cuda_graph=use_cuda_graph,
optimize_dt=optimize_dt,
)
@dataclass
class MotionGenPlanConfig:
"""Configuration for querying motion generation."""
#: Use graph planner to generate collision-free seed for trajectory optimization.
enable_graph: bool = False
#: Enable trajectory optimization.
enable_opt: bool = True
#: Use neural network IK seed for solving inverse kinematics. Not implemented.
use_nn_ik_seed: bool = False
#: Run trajectory optimization only if graph planner is successful.
need_graph_success: bool = False
#: Maximum number of attempts allowed to solve the motion generation problem.
max_attempts: int = 60
#: Maximum time in seconds allowed to solve the motion generation problem.
timeout: float = 10.0
#: Number of failed attempts at which to fallback to a graph planner for obtaining trajectory
#: seeds.
enable_graph_attempt: Optional[int] = 3
#: Number of failed attempts at which to disable graph planner. This has not been found to be
#: useful.
disable_graph_attempt: Optional[int] = None
#: Number of IK attempts allowed before returning a failure. Set this to a low value (5) to
#: save compute time when an unreachable goal is given.
ik_fail_return: Optional[int] = None
#: Full IK solving takes 10% of the planning time. Setting this to True will run only 50
    #: iterations of IK instead of 100 and then run trajectory optimization without checking if
#: IK is successful. This can reduce the planning time by 5% but generated solutions can
#: have larger motion time and path length. Leave this to False for most cases.
partial_ik_opt: bool = False
    #: Number of seeds to use for solving inverse kinematics. Changing this will cause the existing
#: CUDAGraphs to be invalidated. Instead, set num_ik_seeds when creating
#: :meth:`MotionGenConfig`.
num_ik_seeds: Optional[int] = None
#: Number of seeds to use for graph planner. We found 4 to be a good number for most cases. The
#: default value is set when creating :meth:`MotionGenConfig` so leave this to None.
num_graph_seeds: Optional[int] = None
#: Number of seeds to use for trajectory optimization. We found 12 to be a good number for most
#: cases. The default value is set when creating :meth:`MotionGenConfig` so leave this to None.
num_trajopt_seeds: Optional[int] = None
#: Ratio of successful motion generation queries to total queries in batch planning mode. Motion
    #: generation is queried up to :attr:`MotionGenPlanConfig.max_attempts` times until this ratio is met.
success_ratio: float = 1
#: Return a failure if the query is invalid. Set this to False to debug a query that is invalid.
fail_on_invalid_query: bool = True
#: use start config as regularization for IK instead of
#: :meth:`curobo.types.robot.RobotConfig.kinematics.kinematics_config.retract_config`
use_start_state_as_retract: bool = True
#: Use a custom pose cost metric for trajectory optimization. This is useful for adding
#: additional constraints to motion generation, such as constraining the end-effector's motion
#: to a plane or a line or hold orientation while moving. This is also useful for only reaching
#: partial pose (e.g., only position). See :meth:`curobo.rollout.cost.pose_cost.PoseCostMetric`
#: for more details.
pose_cost_metric: Optional[PoseCostMetric] = None
#: Run finetuning trajectory optimization after running 100 iterations of
#: trajectory optimization. This will provide shorter and smoother trajectories. When
    #: :attr:`MotionGenConfig.optimize_dt` is True, this flag will also scale the trajectory
#: optimization by a new dt. Leave this to True for most cases. If you are not interested in
#: finding time-optimal solutions and only want to use motion generation as a feasibility check,
#: set this to False. Note that when set to False, the resulting trajectory is only guaranteed
#: to be collision-free and within joint limits. When False, it's not guaranteed to be smooth
#: and might not execute on a real robot.
enable_finetune_trajopt: bool = True
#: Run finetuning trajectory optimization across all trajectory optimization seeds. If this is
#: set to False, then only 1 successful seed per query is selected and finetuned. When False,
#: we have observed that the resulting trajectory is not as optimal as when set to True.
parallel_finetune: bool = True
#: Scale dt by this value before running finetuning trajectory optimization. This enables
#: trajectory optimization to find shorter paths and also account for smoothness over variable
#: length trajectories. This is only used when :attr:`MotionGenConfig.optimize_dt` is True.
finetune_dt_scale: Optional[float] = 0.85
#: Number of attempts to run finetuning trajectory optimization. Every attempt will increase the
#: :attr:`MotionGenPlanConfig.finetune_dt_scale` by
#: :attr:`MotionGenPlanConfig.finetune_dt_decay` as a path couldn't be found with the previous
#: smaller dt.
finetune_attempts: int = 5
#: Decay factor used to increase :attr:`MotionGenPlanConfig.finetune_dt_scale` when optimization
#: fails to find a solution. This is only used when :attr:`MotionGenConfig.optimize_dt` is True.
finetune_dt_decay: float = 1.025
def __post_init__(self):
if not self.enable_opt and not self.enable_graph:
log_error("Graph search and Optimization are both disabled, enable one")
def clone(self) -> MotionGenPlanConfig:
return MotionGenPlanConfig(
enable_graph=self.enable_graph,
enable_opt=self.enable_opt,
use_nn_ik_seed=self.use_nn_ik_seed,
need_graph_success=self.need_graph_success,
max_attempts=self.max_attempts,
timeout=self.timeout,
enable_graph_attempt=self.enable_graph_attempt,
disable_graph_attempt=self.disable_graph_attempt,
ik_fail_return=self.ik_fail_return,
partial_ik_opt=self.partial_ik_opt,
num_ik_seeds=self.num_ik_seeds,
num_graph_seeds=self.num_graph_seeds,
num_trajopt_seeds=self.num_trajopt_seeds,
success_ratio=self.success_ratio,
fail_on_invalid_query=self.fail_on_invalid_query,
enable_finetune_trajopt=self.enable_finetune_trajopt,
parallel_finetune=self.parallel_finetune,
use_start_state_as_retract=self.use_start_state_as_retract,
pose_cost_metric=(
None if self.pose_cost_metric is None else self.pose_cost_metric.clone()
),
finetune_dt_scale=self.finetune_dt_scale,
finetune_attempts=self.finetune_attempts,
)
class MotionGenStatus(Enum):
"""Status of motion generation query."""
#: Inverse kinematics failed to find a solution.
IK_FAIL = "IK Fail"
#: Graph planner failed to find a solution.
GRAPH_FAIL = "Graph Fail"
#: Trajectory optimization failed to find a solution.
TRAJOPT_FAIL = "TrajOpt Fail"
#: Finetune Trajectory optimization failed to find a solution.
FINETUNE_TRAJOPT_FAIL = "Finetune TrajOpt Fail"
#: Invalid query was given. The start state is either out of joint limits, in collision with
#: world, or in self-collision.
INVALID_QUERY = "Invalid Query"
#: Motion generation query was successful.
SUCCESS = "Success"
#: Motion generation was not attempted.
NOT_ATTEMPTED = "Not Attempted"
@dataclass
class MotionGenResult:
"""Result obtained from motion generation."""
#: success tensor with index referring to the batch index.
success: Optional[T_BValue_bool] = None
success: Optional[torch.Tensor] = None
#: returns true if the start state is not satisfying constraints (e.g., in collision)
valid_query: bool = True
@@ -672,7 +796,7 @@ class MotionGenResult:
debug_info: Any = None
#: status of motion generation query. returns [IK Fail, Graph Fail, TrajOpt Fail].
status: Optional[str] = None
status: Optional[Union[MotionGenStatus, str]] = None
#: number of attempts used before returning a solution.
attempts: int = 1
@@ -818,63 +942,6 @@ class MotionGenResult:
return self.optimized_dt * (self.optimized_plan.position.shape[-2] - 1 - 2 - 1)
@dataclass
class MotionGenPlanConfig:
enable_graph: bool = False
enable_opt: bool = True
use_nn_ik_seed: bool = False
need_graph_success: bool = False
max_attempts: int = 60
timeout: float = 10.0
enable_graph_attempt: Optional[int] = 3
disable_graph_attempt: Optional[int] = None
ik_fail_return: Optional[int] = None
partial_ik_opt: bool = False
num_ik_seeds: Optional[int] = None
num_graph_seeds: Optional[int] = None
num_trajopt_seeds: Optional[int] = None
success_ratio: float = 1
fail_on_invalid_query: bool = True
#: enables retiming trajectory optimization, useful for getting low jerk trajectories.
enable_finetune_trajopt: bool = True
parallel_finetune: bool = True
#: use start config as regularization
use_start_state_as_retract: bool = True
pose_cost_metric: Optional[PoseCostMetric] = None
def __post_init__(self):
if not self.enable_opt and not self.enable_graph:
log_error("Graph search and Optimization are both disabled, enable one")
def clone(self) -> MotionGenPlanConfig:
return MotionGenPlanConfig(
enable_graph=self.enable_graph,
enable_opt=self.enable_opt,
use_nn_ik_seed=self.use_nn_ik_seed,
need_graph_success=self.need_graph_success,
max_attempts=self.max_attempts,
timeout=self.timeout,
enable_graph_attempt=self.enable_graph_attempt,
disable_graph_attempt=self.disable_graph_attempt,
ik_fail_return=self.ik_fail_return,
partial_ik_opt=self.partial_ik_opt,
num_ik_seeds=self.num_ik_seeds,
num_graph_seeds=self.num_graph_seeds,
num_trajopt_seeds=self.num_trajopt_seeds,
success_ratio=self.success_ratio,
fail_on_invalid_query=self.fail_on_invalid_query,
enable_finetune_trajopt=self.enable_finetune_trajopt,
parallel_finetune=self.parallel_finetune,
use_start_state_as_retract=self.use_start_state_as_retract,
pose_cost_metric=(
None if self.pose_cost_metric is None else self.pose_cost_metric.clone()
),
)
class MotionGen(MotionGenConfig):
def __init__(self, config: MotionGenConfig):
super().__init__(**vars(config))
@@ -1092,7 +1159,6 @@ class MotionGen(MotionGenConfig):
link_poses: List[Pose] = None,
):
start_time = time.time()
if plan_config.pose_cost_metric is not None:
valid_query = self.update_pose_cost_metric(
plan_config.pose_cost_metric, start_state, goal_pose
@@ -1120,7 +1186,8 @@ class MotionGen(MotionGenConfig):
"trajopt_attempts": 0,
}
best_status = 0 # 0== None, 1==IK Fail, 2== Graph Fail, 3==Opt Fail
if plan_config.finetune_dt_scale is None:
plan_config.finetune_dt_scale = self.finetune_dt_scale
for n in range(plan_config.max_attempts):
log_info("MG Iter: " + str(n))
result = self._plan_from_solve_state(
@@ -1145,16 +1212,22 @@ class MotionGen(MotionGenConfig):
break
if result.success[0].item():
break
if result.status == "Finetune Opt Fail":
plan_config.finetune_dt_scale *= (
plan_config.finetune_dt_decay**plan_config.finetune_attempts
)
plan_config.finetune_dt_scale = min(plan_config.finetune_dt_scale, 1.25)
if plan_config.enable_graph_attempt is not None and (
n >= plan_config.enable_graph_attempt - 1
and result.status == "Opt Fail"
and result.status in ["Opt Fail", "Finetune Opt Fail"]
and not plan_config.enable_graph
):
plan_config.enable_graph = True
plan_config.partial_ik_opt = False
if plan_config.disable_graph_attempt is not None and (
n >= plan_config.disable_graph_attempt - 1
and result.status in ["Opt Fail", "Graph Fail"]
and result.status in ["Opt Fail", "Graph Fail", "Finetune Opt Fail"]
and not force_graph
):
plan_config.enable_graph = False
@@ -1192,7 +1265,7 @@ class MotionGen(MotionGenConfig):
plan_config: MotionGenPlanConfig = MotionGenPlanConfig(),
):
start_time = time.time()
goal_pose = goal_pose.clone()
if plan_config.pose_cost_metric is not None:
valid_query = self.update_pose_cost_metric(
plan_config.pose_cost_metric, start_state, goal_pose
@@ -1646,19 +1719,35 @@ class MotionGen(MotionGenConfig):
if plan_config.parallel_finetune:
opt_dt = torch.min(opt_dt[traj_result.success])
seed_override = solve_state.num_trajopt_seeds * self.noisy_trajopt_seeds
scaled_dt = torch.clamp(
opt_dt * self.finetune_dt_scale,
self.trajopt_solver.interpolation_dt,
)
self.finetune_trajopt_solver.update_solver_dt(scaled_dt.item())
traj_result = self._solve_trajopt_from_solve_state(
goal,
solve_state,
seed_traj,
trajopt_instance=self.finetune_trajopt_solver,
num_seeds_override=seed_override,
)
finetune_time = 0
for k in range(plan_config.finetune_attempts):
newton_iters = None
scaled_dt = torch.clamp(
opt_dt
* plan_config.finetune_dt_scale
* (plan_config.finetune_dt_decay ** (k)),
self.trajopt_solver.interpolation_dt,
)
if self.optimize_dt:
self.finetune_trajopt_solver.update_solver_dt(scaled_dt.item())
traj_result = self._solve_trajopt_from_solve_state(
goal,
solve_state,
seed_traj,
trajopt_instance=self.finetune_trajopt_solver,
num_seeds_override=seed_override,
newton_iters=newton_iters,
)
finetune_time += traj_result.solve_time
if torch.count_nonzero(traj_result.success) > 0:
break
seed_traj = traj_result.optimized_seeds.detach().clone()
newton_iters = 4
traj_result.solve_time = finetune_time
result.finetune_time = traj_result.solve_time
@@ -1667,13 +1756,15 @@ class MotionGen(MotionGenConfig):
result.debug_info["finetune_trajopt_result"] = traj_result
elif plan_config.enable_finetune_trajopt:
traj_result.success = traj_result.success[0:1]
# if torch.count_nonzero(result.success) == 0:
result.status = "Opt Fail"
result.solve_time += traj_result.solve_time + result.finetune_time
result.trajopt_time = traj_result.solve_time
result.trajopt_attempts = 1
result.success = traj_result.success
if torch.count_nonzero(result.success) == 0:
result.status = "Opt Fail"
if plan_config.enable_finetune_trajopt and torch.count_nonzero(result.success) == 0:
result.status = "Finetune Opt Fail"
result.interpolated_plan = traj_result.interpolated_solution.trim_trajectory(
0, traj_result.path_buffer_last_tstep[0]
@@ -1696,15 +1787,17 @@ class MotionGen(MotionGenConfig):
) -> MotionGenResult:
trajopt_seed_traj = None
trajopt_seed_success = None
trajopt_newton_iters = None
trajopt_newton_iters = self.js_trajopt_solver.newton_iters
graph_success = 0
if len(start_state.shape) == 1:
log_error("Joint state should be not a vector (dof) should be (bxdof)")
result = MotionGenResult(cspace_error=torch.zeros((1), device=self.tensor_args.device))
if self.store_debug_in_result:
result.debug_info = {}
# do graph search:
if plan_config.enable_graph:
if plan_config.enable_graph and False:
start_config = torch.zeros(
(solve_state.num_graph_seeds, self.js_trajopt_solver.dof),
device=self.tensor_args.device,
@@ -1782,7 +1875,6 @@ class MotionGen(MotionGenConfig):
# do trajopt:
if plan_config.enable_opt:
with profiler.record_function("motion_gen/setup_trajopt_seeds"):
# self._trajopt_goal_config[:, :ik_success] = goal_config
goal = Goal(
current_state=start_state,
@@ -1815,12 +1907,16 @@ class MotionGen(MotionGenConfig):
batch_mode=False,
seed_success=trajopt_seed_success,
)
trajopt_seed_traj = trajopt_seed_traj.view(
self.js_trajopt_solver.num_seeds * 1,
1,
self.trajopt_solver.action_horizon,
self._dof,
).contiguous()
trajopt_seed_traj = (
trajopt_seed_traj.view(
self.js_trajopt_solver.num_seeds * 1,
1,
self.trajopt_solver.action_horizon,
self._dof,
)
.contiguous()
.clone()
)
if plan_config.enable_finetune_trajopt:
og_value = self.trajopt_solver.interpolation_type
self.js_trajopt_solver.interpolation_type = InterpolateType.LINEAR_CUDA
@@ -1830,8 +1926,8 @@ class MotionGen(MotionGenConfig):
goal,
solve_state,
trajopt_seed_traj,
num_seeds_override=solve_state.num_trajopt_seeds * 1,
newton_iters=trajopt_newton_iters,
num_seeds_override=solve_state.num_trajopt_seeds,
newton_iters=trajopt_newton_iters + 2,
return_all_solutions=plan_config.enable_finetune_trajopt,
trajopt_instance=self.js_trajopt_solver,
)
@@ -1853,15 +1949,15 @@ class MotionGen(MotionGenConfig):
torch.max(traj_result.optimized_dt[traj_result.success]),
self.trajopt_solver.interpolation_dt,
)
og_dt = self.js_trajopt_solver.solver_dt
og_dt = self.js_trajopt_solver.solver_dt.clone()
self.js_trajopt_solver.update_solver_dt(scaled_dt.item())
traj_result = self._solve_trajopt_from_solve_state(
goal,
solve_state,
seed_traj,
trajopt_instance=self.js_trajopt_solver,
num_seeds_override=solve_state.num_trajopt_seeds * self.noisy_trajopt_seeds,
num_seeds_override=solve_state.num_trajopt_seeds,
newton_iters=trajopt_newton_iters + 4,
)
self.js_trajopt_solver.update_solver_dt(og_dt)
@@ -2281,6 +2377,7 @@ class MotionGen(MotionGenConfig):
joint_names=self.rollout_fn.joint_names,
).repeat_seeds(batch)
state = self.rollout_fn.compute_kinematics(start_state)
link_poses = state.link_pose
if n_goalset == -1:
retract_pose = Pose(state.ee_pos_seq, quaternion=state.ee_quat_seq)
@@ -2355,6 +2452,7 @@ class MotionGen(MotionGenConfig):
"graph_time": 0,
"trajopt_time": 0,
"trajopt_attempts": 0,
"finetune_time": 0,
}
result = None
goal = Goal(goal_state=goal_state, current_state=start_state)
@@ -2367,23 +2465,36 @@ class MotionGen(MotionGenConfig):
n_envs=1,
n_goalset=1,
)
force_graph = plan_config.enable_graph
for n in range(plan_config.max_attempts):
traj_result = self._plan_js_from_solve_state(
result = self._plan_js_from_solve_state(
solve_state, start_state, goal_state, plan_config=plan_config
)
time_dict["trajopt_time"] += traj_result.solve_time
time_dict["trajopt_time"] += result.solve_time
time_dict["graph_time"] += result.graph_time
time_dict["finetune_time"] += result.finetune_time
time_dict["trajopt_attempts"] = n
if plan_config.enable_graph_attempt is not None and (
n >= plan_config.enable_graph_attempt - 1 and not plan_config.enable_graph
):
plan_config.enable_graph = True
if plan_config.disable_graph_attempt is not None and (
n >= plan_config.disable_graph_attempt - 1 and not force_graph
):
plan_config.enable_graph = False
if result is None:
result = traj_result
if traj_result.success.item():
if result.success.item():
break
if not result.valid_query:
break
result.solve_time = time_dict["trajopt_time"]
result.graph_time = time_dict["graph_time"]
result.finetune_time = time_dict["finetune_time"]
result.trajopt_time = time_dict["trajopt_time"]
result.solve_time = result.trajopt_time + result.graph_time + result.finetune_time
result.total_time = result.solve_time
if self.store_debug_in_result:
result.debug_info = {"trajopt_result": traj_result}
result.attempts = n
return result

View File

@@ -31,17 +31,14 @@ from curobo.opt.particle.parallel_es import ParallelES, ParallelESConfig
from curobo.opt.particle.parallel_mppi import ParallelMPPI, ParallelMPPIConfig
from curobo.rollout.arm_reacher import ArmReacher, ArmReacherConfig
from curobo.rollout.cost.pose_cost import PoseCostMetric
from curobo.rollout.dynamics_model.integration_utils import (
action_interpolate_kernel,
interpolate_kernel,
)
from curobo.rollout.dynamics_model.integration_utils import interpolate_kernel
from curobo.rollout.rollout_base import Goal, RolloutBase, RolloutMetrics
from curobo.types.base import TensorDeviceType
from curobo.types.robot import JointState, RobotConfig
from curobo.types.tensor import T_BDOF, T_DOF, T_BValue_bool, T_BValue_float
from curobo.util.helpers import list_idx_if_not_none
from curobo.util.logger import log_error, log_info, log_warn
from curobo.util.torch_utils import get_torch_jit_decorator, is_torch_compile_available
from curobo.util.torch_utils import get_torch_jit_decorator
from curobo.util.trajectory import (
InterpolateType,
calculate_dt_no_clamp,
@@ -69,7 +66,7 @@ class TrajOptSolverConfig:
num_seeds: int = 1
bias_node: Optional[T_DOF] = None
interpolation_dt: float = 0.01
traj_evaluator_config: TrajEvaluatorConfig = TrajEvaluatorConfig()
traj_evaluator_config: Optional[TrajEvaluatorConfig] = None
traj_evaluator: Optional[TrajEvaluator] = None
evaluate_interpolated_trajectory: bool = True
cspace_threshold: float = 0.1
@@ -109,7 +106,7 @@ class TrajOptSolverConfig:
seed_ratio: Dict[str, int] = {"linear": 1.0, "bias": 0.0, "start": 0.0, "end": 0.0},
use_particle_opt: bool = True,
collision_checker_type: Optional[CollisionCheckerType] = CollisionCheckerType.MESH,
traj_evaluator_config: TrajEvaluatorConfig = TrajEvaluatorConfig(),
traj_evaluator_config: Optional[TrajEvaluatorConfig] = None,
traj_evaluator: Optional[TrajEvaluator] = None,
minimize_jerk: bool = True,
use_gradient_descent: bool = False,
@@ -172,7 +169,6 @@ class TrajOptSolverConfig:
config_data["cost"]["pose_cfg"]["project_distance"] = project_pose_to_goal_frame
grad_config_data["cost"]["pose_cfg"]["project_distance"] = project_pose_to_goal_frame
config_data["model"]["horizon"] = traj_tsteps
grad_config_data["model"]["horizon"] = traj_tsteps
if minimize_jerk is not None:
@@ -325,6 +321,12 @@ class TrajOptSolverConfig:
sync_cuda_time=sync_cuda_time,
)
trajopt = WrapBase(cfg)
if traj_evaluator_config is None:
traj_evaluator_config = TrajEvaluatorConfig.from_basic(
min_dt=interpolation_dt,
dof=robot_cfg.kinematics.dof,
tensor_args=tensor_args,
)
trajopt_cfg = TrajOptSolverConfig(
robot_config=robot_cfg,
rollout_fn=aux_rollout,
@@ -375,6 +377,7 @@ class TrajResult(Sequence):
raw_solution: Optional[JointState] = None
raw_action: Optional[torch.Tensor] = None
goalset_index: Optional[torch.Tensor] = None
optimized_seeds: Optional[torch.Tensor] = None
def __getitem__(self, idx):
# position_error = rotation_error = cspace_error = path_buffer_last_tstep = None
@@ -405,6 +408,7 @@ class TrajResult(Sequence):
rotation_error=idx_vals[4],
cspace_error=idx_vals[5],
goalset_index=idx_vals[6],
optimized_seeds=self.optimized_seeds,
)
def __len__(self):
@@ -918,6 +922,7 @@ class TrajOptSolver(TrajOptSolverConfig):
raw_solution=result.action,
raw_action=result.raw_action,
goalset_index=result.metrics.goalset_index,
optimized_seeds=result.raw_action,
)
else:
# get path length:
@@ -938,79 +943,38 @@ class TrajOptSolver(TrajOptSolverConfig):
)
with profiler.record_function("trajopt/best_select"):
if True: # not get_torch_jit_decorator() == torch.jit.script:
# This only works if torch compile is available:
(
idx,
position_error,
rotation_error,
cspace_error,
goalset_index,
opt_dt,
success,
) = jit_trajopt_best_select(
success,
smooth_label,
result.metrics.cspace_error,
result.metrics.pose_error,
result.metrics.position_error,
result.metrics.rotation_error,
result.metrics.goalset_index,
result.metrics.cost,
smooth_cost,
batch_mode,
goal.batch,
num_seeds,
self._col,
opt_dt,
)
if batch_mode:
last_tstep = [last_tstep[i] for i in idx]
else:
last_tstep = [last_tstep[idx.item()]]
best_act_seq = result.action[idx]
best_raw_action = result.raw_action[idx]
interpolated_traj = interpolated_trajs[idx]
(
idx,
position_error,
rotation_error,
cspace_error,
goalset_index,
opt_dt,
success,
) = jit_trajopt_best_select(
success,
smooth_label,
result.metrics.cspace_error,
result.metrics.pose_error,
result.metrics.position_error,
result.metrics.rotation_error,
result.metrics.goalset_index,
result.metrics.cost,
smooth_cost,
batch_mode,
goal.batch,
num_seeds,
self._col,
opt_dt,
)
if batch_mode:
last_tstep = [last_tstep[i] for i in idx]
else:
success[~smooth_label] = False
# get the best solution:
if result.metrics.pose_error is not None:
convergence_error = result.metrics.pose_error[..., -1]
elif result.metrics.cspace_error is not None:
convergence_error = result.metrics.cspace_error[..., -1]
else:
raise ValueError(
"convergence check requires either goal_pose or goal_state"
)
running_cost = torch.mean(result.metrics.cost, dim=-1) * 0.0001
error = convergence_error + smooth_cost + running_cost
error[~success] += 10000.0
if batch_mode:
idx = torch.argmin(error.view(goal.batch, num_seeds), dim=-1)
idx = idx + num_seeds * self._col
last_tstep = [last_tstep[i] for i in idx]
success = success[idx]
else:
idx = torch.argmin(error, dim=0)
last_tstep = [last_tstep[idx.item()]]
best_act_seq = result.action[idx]
best_raw_action = result.raw_action[idx]
interpolated_traj = interpolated_trajs[idx]
last_tstep = [last_tstep[idx.item()]]
success = success[idx : idx + 1]
best_act_seq = result.action[idx]
best_raw_action = result.raw_action[idx]
interpolated_traj = interpolated_trajs[idx]
goalset_index = position_error = rotation_error = cspace_error = None
if result.metrics.position_error is not None:
position_error = result.metrics.position_error[idx, -1]
if result.metrics.rotation_error is not None:
rotation_error = result.metrics.rotation_error[idx, -1]
if result.metrics.cspace_error is not None:
cspace_error = result.metrics.cspace_error[idx, -1]
if result.metrics.goalset_index is not None:
goalset_index = result.metrics.goalset_index[idx, -1]
opt_dt = opt_dt[idx]
if self.sync_cuda_time:
torch.cuda.synchronize()
if len(best_act_seq.shape) == 3:
@@ -1046,6 +1010,7 @@ class TrajOptSolver(TrajOptSolverConfig):
raw_solution=best_act_seq,
raw_action=best_raw_action,
goalset_index=goalset_index,
optimized_seeds=result.raw_action,
)
return traj_result
@@ -1307,6 +1272,10 @@ class TrajOptSolver(TrajOptSolverConfig):
if isinstance(rollout, ArmReacher)
]
@property
def newton_iters(self):
return self._og_newton_iters
@get_torch_jit_decorator()
def jit_feasible_success(

View File

@@ -8,22 +8,25 @@
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
""" Module contains custom types and dataclasses used across reacher solvers. """
from __future__ import annotations
# Standard Library
from dataclasses import dataclass
from enum import Enum
from typing import Dict, List, Optional, Union
from typing import Dict, List, Optional, Tuple, Union
# CuRobo
from curobo.rollout.rollout_base import Goal
from curobo.types.base import TensorDeviceType
from curobo.types.math import Pose
from curobo.types.robot import JointState
from curobo.types.tensor import T_BDOF
class ReacherSolveType(Enum):
# TODO: how to differentiate between goal pose and goal config?
"""Enum for different types of problems solved with reacher solvers."""
SINGLE = 0
GOALSET = 1
BATCH = 2
@@ -34,20 +37,46 @@ class ReacherSolveType(Enum):
@dataclass
class ReacherSolveState:
"""Dataclass for storing the current problem type of a reacher solver."""
#: Type of problem solved by the reacher solver.
solve_type: ReacherSolveType
#: Number of problems in the batch.
batch_size: int
#: Number of environments in the batch.
n_envs: int
#: Number of goals per problem. Only valid for goalset problems.
n_goalset: int = 1
#: Flag to indicate if the problems use different world environments in the batch.
batch_env: bool = False
#: Flag to indicate if the problems use different retract configurations in the batch.
batch_retract: bool = False
#: Flag to indicate if there is more than 1 problem to be solved.
batch_mode: bool = False
#: Number of seeds for each problem.
num_seeds: Optional[int] = None
#: Number of seeds for inverse kinematics problems.
num_ik_seeds: Optional[int] = None
#: Number of seeds for graph search problems.
num_graph_seeds: Optional[int] = None
#: Number of seeds for trajectory optimization problems.
num_trajopt_seeds: Optional[int] = None
#: Number of seeds for model predictive control problems.
num_mpc_seeds: Optional[int] = None
def __post_init__(self):
"""Post init method to set default flags based on input values."""
if self.n_envs == 1:
self.batch_env = False
else:
@@ -63,7 +92,8 @@ class ReacherSolveState:
if self.num_seeds is None:
self.num_seeds = self.num_mpc_seeds
def clone(self):
def clone(self) -> ReacherSolveState:
"""Method to create a deep copy of the current reacher solve state."""
return ReacherSolveState(
solve_type=self.solve_type,
n_envs=self.n_envs,
@@ -79,10 +109,12 @@ class ReacherSolveState:
num_mpc_seeds=self.num_mpc_seeds,
)
def get_batch_size(self):
def get_batch_size(self) -> int:
"""Method to get total number of optimization problems in the batch including seeds."""
return self.num_seeds * self.batch_size
def get_ik_batch_size(self):
def get_ik_batch_size(self) -> int:
"""Method to get total number of IK problems in the batch including seeds."""
return self.num_ik_seeds * self.batch_size
def create_goal_buffer(
@@ -92,8 +124,24 @@ class ReacherSolveState:
retract_config: Optional[T_BDOF] = None,
link_poses: Optional[Dict[str, Pose]] = None,
tensor_args: TensorDeviceType = TensorDeviceType(),
):
# TODO: Refactor to account for num_ik_seeds or num_trajopt_seeds
) -> Goal:
"""Method to create a goal buffer from goal pose and other problem targets.
Args:
goal_pose: Pose to reach with the end effector.
goal_state: Joint configuration to reach. If None, the goal is to reach the pose.
retract_config: Joint configuration to use for L2 regularization. If None,
`retract_config` from robot configuration file is used. An alternative value is to
use the start state as the retract configuration.
link_poses: Dictionary of link poses to reach. This is only required for multi-link
pose reaching, where the goal is to reach multiple poses with different links.
tensor_args: Device and floating precision.
Returns:
Goal buffer with the goal pose, goal state, retract state, and link poses.
"""
# NOTE: Refactor to account for num_ik_seeds or num_trajopt_seeds
batch_retract = True
if retract_config is None or retract_config.shape[0] != goal_pose.batch:
batch_retract = False
@@ -122,7 +170,28 @@ class ReacherSolveState:
current_solve_state: Optional[ReacherSolveState] = None,
current_goal_buffer: Optional[Goal] = None,
tensor_args: TensorDeviceType = TensorDeviceType(),
):
) -> Tuple[ReacherSolveState, Goal, bool]:
"""Method to update the goal buffer with new goal pose and other problem targets.
Args:
goal_pose: Pose to reach with the end effector.
goal_state: Joint configuration to reach. If None, the goal is to reach the pose.
retract_config: Joint configuration to use for L2 regularization. If None,
`retract_config` from robot configuration file is used. An alternative value is to
use the start state as the retract configuration.
link_poses: Dictionary of link poses to reach. This is only required for multi-link
pose reaching, where the goal is to reach multiple poses with different links. To
use this,
:var:`curobo.cuda_robot_model.cuda_robot_model.CudaRobotModelConfig.link_names`
should have the link names to reach.
current_solve_state: Current reacher solve state.
current_goal_buffer: Current goal buffer.
tensor_args: Device and floating precision.
Returns:
Tuple of updated reacher solve state, goal buffer, and a flag indicating if the goal
buffer reference has changed which is useful to break existing CUDA Graphs.
"""
solve_state = self
# create goal buffer by comparing to existing solve type
update_reference = False
@@ -167,7 +236,19 @@ class ReacherSolveState:
current_solve_state: Optional[ReacherSolveState] = None,
current_goal_buffer: Optional[Goal] = None,
tensor_args: TensorDeviceType = TensorDeviceType(),
):
) -> Tuple[ReacherSolveState, Goal, bool]:
"""Method to update the goal buffer with values from new Rollout goal.
Args:
goal: Rollout goal to update the goal buffer.
current_solve_state: Current reacher solve state.
current_goal_buffer: Current goal buffer.
tensor_args: Device and floating precision.
Returns:
Tuple of updated reacher solve state, goal buffer, and a flag indicating if the goal
buffer reference has changed which is useful to break existing CUDA Graphs.
"""
solve_state = self
update_reference = False
if (
@@ -204,6 +285,8 @@ class ReacherSolveState:
@dataclass
class MotionGenSolverState:
"""Dataclass for storing the current state of a motion generation solver."""
solve_type: ReacherSolveType
ik_solve_state: ReacherSolveState
trajopt_solve_state: ReacherSolveState
@@ -215,6 +298,22 @@ def get_padded_goalset(
current_goal_buffer: Goal,
new_goal_pose: Pose,
) -> Union[Pose, None]:
"""Method to pad number of goals in goalset to match the cached goal buffer.
This allows for creating a goalset problem with large number of goals during the first call,
and subsequent calls can have fewer goals. This function will pad the new goalset with the
first goal to match the cached goal buffer's shape.
Args:
solve_state: New problem's solve state.
current_solve_state: Current solve state.
current_goal_buffer: Current goal buffer.
new_goal_pose: Padded goal pose to match the cached goal buffer's shape.
Returns:
Padded goal pose to match the cached goal buffer's shape. If the new goal can't be padded,
returns None.
"""
if (
current_solve_state.solve_type == ReacherSolveType.GOALSET
and solve_state.solve_type == ReacherSolveType.SINGLE