release repository

This commit is contained in:
Balakumar Sundaralingam
2023-10-26 04:17:19 -07:00
commit 07e6ccfc91
287 changed files with 70659 additions and 0 deletions


@@ -0,0 +1,13 @@
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
"""
"""

File diff suppressed because it is too large.


@@ -0,0 +1,121 @@
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
# Standard Library
from dataclasses import dataclass
from typing import List, Optional, Union
# Third Party
import torch
# CuRobo
from curobo.types.base import TensorDeviceType
@dataclass
class CostConfig:
weight: Union[torch.Tensor, float, List[float]]
tensor_args: TensorDeviceType = None
distance_threshold: float = 0.0
classify: bool = False
terminal: bool = False
run_weight: Optional[float] = None
dof: int = 7
vec_weight: Optional[Union[torch.Tensor, List[float], float]] = None
max_value: Optional[float] = None
hinge_value: Optional[float] = None
vec_convergence: Optional[List[float]] = None
threshold_value: Optional[float] = None
return_loss: bool = False
def __post_init__(self):
self.weight = self.tensor_args.to_device(self.weight)
if len(self.weight.shape) == 0:
self.weight = torch.tensor(
[self.weight], device=self.tensor_args.device, dtype=self.tensor_args.dtype
)
if self.vec_weight is not None:
self.vec_weight = self.tensor_args.to_device(self.vec_weight)
if self.max_value is not None:
self.max_value = self.tensor_args.to_device(self.max_value)
if self.hinge_value is not None:
self.hinge_value = self.tensor_args.to_device(self.hinge_value)
if self.threshold_value is not None:
self.threshold_value = self.tensor_args.to_device(self.threshold_value)
def update_vec_weight(self, vec_weight):
self.vec_weight = self.tensor_args.to_device(vec_weight)
class CostBase(torch.nn.Module, CostConfig):
def __init__(self, config: Optional[CostConfig] = None):
"""Initialize class
Args:
config (Optional[CostConfig], optional): To initialize this class directly, pass a config.
If this is a base class, it's assumed that you will initialize the child class with `CostConfig`.
Defaults to None.
"""
self._run_weight_vec = None
super(CostBase, self).__init__()
if config is not None:
CostConfig.__init__(self, **vars(config))
CostBase._init_post_config(self)
self._batch_size = -1
self._horizon = -1
self._dof = -1
self._dt = 1
def _init_post_config(self):
self._weight = self.weight.clone()
self.cost_fn = None
self._cost_enabled = True
self._z_scalar = self.tensor_args.to_device(0.0)
if torch.sum(self.weight) == 0.0:
self.disable_cost()
def forward(self, q):
batch_size = q.shape[0]
horizon = q.shape[1]
q = q.view(batch_size * horizon, q.shape[2])
res = self.cost_fn(q)
res = res.view(batch_size, horizon)
res += self.distance_threshold
res = torch.nn.functional.relu(res, inplace=True)
if self.classify:
res = torch.where(res > 0, res + 1.0, res)
cost = self.weight * res
return cost
def disable_cost(self):
self.weight.copy_(self._weight * 0.0)
self._cost_enabled = False
def enable_cost(self):
self.weight.copy_(self._weight.clone())
if torch.sum(self.weight) == 0.0:
self._cost_enabled = False
else:
self._cost_enabled = True
def update_weight(self, weight: float):
if weight == 0.0:
self.disable_cost()
else:
self.weight.copy_(self._weight * 0.0 + weight)
@property
def enabled(self):
return self._cost_enabled
def update_dt(self, dt: Union[float, torch.Tensor]):
self._dt = dt
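
As a quick reference, the hinge-and-weight pattern implemented by CostBase.forward above can be written as a standalone function; this is a minimal sketch that runs with only PyTorch installed, where the [batch, horizon] shape and the scalar weight are assumptions made for the example:

import torch

def weighted_hinge_cost(res, weight, distance_threshold=0.0, classify=False):
    # res: raw per-timestep cost terms of shape [batch, horizon]
    res = torch.nn.functional.relu(res + distance_threshold)
    if classify:
        # bump any violating timestep by a constant, as in CostBase.forward
        res = torch.where(res > 0, res + 1.0, res)
    return weight * res

example = weighted_hinge_cost(torch.randn(4, 30), weight=torch.tensor(5.0), classify=True)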


@@ -0,0 +1,315 @@
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
# Standard Library
from dataclasses import dataclass
from enum import Enum
from typing import Optional
# Third Party
import torch
import warp as wp
# CuRobo
from curobo.util.warp import init_warp
# Local Folder
from .cost_base import CostBase, CostConfig
wp.set_module_options({"fast_math": False})
class DistType(Enum):
L1 = 0
L2 = 1
SQUARED_L2 = 2
@dataclass
class DistCostConfig(CostConfig):
dist_type: DistType = DistType.L2
use_null_space: bool = False
def __post_init__(self):
return super().__post_init__()
@torch.jit.script
def L2_DistCost_jit(vec_weight, disp_vec):
return torch.norm(vec_weight * disp_vec, p=2, dim=-1, keepdim=False)
@torch.jit.script
def fwd_SQL2_DistCost_jit(vec_weight, disp_vec):
return torch.sum(torch.square(vec_weight * disp_vec), dim=-1, keepdim=False)
@torch.jit.script
def fwd_L1_DistCost_jit(vec_weight, disp_vec):
return torch.sum(torch.abs(vec_weight * disp_vec), dim=-1, keepdim=False)
@torch.jit.script
def L2_DistCost_target_jit(vec_weight, g_vec, c_vec, weight):
return torch.norm(weight * vec_weight * (g_vec - c_vec), p=2, dim=-1, keepdim=False)
@torch.jit.script
def fwd_SQL2_DistCost_target_jit(vec_weight, g_vec, c_vec, weight):
return torch.sum(torch.square(weight * vec_weight * (g_vec - c_vec)), dim=-1, keepdim=False)
@torch.jit.script
def fwd_L1_DistCost_target_jit(vec_weight, g_vec, c_vec, weight):
return torch.sum(torch.abs(weight * vec_weight * (g_vec - c_vec)), dim=-1, keepdim=False)
@wp.kernel
def forward_l2_warp(
pos: wp.array(dtype=wp.float32),
target: wp.array(dtype=wp.float32),
target_idx: wp.array(dtype=wp.int32),
weight: wp.array(dtype=wp.float32),
run_weight: wp.array(dtype=wp.float32),
vec_weight: wp.array(dtype=wp.float32),
out_cost: wp.array(dtype=wp.float32),
out_grad_p: wp.array(dtype=wp.float32),
write_grad: wp.uint8, # this should be a bool
batch_size: wp.int32,
horizon: wp.int32,
dof: wp.int32,
):
tid = wp.tid()
# initialize variables:
b_id = wp.int32(0)
h_id = wp.int32(0)
d_id = wp.int32(0)
b_addrs = wp.int32(0)
target_id = wp.int32(0)
w = wp.float32(0.0)
c_p = wp.float32(0.0)
target_p = wp.float32(0.0)
g_p = wp.float32(0.0)
r_w = wp.float32(0.0)
c_total = wp.float32(0.0)
# we launch batch * horizon * dof kernels
b_id = tid / (horizon * dof)
h_id = (tid - (b_id * horizon * dof)) / dof
d_id = tid - (b_id * horizon * dof + h_id * dof)
if b_id >= batch_size or h_id >= horizon or d_id >= dof:
return
# read weights:
w = weight[0]
r_w = run_weight[h_id]
w = r_w * w
r_w = vec_weight[d_id]
w = r_w * w
if w == 0.0:
return
# compute cost:
b_addrs = b_id * horizon * dof + h_id * dof + d_id
# read buffers:
c_p = pos[b_addrs]
target_id = target_idx[b_id]
target_id = target_id * dof + d_id
target_p = target[target_id]
error = c_p - target_p
if r_w >= 1.0 and w > 100.0:
c_total = w * wp.log2(wp.cosh(50.0 * error))
g_p = w * 50.0 * wp.sinh(50.0 * error) / (wp.cosh(50.0 * error))
else:
c_total = w * error * error
g_p = 2.0 * w * error
out_cost[b_addrs] = c_total
# compute gradient
if write_grad == 1:
out_grad_p[b_addrs] = g_p
# create a bound cost tensor:
class L2DistFunction(torch.autograd.Function):
@staticmethod
def forward(
ctx,
pos,
target,
target_idx,
weight,
run_weight,
vec_weight,
out_cost,
out_cost_v,
out_gp,
):
wp_device = wp.device_from_torch(pos.device)
b, h, dof = pos.shape
# print(target)
wp.launch(
kernel=forward_l2_warp,
dim=b * h * dof,
inputs=[
wp.from_torch(pos.detach().reshape(-1), dtype=wp.float32),
wp.from_torch(target.view(-1), dtype=wp.float32),
wp.from_torch(target_idx.view(-1), dtype=wp.int32),
wp.from_torch(weight, dtype=wp.float32),
wp.from_torch(run_weight.view(-1), dtype=wp.float32),
wp.from_torch(vec_weight.view(-1), dtype=wp.float32),
wp.from_torch(out_cost_v.view(-1), dtype=wp.float32),
wp.from_torch(out_gp.view(-1), dtype=wp.float32),
pos.requires_grad,
b,
h,
dof,
],
device=wp_device,
stream=wp.stream_from_torch(pos.device),
)
# cost = torch.linalg.norm(out_cost_v, dim=-1)
# if pos.requires_grad:
# out_gp = out_gp * torch.nan_to_num( 1.0/cost.unsqueeze(-1), 0.0)
cost = torch.sum(out_cost_v, dim=-1)
ctx.save_for_backward(out_gp)
return cost
@staticmethod
def backward(ctx, grad_out_cost):
(p_grad,) = ctx.saved_tensors
p_g = None
if ctx.needs_input_grad[0]:
p_g = p_grad
return p_g, None, None, None, None, None, None, None, None
class DistCost(CostBase, DistCostConfig):
def __init__(self, config: Optional[DistCostConfig] = None):
if config is not None:
DistCostConfig.__init__(self, **vars(config))
CostBase.__init__(self)
self._init_post_config()
init_warp()
def _init_post_config(self):
if self.vec_weight is not None:
self.vec_weight = self.tensor_args.to_device(self.vec_weight)
if not self.use_null_space:
self.vec_weight = self.vec_weight * 0.0 + 1.0
def update_batch_size(self, batch, horizon, dof):
if self._batch_size != batch or self._horizon != horizon or self._dof != dof:
self._out_cv_buffer = torch.zeros(
(batch, horizon, dof), device=self.tensor_args.device, dtype=self.tensor_args.dtype
)
self._out_c_buffer = torch.zeros(
(batch, horizon), device=self.tensor_args.device, dtype=self.tensor_args.dtype
)
self._out_g_buffer = torch.zeros(
(batch, horizon, dof), device=self.tensor_args.device, dtype=self.tensor_args.dtype
)
self._batch_size = batch
self._horizon = horizon
self._dof = dof
if self.vec_weight is None:
self.vec_weight = torch.ones(
(1, 1, self._dof), device=self.tensor_args.device, dtype=self.tensor_args.dtype
)
def forward(self, disp_vec, RETURN_GOAL_DIST=False):
if self.dist_type == DistType.L2:
# dist = torch.norm(disp_vec, p=2, dim=-1, keepdim=False)
dist = L2_DistCost_jit(self.vec_weight, disp_vec)
elif self.dist_type == DistType.SQUARED_L2:
# cost = weight * (0.5 * torch.square(torch.norm(disp_vec, p=2, dim=-1)))
# dist = torch.sum(torch.square(disp_vec), dim=-1, keepdim=False)
dist = fwd_SQL2_DistCost_jit(self.vec_weight, disp_vec)
elif self.dist_type == DistType.L1:
# dist = torch.sum(torch.abs(disp_vec), dim=-1, keepdim=False)
dist = fwd_L1_DistCost_jit(self.vec_weight, disp_vec)
cost = self.weight * dist
if self.terminal and self.run_weight is not None:
if self._run_weight_vec is None or self._run_weight_vec.shape[1] != cost.shape[1]:
self._run_weight_vec = torch.ones(
(1, cost.shape[1]), device=self.tensor_args.device, dtype=self.tensor_args.dtype
)
self._run_weight_vec[:, :-1] *= self.run_weight
if RETURN_GOAL_DIST:
return cost, dist
return cost
def forward_target(self, goal_vec, current_vec, RETURN_GOAL_DIST=False):
if self.dist_type == DistType.L2:
# dist = torch.norm(disp_vec, p=2, dim=-1, keepdim=False)
cost = L2_DistCost_target_jit(self.vec_weight, goal_vec, current_vec, self.weight)
elif self.dist_type == DistType.SQUARED_L2:
# cost = weight * (0.5 * torch.square(torch.norm(disp_vec, p=2, dim=-1)))
# dist = torch.sum(torch.square(disp_vec), dim=-1, keepdim=False)
cost = fwd_SQL2_DistCost_target_jit(self.vec_weight, goal_vec, current_vec, self.weight)
elif self.dist_type == DistType.L1:
# dist = torch.sum(torch.abs(disp_vec), dim=-1, keepdim=False)
cost = fwd_L1_DistCost_target_jit(self.vec_weight, goal_vec, current_vec, self.weight)
dist = cost
if self.terminal and self.run_weight is not None:
if self._run_weight_vec is None or self._run_weight_vec.shape[1] != cost.shape[1]:
self._run_weight_vec = torch.ones(
(1, cost.shape[1]), device=self.tensor_args.device, dtype=self.tensor_args.dtype
)
self._run_weight_vec[:, :-1] *= self.run_weight
cost = self._run_weight_vec * dist
if RETURN_GOAL_DIST:
return cost, dist / self.weight
return cost
def forward_target_idx(self, goal_vec, current_vec, goal_idx, RETURN_GOAL_DIST=False):
b, h, dof = current_vec.shape
self.update_batch_size(b, h, dof)
if self.terminal and self.run_weight is not None:
if self._run_weight_vec is None or self._run_weight_vec.shape[1] != h:
self._run_weight_vec = torch.ones(
(1, h), device=self.tensor_args.device, dtype=self.tensor_args.dtype
)
self._run_weight_vec[:, :-1] *= self.run_weight
else:
raise NotImplementedError("terminal flag needs to be set to true")
if self.dist_type == DistType.L2:
# print(goal_idx.shape, goal_vec.shape)
cost = L2DistFunction.apply(
current_vec,
goal_vec,
goal_idx,
self.weight,
self._run_weight_vec,
self.vec_weight,
self._out_c_buffer,
self._out_cv_buffer,
self._out_g_buffer,
)
# cost = torch.linalg.norm(cost, dim=-1)
else:
raise NotImplementedError()
# print(cost.shape, cost[:,-1])
if RETURN_GOAL_DIST:
return cost, (cost / torch.sqrt((self.weight * self._run_weight_vec)))
return cost
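
The three DistType variants above reduce to the L2, squared-L2, and L1 norms of the weighted displacement; the following is a minimal standalone sketch of that reduction, with the [batch, horizon, dof] shapes assumed and no cuRobo imports:

import torch

def dist_cost(disp_vec, vec_weight, weight, dist_type="l2"):
    # disp_vec: [batch, horizon, dof] displacement to the goal configuration
    w_disp = vec_weight * disp_vec
    if dist_type == "l2":
        dist = torch.norm(w_disp, p=2, dim=-1)
    elif dist_type == "squared_l2":
        dist = torch.sum(torch.square(w_disp), dim=-1)
    else:  # "l1"
        dist = torch.sum(torch.abs(w_disp), dim=-1)
    return weight * dist

cost = dist_cost(torch.randn(2, 30, 7), torch.ones(7), torch.tensor(100.0), "squared_l2")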


@@ -0,0 +1,107 @@
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
# Standard Library
from dataclasses import dataclass
from itertools import product
# Third Party
import torch
# Local Folder
from .cost_base import CostBase, CostConfig
@dataclass
class ManipulabilityCostConfig(CostConfig):
use_joint_limits: bool = False
def __post_init__(self):
return super().__post_init__()
class ManipulabilityCost(CostBase, ManipulabilityCostConfig):
def __init__(self, config: ManipulabilityCostConfig):
ManipulabilityCostConfig.__init__(self, **vars(config))
CostBase.__init__(self)
self.i_mat = torch.ones(
(6, 1), device=self.tensor_args.device, dtype=self.tensor_args.dtype
)
self.delta_vector = torch.zeros(
(64, 1, 1, 6, 1), device=self.tensor_args.device, dtype=self.tensor_args.dtype
)
x = [i for i in product(range(2), repeat=6)]
self.delta_vector[:, 0, 0, :, 0] = torch.as_tensor(
x, device=self.tensor_args.device, dtype=self.tensor_args.dtype
)
self.delta_vector[self.delta_vector == 0] = -1.0
if self.cost_fn is None:
if self.use_joint_limits and self.joint_limits is not None:
self.cost_fn = self.joint_limited_manipulability_delta
else:
self.cost_fn = self.manipulability
def forward(self, jac_batch, q, qdot):
b, h, n = q.shape
if self.use_nn:
q = q.view(b * h, n)
score = self.cost_fn(q, jac_batch, qdot)
if self.use_nn:
score = score.view(b, h)
score[score > self.hinge_value] = self.hinge_value
score = (self.hinge_value / score) - 1
cost = self.weight * score
return cost
def manipulability(self, q, jac_batch, qdot=None):
with torch.cuda.amp.autocast(enabled=False):
J_J_t = torch.matmul(jac_batch, jac_batch.transpose(-2, -1))
score = torch.sqrt(torch.det(J_J_t))
score[score != score] = 0.0
return score
def joint_limited_manipulability_delta(self, q, jac_batch, qdot=None):
# q is [b,h,dof]
q_low = q - self.joint_limits[:, 0]
q_high = q - self.joint_limits[:, 1]
d_h_1 = torch.square(self.joint_limits[:, 1] - self.joint_limits[:, 0]) * (q_low + q_high)
d_h_2 = 4.0 * (torch.square(q_low) * torch.square(q_high))
d_h = torch.div(d_h_1, d_h_2)
dh_term = 1.0 / torch.sqrt(1 + torch.abs(d_h))
f_ten = torch.tensor(1.0, **self.tensor_args)
q_low = torch.abs(q_low)
q_high = torch.abs(q_high)
p_plus = torch.where(q_low > q_high, dh_term, f_ten).unsqueeze(-2)
p_minus = torch.where(q_low > q_high, f_ten, dh_term).unsqueeze(-2)
j_sign = torch.sign(jac_batch)
l_delta = torch.sign(self.delta_vector) * j_sign
L = torch.where(l_delta < 0.0, p_minus, p_plus)
with torch.cuda.amp.autocast(enabled=False):
w_J = L * jac_batch
J_J_t = torch.matmul(w_J, w_J.transpose(-2, -1))
score = torch.sqrt(torch.det(J_J_t))
# get actual score:
min_score = torch.min(score, dim=0)[0]
max_score = torch.max(score, dim=0)[0]
score = min_score / max_score
score[score != score] = 0.0
return score
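
The manipulability method above computes the Yoshikawa measure sqrt(det(J J^T)); a small self-contained sketch of that score follows, with a random Jacobian standing in for the kinematics batch:

import torch

def yoshikawa_manipulability(jac_batch):
    # jac_batch: [..., 6, dof] spatial Jacobians
    j_j_t = torch.matmul(jac_batch, jac_batch.transpose(-2, -1))
    score = torch.sqrt(torch.det(j_j_t))
    # replace NaNs from singular configurations with zero, as in the class above
    return torch.nan_to_num(score, nan=0.0)

score = yoshikawa_manipulability(torch.randn(4, 6, 7))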


@@ -0,0 +1,765 @@
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
# Standard Library
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional
# Third Party
import torch
from torch.autograd import Function
# CuRobo
from curobo.curobolib.geom import get_pose_distance, get_pose_distance_backward
from curobo.rollout.rollout_base import Goal
from curobo.types.math import OrientationError, Pose
# Local Folder
from .cost_base import CostBase, CostConfig
class PoseErrorType(Enum):
SINGLE_GOAL = 0 #: Distance will be computed to a single goal pose
BATCH_GOAL = 1 #: Distance will be computed pairwise between query batch and goal batch
GOALSET = 2 #: Shortest distance will be computed to a goal set
BATCH_GOALSET = 3 #: Shortest distance will be computed to a batch of goal sets
@dataclass
class PoseCostConfig(CostConfig):
cost_type: PoseErrorType = PoseErrorType.BATCH_GOAL
use_metric: bool = False
run_vec_weight: Optional[List[float]] = None
def __post_init__(self):
if self.run_vec_weight is not None:
self.run_vec_weight = self.tensor_args.to_device(self.run_vec_weight)
else:
self.run_vec_weight = torch.ones(
6, device=self.tensor_args.device, dtype=self.tensor_args.dtype
)
if self.vec_weight is None:
self.vec_weight = torch.ones(
6, device=self.tensor_args.device, dtype=self.tensor_args.dtype
)
if self.vec_convergence is None:
self.vec_convergence = torch.zeros(
2, device=self.tensor_args.device, dtype=self.tensor_args.dtype
)
return super().__post_init__()
@torch.jit.script
def backward_PoseError_jit(grad_g_dist, grad_out_distance, weight, g_vec):
grad_vec = grad_g_dist + (grad_out_distance * weight)
grad = 1.0 * (grad_vec).unsqueeze(-1) * g_vec
return grad
# full method:
@torch.jit.script
def backward_full_PoseError_jit(
grad_out_distance, grad_g_dist, grad_r_err, p_w, q_w, g_vec_p, g_vec_q
):
p_grad = (grad_g_dist + (grad_out_distance * p_w)).unsqueeze(-1) * g_vec_p
q_grad = (grad_r_err + (grad_out_distance * q_w)).unsqueeze(-1) * g_vec_q
# p_grad = ((grad_out_distance * p_w)).unsqueeze(-1) * g_vec_p
# q_grad = ((grad_out_distance * q_w)).unsqueeze(-1) * g_vec_q
return p_grad, q_grad
class PoseErrorDistance(Function):
@staticmethod
def forward(
ctx,
current_position,
goal_position,
current_quat,
goal_quat,
vec_weight,
weight,
vec_convergence,
run_weight,
run_vec_weight,
batch_pose_idx,
out_distance,
out_position_distance,
out_rotation_distance,
out_p_vec,
out_r_vec,
out_idx,
out_p_grad,
out_q_grad,
batch_size,
horizon,
mode=PoseErrorType.BATCH_GOAL.value,
num_goals=1,
use_metric=False,
):
# out_distance = current_position[..., 0].detach().clone() * 0.0
# out_position_distance = out_distance.detach().clone()
# out_rotation_distance = out_distance.detach().clone()
# out_vec = (
# torch.cat((current_position.detach().clone(), current_quat.detach().clone()), dim=-1)
# * 0.0
# )
# out_idx = out_distance.clone().to(dtype=torch.long)
(
out_distance,
out_position_distance,
out_rotation_distance,
out_p_vec,
out_r_vec,
out_idx,
) = get_pose_distance(
out_distance,
out_position_distance,
out_rotation_distance,
out_p_vec,
out_r_vec,
out_idx,
current_position.contiguous(),
goal_position,
current_quat.contiguous(),
goal_quat,
vec_weight,
weight,
vec_convergence,
run_weight,
run_vec_weight,
batch_pose_idx,
batch_size,
horizon,
mode,
num_goals,
current_position.requires_grad,
True,
use_metric,
)
ctx.save_for_backward(out_p_vec, out_r_vec, weight, out_p_grad, out_q_grad)
return out_distance, out_position_distance, out_rotation_distance, out_idx # .view(-1,1)
@staticmethod
def backward(ctx, grad_out_distance, grad_g_dist, grad_r_err, grad_out_idx):
(g_vec_p, g_vec_q, weight, out_grad_p, out_grad_q) = ctx.saved_tensors
pos_grad = None
quat_grad = None
batch_size = g_vec_p.shape[0] * g_vec_p.shape[1]
if ctx.needs_input_grad[0] and ctx.needs_input_grad[2]:
pos_grad, quat_grad = get_pose_distance_backward(
out_grad_p,
out_grad_q,
grad_out_distance.contiguous(),
grad_g_dist.contiguous(),
grad_r_err.contiguous(),
weight,
g_vec_p,
g_vec_q,
batch_size,
use_distance=True,
)
# pos_grad, quat_grad = backward_full_PoseError_jit(
# grad_out_distance,
# grad_g_dist, grad_r_err, p_w, q_w, g_vec_p, g_vec_q
# )
elif ctx.needs_input_grad[0]:
pos_grad = backward_PoseError_jit(grad_g_dist, grad_out_distance, weight[1], g_vec_p)
# grad_vec = grad_g_dist + (grad_out_distance * weight[1])
# pos_grad = 1.0 * (grad_vec).unsqueeze(-1) * g_vec[..., 4:]
elif ctx.needs_input_grad[2]:
quat_grad = backward_PoseError_jit(grad_r_err, grad_out_distance, weight[0], g_vec_q)
# grad_vec = grad_r_err + (grad_out_distance * weight[0])
# quat_grad = 1.0 * (grad_vec).unsqueeze(-1) * g_vec[..., :4]
return (
pos_grad,
None,
quat_grad,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
)
class PoseLoss(Function):
@staticmethod
def forward(
ctx,
current_position,
goal_position,
current_quat,
goal_quat,
vec_weight,
weight,
vec_convergence,
run_weight,
run_vec_weight,
batch_pose_idx,
out_distance,
out_position_distance,
out_rotation_distance,
out_p_vec,
out_r_vec,
out_idx,
out_p_grad,
out_q_grad,
batch_size,
horizon,
mode=PoseErrorType.BATCH_GOAL.value,
num_goals=1,
use_metric=False,
):
# out_distance = current_position[..., 0].detach().clone() * 0.0
# out_position_distance = out_distance.detach().clone()
# out_rotation_distance = out_distance.detach().clone()
# out_vec = (
# torch.cat((current_position.detach().clone(), current_quat.detach().clone()), dim=-1)
# * 0.0
# )
# out_idx = out_distance.clone().to(dtype=torch.long)
(
out_distance,
out_position_distance,
out_rotation_distance,
out_p_vec,
out_r_vec,
out_idx,
) = get_pose_distance(
out_distance,
out_position_distance,
out_rotation_distance,
out_p_vec,
out_r_vec,
out_idx,
current_position.contiguous(),
goal_position,
current_quat.contiguous(),
goal_quat,
vec_weight,
weight,
vec_convergence,
run_weight,
run_vec_weight,
batch_pose_idx,
batch_size,
horizon,
mode,
num_goals,
current_position.requires_grad,
False,
use_metric,
)
ctx.save_for_backward(out_p_vec, out_r_vec)
return out_distance
@staticmethod
def backward(ctx, grad_out_distance): # , grad_g_dist, grad_r_err, grad_out_idx):
pos_grad = None
quat_grad = None
if ctx.needs_input_grad[0] and ctx.needs_input_grad[2]:
(g_vec_p, g_vec_q) = ctx.saved_tensors
pos_grad = g_vec_p * grad_out_distance.unsqueeze(1)
quat_grad = g_vec_q * grad_out_distance.unsqueeze(1)
pos_grad = pos_grad.unsqueeze(-2)
quat_grad = quat_grad.unsqueeze(-2)
elif ctx.needs_input_grad[0]:
(g_vec_p, g_vec_q) = ctx.saved_tensors
pos_grad = g_vec_p * grad_out_distance.unsqueeze(1)
pos_grad = pos_grad.unsqueeze(-2)
elif ctx.needs_input_grad[2]:
(g_vec_p, g_vec_q) = ctx.saved_tensors
quat_grad = g_vec_q * grad_out_distance.unsqueeze(1)
quat_grad = quat_grad.unsqueeze(-2)
return (
pos_grad,
None,
quat_grad,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
)
class PoseError(Function):
@staticmethod
def forward(
ctx,
current_position,
goal_position,
current_quat,
goal_quat,
vec_weight,
weight,
vec_convergence,
run_weight,
run_vec_weight,
batch_pose_idx,
out_distance,
out_position_distance,
out_rotation_distance,
out_p_vec,
out_r_vec,
out_idx,
out_p_grad,
out_q_grad,
batch_size,
horizon,
mode=PoseErrorType.BATCH_GOAL.value,
num_goals=1,
use_metric=False,
):
# out_distance = current_position[..., 0].detach().clone() * 0.0
# out_position_distance = out_distance.detach().clone()
# out_rotation_distance = out_distance.detach().clone()
# out_vec = (
# torch.cat((current_position.detach().clone(), current_quat.detach().clone()), dim=-1)
# * 0.0
# )
# out_idx = out_distance.clone().to(dtype=torch.long)
(
out_distance,
out_position_distance,
out_rotation_distance,
out_p_vec,
out_r_vec,
out_idx,
) = get_pose_distance(
out_distance,
out_position_distance,
out_rotation_distance,
out_p_vec,
out_r_vec,
out_idx,
current_position.contiguous(),
goal_position,
current_quat.contiguous(),
goal_quat,
vec_weight,
weight,
vec_convergence,
run_weight,
run_vec_weight,
batch_pose_idx,
batch_size,
horizon,
mode,
num_goals,
current_position.requires_grad,
False,
use_metric,
)
ctx.save_for_backward(out_p_vec, out_r_vec)
return out_distance
@staticmethod
def backward(ctx, grad_out_distance): # , grad_g_dist, grad_r_err, grad_out_idx):
pos_grad = None
quat_grad = None
if ctx.needs_input_grad[0] and ctx.needs_input_grad[2]:
(g_vec_p, g_vec_q) = ctx.saved_tensors
pos_grad = g_vec_p
quat_grad = g_vec_q
elif ctx.needs_input_grad[0]:
(g_vec_p, g_vec_q) = ctx.saved_tensors
pos_grad = g_vec_p
elif ctx.needs_input_grad[2]:
(g_vec_p, g_vec_q) = ctx.saved_tensors
quat_grad = g_vec_q
return (
pos_grad,
None,
quat_grad,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
)
class PoseCost(CostBase, PoseCostConfig):
def __init__(self, config: PoseCostConfig):
PoseCostConfig.__init__(self, **vars(config))
CostBase.__init__(self)
self.rot_weight = self.vec_weight[0:3]
self.pos_weight = self.vec_weight[3:6]
self._vec_convergence = self.tensor_args.to_device(self.vec_convergence)
self._batch_size = 0
self._horizon = 0
def update_batch_size(self, batch_size, horizon):
if batch_size != self._batch_size or horizon != self._horizon:
# batch_size = b*h
self.out_distance = torch.zeros(
(batch_size, horizon), device=self.tensor_args.device, dtype=self.tensor_args.dtype
)
self.out_position_distance = torch.zeros(
(batch_size, horizon), device=self.tensor_args.device, dtype=self.tensor_args.dtype
)
self.out_rotation_distance = torch.zeros(
(batch_size, horizon), device=self.tensor_args.device, dtype=self.tensor_args.dtype
)
self.out_idx = torch.zeros(
(batch_size, horizon), device=self.tensor_args.device, dtype=torch.int32
)
self.out_p_vec = torch.zeros(
(batch_size, horizon, 3),
device=self.tensor_args.device,
dtype=self.tensor_args.dtype,
)
self.out_q_vec = torch.zeros(
(batch_size, horizon, 4),
device=self.tensor_args.device,
dtype=self.tensor_args.dtype,
)
self.out_p_grad = torch.zeros(
(batch_size, horizon, 3),
device=self.tensor_args.device,
dtype=self.tensor_args.dtype,
)
self.out_q_grad = torch.zeros(
(batch_size, horizon, 4),
device=self.tensor_args.device,
dtype=self.tensor_args.dtype,
)
if self._run_weight_vec is None or self._run_weight_vec.shape[1] != horizon:
self._run_weight_vec = torch.ones(
(1, horizon), device=self.tensor_args.device, dtype=self.tensor_args.dtype
)
if self.terminal and self.run_weight is not None:
self._run_weight_vec[:, :-1] *= self.run_weight
self._batch_size = batch_size
self._horizon = horizon
def _forward_goal_distribution(self, ee_pos_batch, ee_rot_batch, ee_goal_pos, ee_goal_rot):
ee_goal_pos = ee_goal_pos.unsqueeze(1)
ee_goal_pos = ee_goal_pos.unsqueeze(1)
ee_goal_rot = ee_goal_rot.unsqueeze(1)
ee_goal_rot = ee_goal_rot.unsqueeze(1)
error, rot_error, pos_error = self._forward_single_goal(
ee_pos_batch, ee_rot_batch, ee_goal_pos, ee_goal_rot
)
min_idx = torch.argmin(error[:, :, -1], dim=0)
min_idx = min_idx.unsqueeze(1).expand(error.shape[1], error.shape[2])
if len(min_idx.shape) == 2:
min_idx = min_idx[0, 0]
error = error[min_idx]
rot_error = rot_error[min_idx]
pos_error = pos_error[min_idx]
return error, rot_error, pos_error, min_idx
def _forward_single_goal(self, ee_pos_batch, ee_rot_batch, ee_goal_pos, ee_goal_rot):
# b, h, _ = ee_pos_batch.shape
d_g_ee = ee_pos_batch - ee_goal_pos
position_err = torch.norm(self.pos_weight * d_g_ee, dim=-1)
goal_dist = position_err # .clone()
rot_err = OrientationError.apply(ee_goal_rot, ee_rot_batch, ee_rot_batch.clone()).squeeze(
-1
)
rot_err_c = rot_err.clone()
goal_dist_c = goal_dist.clone()
# clamp:
if self.vec_convergence[1] > 0.0:
position_err = torch.where(
position_err > self.vec_convergence[1], position_err, position_err * 0.0
)
if self.vec_convergence[0] > 0.0:
rot_err = torch.where(rot_err > self.vec_convergence[0], rot_err, rot_err * 0.0)
# rot_err = torch.norm(goal_orient_vec, dim = -1)
cost = self.weight[0] * rot_err + self.weight[1] * position_err
# dimension should be batch * traj_length
return cost, rot_err_c, goal_dist_c
def _forward_pytorch(self, ee_pos_batch, ee_rot_batch, ee_goal_pos, ee_goal_rot):
if self.cost_type == PoseErrorType.SINGLE_GOAL:
cost, r_err, g_dist = self._forward_single_goal(
ee_pos_batch, ee_rot_batch, ee_goal_pos, ee_goal_rot
)
elif self.cost_type == PoseErrorType.BATCH_GOAL:
cost, r_err, g_dist = self._forward_single_goal(
ee_pos_batch, ee_rot_batch, ee_goal_pos, ee_goal_rot
)
else:
cost, r_err, g_dist = self._forward_goal_distribution(
ee_pos_batch, ee_rot_batch, ee_goal_pos, ee_goal_rot
)
if self.terminal and self.run_weight is not None:
cost[:, :-1] *= self.run_weight
return cost, r_err, g_dist
def _update_cost_type(self, ee_goal_pos, ee_pos_batch, num_goals):
d_g = len(ee_goal_pos.shape)
b_sze = ee_goal_pos.shape[0]
if d_g == 2 and b_sze == 1: # 1, 3
self.cost_type = PoseErrorType.SINGLE_GOAL
elif d_g == 2 and b_sze == ee_pos_batch.shape[0]: # b, 3
self.cost_type = PoseErrorType.BATCH_GOAL
elif d_g == 3:
self.cost_type = PoseErrorType.GOALSET
elif len(ee_goal_pos.shape) == 4 and b_sze == ee_pos_batch.shape[0]:
self.cost_type = PoseErrorType.BATCH_GOALSET
def forward_out_distance(
self, ee_pos_batch, ee_rot_batch, goal: Goal, link_name: Optional[str] = None
):
if link_name is None:
goal_pose = goal.goal_pose
else:
goal_pose = goal.links_goal_pose[link_name]
ee_goal_pos = goal_pose.position
ee_goal_rot = goal_pose.quaternion
num_goals = goal_pose.n_goalset
self._update_cost_type(ee_goal_pos, ee_pos_batch, num_goals)
b, h, _ = ee_pos_batch.shape
self.update_batch_size(b, h)
distance, g_dist, r_err, idx = PoseErrorDistance.apply(
ee_pos_batch, # .view(-1, 3).contiguous(),
ee_goal_pos,
ee_rot_batch, # .view(-1, 4).contiguous(),
ee_goal_rot,
self.vec_weight,
self.weight,
self._vec_convergence,
self._run_weight_vec,
self.run_vec_weight,
goal.batch_pose_idx,
self.out_distance,
self.out_position_distance,
self.out_rotation_distance,
self.out_p_vec,
self.out_q_vec,
self.out_idx,
self.out_p_grad,
self.out_q_grad,
b,
h,
self.cost_type.value,
num_goals,
self.use_metric,
)
# print(goal.batch_pose_idx.shape)
cost = distance # .view(b, h)#.clone()
r_err = r_err # .view(b, h)
g_dist = g_dist # .view(b, h)
idx = idx # .view(b, h)
return cost, r_err, g_dist
def forward(self, ee_pos_batch, ee_rot_batch, goal: Goal, link_name: Optional[str] = None):
if link_name is None:
goal_pose = goal.goal_pose
else:
goal_pose = goal.links_goal_pose[link_name]
ee_goal_pos = goal_pose.position
ee_goal_rot = goal_pose.quaternion
num_goals = goal_pose.n_goalset
self._update_cost_type(ee_goal_pos, ee_pos_batch, num_goals)
b, h, _ = ee_pos_batch.shape
self.update_batch_size(b, h)
# return self.out_distance
# print(b,h, ee_goal_pos.shape)
if self.return_loss:
distance = PoseLoss.apply(
ee_pos_batch,
ee_goal_pos,
ee_rot_batch, # .view(-1, 4).contiguous(),
ee_goal_rot,
self.vec_weight,
self.weight,
self._vec_convergence,
self._run_weight_vec,
self.run_vec_weight,
goal.batch_pose_idx,
self.out_distance,
self.out_position_distance,
self.out_rotation_distance,
self.out_p_vec,
self.out_q_vec,
self.out_idx,
self.out_p_grad,
self.out_q_grad,
b,
h,
self.cost_type.value,
num_goals,
self.use_metric,
)
else:
distance = PoseError.apply(
ee_pos_batch,
ee_goal_pos,
ee_rot_batch, # .view(-1, 4).contiguous(),
ee_goal_rot,
self.vec_weight,
self.weight,
self._vec_convergence,
self._run_weight_vec,
self.run_vec_weight,
goal.batch_pose_idx,
self.out_distance,
self.out_position_distance,
self.out_rotation_distance,
self.out_p_vec,
self.out_q_vec,
self.out_idx,
self.out_p_grad,
self.out_q_grad,
b,
h,
self.cost_type.value,
num_goals,
self.use_metric,
)
cost = distance
# print(cost.shape)
return cost
def forward_pose(
self,
goal_pose: Pose,
query_pose: Pose,
batch_pose_idx: torch.Tensor,
mode: PoseErrorType = PoseErrorType.BATCH_GOAL,
):
ee_goal_pos = goal_pose.position
ee_goal_quat = goal_pose.quaternion
self.cost_type = mode
self.update_batch_size(query_pose.position.shape[0], query_pose.position.shape[1])
b = query_pose.position.shape[0]
h = query_pose.position.shape[1]
num_goals = 1
if self.return_loss:
distance = PoseLoss.apply(
query_pose.position.unsqueeze(1),
ee_goal_pos,
query_pose.quaternion.unsqueeze(1),
ee_goal_quat,
self.vec_weight,
self.weight,
self._vec_convergence,
self._run_weight_vec,
self.run_vec_weight,
batch_pose_idx,
self.out_distance,
self.out_position_distance,
self.out_rotation_distance,
self.out_p_vec,
self.out_q_vec,
self.out_idx,
self.out_p_grad,
self.out_q_grad,
b,
h,
self.cost_type.value,
num_goals,
self.use_metric,
)
else:
distance = PoseError.apply(
query_pose.position.unsqueeze(1),
ee_goal_pos,
query_pose.quaternion.unsqueeze(1),
ee_goal_quat,
self.vec_weight,
self.weight,
self._vec_convergence,
self._run_weight_vec,
self.run_vec_weight,
batch_pose_idx,
self.out_distance,
self.out_position_distance,
self.out_rotation_distance,
self.out_p_vec,
self.out_q_vec,
self.out_idx,
self.out_p_grad,
self.out_q_grad,
b,
h,
self.cost_type.value,
num_goals,
self.use_metric,
)
return distance
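
For reference, the goal-shape dispatch performed by _update_cost_type can be read as a small standalone function; the string labels mirror the PoseErrorType enum above, and the tensor shapes are assumptions made for illustration:

import torch

def select_pose_error_type(ee_goal_pos, ee_pos_batch):
    # ee_goal_pos: goal positions; ee_pos_batch: [batch, horizon, 3] query positions
    d_g = ee_goal_pos.dim()
    b_size = ee_goal_pos.shape[0]
    if d_g == 2 and b_size == 1:
        return "SINGLE_GOAL"    # one goal shared by all queries
    if d_g == 2 and b_size == ee_pos_batch.shape[0]:
        return "BATCH_GOAL"     # one goal per query in the batch
    if d_g == 3:
        return "GOALSET"        # shortest distance to a set of goals
    if d_g == 4 and b_size == ee_pos_batch.shape[0]:
        return "BATCH_GOALSET"  # one goal set per query in the batch
    return None                 # unchanged in the class (cost_type keeps its previous value)

mode = select_pose_error_type(torch.zeros(1, 3), torch.zeros(8, 30, 3))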


@@ -0,0 +1,214 @@
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
# Standard Library
from dataclasses import dataclass
from typing import Optional, Union
# Third Party
import torch
# CuRobo
from curobo.geom.sdf.world import CollisionQueryBuffer, WorldCollision
from curobo.rollout.cost.cost_base import CostBase, CostConfig
from curobo.rollout.dynamics_model.integration_utils import interpolate_kernel, sum_matrix
@dataclass
class PrimitiveCollisionCostConfig(CostConfig):
"""Create Collision Cost Configuration."""
#: WorldCollision instance to use for distance queries.
world_coll_checker: Optional[WorldCollision] = None
#: Sweep for collisions between timesteps in a trajectory.
use_sweep: bool = False
use_sweep_kernel: bool = False
sweep_steps: int = 4
#: Speed metric scales the collision distance by sphere velocity (similar to CHOMP Planner
#: ICRA'09). This prevents the optimizer from speeding through obstacles to minimize cost and
#: instead encourages the robot to move around the obstacle.
use_speed_metric: bool = False
#: dt to use for computation of velocity and acceleration through central difference for
#: speed metric. A value less than 1 is preferred, as it leads to different scaling between
#: acceleration and velocity.
speed_dt: Union[torch.Tensor, float] = 0.01
#: The distance outside collision at which to activate the cost. A non-zero value lets the
#: robot slow down when it is within this distance of an obstacle, which helps the
#: post-optimization interpolation stay collision-free.
activation_distance: Union[torch.Tensor, float] = 0.0
#: Setting this flag to true will sum the distance across spheres of the robot.
sum_distance: bool = True
def __post_init__(self):
if isinstance(self.speed_dt, float):
self.speed_dt = self.tensor_args.to_device([self.speed_dt])
if isinstance(self.activation_distance, float):
self.activation_distance = self.tensor_args.to_device([self.activation_distance])
return super().__post_init__()
class PrimitiveCollisionCost(CostBase, PrimitiveCollisionCostConfig):
def __init__(self, config: PrimitiveCollisionCostConfig):
"""Creates a primitive collision cost instance.
See note on :ref:`collision_checking_note` for details on the cost formulation.
Args:
config: Cost configuration parameters.
"""
PrimitiveCollisionCostConfig.__init__(self, **vars(config))
CostBase.__init__(self)
self._og_speed_dt = self.speed_dt.clone()
self.batch_size = -1
self._horizon = -1
self._n_spheres = -1
self.t_mat = None
if self.classify:
self.coll_check_fn = self.world_coll_checker.get_sphere_collision
self.sweep_check_fn = self.world_coll_checker.get_swept_sphere_collision
else:
self.coll_check_fn = self.world_coll_checker.get_sphere_distance
self.sweep_check_fn = self.world_coll_checker.get_swept_sphere_distance
self.sampled_spheres = None
self.sum_mat = None #
if self.use_sweep:
# if self.use_sweep_kernel and (
# type(self.world_coll_checker) in [WorldMeshCollision, WorldPrimitiveCollision]
# ):
# TODO: Implement sweep for nvblox collision checker.
self.forward = self.sweep_kernel_fn
# else:
# self.forward = self.discrete_fn
else:
self.forward = self.discrete_fn
self.int_mat = None
self._fd_matrix = None
self._collision_query_buffer = CollisionQueryBuffer()
def sweep_kernel_fn(self, robot_spheres_in, env_query_idx: Optional[torch.Tensor] = None):
self._collision_query_buffer.update_buffer_shape(
robot_spheres_in.shape, self.tensor_args, self.world_coll_checker.collision_types
)
dist = self.sweep_check_fn(
robot_spheres_in,
self._collision_query_buffer,
self.weight,
sweep_steps=self.sweep_steps,
activation_distance=self.activation_distance,
speed_dt=self.speed_dt,
enable_speed_metric=self.use_speed_metric,
env_query_idx=env_query_idx,
return_loss=self.return_loss,
)
if self.classify:
cost = weight_collision(dist, self.weight, self.sum_distance)
else:
cost = weight_distance(dist, self.weight, self.sum_distance)
return cost
def sweep_fn(self, robot_spheres_in, env_query_idx: Optional[torch.Tensor] = None):
batch_size, horizon, n_spheres, _ = robot_spheres_in.shape
# add intermediate spheres to account for discretization:
new_horizon = (horizon - 1) * self.sweep_steps
if self.int_mat is None:
self.int_mat = interpolate_kernel(horizon, self.sweep_steps, self.tensor_args)
self.int_mat_t = self.int_mat.transpose(0, 1)
self.int_sum_mat = sum_matrix(horizon, self.sweep_steps, self.tensor_args)
sampled_spheres = (
(robot_spheres_in.transpose(1, 2).transpose(2, 3) @ self.int_mat_t)
.transpose(2, 3)
.transpose(1, 2)
.contiguous()
)
# robot_spheres = sampled_spheres.view(batch_size * new_horizon * n_spheres, 4)
# self.update_batch_size(batch_size * new_horizon * n_spheres)
self._collision_query_buffer.update_buffer_shape(
sampled_spheres.shape, self.tensor_args, self.world_coll_checker.collision_types
)
dist = self.coll_check_fn(
sampled_spheres.contiguous(),
self._collision_query_buffer,
self.weight,
activation_distance=self.activation_distance,
env_query_idx=env_query_idx,
return_loss=self.return_loss,
)
dist = dist.view(batch_size, new_horizon, n_spheres)
if self.classify:
cost = weight_sweep_collision(self.int_sum_mat, dist, self.weight, self.sum_distance)
else:
cost = weight_sweep_distance(self.int_sum_mat, dist, self.weight, self.sum_distance)
return cost
def discrete_fn(self, robot_spheres_in, env_query_idx: Optional[torch.Tensor] = None):
self._collision_query_buffer.update_buffer_shape(
robot_spheres_in.shape, self.tensor_args, self.world_coll_checker.collision_types
)
dist = self.coll_check_fn(
robot_spheres_in,
self._collision_query_buffer,
self.weight,
env_query_idx=env_query_idx,
activation_distance=self.activation_distance,
return_loss=self.return_loss,
)
if self.classify:
cost = weight_collision(dist, self.weight, self.sum_distance)
else:
cost = weight_distance(dist, self.weight, self.sum_distance)
return cost
def update_dt(self, dt: Union[float, torch.Tensor]):
self.speed_dt[:] = dt # / self._og_speed_dt
return super().update_dt(dt)
def get_gradient_buffer(self):
return self._collision_query_buffer.get_gradient_buffer()
@torch.jit.script
def weight_sweep_distance(int_mat, dist, weight, sum_cost: bool):
dist = torch.sum(dist, dim=-1)
dist = dist @ int_mat
return dist
@torch.jit.script
def weight_sweep_collision(int_mat, dist, weight, sum_cost: bool):
dist = torch.sum(dist, dim=-1)
dist = torch.where(dist > 0, dist + 1.0, dist)
dist = dist @ int_mat
return dist
@torch.jit.script
def weight_distance(dist, weight, sum_cost: bool):
if sum_cost:
dist = torch.sum(dist, dim=-1)
return dist
@torch.jit.script
def weight_collision(dist, weight, sum_cost: bool):
if sum_cost:
dist = torch.sum(dist, dim=-1)
dist = torch.where(dist > 0, dist + 1.0, dist)
return dist
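
The real distance queries above happen inside the CUDA/warp collision checkers; the sketch below only illustrates how an activation distance turns a distance-to-surface value into a hinged cost. The sign convention, shapes, and function name are assumptions made for this example, not the checker's actual API:

import torch

def hinged_sphere_cost(dist_to_surface, activation_distance, weight):
    # dist_to_surface: [batch, horizon, n_spheres], positive means outside obstacles;
    # the cost activates once a sphere comes within activation_distance of a surface
    penetration = torch.nn.functional.relu(activation_distance - dist_to_surface)
    return weight * torch.sum(penetration, dim=-1)  # sum over robot spheres

cost = hinged_sphere_cost(torch.rand(2, 30, 60), activation_distance=0.02, weight=100.0)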


@@ -0,0 +1,69 @@
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
"""
Distance cost projected into the null-space of the Jacobian
"""
# Standard Library
from dataclasses import dataclass
from enum import Enum
# Third Party
import torch
# Local Folder
from .dist_cost import DistCost, DistCostConfig
class ProjType(Enum):
IDENTITY = 0
PSEUDO_INVERSE = 1
@dataclass
class ProjectedDistCostConfig(DistCostConfig):
eps: float = 1e-4
proj_type: ProjType = ProjType.IDENTITY
def __post_init__(self):
return super().__post_init__()
class ProjectedDistCost(DistCost, ProjectedDistCostConfig):
def __init__(self, config: ProjectedDistCostConfig):
ProjectedDistCostConfig.__init__(self, **vars(config))
DistCost.__init__(self)
self.I = torch.eye(self.dof, device=self.tensor_args.device, dtype=self.tensor_args.dtype)
self.task_I = torch.eye(6, device=self.tensor_args.device, dtype=self.tensor_args.dtype)
def forward(self, disp_vec, jac_batch=None):
disp_vec = self.vec_weight * disp_vec
if self.proj_type == ProjType.PSEUDO_INVERSE:
disp_vec_projected = self.get_pinv_null_disp(disp_vec, jac_batch)
elif self.proj_type == ProjType.IDENTITY:
disp_vec_projected = disp_vec
return super().forward(disp_vec_projected)
def get_pinv_null_disp(self, disp_vec, jac_batch):
jac_batch_t = jac_batch.transpose(-2, -1)
J_J_t = torch.matmul(jac_batch, jac_batch_t)
J_pinv = jac_batch_t @ torch.inverse(J_J_t + self.eps * self.task_I.expand_as(J_J_t))
J_pinv_J = torch.matmul(J_pinv, jac_batch)
null_proj = self.I.expand_as(J_pinv_J) - J_pinv_J
null_disp = torch.matmul(null_proj, disp_vec.unsqueeze(-1)).squeeze(-1)
return null_disp
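
get_pinv_null_disp builds the null-space projector from a damped pseudo-inverse; a self-contained version of the same algebra is sketched below, with the batch shapes assumed:

import torch

def null_space_displacement(disp_vec, jac_batch, eps=1e-4):
    # jac_batch: [..., 6, dof]; project disp_vec into the null space of the Jacobian
    jac_t = jac_batch.transpose(-2, -1)
    j_j_t = jac_batch @ jac_t
    task_eye = torch.eye(6, dtype=jac_batch.dtype, device=jac_batch.device)
    j_pinv = jac_t @ torch.inverse(j_j_t + eps * task_eye)
    dof_eye = torch.eye(jac_batch.shape[-1], dtype=jac_batch.dtype, device=jac_batch.device)
    null_proj = dof_eye - j_pinv @ jac_batch
    return (null_proj @ disp_vec.unsqueeze(-1)).squeeze(-1)

null_disp = null_space_displacement(torch.randn(4, 7), torch.randn(4, 6, 7))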


@@ -0,0 +1,79 @@
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
# Standard Library
from dataclasses import dataclass
from typing import Optional
# Third Party
import torch
# CuRobo
from curobo.cuda_robot_model.types import SelfCollisionKinematicsConfig
from curobo.curobolib.geom import SelfCollisionDistance
# Local Folder
from .cost_base import CostBase, CostConfig
@dataclass
class SelfCollisionCostConfig(CostConfig):
self_collision_kin_config: Optional[SelfCollisionKinematicsConfig] = None
def __post_init__(self):
return super().__post_init__()
class SelfCollisionCost(CostBase, SelfCollisionCostConfig):
def __init__(self, config: SelfCollisionCostConfig):
SelfCollisionCostConfig.__init__(self, **vars(config))
CostBase.__init__(self)
self._batch_size = None
def update_batch_size(self, robot_spheres):
# Assuming n stays constant
# TODO: use collision buffer here?
if self._batch_size is None or self._batch_size != robot_spheres.shape:
b, h, n, k = robot_spheres.shape
self._out_distance = torch.zeros(
(b, h), device=self.tensor_args.device, dtype=self.tensor_args.dtype
)
self._out_vec = torch.zeros(
(b, h, n, k), device=self.tensor_args.device, dtype=self.tensor_args.dtype
)
self._batch_size = robot_spheres.shape
self._sparse_sphere_idx = torch.zeros(
(b, h, n), device=self.tensor_args.device, dtype=torch.uint8
)
def forward(self, robot_spheres):
self.update_batch_size(robot_spheres)
dist = SelfCollisionDistance.apply(
self._out_distance,
self._out_vec,
self._sparse_sphere_idx,
robot_spheres,
self.self_collision_kin_config.offset,
self.weight,
self.self_collision_kin_config.collision_matrix,
self.self_collision_kin_config.thread_location,
self.self_collision_kin_config.thread_max,
self.self_collision_kin_config.checks_per_thread,
# False,
self.self_collision_kin_config.experimental_kernel,
self.return_loss,
)
if self.classify:
dist = torch.where(dist > 0, dist + 1.0, dist)
return dist
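
SelfCollisionDistance is a CUDA kernel; the naive dense stand-in below only shows the quantity it computes, the maximum pairwise sphere penetration over an allowed-pair mask. The dense mask here is a toy assumption, not the packed thread data the kernel uses:

import torch

def naive_self_collision_depth(spheres, pair_mask):
    # spheres: [n, 4] as (x, y, z, radius); pair_mask: [n, n] bool, True where a pair is checked
    centers, radii = spheres[:, :3], spheres[:, 3]
    center_dist = torch.cdist(centers, centers)
    penetration = radii[:, None] + radii[None, :] - center_dist
    penetration = torch.where(pair_mask, penetration, torch.zeros_like(penetration))
    return torch.clamp(penetration, min=0.0).amax()

spheres = torch.rand(10, 4)
mask = ~torch.eye(10, dtype=torch.bool)
depth = naive_self_collision_depth(spheres, mask)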


@@ -0,0 +1,76 @@
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
# Standard Library
from dataclasses import dataclass
from typing import Optional
# Third Party
import torch
# CuRobo
from curobo.rollout.dynamics_model.kinematic_model import TimeTrajConfig
# Local Folder
from .cost_base import CostBase, CostConfig
@dataclass
class StopCostConfig(CostConfig):
max_limit: Optional[float] = None
max_nlimit: Optional[float] = None
dt_traj_params: Optional[TimeTrajConfig] = None
horizon: int = 1
def __post_init__(self):
return super().__post_init__()
class StopCost(CostBase, StopCostConfig):
def __init__(self, config: StopCostConfig):
StopCostConfig.__init__(self, **vars(config))
CostBase.__init__(self)
sum_matrix = torch.tril(
torch.ones(
(self.horizon, self.horizon),
device=self.tensor_args.device,
dtype=self.tensor_args.dtype,
)
).T
traj_dt = self.tensor_args.to_device(self.dt_traj_params.get_dt_array(self.horizon))
if self.max_nlimit is not None:
# every timestep max acceleration:
sum_matrix = torch.tril(
torch.ones(
(self.horizon, self.horizon),
device=self.tensor_args.device,
dtype=self.tensor_args.dtype,
)
).T
delta_vel = traj_dt * self.max_nlimit
self.max_vel = (sum_matrix @ delta_vel).unsqueeze(-1)
elif self.max_limit is not None:
sum_matrix = torch.tril(
torch.ones(
(self.horizon, self.horizon),
device=self.tensor_args.device,
dtype=self.tensor_args.dtype,
)
).T
delta_vel = torch.ones_like(traj_dt) * self.max_limit
self.max_vel = (sum_matrix @ delta_vel).unsqueeze(-1)
def forward(self, vels):
vel_abs = torch.abs(vels)
vel_abs = torch.nn.functional.relu(vel_abs - self.max_vel)
cost = self.weight * (torch.sum(vel_abs**2, dim=-1))
return cost
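
The triangular sum_matrix in StopCost.__init__ integrates an acceleration limit into a per-timestep velocity bound; a compact sketch of that construction follows, where the horizon and dt values are example assumptions:

import torch

def stopping_velocity_bound(traj_dt, max_acceleration):
    # traj_dt: [horizon] per-step dt; row i of the matrix sums dt from step i to the end,
    # giving the largest velocity that can still be brought to zero within the horizon
    horizon = traj_dt.shape[0]
    sum_matrix = torch.tril(torch.ones(horizon, horizon)).T
    delta_vel = traj_dt * max_acceleration
    return (sum_matrix @ delta_vel).unsqueeze(-1)  # [horizon, 1]

max_vel = stopping_velocity_bound(torch.full((30,), 0.02), max_acceleration=2.0)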


@@ -0,0 +1,45 @@
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
# Third Party
import torch
# Local Folder
from .cost_base import CostBase, CostConfig
@torch.jit.script
def st_cost(ee_pos_batch, vec_weight, weight):
ee_plus_one = torch.roll(ee_pos_batch, 1, dims=1)
xdot_current = ee_pos_batch - ee_plus_one + 1e-8
err_vec = vec_weight * xdot_current / 0.02
error = torch.sum(torch.square(err_vec), dim=-1)
# compute distance vector
cost = weight * error
return cost
class StraightLineCost(CostBase):
def __init__(self, config: CostConfig):
CostBase.__init__(self, config)
self.vel_idxs = torch.arange(
self.dof, 2 * self.dof, dtype=torch.long, device=self.tensor_args.device
)
self.I = torch.eye(self.dof, device=self.tensor_args.device, dtype=self.tensor_args.dtype)
def forward(self, ee_pos_batch):
cost = st_cost(ee_pos_batch, self.vec_weight, self.weight)
return cost
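
st_cost penalizes the finite-difference velocity of the end-effector path; the same computation written standalone is shown below, with the hard-coded 0.02 exposed as a dt parameter (an assumption made for readability):

import torch

def path_velocity_cost(ee_pos_batch, vec_weight, weight, dt=0.02):
    # ee_pos_batch: [batch, horizon, 3]; torch.roll shifts each waypoint to its predecessor
    prev_pos = torch.roll(ee_pos_batch, 1, dims=1)
    vel = vec_weight * (ee_pos_batch - prev_pos + 1e-8) / dt
    return weight * torch.sum(torch.square(vel), dim=-1)

cost = path_velocity_cost(torch.randn(2, 30, 3), torch.ones(3), torch.tensor(10.0))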


@@ -0,0 +1,116 @@
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
# Third Party
import torch
# Local Folder
from .cost_base import CostBase
@torch.jit.script
def squared_sum(cost: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
# return weight * torch.square(torch.linalg.norm(cost, dim=-1, ord=1))
# return weight * torch.sum(torch.square(cost), dim=-1)
# return torch.sum(torch.abs(cost) * weight, dim=-1)
return torch.sum(torch.square(cost) * weight, dim=-1)
@torch.jit.script
def run_squared_sum(
cost: torch.Tensor, weight: torch.Tensor, run_weight: torch.Tensor
) -> torch.Tensor:
# return torch.sum(torch.abs(cost)* weight * run_weight.unsqueeze(-1), dim=-1)
## below is smaller compute but more kernels
return torch.sum(torch.square(cost) * weight * run_weight.unsqueeze(-1), dim=-1)
# return torch.sum(torch.square(cost), dim=-1) * weight * run_weight
@torch.jit.script
def backward_squared_sum(cost_vec, w):
return 2.0 * w * cost_vec # * g_out.unsqueeze(-1)
# return w * g_out.unsqueeze(-1)
@torch.jit.script
def backward_run_squared_sum(cost_vec, w, r_w):
return 2.0 * w * r_w.unsqueeze(-1) * cost_vec # * g_out.unsqueeze(-1)
# return w * r_w.unsqueeze(-1) * cost_vec * g_out.unsqueeze(-1)
class SquaredSum(torch.autograd.Function):
@staticmethod
def forward(
ctx,
cost_vec,
weight,
):
cost = squared_sum(cost_vec, weight)
ctx.save_for_backward(cost_vec, weight)
return cost
@staticmethod
def backward(ctx, grad_out_cost):
(cost_vec, w) = ctx.saved_tensors
c_grad = None
if ctx.needs_input_grad[0]:
c_grad = backward_squared_sum(cost_vec, w)
return c_grad, None
class RunSquaredSum(torch.autograd.Function):
@staticmethod
def forward(
ctx,
cost_vec,
weight,
run_weight,
):
cost = run_squared_sum(cost_vec, weight, run_weight)
ctx.save_for_backward(cost_vec, weight, run_weight)
return cost
@staticmethod
def backward(ctx, grad_out_cost):
(cost_vec, w, r_w) = ctx.saved_tensors
c_grad = None
if ctx.needs_input_grad[0]:
c_grad = backward_run_squared_sum(cost_vec, w, r_w)
return c_grad, None, None
class ZeroCost(CostBase):
"""Zero Cost"""
def forward(self, x, goal_dist):
err = x
if self.max_value is not None:
err = torch.nn.functional.relu(torch.abs(err) - self.max_value)
if self.hinge_value is not None:
err = torch.where(goal_dist <= self.hinge_value, err, self._z_scalar) # soft hinge
if self.threshold_value is not None:
err = torch.where(err <= self.distance_threshold, self._z_scalar, err)
if not self.terminal: # or self.run_weight is not None:
cost = SquaredSum.apply(err, self.weight)
else:
if self._run_weight_vec is None or self._run_weight_vec.shape[1] != err.shape[1]:
self._run_weight_vec = torch.ones(
(1, err.shape[1]), device=self.tensor_args.device, dtype=self.tensor_args.dtype
)
self._run_weight_vec[:, 1:-1] *= self.run_weight
cost = RunSquaredSum.apply(
err, self.weight, self._run_weight_vec
) # cost * self._run_weight_vec
return cost
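
A quick check, with example shapes assumed, that the analytic gradient 2 * w * x returned by backward_squared_sum matches autograd for the squared_sum forward:

import torch

x = torch.randn(2, 30, 7, requires_grad=True)
w = torch.rand(7)
cost = torch.sum(torch.square(x) * w, dim=-1)  # squared_sum
cost.sum().backward()
analytic_grad = 2.0 * w * x.detach()           # backward_squared_sum
assert torch.allclose(x.grad, analytic_grad, atol=1e-6)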