release repository
10  src/curobo/wrap/__init__.py  Normal file
@@ -0,0 +1,10 @@
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
10  src/curobo/wrap/model/__init__.py  Normal file
@@ -0,0 +1,10 @@
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
31  src/curobo/wrap/model/curobo_robot_world.py  Normal file
@@ -0,0 +1,31 @@
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
"""This module has differentiable layers built from CuRobo's core features for use in Pytorch. """
|
||||
|
||||
# Standard Library
|
||||
from dataclasses import dataclass
|
||||
|
||||
# CuRobo
|
||||
from curobo.util.logger import log_warn
|
||||
from curobo.wrap.model.robot_world import RobotWorld, RobotWorldConfig
|
||||
|
||||
|
||||
@dataclass
|
||||
class CuroboRobotWorldConfig(RobotWorldConfig):
|
||||
def __post_init__(self):
|
||||
log_warn("CuroboRobotWorldConfig is deprecated, use RobotWorldConfig instead")
|
||||
return super().__post_init__()
|
||||
|
||||
|
||||
class CuroboRobotWorld(RobotWorld):
|
||||
def __init__(self, config: CuroboRobotWorldConfig):
|
||||
log_warn("CuroboRobotWorld is deprecated, use RobotWorld instead")
|
||||
return super().__init__(config)
|
||||
359  src/curobo/wrap/model/robot_world.py  Normal file
@@ -0,0 +1,359 @@
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
"""This module has differentiable layers built from CuRobo's core features for use in Pytorch. """
|
||||
|
||||
# Standard Library
|
||||
from dataclasses import dataclass
|
||||
from typing import Dict, List, Optional, Tuple, Union
|
||||
|
||||
# Third Party
|
||||
import torch
|
||||
|
||||
# CuRobo
|
||||
from curobo.cuda_robot_model.cuda_robot_model import CudaRobotModel
|
||||
from curobo.cuda_robot_model.types import CudaRobotModelState
|
||||
from curobo.geom.sdf.utils import create_collision_checker
|
||||
from curobo.geom.sdf.world import CollisionCheckerType, WorldCollision, WorldCollisionConfig
|
||||
from curobo.geom.types import WorldConfig
|
||||
from curobo.rollout.cost.bound_cost import BoundCost, BoundCostConfig, BoundCostType
|
||||
from curobo.rollout.cost.pose_cost import PoseCost, PoseCostConfig, PoseErrorType
|
||||
from curobo.rollout.cost.primitive_collision_cost import (
|
||||
PrimitiveCollisionCost,
|
||||
PrimitiveCollisionCostConfig,
|
||||
)
|
||||
from curobo.rollout.cost.self_collision_cost import SelfCollisionCost, SelfCollisionCostConfig
|
||||
from curobo.types.base import TensorDeviceType
|
||||
from curobo.types.math import Pose
|
||||
from curobo.types.robot import RobotConfig
|
||||
from curobo.types.state import JointState
|
||||
from curobo.util.sample_lib import HaltonGenerator
|
||||
from curobo.util.warp import init_warp
|
||||
from curobo.util_file import get_robot_configs_path, get_world_configs_path, join_path, load_yaml
|
||||
|
||||
|
||||
@dataclass
|
||||
class RobotWorldConfig:
|
||||
kinematics: CudaRobotModel
|
||||
sampler: HaltonGenerator
|
||||
bound_scale: torch.Tensor
|
||||
bound_cost: BoundCost
|
||||
pose_cost: PoseCost
|
||||
self_collision_cost: Optional[SelfCollisionCost] = None
|
||||
collision_cost: Optional[PrimitiveCollisionCost] = None
|
||||
collision_constraint: Optional[PrimitiveCollisionCost] = None
|
||||
world_model: Optional[WorldCollision] = None
|
||||
rejection_ratio: int = 10
|
||||
tensor_args: TensorDeviceType = TensorDeviceType()
|
||||
contact_distance: float = 0.0
|
||||
|
||||
@staticmethod
|
||||
def load_from_config(
|
||||
robot_config: Union[RobotConfig, str] = "franka.yml",
|
||||
world_model: Union[None, str, Dict, WorldConfig, List[WorldConfig], List[str]] = None,
|
||||
tensor_args: TensorDeviceType = TensorDeviceType(),
|
||||
n_envs: int = 1,
|
||||
n_meshes: int = 10,
|
||||
n_cuboids: int = 10,
|
||||
collision_activation_distance: float = 0.2,
|
||||
self_collision_activation_distance: float = 0.0,
|
||||
max_collision_distance: float = 1.0,
|
||||
collision_checker_type: CollisionCheckerType = CollisionCheckerType.MESH,
|
||||
world_collision_checker: Optional[WorldCollision] = None,
|
||||
pose_weight: List[float] = [1, 1, 1, 1],
|
||||
):
|
||||
init_warp(tensor_args=tensor_args)
|
||||
world_collision_cost = self_collision_cost = world_collision_constraint = None
|
||||
if isinstance(robot_config, str):
|
||||
robot_config = load_yaml(join_path(get_robot_configs_path(), robot_config))["robot_cfg"]
|
||||
if isinstance(robot_config, Dict):
|
||||
robot_config = RobotConfig.from_dict(robot_config, tensor_args)
|
||||
kinematics = CudaRobotModel(robot_config.kinematics)
|
||||
|
||||
if isinstance(world_model, str):
|
||||
world_model = load_yaml(join_path(get_world_configs_path(), world_model))
|
||||
if isinstance(world_model, List):
|
||||
if isinstance(world_model[0], str):
|
||||
world_model = [
|
||||
load_yaml(join_path(get_world_configs_path(), x)) for x in world_model
|
||||
]
|
||||
if world_collision_checker is None and world_model is not None:
|
||||
world_cfg = WorldCollisionConfig.load_from_dict(
|
||||
{
|
||||
"checker_type": collision_checker_type,
|
||||
"cache": {"mesh": n_meshes, "obb": n_cuboids},
|
||||
"max_distance": max_collision_distance,
|
||||
"n_envs": n_envs,
|
||||
},
|
||||
world_model,
|
||||
tensor_args,
|
||||
)
|
||||
world_collision_checker = create_collision_checker(world_cfg)
|
||||
|
||||
if world_collision_checker is not None:
|
||||
collision_cost_config = PrimitiveCollisionCostConfig(
|
||||
tensor_args.to_device([1.0]),
|
||||
tensor_args,
|
||||
return_loss=True,
|
||||
world_coll_checker=world_collision_checker,
|
||||
activation_distance=collision_activation_distance,
|
||||
)
|
||||
world_collision_cost = PrimitiveCollisionCost(collision_cost_config)
|
||||
collision_constraint_config = PrimitiveCollisionCostConfig(
|
||||
tensor_args.to_device([1.0]),
|
||||
tensor_args,
|
||||
return_loss=True,
|
||||
world_coll_checker=world_collision_checker,
|
||||
activation_distance=0.0,
|
||||
)
|
||||
world_collision_constraint = PrimitiveCollisionCost(collision_constraint_config)
|
||||
|
||||
self_collision_config = SelfCollisionCostConfig(
|
||||
tensor_args.to_device([1.0]),
|
||||
tensor_args,
|
||||
return_loss=True,
|
||||
self_collision_kin_config=kinematics.get_self_collision_config(),
|
||||
distance_threshold=self_collision_activation_distance,
|
||||
)
|
||||
|
||||
self_collision_cost = SelfCollisionCost(self_collision_config)
|
||||
bound_config = BoundCostConfig(
|
||||
tensor_args.to_device([1.0]),
|
||||
tensor_args,
|
||||
return_loss=True,
|
||||
cost_type=BoundCostType.POSITION,
|
||||
activation_distance=[0.0],
|
||||
)
|
||||
bound_config.set_bounds(kinematics.get_joint_limits(), teleport_mode=True)
|
||||
|
||||
bound_cost = BoundCost(bound_config)
|
||||
sample_gen = HaltonGenerator(
|
||||
kinematics.get_dof(),
|
||||
tensor_args,
|
||||
up_bounds=kinematics.get_joint_limits().position[1],
|
||||
low_bounds=kinematics.get_joint_limits().position[0],
|
||||
)
|
||||
pose_cost_config = PoseCostConfig(
|
||||
tensor_args.to_device(pose_weight),
|
||||
tensor_args=tensor_args,
|
||||
terminal=False,
|
||||
return_loss=True,
|
||||
cost_type=PoseErrorType.BATCH_GOAL,
|
||||
)
|
||||
pose_cost = PoseCost(pose_cost_config)
|
||||
bound_scale = (
|
||||
kinematics.get_joint_limits().position[1] - kinematics.get_joint_limits().position[0]
|
||||
).unsqueeze(0) / 2.0
|
||||
dist_threshold = 0.0
|
||||
if collision_activation_distance > 0.0:
|
||||
dist_threshold = (
|
||||
(0.5 / collision_activation_distance)
|
||||
* collision_activation_distance
|
||||
* collision_activation_distance
|
||||
)
|
||||
|
||||
return RobotWorldConfig(
|
||||
kinematics,
|
||||
sample_gen,
|
||||
bound_scale,
|
||||
bound_cost,
|
||||
pose_cost,
|
||||
self_collision_cost,
|
||||
world_collision_cost,
|
||||
world_collision_constraint,
|
||||
world_collision_checker,
|
||||
tensor_args=tensor_args,
|
||||
contact_distance=dist_threshold,
|
||||
)
|
||||
|
||||
|
||||
class RobotWorld(RobotWorldConfig):
    def __init__(self, config: RobotWorldConfig) -> None:
        RobotWorldConfig.__init__(self, **vars(config))
        self._batch_pose_idx = None

    def get_kinematics(self, q: torch.Tensor) -> CudaRobotModelState:
        state = self.kinematics.get_state(q)
        return state

    def update_world(self, world_config: WorldConfig):
        self.world_model.load_collision_model(world_config)

    def get_collision_distance(
        self, x_sph: torch.Tensor, env_query_idx: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
        """Get world collision distance cost.

        Args:
            x_sph (torch.Tensor): batch, horizon, n_spheres, 4

        Returns:
            torch.Tensor: collision cost of shape (batch, horizon)
        """
        d = self.collision_cost.forward(x_sph, env_query_idx=env_query_idx)
        return d

    def get_collision_constraint(
        self, x_sph: torch.Tensor, env_query_idx: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
        """Get world collision constraint violation (zero activation distance).

        Args:
            x_sph (torch.Tensor): batch, horizon, n_spheres, 4

        Returns:
            torch.Tensor: constraint violation of shape (batch, horizon)
        """
        d = self.collision_constraint.forward(x_sph, env_query_idx=env_query_idx)
        return d

    def get_self_collision_distance(self, x_sph: torch.Tensor) -> torch.Tensor:
        return self.get_self_collision(x_sph)

    def get_self_collision(self, x_sph: torch.Tensor) -> torch.Tensor:
        d = self.self_collision_cost.forward(x_sph)
        return d

    def get_collision_vector(
        self, x_sph: torch.Tensor, env_query_idx: Optional[torch.Tensor] = None
    ):
        """
        NOTE: This function is not differentiable
        """
        x_sph.requires_grad = True
        d = self.collision_cost.forward(x_sph, env_query_idx=env_query_idx)
        vec = self.collision_cost.get_gradient_buffer()
        d = d.detach()
        x_sph.requires_grad = False
        x_sph.grad = None
        return d, vec

    def get_world_self_collision_distance_from_joints(
        self,
        q: torch.Tensor,
        env_query_idx: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        q : batch, dof
        """
        state = self.get_kinematics(q)
        d_world = self.get_collision_distance(
            state.link_spheres_tensor.unsqueeze(1), env_query_idx=env_query_idx
        ).squeeze(1)
        d_self = self.get_self_collision_distance(state.link_spheres_tensor.unsqueeze(1)).squeeze(1)
        return d_world, d_self

    def get_world_self_collision_distance_from_joint_trajectory(
        self,
        q: torch.Tensor,
        env_query_idx: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        q : batch, horizon, dof
        """
        b, h, dof = q.shape
        state = self.get_kinematics(q.view(b * h, dof))
        x_sph = state.link_spheres_tensor.view(b, h, -1, 4)
        d_world = self.get_collision_distance(x_sph, env_query_idx=env_query_idx)
        d_self = self.get_self_collision_distance(x_sph)
        return d_world, d_self

    def get_bound(self, q: torch.Tensor) -> torch.Tensor:
        d = self.bound_cost.forward(JointState(position=q))
        return d

    def sample(self, n: int, mask_valid: bool = True, env_query_idx: Optional[torch.Tensor] = None):
        """
        This does not support batched environments, use sample_trajectory instead.
        """
        n_samples = n
        if mask_valid:
            n_samples = n * self.rejection_ratio
        q = self.sampler.get_samples(n_samples, bounded=True)
        if mask_valid:
            q_mask = self.validate(q, env_query_idx)
            q = q[q_mask][:n]
        return q

    def validate(self, q: torch.Tensor, env_query_idx: Optional[torch.Tensor] = None):
        """
        This does not support batched environments, use validate_trajectory instead.
        """
        # run collision, self collision, bounds
        b, dof = q.shape
        kin_state = self.get_kinematics(q)
        spheres = kin_state.link_spheres_tensor.view(b, 1, -1, 4)
        d_self = self.get_self_collision(spheres)
        d_world = self.get_collision_constraint(spheres, env_query_idx)
        d_bound = self.get_bound(q.view(b, 1, dof))
        d_mask = sum_mask(d_self, d_world, d_bound)
        return d_mask

    def sample_trajectory(
        self,
        batch: int,
        horizon: int,
        mask_valid: bool = True,
        env_query_idx: Optional[torch.Tensor] = None,
    ):
        n_samples = batch * horizon
        if mask_valid:
            n_samples = batch * horizon * self.rejection_ratio
        q = self.sampler.get_samples(n_samples, bounded=True)
        q = q.reshape(batch, horizon * self.rejection_ratio, -1)
        if mask_valid:
            q_mask = self.validate_trajectory(q, env_query_idx)
            q = [q[i][q_mask[i, :], :][:horizon, :].unsqueeze(0) for i in range(batch)]
            q = torch.cat(q)
        return q

    def validate_trajectory(self, q: torch.Tensor, env_query_idx: Optional[torch.Tensor] = None):
        """
        q: batch, horizon, dof
        env_query_idx: batch, 1
        """
        # run collision, self collision, bounds
        b, h, dof = q.shape
        q = q.view(b * h, dof)
        kin_state = self.get_kinematics(q)
        spheres = kin_state.link_spheres_tensor.view(b, h, -1, 4)
        d_self = self.get_self_collision(spheres)
        d_world = self.get_collision_constraint(spheres, env_query_idx)
        d_bound = self.get_bound(q.view(b, h, dof))
        d_mask = mask(d_self, d_world, d_bound)
        return d_mask

    def pose_distance(self, x_des: Pose, x_current: Pose):
        if len(x_current.position.shape) == 2:
            x_current = x_current.unsqueeze(1)
        # calculate pose loss:
        if (
            self._batch_pose_idx is None
            or self._batch_pose_idx.shape[0] != x_current.position.shape[0]
        ):
            self._batch_pose_idx = torch.arange(
                0, x_current.position.shape[0], 1, device=self.tensor_args.device, dtype=torch.int32
            )
        distance = self.pose_cost.forward_pose(x_des, x_current, self._batch_pose_idx)
        return distance


@torch.jit.script
def sum_mask(d1, d2, d3):
    d_total = d1 + d2 + d3
    d_mask = d_total == 0.0
    return d_mask.view(-1)


@torch.jit.script
def mask(d1, d2, d3):
    d_total = d1 + d2 + d3
    d_mask = d_total == 0.0
    return d_mask
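A minimal usage sketch for the layer above, not part of the commit: it assumes a CUDA-capable machine, the packaged "franka.yml" robot config that load_from_config defaults to, and a packaged world config named "collision_table.yml".

# Hedged usage sketch (not part of this commit). Assumes a CUDA device, the
# packaged "franka.yml" robot config, and a "collision_table.yml" world config.
from curobo.wrap.model.robot_world import RobotWorld, RobotWorldConfig

config = RobotWorldConfig.load_from_config(
    robot_config="franka.yml",
    world_model="collision_table.yml",
)
robot_world = RobotWorld(config)

# Rejection-sample 10 joint configurations that pass joint bounds,
# self-collision, and world-collision checks.
q = robot_world.sample(10)

# Query world and self collision costs for the sampled batch (batch, dof).
d_world, d_self = robot_world.get_world_self_collision_distance_from_joints(q)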
10  src/curobo/wrap/reacher/__init__.py  Normal file
@@ -0,0 +1,10 @@
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
196  src/curobo/wrap/reacher/evaluator.py  Normal file
@@ -0,0 +1,196 @@
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
# Standard Library
from dataclasses import dataclass
from typing import Optional

# Third Party
import torch
import torch.autograd.profiler as profiler

# CuRobo
from curobo.types.robot import JointState
from curobo.types.tensor import T_DOF
from curobo.util.trajectory import calculate_dt


@dataclass
class TrajEvaluatorConfig:
    max_acc: float = 15.0
    max_jerk: float = 500.0
    cost_weight: float = 0.01
    min_dt: float = 0.001
    max_dt: float = 0.1


@torch.jit.script
def compute_path_length(vel, traj_dt, cspace_distance_weight):
    pl = torch.sum(
        torch.sum(torch.abs(vel) * traj_dt.unsqueeze(-1) * cspace_distance_weight, dim=-1), dim=-1
    )
    return pl


@torch.jit.script
def compute_path_length_cost(vel, cspace_distance_weight):
    pl = torch.sum(torch.sum(torch.abs(vel) * cspace_distance_weight, dim=-1), dim=-1)
    return pl


@torch.jit.script
def smooth_cost(abs_acc, abs_jerk, opt_dt):
    # acc = torch.max(torch.max(abs_acc, dim=-1)[0], dim=-1)[0]
    # jerk = torch.max(torch.max(abs_jerk, dim=-1)[0], dim=-1)[0]
    jerk = torch.mean(torch.max(abs_jerk, dim=-1)[0], dim=-1)
    mean_acc = torch.mean(torch.max(abs_acc, dim=-1)[0], dim=-1)  # [0]
    a = (jerk * 0.001) + opt_dt + (mean_acc * 0.01)
    return a


@torch.jit.script
def compute_smoothness(
    vel: torch.Tensor,
    acc: torch.Tensor,
    jerk: torch.Tensor,
    max_vel: torch.Tensor,
    max_acc: float,
    max_jerk: float,
    traj_dt: torch.Tensor,
    min_dt: float,
    max_dt: float,
):
    # compute scaled dt:
    # h = int(vel.shape[-2] / 2)

    max_v_arr = torch.max(torch.abs(vel), dim=-2)[0]  # output is batch, dof
    scale_dt = (max_v_arr) / (max_vel.view(1, max_v_arr.shape[-1]))  # batch, dof

    max_acc_arr = torch.max(torch.abs(acc), dim=-2)[0]  # batch, dof

    scale_dt_acc = torch.sqrt(torch.max(max_acc_arr / max_acc, dim=-1)[0])  # batch, 1

    max_jerk_arr = torch.max(torch.abs(jerk), dim=-2)[0]  # batch, dof

    scale_dt_jerk = torch.pow(torch.max(max_jerk_arr / max_jerk, dim=-1)[0], 1.0 / 3.0)  # batch, 1

    dt_score_vel = torch.max(scale_dt, dim=-1)[0]  # batch, 1

    dt_score = torch.maximum(dt_score_vel, scale_dt_acc)
    dt_score = torch.maximum(dt_score, scale_dt_jerk)

    # clamp dt score:

    dt_score = torch.clamp(dt_score, min_dt, max_dt)
    scale_dt = (1 / dt_score).view(-1, 1, 1)
    abs_acc = torch.abs(acc) * (scale_dt**2)
    # mean_acc_val = torch.max(torch.mean(abs_acc, dim=-1), dim=-1)[0]
    max_acc_val = torch.max(torch.max(abs_acc, dim=-1)[0], dim=-1)[0]
    abs_jerk = torch.abs(jerk) * scale_dt**3
    # calculate max mean jerk:
    # mean_jerk_val = torch.max(torch.mean(abs_jerk, dim=-1), dim=-1)[0]
    max_jerk_val = torch.max(torch.max(abs_jerk, dim=-1)[0], dim=-1)[0]
    acc_label = torch.logical_and(max_acc_val <= max_acc, max_jerk_val <= max_jerk)
    # print(max_acc_val, max_jerk_val, dt_score)
    return (acc_label, smooth_cost(abs_acc, abs_jerk, dt_score))


@torch.jit.script
def compute_smoothness_opt_dt(
    vel, acc, jerk, max_vel: torch.Tensor, max_acc: float, max_jerk: float, opt_dt: torch.Tensor
):
    abs_acc = torch.abs(acc)
    max_acc_val = torch.max(torch.max(abs_acc, dim=-1)[0], dim=-1)[0]
    abs_jerk = torch.abs(jerk)
    max_jerk_val = torch.max(torch.max(abs_jerk, dim=-1)[0], dim=-1)[0]

    acc_label = torch.logical_and(max_acc_val <= max_acc, max_jerk_val <= max_jerk)
    return acc_label, smooth_cost(abs_acc, abs_jerk, opt_dt)


class TrajEvaluator(TrajEvaluatorConfig):
    def __init__(self, config: Optional[TrajEvaluatorConfig] = None):
        if config is None:
            config = TrajEvaluatorConfig()
        super().__init__(**vars(config))

    def _compute_path_length(
        self, js: JointState, traj_dt: torch.Tensor, cspace_distance_weight: T_DOF
    ):
        path_length = compute_path_length(js.velocity, traj_dt, cspace_distance_weight)
        return path_length

    def _check_smoothness(self, js: JointState, traj_dt: torch.Tensor, max_vel: torch.Tensor):
        if js.jerk is None:
            js.jerk = (
                (torch.roll(js.acceleration, -1, -2) - js.acceleration)
                * (1 / traj_dt).unsqueeze(-1)
            )[..., :-1, :]

        acc_label, max_acc = compute_smoothness(
            js.velocity,
            js.acceleration,
            js.jerk,
            max_vel,
            self.max_acc,
            self.max_jerk,
            traj_dt,
            self.min_dt,
            self.max_dt,
        )
        return acc_label, max_acc

    @profiler.record_function("traj_evaluate_smoothness")
    def evaluate(
        self,
        js: JointState,
        traj_dt: torch.Tensor,
        cspace_distance_weight: T_DOF,
        max_vel: torch.Tensor,
    ):
        label, cost = self._check_smoothness(js, traj_dt, max_vel)
        pl_cost = self._compute_path_length(js, traj_dt, cspace_distance_weight)
        return label, pl_cost + self.cost_weight * cost

    @profiler.record_function("traj_evaluate_interpolated_smoothness")
    def evaluate_interpolated_smootheness(
        self,
        js: JointState,
        opt_dt: torch.Tensor,
        cspace_distance_weight: T_DOF,
        max_vel: torch.Tensor,
    ):
        label, cost = compute_smoothness_opt_dt(
            js.velocity, js.acceleration, js.jerk, max_vel, self.max_acc, self.max_jerk, opt_dt
        )
        label = torch.logical_and(label, opt_dt <= self.max_dt)
        pl_cost = compute_path_length_cost(js.velocity, cspace_distance_weight)
        return label, pl_cost + self.cost_weight * cost

    def evaluate_from_position(
        self,
        js: JointState,
        traj_dt: torch.Tensor,
        cspace_distance_weight: T_DOF,
        max_vel: torch.Tensor,
        skip_last_tstep: bool = False,
    ):
        js = js.calculate_fd_from_position(traj_dt)
        if skip_last_tstep:
            js.position = js.position[..., :-1, :]
            js.velocity = js.velocity[..., :-1, :]
            js.acceleration = js.acceleration[..., :-1, :]
            js.jerk = js.jerk[..., :-1, :]

        return self.evaluate(js, traj_dt, cspace_distance_weight, max_vel)

    def calculate_dt(self, js: JointState, max_vel: torch.Tensor, raw_dt: float):
        return calculate_dt(js.velocity, max_vel, raw_dt)
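The dt search in compute_smoothness above hinges on how finite-difference derivatives scale under time dilation, which is why the acceleration ratio sits under a square root and the jerk ratio under a cube root. A torch-only illustration of that rule (not cuRobo API, not part of this commit):

# Hedged, torch-only illustration (not cuRobo API): replaying a trajectory with
# timestep scaled by s leaves positions unchanged, divides finite-difference
# accelerations by s**2, and divides jerks by s**3. compute_smoothness inverts
# this to find the smallest slowdown that satisfies max_acc and max_jerk.
import torch

t = torch.linspace(0, 1, 50)
q = torch.sin(2 * torch.pi * t)  # a 1-dof trajectory
dt = t[1] - t[0]

acc = torch.diff(q, n=2) / dt**2
s = 2.0  # slow the trajectory down by 2x
acc_slow = torch.diff(q, n=2) / (s * dt) ** 2
assert torch.allclose(acc_slow, acc / s**2)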
1021  src/curobo/wrap/reacher/ik_solver.py  Normal file
File diff suppressed because it is too large
2391  src/curobo/wrap/reacher/motion_gen.py  Normal file
File diff suppressed because it is too large
491  src/curobo/wrap/reacher/mpc.py  Normal file
@@ -0,0 +1,491 @@
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
# Standard Library
import time
from dataclasses import dataclass
from typing import Dict, Optional, Union

# Third Party
import torch

# CuRobo
from curobo.geom.sdf.utils import create_collision_checker
from curobo.geom.sdf.world import CollisionCheckerType, WorldCollision, WorldCollisionConfig
from curobo.geom.types import WorldConfig
from curobo.opt.newton.lbfgs import LBFGSOpt, LBFGSOptConfig
from curobo.opt.particle.parallel_es import ParallelES, ParallelESConfig
from curobo.opt.particle.parallel_mppi import ParallelMPPI, ParallelMPPIConfig
from curobo.rollout.arm_reacher import ArmReacher, ArmReacherConfig
from curobo.rollout.rollout_base import Goal
from curobo.types.base import TensorDeviceType
from curobo.types.robot import JointState, RobotConfig
from curobo.util.logger import log_error, log_info, log_warn
from curobo.util_file import (
    get_robot_configs_path,
    get_task_configs_path,
    get_world_configs_path,
    join_path,
    load_yaml,
)
from curobo.wrap.reacher.types import ReacherSolveState, ReacherSolveType
from curobo.wrap.wrap_base import WrapResult
from curobo.wrap.wrap_mpc import WrapConfig, WrapMpc


@dataclass
class MpcSolverConfig:
    solver: WrapMpc
    world_coll_checker: Optional[WorldCollision] = None
    tensor_args: TensorDeviceType = TensorDeviceType()
    use_cuda_graph_full_step: bool = False

    @staticmethod
    def load_from_robot_config(
        robot_cfg: Union[Union[str, dict], RobotConfig],
        world_cfg: Union[Union[str, dict], WorldConfig],
        base_cfg: Optional[dict] = None,
        tensor_args: TensorDeviceType = TensorDeviceType(),
        compute_metrics: bool = True,
        use_cuda_graph: Optional[bool] = None,
        particle_opt_iters: Optional[int] = None,
        self_collision_check: bool = True,
        collision_checker_type: Optional[CollisionCheckerType] = CollisionCheckerType.PRIMITIVE,
        use_es: Optional[bool] = None,
        es_learning_rate: Optional[float] = 0.01,
        use_cuda_graph_metrics: bool = False,
        store_rollouts: bool = True,
        use_cuda_graph_full_step: bool = False,
        sync_cuda_time: bool = True,
        collision_cache: Optional[Dict[str, int]] = None,
        n_collision_envs: Optional[int] = None,
        collision_activation_distance: Optional[float] = None,
        world_coll_checker=None,
        step_dt: Optional[float] = None,
        use_lbfgs: bool = False,
        use_mppi: bool = True,
    ):
        if use_cuda_graph_full_step:
            log_error("use_cuda_graph_full_step is currently not supported")
            raise ValueError("use_cuda_graph_full_step is currently not supported")

        task_file = "particle_mpc.yml"
        config_data = load_yaml(join_path(get_task_configs_path(), task_file))
        config_data["mppi"]["n_envs"] = 1
        if step_dt is not None:
            config_data["model"]["dt_traj_params"]["base_dt"] = step_dt
        if particle_opt_iters is not None:
            config_data["mppi"]["n_iters"] = particle_opt_iters

        if base_cfg is None:
            base_cfg = load_yaml(join_path(get_task_configs_path(), "base_cfg.yml"))
        if collision_cache is not None:
            base_cfg["world_collision_checker_cfg"]["cache"] = collision_cache
        if n_collision_envs is not None:
            base_cfg["world_collision_checker_cfg"]["n_envs"] = n_collision_envs

        if collision_activation_distance is not None:
            config_data["cost"]["primitive_collision_cfg"][
                "activation_distance"
            ] = collision_activation_distance

        if not self_collision_check:
            base_cfg["constraint"]["self_collision_cfg"]["weight"] = 0.0
            config_data["cost"]["self_collision_cfg"]["weight"] = 0.0
        if collision_checker_type is not None:
            base_cfg["world_collision_checker_cfg"]["checker_type"] = collision_checker_type
        if isinstance(robot_cfg, str):
            robot_cfg = load_yaml(join_path(get_robot_configs_path(), robot_cfg))
        if isinstance(robot_cfg, dict):
            robot_cfg = RobotConfig.from_dict(robot_cfg, tensor_args)

        if isinstance(world_cfg, str):
            world_cfg = load_yaml(join_path(get_world_configs_path(), world_cfg))

        if world_coll_checker is None and world_cfg is not None:
            world_cfg = WorldCollisionConfig.load_from_dict(
                base_cfg["world_collision_checker_cfg"], world_cfg, tensor_args
            )
            world_coll_checker = create_collision_checker(world_cfg)
        grad_config_data = None
        if use_lbfgs:
            grad_config_data = load_yaml(join_path(get_task_configs_path(), "gradient_mpc.yml"))
            if step_dt is not None:
                grad_config_data["model"]["dt_traj_params"]["base_dt"] = step_dt
                grad_config_data["model"]["dt_traj_params"]["max_dt"] = step_dt

            config_data["model"] = grad_config_data["model"]
            if use_cuda_graph is not None:
                grad_config_data["lbfgs"]["use_cuda_graph"] = use_cuda_graph

        cfg = ArmReacherConfig.from_dict(
            robot_cfg,
            config_data["model"],
            config_data["cost"],
            base_cfg["constraint"],
            base_cfg["convergence"],
            base_cfg["world_collision_checker_cfg"],
            world_cfg,
            world_coll_checker=world_coll_checker,
            tensor_args=tensor_args,
        )

        arm_rollout_mppi = ArmReacher(cfg)
        arm_rollout_safety = ArmReacher(cfg)
        config_data["mppi"]["store_rollouts"] = store_rollouts
        if use_cuda_graph is not None:
            config_data["mppi"]["use_cuda_graph"] = use_cuda_graph
        if use_cuda_graph_full_step:
            config_data["mppi"]["sync_cuda_time"] = False
        config_dict = ParallelMPPIConfig.create_data_dict(
            config_data["mppi"], arm_rollout_mppi, tensor_args
        )
        solvers = []
        parallel_mppi = None
        if use_es is not None and use_es:
            log_warn("ES solver for MPC is highly experimental, not safe to run on real robots")

            mppi_cfg = ParallelESConfig(**config_dict)
            if es_learning_rate is not None:
                mppi_cfg.learning_rate = es_learning_rate
            parallel_mppi = ParallelES(mppi_cfg)
        elif use_mppi:
            mppi_cfg = ParallelMPPIConfig(**config_dict)
            parallel_mppi = ParallelMPPI(mppi_cfg)
        if parallel_mppi is not None:
            solvers.append(parallel_mppi)
        if use_lbfgs:
            log_warn("LBFGS solver for MPC is highly experimental, not safe to run on real robots")
            grad_cfg = ArmReacherConfig.from_dict(
                robot_cfg,
                grad_config_data["model"],
                grad_config_data["cost"],
                base_cfg["constraint"],
                base_cfg["convergence"],
                base_cfg["world_collision_checker_cfg"],
                world_cfg,
                world_coll_checker=world_coll_checker,
                tensor_args=tensor_args,
            )

            arm_rollout_grad = ArmReacher(grad_cfg)
            lbfgs_cfg_dict = LBFGSOptConfig.create_data_dict(
                grad_config_data["lbfgs"], arm_rollout_grad, tensor_args
            )
            lbfgs = LBFGSOpt(LBFGSOptConfig(**lbfgs_cfg_dict))
            solvers.append(lbfgs)

        mpc_cfg = WrapConfig(
            safety_rollout=arm_rollout_safety,
            optimizers=solvers,
            compute_metrics=compute_metrics,
            use_cuda_graph_metrics=use_cuda_graph_metrics,
            sync_cuda_time=sync_cuda_time,
        )
        solver = WrapMpc(mpc_cfg)
        return MpcSolverConfig(
            solver,
            tensor_args=tensor_args,
            use_cuda_graph_full_step=use_cuda_graph_full_step,
            world_coll_checker=world_coll_checker,
        )


class MpcSolver(MpcSolverConfig):
    """Model Predictive Control (MPC) solver for the arm reacher task.

    Args:
        config: solver configuration, typically built with
            MpcSolverConfig.load_from_robot_config().
    """

    def __init__(self, config: MpcSolverConfig) -> None:
        super().__init__(**vars(config))
        self.tensor_args = self.solver.rollout_fn.tensor_args
        self.batch_size = -1
        self._goal_buffer = None
        self._solve_state = None
        self._col = None
        self._step_goal_buffer = None
        self._cu_state_in = None
        self._cu_seed = None
        self._cu_step_init = None
        self._cu_step_graph = None
        self._cu_result = None

    def _update_batch_size(self, batch_size):
        if self.batch_size != batch_size:
            self.batch_size = batch_size

    def update_goal_buffer(
        self,
        solve_state: ReacherSolveState,
        goal: Goal,
    ) -> Goal:
        self._solve_state, self._goal_buffer, update_reference = solve_state.update_goal(
            goal,
            self._solve_state,
            self._goal_buffer,
            self.tensor_args,
        )
        if update_reference:
            self.solver.update_nenvs(self._solve_state.get_batch_size())
            self.reset()
            self.reset_cuda_graph()
            self._col = torch.arange(
                0, goal.batch, device=self.tensor_args.device, dtype=torch.long
            )
            self._step_goal_buffer = Goal(
                current_state=self._goal_buffer.current_state.clone(),
                batch_current_state_idx=self._goal_buffer.batch_current_state_idx.clone(),
            )
        return self._goal_buffer

    def step(
        self,
        current_state: JointState,
        shift_steps: int = 1,
        seed_traj: Optional[JointState] = None,
        max_attempts: int = 1,
    ):
        converged = False  # only set True when a feasible action is found

        for _ in range(max_attempts):
            result = self.step_once(current_state.clone(), shift_steps, seed_traj)
            if (
                torch.count_nonzero(torch.isnan(result.action.position)) == 0
                and torch.max(torch.abs(result.action.position)) < 10.0
                and torch.count_nonzero(~result.metrics.feasible) == 0
            ):
                converged = True
                break
            self.reset()
        if not converged:
            result.action.copy_(current_state)
            log_warn("NOT CONVERGED")

        return result

    def step_once(
        self,
        current_state: JointState,
        shift_steps: int = 1,
        seed_traj: Optional[JointState] = None,
    ) -> WrapResult:
        # Create a cuda graph for the whole solve step, including computation
        # of metrics and updating of goal buffers.

        if self._solve_state is None:
            log_error("Need to first setup solve state before calling solve()")

        if self.use_cuda_graph_full_step:
            st_time = time.time()
            if not self._cu_step_init:
                self._initialize_cuda_graph_step(current_state, shift_steps, seed_traj)
            self._cu_state_in.copy_(current_state)
            if seed_traj is not None:
                self._cu_seed.copy_(seed_traj)
            self._cu_step_graph.replay()
            result = self._cu_result.clone()
            torch.cuda.synchronize()
            result.solve_time = time.time() - st_time
        else:
            self._step_goal_buffer.current_state.copy_(current_state)
            result = self._solve_from_solve_state(
                self._solve_state,
                self._step_goal_buffer,
                shift_steps,
                seed_traj,
            )

        return result

    def _step(
        self,
        current_state: JointState,
        shift_steps: int = 1,
        seed_traj: Optional[JointState] = None,
    ):
        self._step_goal_buffer.current_state.copy_(current_state)
        result = self._solve_from_solve_state(
            self._solve_state,
            self._step_goal_buffer,
            shift_steps,
            seed_traj,
        )

        return result

    def _initialize_cuda_graph_step(
        self,
        current_state: JointState,
        shift_steps: int = 1,
        seed_traj: Optional[JointState] = None,
    ):
        log_info("MpcSolver: Creating Cuda Graph")
        self._cu_state_in = current_state.clone()
        if seed_traj is not None:
            self._cu_seed = seed_traj.clone()
        s = torch.cuda.Stream(device=self.tensor_args.device)
        s.wait_stream(torch.cuda.current_stream(device=self.tensor_args.device))
        with torch.cuda.stream(s):
            for _ in range(3):
                self._cu_result = self._step(
                    self._cu_state_in, shift_steps=shift_steps, seed_traj=self._cu_seed
                )
        torch.cuda.current_stream(device=self.tensor_args.device).wait_stream(s)
        self.reset()
        self._cu_step_graph = torch.cuda.CUDAGraph()
        with torch.cuda.graph(self._cu_step_graph, stream=s):
            self._cu_result = self._step(
                self._cu_state_in, shift_steps=shift_steps, seed_traj=self._cu_seed
            )
        self._cu_step_init = True

    def setup_solve_single(self, goal: Goal, num_seeds: Optional[int] = None) -> Goal:
        solve_state = ReacherSolveState(
            ReacherSolveType.SINGLE, num_mpc_seeds=num_seeds, batch_size=1, n_envs=1, n_goalset=1
        )
        goal_buffer = self.update_goal_buffer(solve_state, goal)
        return goal_buffer

    def setup_solve_goalset(self, goal: Goal, num_seeds: Optional[int] = None) -> Goal:
        solve_state = ReacherSolveState(
            ReacherSolveType.GOALSET,
            num_mpc_seeds=num_seeds,
            batch_size=1,
            n_envs=1,
            n_goalset=goal.n_goalset,
        )
        goal_buffer = self.update_goal_buffer(solve_state, goal)
        return goal_buffer

    def setup_solve_batch(self, goal: Goal, num_seeds: Optional[int] = None) -> Goal:
        solve_state = ReacherSolveState(
            ReacherSolveType.BATCH,
            num_mpc_seeds=num_seeds,
            batch_size=goal.batch,
            n_envs=1,
            n_goalset=1,
        )
        goal_buffer = self.update_goal_buffer(solve_state, goal)
        return goal_buffer

    def setup_solve_batch_goalset(self, goal: Goal, num_seeds: Optional[int] = None) -> Goal:
        solve_state = ReacherSolveState(
            ReacherSolveType.BATCH_GOALSET,
            num_mpc_seeds=num_seeds,
            batch_size=goal.batch,
            n_envs=1,
            n_goalset=goal.n_goalset,
        )
        goal_buffer = self.update_goal_buffer(solve_state, goal)
        return goal_buffer

    def setup_solve_batch_env(self, goal: Goal, num_seeds: Optional[int] = None) -> Goal:
        solve_state = ReacherSolveState(
            ReacherSolveType.BATCH_ENV,
            num_mpc_seeds=num_seeds,
            batch_size=goal.batch,
            n_envs=1,
            n_goalset=1,
        )
        goal_buffer = self.update_goal_buffer(solve_state, goal)
        return goal_buffer

    def setup_solve_batch_env_goalset(self, goal: Goal, num_seeds: Optional[int] = None) -> Goal:
        solve_state = ReacherSolveState(
            ReacherSolveType.BATCH_ENV_GOALSET,
            num_mpc_seeds=num_seeds,
            batch_size=goal.batch,
            n_envs=1,
            n_goalset=goal.n_goalset,
        )
        goal_buffer = self.update_goal_buffer(solve_state, goal)
        return goal_buffer

    def _solve_from_solve_state(
        self,
        solve_state: ReacherSolveState,
        goal: Goal,
        shift_steps: int = 1,
        seed_traj: Optional[JointState] = None,
    ) -> WrapResult:
        if solve_state.batch_env:
            if solve_state.batch_size > self.world_coll_checker.n_envs:
                raise ValueError(
                    "Goal batch size is larger than the number of collision environments"
                )

        goal_buffer = self.update_goal_buffer(solve_state, goal)
        # NOTE: implement initialization from seed set here:
        if seed_traj is not None:
            self.solver.update_init_seed(seed_traj)

        result = self.solver.solve(goal_buffer, seed_traj, shift_steps)
        result.js_action = self.rollout_fn.get_full_dof_from_solution(result.action)
        return result

    def fn(self):
        # this will run one step of optimization and get a new command
        pass

    def update_goal(self, goal: Goal):
        self.solver.update_params(goal)
        return True

    def reset(self):
        # reset warm start
        self.solver.reset()

    def reset_cuda_graph(self):
        self.solver.reset_cuda_graph()

    @property
    def rollout_fn(self):
        return self.solver.safety_rollout

    def enable_cspace_cost(self, enable=True):
        self.solver.safety_rollout.enable_cspace_cost(enable)
        for opt in self.solver.optimizers:
            opt.rollout_fn.enable_cspace_cost(enable)

    def enable_pose_cost(self, enable=True):
        self.solver.safety_rollout.enable_pose_cost(enable)
        for opt in self.solver.optimizers:
            opt.rollout_fn.enable_pose_cost(enable)

    def get_active_js(
        self,
        in_js: JointState,
    ):
        opt_jnames = self.rollout_fn.joint_names
        opt_js = in_js.get_ordered_joint_state(opt_jnames)
        return opt_js

    @property
    def joint_names(self):
        return self.rollout_fn.joint_names

    def update_world(self, world: WorldConfig):
        self.world_coll_checker.load_collision_model(world)
        return True

    def get_visual_rollouts(self):
        return self.solver.optimizers[0].get_rollouts()

    @property
    def kinematics(self):
        return self.solver.safety_rollout.dynamics_model.robot_model

    @property
    def world_collision(self):
        return self.world_coll_checker
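A rough control-loop sketch for the solver above, not part of the commit. Goal construction is abbreviated: start_state, target_pose, and the robot interface are hypothetical placeholders, and the exact Goal fields should be checked against curobo.rollout.rollout_base.

# Hedged control-loop sketch (not part of this commit). `start_state`,
# `target_pose`, and the `robot` object are hypothetical placeholders.
from curobo.rollout.rollout_base import Goal
from curobo.wrap.reacher.mpc import MpcSolver, MpcSolverConfig

mpc_config = MpcSolverConfig.load_from_robot_config("franka.yml", "collision_table.yml")
mpc = MpcSolver(mpc_config)

goal = Goal(current_state=start_state, goal_pose=target_pose)  # fields assumed
goal_buffer = mpc.setup_solve_single(goal, num_seeds=1)
mpc.update_goal(goal_buffer)

for _ in range(100):
    current_state = robot.get_joint_state()  # hypothetical robot interface
    result = mpc.step(current_state, max_attempts=2)
    robot.apply_command(result.action)  # hypothetical robot interface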
1198  src/curobo/wrap/reacher/trajopt.py  Normal file
File diff suppressed because it is too large
171  src/curobo/wrap/reacher/types.py  Normal file
@@ -0,0 +1,171 @@
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
from __future__ import annotations

# Standard Library
from dataclasses import dataclass
from enum import Enum
from typing import Dict, List, Optional

# CuRobo
from curobo.rollout.rollout_base import Goal
from curobo.types.base import TensorDeviceType
from curobo.types.math import Pose
from curobo.types.robot import JointState
from curobo.types.tensor import T_BDOF


class ReacherSolveType(Enum):
    # TODO: how to differentiate between goal pose and goal config?
    SINGLE = 0
    GOALSET = 1
    BATCH = 2
    BATCH_GOALSET = 3
    BATCH_ENV = 4
    BATCH_ENV_GOALSET = 5


@dataclass
class ReacherSolveState:
    solve_type: ReacherSolveType
    batch_size: int
    n_envs: int
    n_goalset: int = 1
    batch_env: bool = False
    batch_retract: bool = False
    batch_mode: bool = False
    num_seeds: Optional[int] = None
    num_ik_seeds: Optional[int] = None
    num_graph_seeds: Optional[int] = None
    num_trajopt_seeds: Optional[int] = None
    num_mpc_seeds: Optional[int] = None

    def __post_init__(self):
        if self.n_envs == 1:
            self.batch_env = False
        else:
            self.batch_env = True
        if self.batch_size > 1:
            self.batch_mode = True
        if self.num_seeds is None:
            self.num_seeds = self.num_ik_seeds
        if self.num_seeds is None:
            self.num_seeds = self.num_trajopt_seeds
        if self.num_seeds is None:
            self.num_seeds = self.num_graph_seeds
        if self.num_seeds is None:
            self.num_seeds = self.num_mpc_seeds

    def get_batch_size(self):
        return self.num_seeds * self.batch_size

    def get_ik_batch_size(self):
        return self.num_ik_seeds * self.batch_size

    def create_goal_buffer(
        self,
        goal_pose: Pose,
        goal_state: Optional[JointState] = None,
        retract_config: Optional[T_BDOF] = None,
        link_poses: Optional[Dict[str, Pose]] = None,
        tensor_args: TensorDeviceType = TensorDeviceType(),
    ):
        # TODO: Refactor to account for num_ik_seeds or num_trajopt_seeds
        batch_retract = True
        if retract_config is None or retract_config.shape[0] != goal_pose.batch:
            batch_retract = False
        goal_buffer = Goal.create_idx(
            pose_batch_size=self.batch_size,
            batch_env=self.batch_env,
            batch_retract=batch_retract,
            num_seeds=self.num_seeds,
            tensor_args=tensor_args,
        )
        goal_buffer.goal_pose = goal_pose
        goal_buffer.retract_state = retract_config
        goal_buffer.goal_state = goal_state
        goal_buffer.links_goal_pose = link_poses
        return goal_buffer

    def update_goal_buffer(
        self,
        goal_pose: Pose,
        goal_state: Optional[JointState] = None,
        retract_config: Optional[T_BDOF] = None,
        link_poses: Optional[List[Pose]] = None,
        current_solve_state: Optional[ReacherSolveState] = None,
        current_goal_buffer: Optional[Goal] = None,
        tensor_args: TensorDeviceType = TensorDeviceType(),
    ):
        solve_state = self
        # create goal buffer by comparing to existing solve type
        update_reference = False
        if (
            current_solve_state is None
            or current_goal_buffer is None
            or current_solve_state != solve_state
            or (current_goal_buffer.retract_state is None and retract_config is not None)
            or (current_goal_buffer.goal_state is None and goal_state is not None)
            or (current_goal_buffer.links_goal_pose is None and link_poses is not None)
        ):
            current_solve_state = solve_state
            current_goal_buffer = solve_state.create_goal_buffer(
                goal_pose, goal_state, retract_config, link_poses, tensor_args
            )
            update_reference = True
        else:
            current_goal_buffer.goal_pose.copy_(goal_pose)
            if retract_config is not None:
                current_goal_buffer.retract_state.copy_(retract_config)
            if goal_state is not None:
                current_goal_buffer.goal_state.copy_(goal_state)
            if link_poses is not None:
                for k in link_poses.keys():
                    current_goal_buffer.links_goal_pose[k].copy_(link_poses[k])
        return current_solve_state, current_goal_buffer, update_reference

    def update_goal(
        self,
        goal: Goal,
        current_solve_state: Optional[ReacherSolveState] = None,
        current_goal_buffer: Optional[Goal] = None,
        tensor_args: TensorDeviceType = TensorDeviceType(),
    ):
        solve_state = self

        update_reference = False
        if (
            current_solve_state is None
            or current_goal_buffer is None
            or current_solve_state != solve_state
            or (current_goal_buffer.goal_state is None and goal.goal_state is not None)
            or (current_goal_buffer.goal_state is not None and goal.goal_state is None)
        ):
            # TODO: Check for change in update idx buffers, currently we assume
            # that solve_state captures difference in idx buffers
            current_solve_state = solve_state
            current_goal_buffer = goal.create_index_buffers(
                solve_state.batch_size,
                solve_state.batch_env,
                solve_state.batch_retract,
                solve_state.num_seeds,
                tensor_args,
            )
            update_reference = True
        else:
            current_goal_buffer.copy_(goal, update_idx_buffers=False)
        return current_solve_state, current_goal_buffer, update_reference


@dataclass
class MotionGenSolverState:
    solve_type: ReacherSolveType
    ik_solve_state: ReacherSolveState
    trajopt_solve_state: ReacherSolveState
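A small sketch of the seed fallback in ReacherSolveState.__post_init__ and the batch-size arithmetic in get_batch_size, derived directly from the code above (not part of the commit):

# Hedged sketch (not part of this commit): num_seeds falls back through
# num_ik_seeds -> num_trajopt_seeds -> num_graph_seeds -> num_mpc_seeds,
# and get_batch_size multiplies the chosen seed count by the problem batch.
from curobo.wrap.reacher.types import ReacherSolveState, ReacherSolveType

solve_state = ReacherSolveState(
    solve_type=ReacherSolveType.BATCH,
    batch_size=4,
    n_envs=1,
    n_goalset=1,
    num_ik_seeds=16,
)
assert solve_state.num_seeds == 16
assert solve_state.get_batch_size() == 64  # num_seeds * batch_size
assert solve_state.batch_mode and not solve_state.batch_env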
185  src/curobo/wrap/wrap_base.py  Normal file
@@ -0,0 +1,185 @@
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
# Standard Library
import time
from dataclasses import dataclass
from typing import Any, List, Optional

# Third Party
import torch
import torch.autograd.profiler as profiler

# CuRobo
from curobo.opt.newton.newton_base import NewtonOptBase
from curobo.opt.opt_base import Optimizer
from curobo.opt.particle.particle_opt_base import ParticleOptBase
from curobo.rollout.rollout_base import Goal, RolloutBase, RolloutMetrics
from curobo.types.robot import State
from curobo.util.logger import log_info


@dataclass
class WrapConfig:
    safety_rollout: RolloutBase
    optimizers: List[Optimizer]
    compute_metrics: bool
    use_cuda_graph_metrics: bool
    sync_cuda_time: bool


@dataclass
class WrapResult:
    action: State
    solve_time: float
    metrics: Optional[RolloutMetrics] = None
    debug: Any = None
    js_action: Optional[State] = None

    def clone(self):
        return WrapResult(
            self.action.clone(), self.solve_time, self.metrics.clone(), debug=self.debug
        )


class WrapBase(WrapConfig):
    def __init__(self, config: Optional[WrapConfig] = None):
        if config is not None:
            WrapConfig.__init__(self, **vars(config))
        self.n_envs = 1
        self.opt_dt = 0
        self._rollout_list = None
        self._opt_rollouts = None
        self._init_solver = False

    def get_metrics(self, state: State, use_cuda_graph: bool = False) -> RolloutMetrics:
        if use_cuda_graph:
            log_info("Using cuda graph")
            return self.safety_rollout.get_metrics_cuda_graph(state)
        return self.safety_rollout.get_metrics(state)

    def optimize(self, act_seq: torch.Tensor, shift_steps: int = 0) -> torch.Tensor:
        for opt in self.optimizers:
            act_seq = opt.optimize(act_seq, shift_steps)
        return act_seq

    def get_debug_data(self):
        debug_list = []
        for opt in self.optimizers:
            debug_list.append(opt.debug)
        return debug_list

    def get_debug_cost(self):
        debug_list = []
        for opt in self.optimizers:
            debug_list.append(opt.debug_cost)
        return debug_list

    def update_nenvs(self, n_envs):
        if n_envs != self.n_envs:
            self.n_envs = n_envs
            for opt in self.optimizers:
                opt.update_nenvs(self.n_envs)

    def update_params(self, goal: Goal):
        with profiler.record_function("wrap_base/safety/update_params"):
            log_info("Updating safety params")
            self.safety_rollout.update_params(goal)
        log_info("Updating optimizer params")
        for opt in self.optimizers:
            opt.update_params(goal)

    def get_init_act(self):
        init_act = self.safety_rollout.get_init_action_seq()
        return init_act

    def reset(self):
        for opt in self.optimizers:
            opt.reset()
        self.safety_rollout.reset()

    def reset_seed(self):
        self.safety_rollout.reset_seed()
        for opt in self.optimizers:
            opt.reset_seed()

    def reset_cuda_graph(self):
        self.safety_rollout.reset_cuda_graph()
        for opt in self.optimizers:
            opt.reset_cuda_graph()
        self._init_solver = False

    @property
    def rollout_fn(self):
        return self.safety_rollout

    def solve(self, goal: Goal, seed: Optional[torch.Tensor] = None):
        metrics = None

        filtered_state = self.safety_rollout.filter_robot_state(goal.current_state)
        goal.current_state = filtered_state
        self.update_params(goal)
        if seed is None:
            seed = self.get_init_act()
            log_info("getting random seed")
        else:
            seed = seed.detach().clone()
        start_time = time.time()
        if not self._init_solver:
            log_info("Solver was not initialized, warming up solver")
            for _ in range(2):
                act_seq = self.optimize(seed, shift_steps=0)
            self._init_solver = True
        act_seq = self.optimize(seed, shift_steps=0)
        self.opt_dt = time.time() - start_time

        act = self.safety_rollout.get_robot_command(
            filtered_state, act_seq, state_idx=goal.batch_current_state_idx
        )

        if self.compute_metrics:
            with profiler.record_function("wrap_base/compute_metrics"):
                metrics = self.get_metrics(
                    act, self.use_cuda_graph_metrics
                )  # TODO: use cuda graph for metrics

        result = WrapResult(
            action=act,
            solve_time=self.opt_dt,
            metrics=metrics,
            debug={"steps": self.get_debug_data(), "cost": self.get_debug_cost()},
        )
        return result

    @property
    def newton_optimizer(self) -> NewtonOptBase:
        return self.optimizers[-1]

    @property
    def particle_optimizer(self) -> ParticleOptBase:
        return self.optimizers[0]

    @property
    def joint_names(self):
        return self.rollout_fn.cspace_config.joint_names

    def _get_rollout_instances_from_optimizers(self) -> List[RolloutBase]:
        if self._opt_rollouts is None:
            self._opt_rollouts = []
            for i in self.optimizers:
                self._opt_rollouts.extend(i.get_all_rollout_instances())
        return self._opt_rollouts

    def get_all_rollout_instances(self) -> List[RolloutBase]:
        if self._rollout_list is None:
            self._rollout_list = [
                self.safety_rollout
            ] + self._get_rollout_instances_from_optimizers()
        return self._rollout_list
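WrapBase.optimize chains the configured optimizers sequentially, feeding each stage's output into the next as its seed; this is how a sampling-based stage (MPPI) can warm-start a gradient-based stage (L-BFGS). A self-contained, stub-based illustration of that chaining (not cuRobo API, not part of the commit):

# Hedged, self-contained illustration (not cuRobo API) of the optimizer
# chaining in WrapBase.optimize: each stage consumes the previous stage's
# output as its seed.
import torch


class StubOpt:
    def __init__(self, name: str):
        self.name = name

    def optimize(self, act_seq: torch.Tensor, shift_steps: int = 0) -> torch.Tensor:
        print(f"{self.name} refines a seed of shape {tuple(act_seq.shape)}")
        return act_seq  # a real optimizer would return an improved sequence


optimizers = [StubOpt("mppi"), StubOpt("lbfgs")]
act_seq = torch.zeros(1, 30, 7)  # batch, horizon, dof
for opt in optimizers:
    act_seq = opt.optimize(act_seq, shift_steps=0)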
86  src/curobo/wrap/wrap_mpc.py  Normal file
@@ -0,0 +1,86 @@
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
from __future__ import annotations

# Standard Library
import time
from typing import Optional

# Third Party
import torch
import torch.autograd.profiler as profiler

# CuRobo
from curobo.rollout.rollout_base import Goal
from curobo.types.robot import State
from curobo.wrap.wrap_base import WrapBase, WrapConfig, WrapResult


class WrapMpc(WrapBase):
    def __init__(self, config: Optional[WrapConfig] = None):
        self._init_act_seq = None
        super().__init__(config)

    def update_init_seed(self, seed) -> bool:
        if self._init_act_seq is None:
            self._init_act_seq = seed.detach().clone()
        else:
            self._init_act_seq.copy_(seed)
        return True

    def solve(self, goal: Goal, seed: Optional[State] = None, shift_steps=1):
        if seed is None and self._init_act_seq is None:
            seed = self.get_init_act()
        elif self._init_act_seq is not None:
            seed = self._init_act_seq
        else:
            seed = seed.detach().clone()
        metrics = None

        start_time = time.time()
        filtered_state = self.safety_rollout.filter_robot_state(goal.current_state)
        goal.current_state.copy_(filtered_state)

        self.update_params(goal)
        if self.sync_cuda_time:
            torch.cuda.synchronize()
        start_time = time.time()
        with profiler.record_function("mpc/opt"):
            act_seq = self.optimize(seed, shift_steps=shift_steps)
        if self.sync_cuda_time:
            torch.cuda.synchronize()
        self.opt_dt = time.time() - start_time
        with profiler.record_function("mpc/filter"):
            act = self.safety_rollout.get_robot_command(
                filtered_state, act_seq, shift_steps=shift_steps
            )
        self._init_act_seq = self._shift(act_seq, shift_steps=shift_steps)
        if self.compute_metrics:
            with profiler.record_function("mpc/get_metrics"):
                metrics = self.get_metrics(act)
        result = WrapResult(action=act, metrics=metrics, solve_time=self.opt_dt)
        return result

    def _shift(self, act_seq, shift_steps=1):
        # shift the optimized sequence forward and hold the last command
        act_seq = act_seq.roll(-shift_steps, 1)
        act_seq[:, -shift_steps:, :] = act_seq[:, -shift_steps - 1 : -shift_steps, :].clone()
        return act_seq

    def reset(self):
        self._init_act_seq = None
        return super().reset()

    def get_rollouts(self):
        return self.particle_optimizer.top_trajs
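WrapMpc._shift implements the receding-horizon warm start: after executing the first command, the remaining sequence is shifted forward and the last command is repeated to fill the tail. A torch-only illustration (not cuRobo API, not part of the commit):

# Hedged, torch-only illustration of the receding-horizon warm start in
# WrapMpc._shift (not cuRobo API).
import torch

act_seq = torch.arange(5.0).view(1, 5, 1)         # one batch, horizon 5, 1-dof
shifted = act_seq.roll(-1, 1)                      # [1, 2, 3, 4, 0]
shifted[:, -1:, :] = shifted[:, -2:-1, :].clone()  # [1, 2, 3, 4, 4]
print(shifted.view(-1).tolist())                   # [1.0, 2.0, 3.0, 4.0, 4.0]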