Bug Fixes, Add Isaac Sim 4.5 support for examples
@@ -9,7 +9,7 @@
 # its affiliates is strictly prohibited.
 #

-""" This module contains a function to load robot representation from a yaml or xrdf file. """
+"""This module contains a function to load robot representation from a yaml or xrdf file."""

 # Standard Library
 from typing import Optional, Union
@@ -850,7 +850,12 @@ namespace Curobo
       (1.333333333f) * g_acc[3] + (-0.083333333f) * g_acc[4]) * dt * dt +
-      (-0.5f * g_jerk[0] + g_jerk[1] - g_jerk[3] + 0.5f * g_jerk[4]) * dt * dt * dt);
+      // ( g_acc[3] + g_acc[1] - (2.0) * g_acc[2]) * dt * dt +
+      //(-0.5f * g_jerk[0] + g_jerk[1] - g_jerk[3] + 0.5f * g_jerk[4]) * dt * dt * dt);
+      (0.5f * g_jerk[0] - g_jerk[1] + g_jerk[3] - 0.5f * g_jerk[4]) * dt * dt * dt);
+
+      //(0.500000000000000 * g_jerk[0] - 1 * g_jerk[1] + 0 * g_jerk[2] + 1 * g_jerk[3] - 0.500000000000000 * g_jerk[4]) * dt_inv_3;
     }
     else if (hid == horizon - 3)
     {
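Note: the new jerk term flips the sign of the five-point stencil so it matches the commented reference coefficients (0.5, -1, 0, 1, -0.5). A quick numerical check of the stencil (illustrative only, not part of the commit; assumes a uniform timestep dt):

import numpy as np

dt = 1e-2
t = np.arange(-2, 3) * dt  # five samples centered at 0
g = np.sin(t)              # test signal with known third derivative

# coefficients used by the new line; the standard central stencil for
# f'''(0) is (-0.5, 1, 0, -1, 0.5) / dt**3, so this is its negative
coeffs = np.array([0.5, -1.0, 0.0, 1.0, -0.5])
print(np.dot(coeffs, g) / dt**3)  # ~ +1.0 == -f'''(0) = cos(0)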
@@ -10,9 +10,9 @@
 #

 """
 This module contains code for geometric processing such as pointcloud processing, analytic signed
 distance computation for the environment, and also signed distance computation between robot and
 the environment. These functions can be used for robot self collision checking and also for robot
+environment collision checking. The module also contains neural networks for learned signed distance
+fields.
 """
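For orientation, a minimal use of the types this module exposes (a sketch, not part of the commit; assumes the public Cuboid/WorldConfig API from curobo.geom.types):

from curobo.geom.types import Cuboid, WorldConfig

# a 10 cm cube half a meter in front of the origin; pose is [x, y, z, qw, qx, qy, qz]
world = WorldConfig(
    cuboid=[Cuboid(name="block", pose=[0.5, 0.0, 0.0, 1, 0, 0, 0], dims=[0.1, 0.1, 0.1])]
)
mesh_world = WorldConfig.create_mesh_world(world)  # convert primitives to meshes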
@@ -29,7 +29,7 @@ from curobo.geom.sdf.world import (
 from curobo.geom.types import Mesh, WorldConfig
 from curobo.types.math import Pose
 from curobo.util.logger import log_error, log_info, log_warn
-from curobo.util.warp import init_warp
+from curobo.util.warp import init_warp, warp_support_bvh_constructor_type


 @dataclass(frozen=True)
@@ -149,7 +149,10 @@ class WorldMeshCollision(WorldPrimitiveCollision):
         verts, faces = mesh.get_mesh_data()
         v = wp.array(verts, dtype=wp.vec3, device=self._wp_device)
         f = wp.array(np.ravel(faces), dtype=int, device=self._wp_device)
-        new_mesh = wp.Mesh(points=v, indices=f)
+        if warp_support_bvh_constructor_type():
+            new_mesh = wp.Mesh(points=v, indices=f, bvh_constructor="sah")
+        else:
+            new_mesh = wp.Mesh(points=v, indices=f)
         return WarpMeshData(mesh.name, new_mesh.id, v, f, new_mesh)

     def _load_mesh_into_cache(self, mesh: Mesh) -> WarpMeshData:
@@ -188,6 +191,7 @@ class WorldMeshCollision(WorldPrimitiveCollision):
             id_list[i] = m_data.m_id
             name_list.append(m_data.name)
+
         pose_buffer = Pose.from_batch_list(pose_list, self.tensor_args)
         inv_pose_buffer = pose_buffer.inverse()
         return name_list, id_list, inv_pose_buffer.get_pose_vector()
@@ -20,7 +20,6 @@ import torch
 import warp as wp

 # CuRobo
-from curobo.curobolib.kinematics import rotation_matrix_to_quaternion
 from curobo.util.logger import log_error
 from curobo.util.torch_utils import get_torch_jit_decorator
 from curobo.util.warp import init_warp
@@ -174,31 +173,6 @@ def matrix_to_quaternion(
     """
     matrix = matrix.view(-1, 3, 3)
     out_quat = MatrixToQuaternion.apply(matrix, out_quat, adj_matrix)
-    # out_quat = cuda_matrix_to_quaternion(matrix)
     return out_quat


-def cuda_matrix_to_quaternion(matrix: torch.Tensor) -> torch.Tensor:
-    """Convert rotations given as rotation matrices to quaternions.
-
-    This is not differentiable. Use :func:`~matrix_to_quaternion` for differentiable conversion.
-
-    Args:
-        matrix: Rotation matrices as tensor of shape (..., 3, 3).
-
-    Returns:
-        quaternions with real part first, as tensor of shape (..., 4). [qw, qx, qy, qz]
-    """
-    if matrix.size(-1) != 3 or matrix.size(-2) != 3:
-        raise ValueError(f"Invalid rotation matrix shape {matrix.shape}.")
-
-    # account for different batch shapes here:
-    in_shape = matrix.shape
-    mat_in = matrix.view(-1, 3, 3)
-
-    out_quat = torch.zeros((mat_in.shape[0], 4), device=matrix.device, dtype=matrix.dtype)
-    out_quat = rotation_matrix_to_quaternion(matrix, out_quat)
-    out_shape = list(in_shape[:-2]) + [4]
-    out_quat = out_quat.view(out_shape)
-    return out_quat
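With the non-differentiable cuda_matrix_to_quaternion path removed, matrix_to_quaternion is the single entry point. A minimal check of the quaternion convention (sketch; the exact signature of the public wrapper is assumed here):

import torch
from curobo.types.math import matrix_to_quaternion  # module per this diff

mat = torch.eye(3).view(1, 3, 3).cuda()
q = matrix_to_quaternion(mat)  # signature with optional output buffer assumed
print(q)  # identity rotation -> [qw, qx, qy, qz] = [[1., 0., 0., 0.]]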
@@ -521,6 +495,41 @@ def compute_batch_transform_point(
         out_pt[b_idx * n_pts + p_idx] = p


+@wp.kernel
+def compute_batch_transform_point_fp16(
+    position: wp.array(dtype=wp.vec3h),
+    quat: wp.array(dtype=wp.vec4h),
+    pt: wp.array(dtype=wp.vec3h),
+    n_pts: wp.int32,
+    n_poses: wp.int32,
+    out_pt: wp.array(dtype=wp.vec3h),
+):
+    """A warp kernel to transform a batch of points by a batch of poses."""
+
+    # given n,3 points and b poses, get b,n,3 transformed points
+    # we tile as b * n threads
+    tid = wp.tid()
+    b_idx = tid / (n_pts)
+    p_idx = tid - (b_idx * n_pts)
+
+    # read data:
+    in_position = position[b_idx]
+    in_quat = quat[b_idx]
+    in_pt = pt[b_idx * n_pts + p_idx]
+
+    # create a transform from a vector/quaternion:
+    q_vec = wp.quaternion(in_quat[1], in_quat[2], in_quat[3], in_quat[0])
+    t = wp.transformh(in_position, q_vec)
+
+    # transform the point
+    p = wp.transform_point(t, in_pt)
+
+    # write result:
+    out_pt[b_idx * n_pts + p_idx] = p
+
+
 @wp.kernel
 def compute_batch_pose_multipy(
     position: wp.array(dtype=wp.vec3),
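Launching the fp16 variant requires half-precision Warp views on both inputs and outputs; a sketch (assumes the kernel is importable from curobo.types.math and a CUDA device is available):

import torch
import warp as wp
from curobo.types.math import compute_batch_transform_point_fp16

wp.init()
b, n = 2, 128
pos = torch.zeros(b, 3, dtype=torch.float16, device="cuda")
quat = torch.zeros(b, 4, dtype=torch.float16, device="cuda")
quat[:, 0] = 1.0  # identity rotation, [qw, qx, qy, qz]
pts = torch.rand(b * n, 3, dtype=torch.float16, device="cuda")
out = torch.zeros_like(pts)

wp.launch(
    kernel=compute_batch_transform_point_fp16,
    dim=b * n,
    inputs=[
        wp.from_torch(pos, dtype=wp.vec3h),
        wp.from_torch(quat, dtype=wp.vec4h),
        wp.from_torch(pts, dtype=wp.vec3h),
        n,
        b,
    ],
    outputs=[wp.from_torch(out, dtype=wp.vec3h)],
    device="cuda",
)
# identity pose: out should equal pts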
@@ -654,7 +663,8 @@ class TransformPoint(torch.autograd.Function):
                 b,
             ],
             outputs=[wp.from_torch(out_points.view(-1, 3), dtype=wp.vec3)],
-            stream=wp.stream_from_torch(position.device),
+            stream=None if not position.is_cuda else wp.stream_from_torch(position.device),
+            device=wp.device_from_torch(position.device),
         )

         return out_points
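This stream/device pair is the recurring fix in this file: wp.stream_from_torch is only valid for CUDA tensors, so CPU tensors now pass stream=None and let device= route the launch. Condensed (helper name is hypothetical):

import warp as wp

def launch_args(tensor):
    # wp.stream_from_torch is undefined for CPU tensors; fall back to
    # stream=None and select the Warp device explicitly instead
    stream = wp.stream_from_torch(tensor.device) if tensor.is_cuda else None
    return stream, wp.device_from_torch(tensor.device)

# stream, device = launch_args(position)
# wp.launch(kernel=..., dim=..., inputs=[...], stream=stream, device=device)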
@@ -679,7 +689,9 @@ class TransformPoint(torch.autograd.Function):
         wp_adj_position = wp.from_torch(adj_position, dtype=wp.vec3)
         wp_adj_quat = wp.from_torch(adj_quaternion, dtype=wp.vec4)

+        stream = None
+        if position.is_cuda:
+            stream = wp.stream_from_torch(position.device)
         wp.launch(
             kernel=compute_transform_point,
             dim=ctx.b * ctx.n,
@@ -707,8 +719,9 @@ class TransformPoint(torch.autograd.Function):
             adj_outputs=[
                 None,
             ],
-            stream=wp.stream_from_torch(grad_output.device),
             adjoint=True,
+            stream=None if not grad_output.is_cuda else wp.stream_from_torch(grad_output.device),
+            device=wp.device_from_torch(grad_output.device),
         )
         g_p = g_q = g_pt = None
         if ctx.needs_input_grad[0]:
@@ -742,19 +755,44 @@ class BatchTransformPoint(torch.autograd.Function):
         )
         ctx.b = b
         ctx.n = n
-        wp.launch(
-            kernel=compute_batch_transform_point,
-            dim=b * n,
-            inputs=[
-                wp.from_torch(position.detach().view(-1, 3).contiguous(), dtype=wp.vec3),
-                wp.from_torch(quaternion.detach().view(-1, 4).contiguous(), dtype=wp.vec4),
-                wp.from_torch(points.detach().view(-1, 3).contiguous(), dtype=wp.vec3),
-                n,
-                b,
-            ],
-            outputs=[wp.from_torch(out_points.view(-1, 3).contiguous(), dtype=wp.vec3)],
-            stream=wp.stream_from_torch(position.device),
-        )
+        if points.dtype == torch.float32:
+            wp.launch(
+                kernel=compute_batch_transform_point,
+                dim=b * n,
+                inputs=[
+                    wp.from_torch(
+                        position.detach().view(-1, 3).contiguous(),
+                        dtype=wp.types.vector(length=3, dtype=wp.float32),
+                    ),
+                    wp.from_torch(quaternion.detach().view(-1, 4).contiguous(), dtype=wp.vec4),
+                    wp.from_torch(points.detach().view(-1, 3).contiguous(), dtype=wp.vec3),
+                    n,
+                    b,
+                ],
+                outputs=[wp.from_torch(out_points.view(-1, 3).contiguous(), dtype=wp.vec3)],
+                stream=None if not position.is_cuda else wp.stream_from_torch(position.device),
+                device=wp.device_from_torch(position.device),
+            )
+        elif points.dtype == torch.float16:
+            wp.launch(
+                kernel=compute_batch_transform_point_fp16,
+                dim=b * n,
+                inputs=[
+                    wp.from_torch(
+                        position.detach().view(-1, 3).contiguous(),
+                        dtype=wp.types.vector(length=3, dtype=wp.float16),
+                    ),
+                    wp.from_torch(quaternion.detach().view(-1, 4).contiguous(), dtype=wp.vec4h),
+                    wp.from_torch(points.detach().view(-1, 3).contiguous(), dtype=wp.vec3h),
+                    n,
+                    b,
+                ],
+                outputs=[wp.from_torch(out_points.view(-1, 3).contiguous(), dtype=wp.vec3h)],
+                stream=None if not position.is_cuda else wp.stream_from_torch(position.device),
+                device=wp.device_from_torch(position.device),
+            )
+        else:
+            log_error("Unsupported dtype: " + str(points.dtype))

         return out_points
@@ -804,8 +842,9 @@ class BatchTransformPoint(torch.autograd.Function):
             adj_outputs=[
                 None,
             ],
-            stream=wp.stream_from_torch(grad_output.device),
             adjoint=True,
+            stream=None if not grad_output.is_cuda else wp.stream_from_torch(grad_output.device),
+            device=wp.device_from_torch(grad_output.device),
         )
         g_p = g_q = g_pt = None
         if ctx.needs_input_grad[0]:
@@ -876,7 +915,8 @@ class BatchTransformPose(torch.autograd.Function):
                 wp.from_torch(out_position.detach().view(-1, 3).contiguous(), dtype=wp.vec3),
                 wp.from_torch(out_quaternion.detach().view(-1, 4).contiguous(), dtype=wp.vec4),
             ],
-            stream=wp.stream_from_torch(position.device),
+            stream=None if not position.is_cuda else wp.stream_from_torch(position.device),
+            device=wp.device_from_torch(position.device),
         )

         return out_position, out_quaternion
@@ -949,8 +989,13 @@ class BatchTransformPose(torch.autograd.Function):
                 None,
                 None,
             ],
-            stream=wp.stream_from_torch(grad_out_position.device),
             adjoint=True,
+            stream=(
+                None
+                if not grad_out_position.is_cuda
+                else wp.stream_from_torch(grad_out_position.device)
+            ),
+            device=wp.device_from_torch(grad_out_position.device),
         )
         g_p1 = g_q1 = g_p2 = g_q2 = None
         if ctx.needs_input_grad[0]:
@@ -1010,7 +1055,7 @@ class TransformPose(torch.autograd.Function):
         )
         ctx.b = b
         wp.launch(
-            kernel=compute_batch_pose_multipy,
+            kernel=compute_pose_multipy,
             dim=b,
             inputs=[
                 wp.from_torch(position.detach().view(-1, 3).contiguous(), dtype=wp.vec3),
@@ -1022,7 +1067,8 @@ class TransformPose(torch.autograd.Function):
                 wp.from_torch(out_position.detach().view(-1, 3).contiguous(), dtype=wp.vec3),
                 wp.from_torch(out_quaternion.detach().view(-1, 4).contiguous(), dtype=wp.vec4),
             ],
-            stream=wp.stream_from_torch(position.device),
+            stream=(None if not position.is_cuda else wp.stream_from_torch(position.device)),
+            device=wp.device_from_torch(position.device),
         )

         return out_position, out_quaternion
@@ -1095,7 +1141,12 @@ class TransformPose(torch.autograd.Function):
                 None,
                 None,
             ],
-            stream=wp.stream_from_torch(grad_out_position.device),
+            stream=(
+                None
+                if not grad_out_position.is_cuda
+                else wp.stream_from_torch(grad_out_position.device)
+            ),
+            device=wp.device_from_torch(grad_out_position.device),
             adjoint=True,
         )
         g_p1 = g_q1 = g_p2 = g_q2 = None
@@ -1155,7 +1206,8 @@ class PoseInverse(torch.autograd.Function):
                 wp.from_torch(out_position.detach().view(-1, 3).contiguous(), dtype=wp.vec3),
                 wp.from_torch(out_quaternion.detach().view(-1, 4).contiguous(), dtype=wp.vec4),
             ],
-            stream=wp.stream_from_torch(position.device),
+            stream=(None if not position.is_cuda else wp.stream_from_torch(position.device)),
+            device=wp.device_from_torch(position.device),
         )

         return out_position, out_quaternion
@@ -1212,8 +1264,11 @@ class PoseInverse(torch.autograd.Function):
                 None,
                 None,
             ],
-            stream=wp.stream_from_torch(grad_out_position.device),
             adjoint=True,
+            stream=(
+                None if not out_position.is_cuda else wp.stream_from_torch(out_position.device)
+            ),
+            device=wp.device_from_torch(out_position.device),
         )
         g_p1 = g_q1 = None
         if ctx.needs_input_grad[0]:
@@ -1260,7 +1315,8 @@ class QuatToMatrix(torch.autograd.Function):
             outputs=[
                 wp.from_torch(out_mat.detach().view(-1, 3, 3).contiguous(), dtype=wp.mat33),
             ],
-            stream=wp.stream_from_torch(quaternion.device),
+            stream=(None if not quaternion.is_cuda else wp.stream_from_torch(quaternion.device)),
+            device=wp.device_from_torch(quaternion.device),
         )

         return out_mat
@@ -1299,8 +1355,11 @@ class QuatToMatrix(torch.autograd.Function):
             adj_outputs=[
                 None,
             ],
-            stream=wp.stream_from_torch(grad_out_mat.device),
             adjoint=True,
+            stream=(
+                None if not grad_out_mat.is_cuda else wp.stream_from_torch(grad_out_mat.device)
+            ),
+            device=wp.device_from_torch(grad_out_mat.device),
         )
         g_q1 = None
         if ctx.needs_input_grad[0]:
@@ -1345,7 +1404,8 @@ class MatrixToQuaternion(torch.autograd.Function):
             outputs=[
                 wp.from_torch(out_quaternion.detach().view(-1, 4).contiguous(), dtype=wp.vec4),
             ],
-            stream=wp.stream_from_torch(in_mat.device),
+            stream=(None if not in_mat.is_cuda else wp.stream_from_torch(in_mat.device)),
+            device=wp.device_from_torch(in_mat.device),
         )

         return out_quaternion
@@ -1382,8 +1442,9 @@ class MatrixToQuaternion(torch.autograd.Function):
             adj_outputs=[
                 None,
             ],
-            stream=wp.stream_from_torch(grad_out_q.device),
             adjoint=True,
+            stream=(None if not in_mat.is_cuda else wp.stream_from_torch(in_mat.device)),
+            device=wp.device_from_torch(in_mat.device),
         )
         g_q1 = None
         if ctx.needs_input_grad[0]:
@@ -9,12 +9,12 @@
 # its affiliates is strictly prohibited.
 #

-""" Optimization module containing several numerical solvers.
+"""Optimization module containing several numerical solvers.

 Base for an optimization solver is at :class:`opt_base.Optimizer`. cuRobo provides two base classes
 for implementing two popular ways to optimize, (1) using particles
 with :class:`particle.particle_opt_base.ParticleOptBase` and (2) using Newton/Quasi-Newton solvers
 with :class:`newton.newton_base.NewtonOptBase`. :class:`newton.newton_base.NewtonOptBase` contains
 implementations of several line search schemes. Note that these line search schemes are approximate
 as cuRobo tries different line search magnitudes in parallel and chooses the largest that satisfies
 line search conditions.
@@ -117,6 +117,7 @@ class LBFGSOpt(NewtonOptBase, LBFGSOptConfig):
         if self.history > self.d_opt:
             log_info("LBFGS: history >= d_opt, reducing history to d_opt-1")
             self.history = self.d_opt - 1
+            self.init_hessian(self.n_problems)

     @profiler.record_function("lbfgs/reset")
     def reset(self):
@@ -8,7 +8,7 @@
 # without an express license agreement from NVIDIA CORPORATION or
 # its affiliates is strictly prohibited.
 #
-""" Base module for Optimization. """
+"""Base module for Optimization."""
 from __future__ import annotations

 # Standard Library
@@ -642,7 +642,7 @@ def jit_diag_a_cov_update(w, actions, mean_action):
     # weighted_delta =
     # sum across horizon and mean across particles:
     # cov_update = torch.diag(torch.mean(torch.sum(weighted_delta.T , dim=0), dim=0))
-    cov_update = torch.mean(torch.sum(weighted_delta, dim=-2), dim=-2).unsqueeze(-2)
+    cov_update = torch.mean(torch.sum(weighted_delta, dim=-3), dim=-2).unsqueeze(-2)
     return cov_update
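The two reductions produce identical output shapes, so the wrong axis would pass any shape check; only the semantics differ. A shape sketch (tensor layout assumed as (n_problems, particles, horizon, d_action) for illustration):

import torch

w = torch.rand(4, 32, 30, 7)  # stand-in for weighted_delta
old = torch.mean(torch.sum(w, dim=-2), dim=-2).unsqueeze(-2)
new = torch.mean(torch.sum(w, dim=-3), dim=-2).unsqueeze(-2)
print(old.shape, new.shape)  # both torch.Size([4, 1, 7]); only the reduced axes differ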
@@ -9,5 +9,4 @@
 # its affiliates is strictly prohibited.
 #

-"""
-"""
+""" """
@@ -9,7 +9,7 @@
 # its affiliates is strictly prohibited.
 #

-""" This module contains aliases for structured Tensors, improving readability."""
+"""This module contains aliases for structured Tensors, improving readability."""

 # Third Party
 import torch
@@ -10,7 +10,7 @@
 #
 """
 This module provides logging API, wrapping :py:class:`logging.Logger`. These functions are used
 to log messages in the cuRobo package. The functions can also be used in other packages by
 creating a new logger (:py:meth:`setup_logger`) with the desired name.
 """
 # Standard Library
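Usage sketch for the logger API described above (the setup_logger argument names are assumed for illustration):

from curobo.util.logger import log_warn, setup_logger

setup_logger(level="warning", logger_name="my_package")  # argument names assumed
log_warn("this message goes through the configured logger")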
@@ -67,6 +67,21 @@ def warp_support_kernel_key(wp_module=None):
     return True


+def warp_support_bvh_constructor_type(wp_module=None):
+    if wp_module is None:
+        wp_module = wp
+    wp_version = wp_module.config.version
+
+    if version.parse(wp_version) < version.parse("1.6.0"):
+        log_info(
+            "Warp version is "
+            + wp_version
+            + " < 1.6.0, bvh_constructor type is not supported."
+        )
+        return False
+    return True
+
+
 def is_runtime_warp_kernel_enabled() -> bool:
     env_variable = os.environ.get("CUROBO_WARP_RUNTIME_KERNEL_DISABLE")
     if env_variable is None:
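The new gate is consumed in the WorldMeshCollision hunk above; condensed, the pattern is (points/indices assumed to be prepared wp.array buffers):

import warp as wp
from curobo.util.warp import warp_support_bvh_constructor_type

# Warp >= 1.6.0 lets callers pick the BVH build algorithm; "sah"
# (surface-area heuristic) gives better query performance on meshes
if warp_support_bvh_constructor_type():
    new_mesh = wp.Mesh(points=points, indices=indices, bvh_constructor="sah")
else:
    new_mesh = wp.Mesh(points=points, indices=indices)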
@@ -8,4 +8,4 @@
 # without an express license agreement from NVIDIA CORPORATION or
 # its affiliates is strictly prohibited.
 #
-""" Module contains high-level APIs for robotics applications. """
+"""Module contains high-level APIs for robotics applications."""
@@ -8,7 +8,7 @@
 # without an express license agreement from NVIDIA CORPORATION or
 # its affiliates is strictly prohibited.
 #
-"""This module has differentiable layers built from CuRobo's core features for use in Pytorch. """
+"""This module has differentiable layers built from CuRobo's core features for use in Pytorch."""

 # Standard Library
 from dataclasses import dataclass
@@ -25,7 +25,6 @@ from curobo.types.robot import RobotConfig
 from curobo.types.state import JointState
 from curobo.util.logger import log_error
 from curobo.util.torch_utils import (
     get_torch_compile_options,
     get_torch_jit_decorator,
     is_torch_compile_available,
 )
@@ -63,6 +62,8 @@ class RobotSegmenter:
         distance_threshold: float = 0.05,
         use_cuda_graph: bool = True,
         tensor_args: TensorDeviceType = TensorDeviceType(),
+        ops_dtype: torch.dtype = torch.float32,
+        depth_to_meter: float = 0.001,
     ):
         robot_dict = load_yaml(join_path(get_robot_configs_path(), robot_file))["robot_cfg"]
         if collision_sphere_buffer is not None:
@@ -79,7 +80,11 @@ class RobotSegmenter:
         robot_world = RobotWorld(config)

         return RobotSegmenter(
-            robot_world, distance_threshold=distance_threshold, use_cuda_graph=use_cuda_graph
+            robot_world,
+            distance_threshold=distance_threshold,
+            use_cuda_graph=use_cuda_graph,
+            ops_dtype=ops_dtype,
+            depth_to_meter=depth_to_meter,
         )

     def get_pointcloud_from_depth(self, camera_obs: CameraObservation):
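The factory now forwards the two new knobs; hypothetical usage (the classmethod name and module path are assumed):

import torch
from curobo.wrap.model.robot_segmenter import RobotSegmenter  # module path assumed

segmenter = RobotSegmenter.from_robot_file(  # factory name assumed
    "franka.yml",
    distance_threshold=0.05,
    use_cuda_graph=True,
    ops_dtype=torch.float16,  # new: run masking math in half precision
    depth_to_meter=0.001,     # new: depth image units to meters
)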
@@ -171,18 +176,23 @@ class RobotSegmenter:
         robot_spheres = self._robot_world.get_kinematics(q).link_spheres_tensor

         points = self.get_pointcloud_from_depth(camera_obs)
-        camera_to_robot = camera_obs.pose
-        points = points.to(dtype=torch.float32)
+        camera_to_robot = camera_obs.pose.to(TensorDeviceType(dtype=self._ops_dtype))

         if self._out_points_buffer is None:
             self._out_points_buffer = points.clone()
         if self._out_gpt is None:
-            self._out_gpt = torch.zeros((points.shape[0], points.shape[1], 3), device=points.device)
+            self._out_gpt = torch.zeros(
+                (points.shape[0], points.shape[1], 3), device=points.device, dtype=self._ops_dtype
+            )
         if self._out_gp is None:
-            self._out_gp = torch.zeros((camera_to_robot.position.shape[0], 3), device=points.device)
+            self._out_gp = torch.zeros(
+                (camera_to_robot.position.shape[0], 3), device=points.device, dtype=self._ops_dtype
+            )
         if self._out_gq is None:
             self._out_gq = torch.zeros(
-                (camera_to_robot.quaternion.shape[0], 4), device=points.device
+                (camera_to_robot.quaternion.shape[0], 4),
+                device=points.device,
+                dtype=self._ops_dtype,
             )

         points_in_robot_frame = camera_to_robot.batch_transform_points(
@@ -193,11 +203,21 @@ class RobotSegmenter:
             gpt_out=self._out_gpt,
         )

-        out_points = points_in_robot_frame
-
-        mask, filtered_image = mask_spheres_image(
-            camera_obs.depth_image, robot_spheres, out_points, self.distance_threshold
-        )
+        robot_spheres = robot_spheres.to(dtype=self._ops_dtype)
+        if is_torch_compile_available():
+            mask, filtered_image = mask_spheres_image(
+                camera_obs.depth_image,
+                robot_spheres,
+                points_in_robot_frame,
+                self.distance_threshold,
+            )
+        else:
+            mask, filtered_image = mask_spheres_image_cdist(
+                camera_obs.depth_image,
+                robot_spheres,
+                points_in_robot_frame,
+                self.distance_threshold,
+            )

         return mask, filtered_image
@@ -246,8 +266,11 @@ def mask_spheres_image(
     robot_radius = robot_spheres[..., 3]
     points = points.unsqueeze(-2)

+    robot_points = robot_spheres[..., :3]
+
     sph_distance = -1 * (
-        torch.linalg.norm(points - robot_spheres[..., :3], dim=-1) - robot_radius
+        torch.linalg.norm(points - robot_points, dim=-1) - robot_radius
     )  # b, n_spheres
     distance = torch.max(sph_distance, dim=-1)[0]
@@ -259,3 +282,39 @@ def mask_spheres_image(
     mask = torch.logical_and((image > 0.0), (distance > -distance_threshold))
     filtered_image = torch.where(mask, 0, image)
     return mask, filtered_image
+
+
+@get_torch_jit_decorator()
+def mask_spheres_image_cdist(
+    image: torch.Tensor,
+    link_spheres_tensor: torch.Tensor,
+    world_points: torch.Tensor,
+    distance_threshold: float,
+) -> Tuple[torch.Tensor, torch.Tensor]:
+
+    if link_spheres_tensor.shape[0] != 1:
+        assert link_spheres_tensor.shape[0] == world_points.shape[0]
+    if len(world_points.shape) == 2:
+        world_points = world_points.unsqueeze(0)
+
+    robot_spheres = link_spheres_tensor.view(link_spheres_tensor.shape[0], -1, 4).contiguous()
+    robot_spheres = robot_spheres.expand(world_points.shape[0], -1, -1)
+    robot_points = robot_spheres[..., :3]
+
+    robot_radius = robot_spheres[..., 3].unsqueeze(1)
+
+    sph_distance = torch.cdist(world_points, robot_points, p=2.0)
+    sph_distance = -1.0 * (sph_distance - robot_radius)
+
+    distance = torch.max(sph_distance, dim=-1)[0]
+
+    distance = distance.view(
+        image.shape[0],
+        image.shape[1],
+        image.shape[2],
+    )
+    distance_mask = distance > -distance_threshold
+    mask = torch.logical_and((image > 0.0), distance_mask)
+    filtered_image = torch.where(mask, 0, image)
+    return mask, filtered_image
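A self-contained shape check of the new cdist path, with mask_spheres_image_cdist defined as above (sizes are illustrative):

import torch

image = torch.rand(1, 4, 5)          # batch of one 4x5 depth image
world_points = torch.rand(1, 20, 3)  # 4 * 5 = 20 points in robot frame
link_spheres = torch.rand(1, 2, 4)   # two spheres: x, y, z, radius

mask, filtered = mask_spheres_image_cdist(image, link_spheres, world_points, 0.05)
print(mask.shape, filtered.shape)  # torch.Size([1, 4, 5]) for both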
@@ -8,7 +8,7 @@
 # without an express license agreement from NVIDIA CORPORATION or
 # its affiliates is strictly prohibited.
 #
-"""This module has differentiable layers built from CuRobo's core features for use in Pytorch. """
+"""This module has differentiable layers built from CuRobo's core features for use in Pytorch."""

 # Standard Library
 from dataclasses import dataclass
@@ -8,4 +8,4 @@
 # without an express license agreement from NVIDIA CORPORATION or
 # its affiliates is strictly prohibited.
 #
-""" Module contains commonly used high-level APIs for motion generation. """
+"""Module contains commonly used high-level APIs for motion generation."""
@@ -8,7 +8,7 @@
 # without an express license agreement from NVIDIA CORPORATION or
 # its affiliates is strictly prohibited.
 #
-""" This modules contains heuristics for scoring trajectories. """
+"""This module contains heuristics for scoring trajectories."""

 from __future__ import annotations
@@ -8,7 +8,7 @@
 # without an express license agreement from NVIDIA CORPORATION or
 # its affiliates is strictly prohibited.
 #
-""" Module contains custom types and dataclasses used across reacher solvers. """
+"""Module contains custom types and dataclasses used across reacher solvers."""
 from __future__ import annotations

 # Standard Library