Add re-timing, minimum dt robustness

Balakumar Sundaralingam
2024-04-25 12:24:17 -07:00
parent d6e600c88c
commit 7362ccd4c2
54 changed files with 4773 additions and 2189 deletions

View File

@@ -43,16 +43,21 @@ def test_linear_interpolation():
# create max_velocity buffer:
out_traj_gpu, _, _ = get_batch_interpolated_trajectory(
- in_traj, int_dt, max_vel, max_acc=max_acc, max_jerk=max_jerk, raw_dt=raw_dt
+ in_traj,
+ raw_dt,
+ int_dt,
+ max_vel,
+ max_acc=max_acc,
+ max_jerk=max_jerk,
)
#
out_traj_gpu = out_traj_gpu.clone()
out_traj_cpu, _, _ = get_batch_interpolated_trajectory(
in_traj,
+ raw_dt,
int_dt,
max_vel,
- raw_dt=raw_dt,
kind=InterpolateType.LINEAR,
max_acc=max_acc,
max_jerk=max_jerk,
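The hunk above moves raw_dt from a trailing keyword argument to the second positional argument of get_batch_interpolated_trajectory, which makes the raw trajectory dt a required input for re-timing. A minimal sketch of the updated call, mirroring the test (the last two return-value names are placeholders; the test discards them):

    out_traj, last_tstep, opt_dt = get_batch_interpolated_trajectory(
        in_traj,  # JointState trajectory to interpolate
        raw_dt,   # dt of the raw optimized trajectory, now positional and required
        int_dt,   # target interpolation dt
        max_vel,  # joint velocity limits
        max_acc=max_acc,
        max_jerk=max_jerk,
    )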

View File

@@ -0,0 +1,330 @@
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
# Third Party
import pytest
import torch

# CuRobo
from curobo.geom.types import WorldConfig
from curobo.types.base import TensorDeviceType
from curobo.types.math import Pose
from curobo.types.robot import JointState, RobotConfig
from curobo.util.trajectory import InterpolateType
from curobo.util_file import get_robot_configs_path, get_world_configs_path, join_path, load_yaml
from curobo.wrap.reacher.motion_gen import MotionGen, MotionGenConfig, MotionGenPlanConfig

@pytest.fixture(scope="function")
def motion_gen():
tensor_args = TensorDeviceType()
world_file = "collision_table.yml"
robot_file = "franka.yml"
motion_gen_config = MotionGenConfig.load_from_robot_config(
robot_file,
world_file,
tensor_args,
use_cuda_graph=False,
)
motion_gen_instance = MotionGen(motion_gen_config)
return motion_gen_instance

@pytest.fixture(scope="function")
def motion_gen_batch_env():
tensor_args = TensorDeviceType()
world_files = ["collision_table.yml", "collision_test.yml"]
world_cfg = [
WorldConfig.from_dict(load_yaml(join_path(get_world_configs_path(), world_file)))
for world_file in world_files
]
robot_file = "franka.yml"
motion_gen_config = MotionGenConfig.load_from_robot_config(
robot_file,
world_cfg,
tensor_args,
use_cuda_graph=False,
)
motion_gen_instance = MotionGen(motion_gen_config)
return motion_gen_instance

@pytest.mark.parametrize(
"motion_gen_str,interpolation",
[
("motion_gen", InterpolateType.LINEAR),
("motion_gen", InterpolateType.CUBIC),
# ("motion_gen", InterpolateType.KUNZ_STILMAN_OPTIMAL),
("motion_gen", InterpolateType.LINEAR_CUDA),
],
)
def test_motion_gen_single(motion_gen_str, interpolation, request):
motion_gen = request.getfixturevalue(motion_gen_str)
motion_gen.update_interpolation_type(interpolation)
motion_gen.warmup()
retract_cfg = motion_gen.get_retract_config()
state = motion_gen.compute_kinematics(JointState.from_position(retract_cfg.view(1, -1)))
goal_pose = Pose(state.ee_pos_seq, quaternion=state.ee_quat_seq)
start_state = JointState.from_position(retract_cfg.view(1, -1) + 0.3)
m_config = MotionGenPlanConfig(False, True)
result = motion_gen.plan_single(start_state, goal_pose, m_config)
# get final solutions:
assert torch.count_nonzero(result.success) == 1
reached_state = motion_gen.compute_kinematics(result.optimized_plan[-1])
assert torch.norm(goal_pose.position - reached_state.ee_pos_seq) < 0.005

def test_motion_gen_goalset(motion_gen):
motion_gen.warmup(n_goalset=2)
retract_cfg = motion_gen.get_retract_config()
state = motion_gen.compute_kinematics(JointState.from_position(retract_cfg.view(1, -1)))
goal_pose = Pose(
state.ee_pos_seq.repeat(2, 1).view(1, -1, 3),
quaternion=state.ee_quat_seq.repeat(2, 1).view(1, -1, 4),
)
goal_pose.position[0, 0, 0] -= 0.1
start_state = JointState.from_position(retract_cfg.view(1, -1) + 0.3)
m_config = MotionGenPlanConfig(False, True)
result = motion_gen.plan_goalset(start_state, goal_pose, m_config)
# get final solutions:
assert torch.count_nonzero(result.success) == 1
reached_state = motion_gen.compute_kinematics(result.optimized_plan[-1])
assert (
torch.min(
torch.norm(goal_pose.position[:, 0, :] - reached_state.ee_pos_seq),
torch.norm(goal_pose.position[:, 1, :] - reached_state.ee_pos_seq),
)
< 0.005
)
assert result.goalset_index is not None
assert (
torch.norm(goal_pose.position[:, result.goalset_index, :] - reached_state.ee_pos_seq)
< 0.005
)

def test_motion_gen_batch_goalset(motion_gen):
motion_gen.warmup(n_goalset=3, batch=3, warmup_js_trajopt=False, enable_graph=False)
retract_cfg = motion_gen.get_retract_config()
state = motion_gen.compute_kinematics(JointState.from_position(retract_cfg.view(1, -1)))
goal_pose = Pose(
state.ee_pos_seq.repeat(6, 1).view(3, -1, 3).clone(),
quaternion=state.ee_quat_seq.repeat(6, 1).view(3, -1, 4).clone(),
)
goal_pose.position[0, 1, 1] = 0.2
goal_pose.position[1, 0, 1] = 0.2
goal_pose.position[2, 1, 1] = 0.2
start_state = JointState.from_position(retract_cfg.view(1, -1) + 0.2).repeat_seeds(3)
m_config = MotionGenPlanConfig(False, True, max_attempts=1, enable_graph_attempt=None)
result = motion_gen.plan_batch_goalset(start_state, goal_pose, m_config)
# get final solutions:
assert torch.count_nonzero(result.success) == result.success.shape[0]
reached_state = motion_gen.compute_kinematics(result.optimized_plan.trim_trajectory(-1))
#
goal_position = torch.cat(
[
goal_pose.position[x, result.goalset_index[x], :].unsqueeze(0)
for x in range(len(result.goalset_index))
]
)
assert result.goalset_index is not None
assert torch.max(torch.norm(goal_position - reached_state.ee_pos_seq, dim=-1)) < 0.005

def test_motion_gen_batch(motion_gen):
motion_gen.warmup(batch=2)
retract_cfg = motion_gen.get_retract_config()
state = motion_gen.compute_kinematics(JointState.from_position(retract_cfg.view(1, -1)))
goal_pose = Pose(
state.ee_pos_seq.squeeze(), quaternion=state.ee_quat_seq.squeeze()
).repeat_seeds(2)
start_state = JointState.from_position(retract_cfg.view(1, -1) + 0.3).repeat_seeds(2)
goal_pose.position[1, 0] -= 0.1
m_config = MotionGenPlanConfig(False, True)
result = motion_gen.plan_batch(start_state, goal_pose.clone(), m_config)
assert torch.count_nonzero(result.success) == 2
# get final solutions:
q = result.optimized_plan.trim_trajectory(-1).squeeze(1)
reached_state = motion_gen.compute_kinematics(q)
assert torch.norm(goal_pose.position - reached_state.ee_pos_seq) < 0.005

@pytest.mark.parametrize(
"motion_gen_str,interpolation",
[
("motion_gen", InterpolateType.LINEAR),
("motion_gen", InterpolateType.CUBIC),
# ("motion_gen", InterpolateType.KUNZ_STILMAN_OPTIMAL),
("motion_gen", InterpolateType.LINEAR_CUDA),
],
)
def test_motion_gen_batch_graph(motion_gen_str: str, interpolation: InterpolateType, request):
motion_gen = request.getfixturevalue(motion_gen_str)
motion_gen.graph_planner.interpolation_type = interpolation
motion_gen.reset()
retract_cfg = motion_gen.get_retract_config()
state = motion_gen.compute_kinematics(JointState.from_position(retract_cfg.view(1, -1)))
goal_pose = Pose(
state.ee_pos_seq.squeeze(), quaternion=state.ee_quat_seq.squeeze()
).repeat_seeds(5)
start_state = JointState.from_position(retract_cfg.view(1, -1) + 0.3).repeat_seeds(5)
goal_pose.position[1, 0] -= 0.05
m_config = MotionGenPlanConfig(True, False)
result = motion_gen.plan_batch(start_state, goal_pose, m_config)
assert torch.count_nonzero(result.success) > 0
# get final solutions:
q = result.interpolated_plan.trim_trajectory(-1).squeeze(1)
reached_state = motion_gen.compute_kinematics(q)
assert torch.norm(goal_pose.position - reached_state.ee_pos_seq) < 0.005

def test_motion_gen_batch_env(motion_gen_batch_env):
motion_gen_batch_env.warmup(batch=2, batch_env_mode=True, enable_graph=False)
# motion_gen_batch_env.reset()
retract_cfg = motion_gen_batch_env.get_retract_config()
state = motion_gen_batch_env.compute_kinematics(
JointState.from_position(retract_cfg.view(1, -1))
)
goal_pose = Pose(
state.ee_pos_seq.squeeze(), quaternion=state.ee_quat_seq.squeeze()
).repeat_seeds(2)
start_state = JointState.from_position(retract_cfg.view(1, -1) + 0.3).repeat_seeds(2)
goal_pose.position[1, 0] -= 0.1
m_config = MotionGenPlanConfig(False, True, max_attempts=1)
result = motion_gen_batch_env.plan_batch_env(start_state, goal_pose, m_config)
assert torch.count_nonzero(result.success) == 2
# get final solutions:
reached_state = motion_gen_batch_env.compute_kinematics(
result.optimized_plan.trim_trajectory(-1).squeeze(1)
)
assert torch.norm(goal_pose.position - reached_state.ee_pos_seq) < 0.005

def test_motion_gen_batch_env_goalset(motion_gen_batch_env):
motion_gen_batch_env.warmup(batch=2, batch_env_mode=True, n_goalset=2, enable_graph=False)
retract_cfg = motion_gen_batch_env.get_retract_config()
state = motion_gen_batch_env.compute_kinematics(
JointState.from_position(retract_cfg.view(1, -1))
)
goal_pose = Pose(
state.ee_pos_seq.repeat(4, 1).view(2, -1, 3),
quaternion=state.ee_quat_seq.repeat(4, 1).view(2, -1, 4),
)
start_state = JointState.from_position(retract_cfg.view(1, -1) + 0.3).repeat_seeds(2)
goal_pose.position[1, 0] -= 0.2
m_config = MotionGenPlanConfig(False, True, enable_graph_attempt=None)
result = motion_gen_batch_env.plan_batch_env_goalset(start_state, goal_pose, m_config)
assert torch.count_nonzero(result.success) > 0
# get final solutions:
reached_state = motion_gen_batch_env.compute_kinematics(
result.optimized_plan.trim_trajectory(-1).squeeze(1)
)
assert (
torch.min(
torch.norm(goal_pose.position[:, 0, :] - reached_state.ee_pos_seq),
torch.norm(goal_pose.position[:, 1, :] - reached_state.ee_pos_seq),
)
< 0.005
)
goal_position = torch.cat(
[
goal_pose.position[x, result.goalset_index[x], :].unsqueeze(0)
for x in range(len(result.goalset_index))
]
)
assert result.goalset_index is not None
assert torch.max(torch.norm(goal_position - reached_state.ee_pos_seq, dim=-1)) < 0.005

@pytest.mark.parametrize(
"motion_gen_str,enable_graph",
[
("motion_gen", True),
("motion_gen", False),
],
)
def test_motion_gen_single_js(motion_gen_str, enable_graph, request):
motion_gen = request.getfixturevalue(motion_gen_str)
motion_gen.warmup(warmup_js_trajopt=True)
retract_cfg = motion_gen.get_retract_config()
start_state = JointState.from_position(retract_cfg.view(1, -1) + 0.3)
m_config = MotionGenPlanConfig(enable_graph=enable_graph, max_attempts=2)
goal_state = start_state.clone()
goal_state.position -= 0.3
result = motion_gen.plan_single_js(start_state, goal_state, m_config)
assert torch.count_nonzero(result.success) == 1
reached_state = result.optimized_plan[-1]
assert torch.norm(goal_state.position - reached_state.position) < 0.05
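Taken together, these new tests double as usage examples. A condensed sketch of the single-goal flow exercised in test_motion_gen_single, using only calls that appear in this diff (it assumes cuRobo's bundled config files and a CUDA-capable device):

    from curobo.types.math import Pose
    from curobo.types.robot import JointState
    from curobo.wrap.reacher.motion_gen import MotionGen, MotionGenConfig, MotionGenPlanConfig

    config = MotionGenConfig.load_from_robot_config("franka.yml", "collision_table.yml")
    motion_gen = MotionGen(config)
    motion_gen.warmup()
    retract_cfg = motion_gen.get_retract_config()
    kin_state = motion_gen.compute_kinematics(JointState.from_position(retract_cfg.view(1, -1)))
    goal_pose = Pose(kin_state.ee_pos_seq, quaternion=kin_state.ee_quat_seq)
    start_state = JointState.from_position(retract_cfg.view(1, -1) + 0.3)
    result = motion_gen.plan_single(start_state, goal_pose, MotionGenPlanConfig(max_attempts=2))
    if result.success.item():
        plan = result.get_interpolated_plan()  # JointState sampled at interpolation_dt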

View File

@@ -40,6 +40,24 @@ def motion_gen(request):
return motion_gen_instance

@pytest.fixture(scope="module")
def motion_gen_ur5e():
tensor_args = TensorDeviceType()
world_file = "collision_table.yml"
robot_file = "ur5e.yml"
motion_gen_config = MotionGenConfig.load_from_robot_config(
robot_file,
world_file,
tensor_args,
interpolation_steps=10000,
interpolation_dt=0.05,
)
motion_gen_instance = MotionGen(motion_gen_config)
motion_gen_instance.warmup(warmup_js_trajopt=False, enable_graph=False)
return motion_gen_instance

@pytest.mark.parametrize(
"motion_gen",
[
@@ -66,3 +84,126 @@ def test_motion_gen_velocity_scale(motion_gen):
result = motion_gen.plan_single(start_state, goal_pose, m_config)
assert torch.count_nonzero(result.success) == 1

@pytest.mark.parametrize(
"velocity_scale, acceleration_scale",
[
(1.0, 1.0),
(0.75, 1.0),
(0.5, 1.0),
(0.25, 1.0),
(0.15, 1.0),
(0.1, 1.0),
(1.0, 0.1),
(0.75, 0.1),
(0.5, 0.1),
(0.25, 0.1),
(0.15, 0.1),
(0.1, 0.1),
],
)
def test_pose_sequence_speed_ur5e_scale(velocity_scale, acceleration_scale):
# load ur5e motion gen:
world_file = "collision_table.yml"
robot_file = "ur5e.yml"
motion_gen_config = MotionGenConfig.load_from_robot_config(
robot_file,
world_file,
interpolation_dt=(1.0 / 5.0),
velocity_scale=velocity_scale,
acceleration_scale=acceleration_scale,
)
motion_gen = MotionGen(motion_gen_config)
motion_gen.warmup(warmup_js_trajopt=False, enable_graph=False)
retract_cfg = motion_gen.get_retract_config()
start_state = JointState.from_position(retract_cfg.view(1, -1))
# poses for ur5e:
home_pose = [-0.431, 0.172, 0.348, 0, 1, 0, 0]
pose_1 = [0.157, -0.443, 0.427, 0, 1, 0, 0]
pose_2 = [0.126, -0.443, 0.729, 0, 0, 1, 0]
pose_3 = [-0.449, 0.339, 0.414, -0.681, -0.000, 0.000, 0.732]
pose_4 = [-0.449, 0.339, 0.414, 0.288, 0.651, -0.626, -0.320]
pose_5 = [-0.218, 0.508, 0.670, 0.529, 0.169, 0.254, 0.792]
pose_6 = [-0.865, 0.001, 0.411, 0.286, 0.648, -0.628, -0.321]
pose_list = [home_pose, pose_1, pose_2, pose_3, pose_4, pose_5, pose_6, home_pose]
trajectory = start_state
motion_time = 0
fail = 0
for i, pose in enumerate(pose_list):
goal_pose = Pose.from_list(pose, q_xyzw=False)
start_state = trajectory[-1].unsqueeze(0).clone()
start_state.velocity[:] = 0.0
start_state.acceleration[:] = 0.0
result = motion_gen.plan_single(
start_state.clone(),
goal_pose,
plan_config=MotionGenPlanConfig(
max_attempts=5,
),
)
if result.success.item():
plan = result.get_interpolated_plan()
trajectory = trajectory.stack(plan.clone())
motion_time += result.motion_time
else:
fail += 1
assert fail == 0
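velocity_scale and acceleration_scale appear to scale down the joint limits used when re-timing, so this grid probes planning reliability down to 10 percent of nominal speed. A sketch of configuring a deliberately slow planner with the same keywords the test uses:

    motion_gen_config = MotionGenConfig.load_from_robot_config(
        "ur5e.yml",
        "collision_table.yml",
        interpolation_dt=(1.0 / 5.0),
        velocity_scale=0.25,     # plan at 25% of the robot's velocity limits
        acceleration_scale=0.1,  # plan at 10% of the acceleration limits
    )
    motion_gen = MotionGen(motion_gen_config)
    motion_gen.warmup(warmup_js_trajopt=False, enable_graph=False)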
@pytest.mark.parametrize(
"motion_gen_str, time_dilation_factor",
[
("motion_gen_ur5e", 1.0),
("motion_gen_ur5e", 0.75),
("motion_gen_ur5e", 0.5),
("motion_gen_ur5e", 0.25),
("motion_gen_ur5e", 0.15),
("motion_gen_ur5e", 0.1),
("motion_gen_ur5e", 0.001),
],
)
def test_pose_sequence_speed_ur5e_time_dilation(motion_gen_str, time_dilation_factor, request):
# load ur5e motion gen:
motion_gen = request.getfixturevalue(motion_gen_str)
retract_cfg = motion_gen.get_retract_config()
start_state = JointState.from_position(retract_cfg.view(1, -1))
# poses for ur5e:
home_pose = [-0.431, 0.172, 0.348, 0, 1, 0, 0]
pose_1 = [0.157, -0.443, 0.427, 0, 1, 0, 0]
pose_2 = [0.126, -0.443, 0.729, 0, 0, 1, 0]
pose_3 = [-0.449, 0.339, 0.414, -0.681, -0.000, 0.000, 0.732]
pose_4 = [-0.449, 0.339, 0.414, 0.288, 0.651, -0.626, -0.320]
pose_5 = [-0.218, 0.508, 0.670, 0.529, 0.169, 0.254, 0.792]
pose_6 = [-0.865, 0.001, 0.411, 0.286, 0.648, -0.628, -0.321]
pose_list = [home_pose, pose_1, pose_2, pose_3, pose_4, pose_5, pose_6, home_pose]
trajectory = start_state
motion_time = 0
fail = 0
for i, pose in enumerate(pose_list):
goal_pose = Pose.from_list(pose, q_xyzw=False)
start_state = trajectory[-1].unsqueeze(0).clone()
start_state.velocity[:] = 0.0
start_state.acceleration[:] = 0.0
result = motion_gen.plan_single(
start_state.clone(),
goal_pose,
plan_config=MotionGenPlanConfig(
max_attempts=5,
time_dilation_factor=time_dilation_factor,
),
)
if result.success.item():
plan = result.get_interpolated_plan()
trajectory = trajectory.stack(plan.clone())
motion_time += result.motion_time
else:
fail += 1
assert fail == 0
assert motion_time < 15 * (1 / time_dilation_factor)
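The closing assert encodes the re-timing contract: motion time scales roughly with 1 / time_dilation_factor, so a factor of 0.5 should about double the executed duration. A minimal sketch of requesting a slowed-down plan, using the same parameter as the test above:

    plan_config = MotionGenPlanConfig(
        max_attempts=5,
        time_dilation_factor=0.5,  # values < 1.0 slow the re-timed trajectory
    )
    result = motion_gen.plan_single(start_state, goal_pose, plan_config=plan_config)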

View File

@@ -57,15 +57,15 @@ def trajopt_solver_batch_env():
robot_cfg = RobotConfig.from_dict(
load_yaml(join_path(get_robot_configs_path(), robot_file))["robot_cfg"]
)
# world_cfg = WorldConfig.from_dict(load_yaml(join_path(get_world_configs_path(), world_file)))
trajopt_config = TrajOptSolverConfig.load_from_robot_config(
robot_cfg,
world_cfg,
tensor_args,
use_cuda_graph=False,
- num_seeds=10,
+ num_seeds=4,
evaluate_interpolated_trajectory=True,
grad_trajopt_iters=200,
)
trajopt_solver = TrajOptSolver(trajopt_config)
@@ -73,7 +73,7 @@ def trajopt_solver_batch_env():
def test_trajopt_single_js(trajopt_solver):
- q_start = trajopt_solver.retract_config
+ q_start = trajopt_solver.retract_config.clone()
q_goal = q_start.clone() + 0.2
goal_state = JointState.from_position(q_goal)
current_state = JointState.from_position(q_start)
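The recurring retract_config -> retract_config.clone() changes in this file read as defensive copies: retract_config presumably aliases a solver-internal tensor, so an in-place edit through a non-cloned reference could leak into later tests. A sketch of the hazard being avoided, under that assumption:

    q_start = trajopt_solver.retract_config          # aliases the solver's internal buffer
    q_start += 0.2                                   # in-place edit would corrupt shared state
    q_start = trajopt_solver.retract_config.clone()  # independent copy, safe to mutate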
@@ -88,7 +88,7 @@ def test_trajopt_single_js(trajopt_solver):
def test_trajopt_single_pose(trajopt_solver):
trajopt_solver.reset_seed()
- q_start = trajopt_solver.retract_config
+ q_start = trajopt_solver.retract_config.clone()
q_goal = q_start.clone() + 0.1
kin_state = trajopt_solver.fk(q_goal)
goal_pose = Pose(kin_state.ee_position, kin_state.ee_quaternion)
@@ -102,7 +102,7 @@ def test_trajopt_single_pose(trajopt_solver):
def test_trajopt_single_pose_no_seed(trajopt_solver):
trajopt_solver.reset_seed()
- q_start = trajopt_solver.retract_config
+ q_start = trajopt_solver.retract_config.clone()
q_goal = q_start.clone() + 0.05
kin_state = trajopt_solver.fk(q_goal)
goal_pose = Pose(kin_state.ee_position, kin_state.ee_quaternion)
@@ -116,7 +116,7 @@ def test_trajopt_single_pose_no_seed(trajopt_solver):
def test_trajopt_single_goalset(trajopt_solver):
# run goalset planning:
- q_start = trajopt_solver.retract_config
+ q_start = trajopt_solver.retract_config.clone()
q_goal = q_start.clone() + 0.1
kin_state = trajopt_solver.fk(q_goal)
goal_pose = Pose(kin_state.ee_position, kin_state.ee_quaternion)
@@ -133,7 +133,7 @@ def test_trajopt_single_goalset(trajopt_solver):
def test_trajopt_batch(trajopt_solver):
# run goalset planning:
- q_start = trajopt_solver.retract_config.repeat(2, 1)
+ q_start = trajopt_solver.retract_config.clone().repeat(2, 1)
q_goal = q_start.clone()
q_goal[0] += 0.1
q_goal[1] -= 0.1
@@ -153,7 +153,7 @@ def test_trajopt_batch(trajopt_solver):
def test_trajopt_batch_js(trajopt_solver):
# run goalset planning:
- q_start = trajopt_solver.retract_config.repeat(2, 1)
+ q_start = trajopt_solver.retract_config.clone().repeat(2, 1)
q_goal = q_start.clone()
q_goal[0] += 0.1
q_goal[1] -= 0.1
@@ -173,7 +173,7 @@ def test_trajopt_batch_js(trajopt_solver):
def test_trajopt_batch_goalset(trajopt_solver):
# run goalset planning:
- q_start = trajopt_solver.retract_config.repeat(3, 1)
+ q_start = trajopt_solver.retract_config.clone().repeat(3, 1)
q_goal = q_start.clone()
q_goal[0] += 0.1
q_goal[1] -= 0.1
@@ -196,14 +196,12 @@ def test_trajopt_batch_goalset(trajopt_solver):
def test_trajopt_batch_env_js(trajopt_solver_batch_env):
# run goalset planning:
- q_start = trajopt_solver_batch_env.retract_config.repeat(3, 1)
+ q_start = trajopt_solver_batch_env.retract_config.clone().repeat(3, 1)
q_goal = q_start.clone()
q_goal += 0.1
- q_goal[2] += 0.1
+ q_goal[2][0] += 0.1
q_goal[1] -= 0.2
- # q_goal[2, -1] += 0.1
kin_state = trajopt_solver_batch_env.fk(q_goal)
goal_pose = Pose(kin_state.ee_position, kin_state.ee_quaternion)
goal_state = JointState.from_position(q_goal)
current_state = JointState.from_position(q_start)
@@ -213,14 +211,15 @@ def test_trajopt_batch_env_js(trajopt_solver_batch_env):
traj = result.solution.position
interpolated_traj = result.interpolated_solution.position
assert torch.count_nonzero(result.success) == 3
- assert torch.linalg.norm((goal_state.position - traj[:, -1, :])).item() < 0.005
- assert torch.linalg.norm((goal_state.position - interpolated_traj[:, -1, :])).item() < 0.005
+ error = torch.linalg.norm((goal_state.position - traj[:, -1, :]), dim=-1)
+ assert torch.max(error).item() < 0.05
+ assert torch.linalg.norm((goal_state.position - interpolated_traj[:, -1, :])).item() < 0.05
assert len(result) == 3
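Note the strengthened batch check: the old assert collapsed all three solutions into a single norm with one shared 0.005 budget, while the new version computes a per-solution terminal error (dim=-1) and bounds the worst case with torch.max, which is the more meaningful robustness criterion for batched planning.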
def test_trajopt_batch_env(trajopt_solver_batch_env):
# run goalset planning:
- q_start = trajopt_solver_batch_env.retract_config.repeat(3, 1)
+ q_start = trajopt_solver_batch_env.retract_config.clone().repeat(3, 1)
q_goal = q_start.clone()
q_goal[0] += 0.1
q_goal[1] -= 0.1
@@ -262,7 +261,7 @@ def test_trajopt_batch_env_goalset(trajopt_solver_batch_env):
def test_trajopt_batch_env(trajopt_solver):
# run goalset planning:
- q_start = trajopt_solver.retract_config.repeat(3, 1)
+ q_start = trajopt_solver.retract_config.clone().repeat(3, 1)
q_goal = q_start.clone()
q_goal[0] += 0.1
q_goal[1] -= 0.1