Improved precision, quality and js planner.

This commit is contained in:
Balakumar Sundaralingam
2024-04-11 13:19:01 -07:00
parent 774dcfd609
commit d6e600c88c
51 changed files with 2128 additions and 1054 deletions

View File

@@ -10,10 +10,23 @@ its affiliates is strictly prohibited.
--> -->
# Changelog # Changelog
## Latest Commit ## Version 0.7.1
### New Features ### New Features
- Add mimic joint parsing and optimization support. Check `ur5e_robotiq_2f_140.yml`. - Add mimic joint parsing and optimization support. Check `ur5e_robotiq_2f_140.yml`.
- Add `finetune_dt_scale` as a parameter to `MotionGenPlanConfig` to dynamically change the
time-optimal scaling on a per problem instance.
- `MotionGen.plan_single()` will now try finetuning in a for-loop, with larger and larger dt
until convergence. This also warm starts from previous failure.
- Add `high_precision` mode to `MotionGenConfig` to support `<1mm` convergence.
### Changes in default behavior
- collision_sphere_buffer now supports having offset per link. Also, collision_sphere_buffer only
applies to world collision while self_collision_buffer applies for self collision. Previously,
self_collision_buffer was added on top of collision_sphere_buffer.
- `TrajEvaluatorConfig` cannot be initialized without dof as now per-joint jerk and acceleration
limits are used. Use `TrajEvaluatorConfig.from_basic()` to initialize similar to previous behavior.
- `finetune_dt_scale` default value is 0.9 from 0.95.
### BugFixes & Misc. ### BugFixes & Misc.
- Fix bug in `WorldVoxelCollision` where `env_query_idx` was being overwritten. - Fix bug in `WorldVoxelCollision` where `env_query_idx` was being overwritten.
@@ -26,6 +39,21 @@ its affiliates is strictly prohibited.
- Added flag to sample from ik seeder instead of `rollout_fn` sampler. - Added flag to sample from ik seeder instead of `rollout_fn` sampler.
- Added ik startup profiler to `benchmark/curobo_python_profile.py`. - Added ik startup profiler to `benchmark/curobo_python_profile.py`.
- Reduced branching in Kinematics kernels and added mimic joint computations. - Reduced branching in Kinematics kernels and added mimic joint computations.
- Add init_cache to WorldVoxelCollision to create cache for Mesh and Cuboid obstacles.
- `TrajEvaluator` now uses per-joint acceleration and jerk limits.
- Fixed regression in `batch_motion_gen_reacher.py` example where robot's position was not being
set correctly.
- Switched from smooth l2 to l2 for BoundCost as that gives better convergence.
- `requires_grad` is explicitly stored in a variable before `tensor.detach()` in warp kernel calls
as this can get set to False in some instances.
- Fix dt update in `MotionGen.plan_single_js()` where dt was not reset after finetune step, causing
joint space planner to fail often.
- Improve joint space planner success by changing smooth l2 distance cost to l2 distance. Also,
added fallback to graph planner when linear path is not possible.
- Retuned weights for IKSolver, now 98th percentile accuracy is 10 micrometers with 16 seeds
(vs 24 seeds previously).
- Switch float8 precision check from `const` to macro to avoid compile errors in older nvcc, this
fixes docker build issues for isaac sim 2023.1.0.
## Version 0.7.0 ## Version 0.7.0
### Changes in default behavior ### Changes in default behavior

View File

@@ -152,7 +152,6 @@ def load_curobo(
robot_cfg["kinematics"]["collision_link_names"].remove("attached_object") robot_cfg["kinematics"]["collision_link_names"].remove("attached_object")
robot_cfg["kinematics"]["ee_link"] = "panda_hand" robot_cfg["kinematics"]["ee_link"] = "panda_hand"
# del robot_cfg["kinematics"]
if ik_seeds is None: if ik_seeds is None:
ik_seeds = 32 ik_seeds = 32
@@ -211,6 +210,7 @@ def load_curobo(
trajopt_dt=0.25, trajopt_dt=0.25,
finetune_dt_scale=finetune_dt_scale, finetune_dt_scale=finetune_dt_scale,
maximum_trajectory_dt=0.15, maximum_trajectory_dt=0.15,
high_precision=args.high_precision,
) )
mg = MotionGen(motion_gen_config) mg = MotionGen(motion_gen_config)
mg.warmup(enable_graph=True, warmup_js_trajopt=False, parallel_finetune=parallel_finetune) mg.warmup(enable_graph=True, warmup_js_trajopt=False, parallel_finetune=parallel_finetune)
@@ -240,7 +240,7 @@ def benchmark_mb(
og_tsteps = 32 og_tsteps = 32
if override_tsteps is not None: if override_tsteps is not None:
og_tsteps = override_tsteps og_tsteps = override_tsteps
og_finetune_dt_scale = 0.9 og_finetune_dt_scale = 0.85
og_trajopt_seeds = 4 og_trajopt_seeds = 4
og_parallel_finetune = True og_parallel_finetune = True
og_collision_activation_distance = 0.01 og_collision_activation_distance = 0.01
@@ -252,6 +252,7 @@ def benchmark_mb(
if "dresser_task_oriented" in list(problems.keys()): if "dresser_task_oriented" in list(problems.keys()):
mpinets_data = True mpinets_data = True
for key, v in tqdm(problems.items()): for key, v in tqdm(problems.items()):
finetune_dt_scale = og_finetune_dt_scale finetune_dt_scale = og_finetune_dt_scale
force_graph = False force_graph = False
tsteps = og_tsteps tsteps = og_tsteps
@@ -260,23 +261,12 @@ def benchmark_mb(
parallel_finetune = og_parallel_finetune parallel_finetune = og_parallel_finetune
ik_seeds = og_ik_seeds ik_seeds = og_ik_seeds
if "cage_panda" in key: scene_problems = problems[key]
trajopt_seeds = 8
if "table_under_pick_panda" in key:
trajopt_seeds = 8
finetune_dt_scale = 0.95
if key == "cubby_task_oriented": # or key == "merged_cubby_task_oriented":
trajopt_seeds = 16
finetune_dt_scale = 0.95
if "dresser_task_oriented" in key:
trajopt_seeds = 16
finetune_dt_scale = 0.95
scene_problems = problems[key][:]
n_cubes = check_problems(scene_problems) n_cubes = check_problems(scene_problems)
if "cubby_task_oriented" in key and "merged" not in key:
trajopt_seeds = 8
mg, robot_cfg = load_curobo( mg, robot_cfg = load_curobo(
n_cubes, n_cubes,
enable_debug, enable_debug,
@@ -302,7 +292,7 @@ def benchmark_mb(
continue continue
plan_config = MotionGenPlanConfig( plan_config = MotionGenPlanConfig(
max_attempts=20, max_attempts=20, # 20,
enable_graph_attempt=1, enable_graph_attempt=1,
disable_graph_attempt=10, disable_graph_attempt=10,
enable_finetune_trajopt=not args.no_finetune, enable_finetune_trajopt=not args.no_finetune,
@@ -312,6 +302,7 @@ def benchmark_mb(
enable_opt=not graph_mode, enable_opt=not graph_mode,
need_graph_success=force_graph, need_graph_success=force_graph,
parallel_finetune=parallel_finetune, parallel_finetune=parallel_finetune,
finetune_dt_scale=finetune_dt_scale,
) )
q_start = problem["start"] q_start = problem["start"]
pose = ( pose = (
@@ -579,6 +570,22 @@ def benchmark_mb(
g_m = CuroboGroupMetrics.from_list(all_groups) g_m = CuroboGroupMetrics.from_list(all_groups)
if not args.kpi: if not args.kpi:
try:
from tabulate import tabulate
headers = ["Metric", "Value"]
table = [
["Success %", f"{g_m.success:2.2f}"],
["Plan Time (s)", g_m.time],
["Motion Time(s)", g_m.motion_time],
["Path Length (rad.)", g_m.cspace_path_length],
["Jerk", g_m.jerk],
["Position Error (mm)", g_m.position_error],
]
print(tabulate(table, headers, tablefmt="grid"))
except ImportError:
print( print(
"All: ", "All: ",
f"{g_m.success:2.2f}", f"{g_m.success:2.2f}",
@@ -588,7 +595,6 @@ def benchmark_mb(
g_m.position_error.percent_75, g_m.position_error.percent_75,
g_m.orientation_error.percent_75, g_m.orientation_error.percent_75,
) )
if write_benchmark: if write_benchmark:
if not mpinets_data: if not mpinets_data:
write_yaml(problems, args.file_name + "_mb_solution.yaml") write_yaml(problems, args.file_name + "_mb_solution.yaml")
@@ -596,6 +602,23 @@ def benchmark_mb(
write_yaml(problems, args.file_name + "_mpinets_solution.yaml") write_yaml(problems, args.file_name + "_mpinets_solution.yaml")
all_files += all_groups all_files += all_groups
g_m = CuroboGroupMetrics.from_list(all_files) g_m = CuroboGroupMetrics.from_list(all_files)
try:
from tabulate import tabulate
headers = ["Metric", "Value"]
table = [
["Success %", f"{g_m.success:2.2f}"],
["Plan Time (s)", g_m.time],
["Motion Time(s)", g_m.motion_time],
["Path Length (rad.)", g_m.cspace_path_length],
["Jerk", g_m.jerk],
["Position Error (mm)", g_m.position_error],
]
print(tabulate(table, headers, tablefmt="grid"))
except ImportError:
print("######## FULL SET ############") print("######## FULL SET ############")
print("All: ", f"{g_m.success:2.2f}") print("All: ", f"{g_m.success:2.2f}")
print("MT: ", g_m.motion_time) print("MT: ", g_m.motion_time)
@@ -716,6 +739,12 @@ if __name__ == "__main__":
help="When True, runs benchmark with parameters for jetson", help="When True, runs benchmark with parameters for jetson",
default=False, default=False,
) )
parser.add_argument(
"--high_precision",
action="store_true",
help="When True, runs benchmark with parameters for jetson",
default=False,
)
args = parser.parse_args() args = parser.parse_args()

View File

@@ -147,7 +147,7 @@ def load_curobo(
"world": { "world": {
"pose": [0, 0, 0, 1, 0, 0, 0], "pose": [0, 0, 0, 1, 0, 0, 0],
"integrator_type": "tsdf", "integrator_type": "tsdf",
"voxel_size": 0.01, "voxel_size": 0.02,
} }
} }
} }
@@ -177,9 +177,9 @@ def load_curobo(
interpolation_steps=interpolation_steps, interpolation_steps=interpolation_steps,
collision_activation_distance=collision_activation_distance, collision_activation_distance=collision_activation_distance,
trajopt_dt=0.25, trajopt_dt=0.25,
finetune_dt_scale=1.0, finetune_dt_scale=0.9,
maximum_trajectory_dt=0.1, maximum_trajectory_dt=0.15,
finetune_trajopt_iters=300, finetune_trajopt_iters=200,
) )
mg = MotionGen(motion_gen_config) mg = MotionGen(motion_gen_config)
mg.warmup(enable_graph=True, warmup_js_trajopt=False, parallel_finetune=True) mg.warmup(enable_graph=True, warmup_js_trajopt=False, parallel_finetune=True)
@@ -208,7 +208,7 @@ def benchmark_mb(
# load dataset: # load dataset:
graph_mode = args.graph graph_mode = args.graph
interpolation_dt = 0.02 interpolation_dt = 0.02
file_paths = [demo_raw, motion_benchmaker_raw, mpinets_raw][2:] file_paths = [demo_raw, motion_benchmaker_raw, mpinets_raw][1:2]
enable_debug = save_log or plot_cost enable_debug = save_log or plot_cost
all_files = [] all_files = []
@@ -237,8 +237,9 @@ def benchmark_mb(
mpinets_data = True mpinets_data = True
if "cage_panda" in key: if "cage_panda" in key:
trajopt_seeds = 16 trajopt_seeds = 8
finetune_dt_scale = 0.95 else:
continue
if "table_under_pick_panda" in key: if "table_under_pick_panda" in key:
tsteps = 44 tsteps = 44
trajopt_seeds = 16 trajopt_seeds = 16

View File

@@ -133,7 +133,7 @@ def load_curobo(
robot_cfg = load_yaml(join_path(get_robot_configs_path(), "franka.yml"))["robot_cfg"] robot_cfg = load_yaml(join_path(get_robot_configs_path(), "franka.yml"))["robot_cfg"]
robot_cfg["kinematics"]["collision_sphere_buffer"] = -0.00 robot_cfg["kinematics"]["collision_sphere_buffer"] = -0.00
ik_seeds = 24 ik_seeds = 32
if graph_mode: if graph_mode:
trajopt_seeds = 4 trajopt_seeds = 4
if trajopt_seeds >= 14: if trajopt_seeds >= 14:
@@ -253,21 +253,9 @@ def benchmark_mb(
collision_activation_distance = og_collision_activation_distance collision_activation_distance = og_collision_activation_distance
finetune_dt_scale = 0.9 finetune_dt_scale = 0.9
parallel_finetune = True parallel_finetune = True
if "cage_panda" in key: if "cubby_task_oriented" in key and "merged" not in key:
trajopt_seeds = 8 trajopt_seeds = 8
if "table_under_pick_panda" in key:
trajopt_seeds = 8
finetune_dt_scale = 0.98
if key == "cubby_task_oriented":
trajopt_seeds = 16
finetune_dt_scale = 0.98
if "dresser_task_oriented" in key:
trajopt_seeds = 16
finetune_dt_scale = 0.98
mg, robot_cfg, robot_world = load_curobo( mg, robot_cfg, robot_world = load_curobo(
0, 0,
enable_debug, enable_debug,
@@ -285,16 +273,12 @@ def benchmark_mb(
i += 1 i += 1
if problem["collision_buffer_ik"] < 0.0: if problem["collision_buffer_ik"] < 0.0:
continue continue
plan_config = MotionGenPlanConfig( plan_config = MotionGenPlanConfig(
max_attempts=10, max_attempts=20,
enable_graph_attempt=1, enable_graph_attempt=1,
enable_finetune_trajopt=True, disable_graph_attempt=10,
partial_ik_opt=False, partial_ik_opt=False,
enable_graph=graph_mode,
timeout=60, timeout=60,
enable_opt=not graph_mode,
parallel_finetune=True,
) )
q_start = problem["start"] q_start = problem["start"]
@@ -593,6 +577,21 @@ def benchmark_mb(
) )
print(g_m.attempts) print(g_m.attempts)
g_m = CuroboGroupMetrics.from_list(all_groups) g_m = CuroboGroupMetrics.from_list(all_groups)
try:
from tabulate import tabulate
headers = ["Metric", "Value"]
table = [
["Success %", f"{g_m.success:2.2f}"],
["Plan Time (s)", g_m.time],
["Motion Time(s)", g_m.motion_time],
["Path Length (rad.)", g_m.cspace_path_length],
["Jerk", g_m.jerk],
["Position Error (mm)", g_m.position_error],
]
print(tabulate(table, headers, tablefmt="grid"))
except ImportError:
print( print(
"All: ", "All: ",
f"{g_m.success:2.2f}", f"{g_m.success:2.2f}",
@@ -601,9 +600,8 @@ def benchmark_mb(
g_m.time.percent_75, g_m.time.percent_75,
g_m.position_error.percent_75, g_m.position_error.percent_75,
g_m.orientation_error.percent_75, g_m.orientation_error.percent_75,
g_m.perception_success,
) )
print(g_m.attempts)
if write_benchmark: if write_benchmark:
if not mpinets_data: if not mpinets_data:
write_yaml(problems, "mb_curobo_solution_voxel.yaml") write_yaml(problems, "mb_curobo_solution_voxel.yaml")
@@ -612,6 +610,22 @@ def benchmark_mb(
all_files += all_groups all_files += all_groups
g_m = CuroboGroupMetrics.from_list(all_files) g_m = CuroboGroupMetrics.from_list(all_files)
print("######## FULL SET ############") print("######## FULL SET ############")
try:
from tabulate import tabulate
headers = ["Metric", "Value"]
table = [
["Success %", f"{g_m.success:2.2f}"],
["Plan Time (s)", g_m.time],
["Motion Time(s)", g_m.motion_time],
["Path Length (rad.)", g_m.cspace_path_length],
["Jerk", g_m.jerk],
["Position Error (mm)", g_m.position_error],
]
print(tabulate(table, headers, tablefmt="grid"))
except ImportError:
print("All: ", f"{g_m.success:2.2f}") print("All: ", f"{g_m.success:2.2f}")
print( print(
"Perception Success (coarse, interpolated):", "Perception Success (coarse, interpolated):",

View File

@@ -67,7 +67,7 @@ def run_full_config_collision_free_ik(
robot_cfg, robot_cfg,
world_cfg, world_cfg,
position_threshold=position_threshold, position_threshold=position_threshold,
num_seeds=30, num_seeds=16,
self_collision_check=collision_free, self_collision_check=collision_free,
self_collision_opt=collision_free, self_collision_opt=collision_free,
tensor_args=tensor_args, tensor_args=tensor_args,
@@ -123,7 +123,7 @@ if __name__ == "__main__":
args = parser.parse_args() args = parser.parse_args()
b_list = [1, 10, 100, 1000][-1:] b_list = [1, 10, 100, 2000][-1:]
robot_list = get_motion_gen_robot_list() + get_multi_arm_robot_list()[:2] robot_list = get_motion_gen_robot_list() + get_multi_arm_robot_list()[:2]
world_file = "collision_test.yml" world_file = "collision_test.yml"
@@ -141,7 +141,7 @@ if __name__ == "__main__":
"Position-Error-Collision-Free-IK(mm)": [], "Position-Error-Collision-Free-IK(mm)": [],
"Orientation-Error-Collision-Free-IK": [], "Orientation-Error-Collision-Free-IK": [],
} }
for robot_file in robot_list[:1]: for robot_file in robot_list[:-1]:
# create a sampler with dof: # create a sampler with dof:
for b_size in b_list: for b_size in b_list:
# sample test configs: # sample test configs:
@@ -176,13 +176,21 @@ if __name__ == "__main__":
data["Collision-Free-IK-time(ms)"].append(dt_cu_ik_cfree * 1000.0) data["Collision-Free-IK-time(ms)"].append(dt_cu_ik_cfree * 1000.0)
write_yaml(data, join_path(args.save_path, args.file_name + ".yml")) write_yaml(data, join_path(args.save_path, args.file_name + ".yml"))
try: try:
# Third Party # Third Party
import pandas as pd import pandas as pd
df = pd.DataFrame(data) df = pd.DataFrame(data)
print("Reported errors are 98th percentile") print("Reported errors are 98th percentile")
print(df)
df.to_csv(join_path(args.save_path, args.file_name + ".csv")) df.to_csv(join_path(args.save_path, args.file_name + ".csv"))
try:
from tabulate import tabulate
print(tabulate(df, headers="keys", tablefmt="grid"))
except ImportError:
print(df)
pass
except ImportError: except ImportError:
pass pass

View File

@@ -129,7 +129,7 @@ RUN pip3 install trimesh \
empy empy
# Add cache date to avoid using cached layers older than this # Add cache date to avoid using cached layers older than this
ARG CACHE_DATE=2024-02-20 ARG CACHE_DATE=2024-04-11
# install warp: # install warp:
# #

View File

@@ -161,7 +161,7 @@ RUN echo "alias omni_python='/isaac-sim/python.sh'" >> /.bashrc
# Add cache date to avoid using cached layers older than this # Add cache date to avoid using cached layers older than this
ARG CACHE_DATE=2023-12-15 ARG CACHE_DATE=2024-04-11
RUN $omni_python -m pip install "robometrics[evaluator] @ git+https://github.com/fishbotics/robometrics.git" RUN $omni_python -m pip install "robometrics[evaluator] @ git+https://github.com/fishbotics/robometrics.git"

View File

@@ -15,7 +15,7 @@ FROM curobo_docker:${IMAGE_TAG}
# Set variables # Set variables
ARG USERNAME ARG USERNAME
ARG USER_ID ARG USER_ID
ARG CACHE_DATE=2024-03-18 ARG CACHE_DATE=2023-04-11
# Set environment variables # Set environment variables

View File

@@ -78,7 +78,7 @@ ENV TORCH_CUDA_ARCH_LIST "7.0+PTX"
ENV LD_LIBRARY_PATH="/usr/local/lib:${LD_LIBRARY_PATH}" ENV LD_LIBRARY_PATH="/usr/local/lib:${LD_LIBRARY_PATH}"
# Add cache date to avoid using cached layers older than this # Add cache date to avoid using cached layers older than this
ARG CACHE_DATE=2024-02-20 ARG CACHE_DATE=2024-04-11
RUN pip install "robometrics[evaluator] @ git+https://github.com/fishbotics/robometrics.git" RUN pip install "robometrics[evaluator] @ git+https://github.com/fishbotics/robometrics.git"

View File

@@ -23,6 +23,7 @@ from pxr import UsdPhysics
# CuRobo # CuRobo
from curobo.util.logger import log_warn from curobo.util.logger import log_warn
from curobo.util.usd_helper import set_prim_transform
ISAAC_SIM_23 = False ISAAC_SIM_23 = False
try: try:
@@ -107,8 +108,13 @@ def add_robot_to_scene(
robot_p = Robot( robot_p = Robot(
prim_path=robot_path + "/" + base_link_name, prim_path=robot_path + "/" + base_link_name,
name=robot_name, name=robot_name,
position=position,
) )
robot_prim = robot_p.prim
stage = robot_prim.GetStage()
linkp = stage.GetPrimAtPath(robot_path)
set_prim_transform(linkp, [position[0], position[1], position[2], 1, 0, 0, 0])
if False and ISAAC_SIM_23: # this doesn't work in isaac sim 2023.1.1 if False and ISAAC_SIM_23: # this doesn't work in isaac sim 2023.1.1
robot_p.set_solver_velocity_iteration_count(0) robot_p.set_solver_velocity_iteration_count(0)
robot_p.set_solver_position_iteration_count(44) robot_p.set_solver_position_iteration_count(44)
@@ -116,8 +122,6 @@ def add_robot_to_scene(
my_world._physics_context.set_solver_type("PGS") my_world._physics_context.set_solver_type("PGS")
if ISAAC_SIM_23: # fix to load robot correctly in isaac sim 2023.1.1 if ISAAC_SIM_23: # fix to load robot correctly in isaac sim 2023.1.1
robot_prim = robot_p.prim
stage = robot_prim.GetStage()
linkp = stage.GetPrimAtPath(robot_path + "/" + base_link_name) linkp = stage.GetPrimAtPath(robot_path + "/" + base_link_name)
mass = UsdPhysics.MassAPI(linkp) mass = UsdPhysics.MassAPI(linkp)
mass.GetMassAttr().Set(0) mass.GetMassAttr().Set(0)

View File

@@ -205,7 +205,7 @@ def main():
interpolation_dt = 0.05 interpolation_dt = 0.05
if args.reactive: if args.reactive:
trajopt_tsteps = 40 trajopt_tsteps = 40
trajopt_dt = 0.05 trajopt_dt = 0.04
optimize_dt = False optimize_dt = False
max_attempts = 1 max_attempts = 1
trim_steps = [1, None] trim_steps = [1, None]
@@ -223,9 +223,6 @@ def main():
trajopt_dt=trajopt_dt, trajopt_dt=trajopt_dt,
trajopt_tsteps=trajopt_tsteps, trajopt_tsteps=trajopt_tsteps,
trim_steps=trim_steps, trim_steps=trim_steps,
# velocity_scale=0.1,
# self_collision_check=False,
# self_collision_opt=False,
) )
motion_gen = MotionGen(motion_gen_config) motion_gen = MotionGen(motion_gen_config)
print("warming up...") print("warming up...")

View File

@@ -136,7 +136,6 @@ def main():
robot_cfg, robot_cfg,
world_cfg, world_cfg,
tensor_args, tensor_args,
trajopt_tsteps=40,
collision_checker_type=CollisionCheckerType.MESH, collision_checker_type=CollisionCheckerType.MESH,
use_cuda_graph=True, use_cuda_graph=True,
num_trajopt_seeds=12, num_trajopt_seeds=12,
@@ -146,6 +145,7 @@ def main():
collision_activation_distance=0.025, collision_activation_distance=0.025,
acceleration_scale=1.0, acceleration_scale=1.0,
fixed_iters_trajopt=True, fixed_iters_trajopt=True,
trajopt_tsteps=40,
) )
motion_gen = MotionGen(motion_gen_config) motion_gen = MotionGen(motion_gen_config)
print("warming up...") print("warming up...")
@@ -154,7 +154,9 @@ def main():
print("Curobo is Ready") print("Curobo is Ready")
add_extensions(simulation_app, args.headless_mode) add_extensions(simulation_app, args.headless_mode)
plan_config = MotionGenPlanConfig( plan_config = MotionGenPlanConfig(
enable_graph=False, enable_graph_attempt=4, max_attempts=10, enable_finetune_trajopt=True enable_graph=False,
enable_graph_attempt=4,
max_attempts=10,
) )
usd_help.load_stage(my_world.stage) usd_help.load_stage(my_world.stage)

View File

@@ -162,7 +162,7 @@ class CuroboController(BaseController):
pose_metric = None pose_metric = None
if constrain_grasp_approach: if constrain_grasp_approach:
pose_metric = PoseCostMetric.create_grasp_approach_metric( pose_metric = PoseCostMetric.create_grasp_approach_metric(
offset_position=0.1, tstep_fraction=0.6 offset_position=0.1, tstep_fraction=0.8
) )
self.plan_config = MotionGenPlanConfig( self.plan_config = MotionGenPlanConfig(

View File

@@ -107,7 +107,7 @@ def demo_motion_gen_mesh():
motion_gen = MotionGen(motion_gen_config) motion_gen = MotionGen(motion_gen_config)
robot_cfg = load_yaml(join_path(get_robot_configs_path(), robot_file))["robot_cfg"] robot_cfg = load_yaml(join_path(get_robot_configs_path(), robot_file))["robot_cfg"]
robot_cfg = RobotConfig.from_dict(robot_cfg, tensor_args) robot_cfg = RobotConfig.from_dict(robot_cfg, tensor_args)
retract_cfg = robot_cfg.retract_config retract_cfg = robot_cfg.cpsace.retract_config
state = motion_gen.rollout_fn.compute_kinematics( state = motion_gen.rollout_fn.compute_kinematics(
JointState.from_position(retract_cfg.view(1, -1)) JointState.from_position(retract_cfg.view(1, -1))
) )

View File

@@ -55,7 +55,7 @@ def demo_motion_gen():
motion_gen = MotionGen(motion_gen_config) motion_gen = MotionGen(motion_gen_config)
robot_cfg = load_yaml(join_path(get_robot_configs_path(), robot_file))["robot_cfg"] robot_cfg = load_yaml(join_path(get_robot_configs_path(), robot_file))["robot_cfg"]
robot_cfg = RobotConfig.from_dict(robot_cfg, tensor_args) robot_cfg = RobotConfig.from_dict(robot_cfg, tensor_args)
retract_cfg = robot_cfg.retract_config retract_cfg = robot_cfg.cspace.retract_config
state = motion_gen.rollout_fn.compute_kinematics( state = motion_gen.rollout_fn.compute_kinematics(
JointState.from_position(retract_cfg.view(1, -1)) JointState.from_position(retract_cfg.view(1, -1))
) )

View File

@@ -51,14 +51,13 @@ def get_trajectory(robot_file="franka.yml", dt=1.0 / 60.0):
robot_file, robot_file,
world_file, world_file,
tensor_args, tensor_args,
trajopt_tsteps=24, trajopt_tsteps=32,
collision_checker_type=CollisionCheckerType.PRIMITIVE, collision_checker_type=CollisionCheckerType.PRIMITIVE,
use_cuda_graph=True, use_cuda_graph=True,
num_trajopt_seeds=2, num_trajopt_seeds=2,
num_graph_seeds=2, num_graph_seeds=2,
evaluate_interpolated_trajectory=True, evaluate_interpolated_trajectory=True,
interpolation_dt=dt, interpolation_dt=dt,
self_collision_check=True,
) )
motion_gen = MotionGen(motion_gen_config) motion_gen = MotionGen(motion_gen_config)
motion_gen.warmup() motion_gen.warmup()
@@ -70,13 +69,15 @@ def get_trajectory(robot_file="franka.yml", dt=1.0 / 60.0):
) )
retract_pose = Pose(state.ee_pos_seq.squeeze(), quaternion=state.ee_quat_seq.squeeze()) retract_pose = Pose(state.ee_pos_seq.squeeze(), quaternion=state.ee_quat_seq.squeeze())
start_state = JointState.from_position(retract_cfg.view(1, -1).clone() + 0.5) start_state = JointState.from_position(retract_cfg.view(1, -1).clone())
start_state.position[..., :-2] += 0.5
result = motion_gen.plan_single(start_state, retract_pose) result = motion_gen.plan_single(start_state, retract_pose)
traj = result.get_interpolated_plan() # optimized plan traj = result.get_interpolated_plan() # optimized plan
return traj return traj
def save_curobo_robot_world_to_usd(robot_file="franka.yml"): def save_curobo_robot_world_to_usd(robot_file="franka.yml"):
print(robot_file)
tensor_args = TensorDeviceType() tensor_args = TensorDeviceType()
world_file = "collision_test.yml" world_file = "collision_test.yml"
world_model = WorldConfig.from_dict( world_model = WorldConfig.from_dict(
@@ -87,16 +88,18 @@ def save_curobo_robot_world_to_usd(robot_file="franka.yml"):
q_traj = get_trajectory(robot_file, dt) q_traj = get_trajectory(robot_file, dt)
if q_traj is not None: if q_traj is not None:
q_start = q_traj[0] q_start = q_traj[0]
UsdHelper.write_trajectory_animation_with_robot_usd( UsdHelper.write_trajectory_animation_with_robot_usd(
robot_file, robot_file,
world_model, world_model,
q_start, q_start,
q_traj, q_traj,
save_path="test.usda", save_path="test.usd",
robot_color=[0.5, 0.5, 0.2, 1.0], robot_color=[0.5, 0.5, 0.2, 1.0],
base_frame="/grid_world_1", base_frame="/grid_world_1",
flatten_usd=True,
) )
else:
print("failed")
def save_curobo_robot_to_usd(robot_file="franka.yml"): def save_curobo_robot_to_usd(robot_file="franka.yml"):
@@ -241,5 +244,5 @@ def save_log_motion_gen(robot_file: str = "franka.yml"):
if __name__ == "__main__": if __name__ == "__main__":
# save_curobo_world_to_usd() # save_curobo_world_to_usd()
setup_curobo_logger("error") setup_curobo_logger("error")
save_log_motion_gen("franka.yml") # save_log_motion_gen("franka.yml")
# save_curobo_robot_world_to_usd("franka.yml") save_curobo_robot_world_to_usd("ur5e_robotiq_2f_140.yml")

View File

@@ -80,6 +80,9 @@ ci =
sphinx sphinx
sphinx_rtd_theme sphinx_rtd_theme
graphviz>=0.20.1 graphviz>=0.20.1
furo
sphinx-copybutton
# this is only available in 3.8+ # this is only available in 3.8+
smooth = smooth =
@@ -107,6 +110,8 @@ doc =
sphinx sphinx
sphinx_rtd_theme sphinx_rtd_theme
graphviz>=0.20.1 graphviz>=0.20.1
furo
sphinx-copybutton
[options.entry_points] [options.entry_points]
# Add here console scripts like: # Add here console scripts like:

View File

@@ -10,6 +10,19 @@
# #
""" """
cuRobo provides accelerated modules for robotics which can be used to build high-performance
robotics applications. The library has several modules for numerical optimization, robot kinematics,
geometry processing, collision checking, graph search planning. cuRobo provides high-level APIs for
performing tasks like collision-free inverse kinematics, model predictive control, and motion
planning.
High-level APIs:
- Inverse Kinematics: :mod:`curobo.wrap.reacher.ik_solver`.
- Model Predictive Control: :mod:`curobo.wrap.reacher.mpc`.
- Motion Generation / Planning: :mod:`curobo.wrap.reacher.motion_gen`.
cuRobo package is split into several modules: cuRobo package is split into several modules:
- :mod:`curobo.opt` contains optimization solvers. - :mod:`curobo.opt` contains optimization solvers.
@@ -18,7 +31,7 @@ cuRobo package is split into several modules:
- :mod:`curobo.geom` contains geometry processing, collision checking and frame transforms. - :mod:`curobo.geom` contains geometry processing, collision checking and frame transforms.
- :mod:`curobo.graph` contains geometric planning with graph search methods. - :mod:`curobo.graph` contains geometric planning with graph search methods.
- :mod:`curobo.rollout` contains methods that map actions to costs. This class wraps instances of - :mod:`curobo.rollout` contains methods that map actions to costs. This class wraps instances of
:mod:`curobo.cuda_robot_model` and :mod:`geom` to compute costs given trajectory of actions. :mod:`curobo.cuda_robot_model` and :mod:`curobo.geom` to compute costs given trajectory of actions.
- :mod:`curobo.util` contains utility methods. - :mod:`curobo.util` contains utility methods.
- :mod:`curobo.wrap` adds the user-level api for task programming. Includes implementation of - :mod:`curobo.wrap` adds the user-level api for task programming. Includes implementation of
collision-free reacher and batched robot world collision checking. collision-free reacher and batched robot world collision checking.

View File

@@ -29,7 +29,7 @@ robot_cfg:
collision_link_names: null # List[str] collision_link_names: null # List[str]
collision_spheres: null # collision_spheres: null #
collision_sphere_buffer: 0.005 collision_sphere_buffer: 0.005 # float or Dict[str, float]
extra_collision_spheres: {} extra_collision_spheres: {}
self_collision_ignore: {} # Dict[str, List[str]] self_collision_ignore: {} # Dict[str, List[str]]
self_collision_buffer: {} # Dict[str, float] self_collision_buffer: {} # Dict[str, float]

View File

@@ -11,7 +11,7 @@
robot_cfg: robot_cfg:
kinematics: kinematics:
usd_path: "robot/ur_description/ur5e.usd" usd_path: null
usd_robot_root: "/robot" usd_robot_root: "/robot"
isaac_usd_path: "" isaac_usd_path: ""
usd_flip_joints: {} usd_flip_joints: {}
@@ -95,10 +95,11 @@ robot_cfg:
"radius": -0.01 #0.05 "radius": -0.01 #0.05
collision_sphere_buffer: 0.005 collision_sphere_buffer: 0.0
extra_collision_spheres: {} extra_collision_spheres: {}
self_collision_ignore: { self_collision_ignore: {
"upper_arm_link": ["forearm_link", "shoulder_link"], "shoulder_link": ["tool0"],
"upper_arm_link": ["forearm_link", "shoulder_link", "tool0"],
"forearm_link": ["wrist_1_link", "wrist_2_link", "wrist_3_link"], "forearm_link": ["wrist_1_link", "wrist_2_link", "wrist_3_link"],
"wrist_1_link": ["wrist_2_link","wrist_3_link","tool0"], "wrist_1_link": ["wrist_2_link","wrist_3_link","tool0"],
"wrist_2_link": ["wrist_3_link", "tool0"], "wrist_2_link": ["wrist_3_link", "tool0"],

View File

@@ -43,6 +43,7 @@ constraint:
classify: True classify: True
bound_cfg: bound_cfg:
weight: [5000.0, 5000.0, 5000.0,5000.0] weight: [5000.0, 5000.0, 5000.0,5000.0]
activation_distance: [0.0,0.0,0.0,0.0] # for position, velocity, acceleration and jerk activation_distance: [0.0,0.0,0.0,0.0] # for position, velocity, acceleration and jerk
@@ -54,14 +55,14 @@ convergence:
terminal: False terminal: False
link_pose_cfg: link_pose_cfg:
vec_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] vec_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
weight: [1.0, 1.0] weight: [0.1, 10.0]
vec_convergence: [0.0, 0.0] # orientation, position vec_convergence: [0.0, 0.0] # orientation, position
terminal: False terminal: False
cspace_cfg: cspace_cfg:
weight: 1.0 weight: 1.0
terminal: True terminal: True
run_weight: 1.0 run_weight: 0.0
null_space_cfg: null_space_cfg:
weight: 1.0 weight: 1.0
terminal: True terminal: True

View File

@@ -32,7 +32,7 @@ cost:
pose_cfg: pose_cfg:
vec_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] # orientation, position for all timesteps vec_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] # orientation, position for all timesteps
run_vec_weight: [0.00,0.00,0.00,0.0,0.0,0.0] # running weight orientation, position run_vec_weight: [0.00,0.00,0.00,0.0,0.0,0.0] # running weight orientation, position
weight: [2000,500000.0,30,50] weight: [2000,500000.0,30,60]
vec_convergence: [0.0,0.0] # orientation, position, orientation metric activation, position metric activation vec_convergence: [0.0,0.0] # orientation, position, orientation metric activation, position metric activation
terminal: True terminal: True
run_weight: 1.0 run_weight: 1.0
@@ -41,10 +41,10 @@ cost:
link_pose_cfg: link_pose_cfg:
vec_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] # orientation, position for all timesteps vec_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] # orientation, position for all timesteps
run_vec_weight: [0.00,0.00,0.00,0.0,0.0,0.0] # running weight orientation, position run_vec_weight: [0.00,0.00,0.00,0.0,0.0,0.0] # running weight orientation, position
weight: [2000,500000.0,30,50] #[150.0, 2000.0, 30, 40] weight: [2000,500000.0,30,60] #[150.0, 2000.0, 30, 40]
vec_convergence: [0.0,0.0] # orientation, position, orientation metric activation, position metric activation vec_convergence: [0.0,0.0] # orientation, position, orientation metric activation, position metric activation
terminal: True terminal: True
run_weight: 0.001 run_weight: 1.0
use_metric: True use_metric: True
@@ -54,8 +54,8 @@ cost:
run_weight: 0.00 #1 run_weight: 0.00 #1
bound_cfg: bound_cfg:
weight: [5000.0, 50000.0, 50000.0, 50000.0] # needs to be 3 values weight: [10000.0, 500000.0, 500.0, 500.0]
smooth_weight: [0.0,5000.0, 50.0, 0.0] #[0.0,5000.0, 50.0, 0.0] # [vel, acc, jerk,] smooth_weight: [0.0,10000.0, 5.0, 0.0]
run_weight_velocity: 0.0 run_weight_velocity: 0.0
run_weight_acceleration: 1.0 run_weight_acceleration: 1.0
run_weight_jerk: 1.0 run_weight_jerk: 1.0
@@ -63,7 +63,7 @@ cost:
null_space_weight: [0.0] null_space_weight: [0.0]
primitive_collision_cfg: primitive_collision_cfg:
weight: 1000000.0 #1000000.0 1000000 weight: 1000000
use_sweep: True use_sweep: True
sweep_steps: 4 sweep_steps: 4
classify: False classify: False
@@ -79,11 +79,11 @@ cost:
lbfgs: lbfgs:
n_iters: 300 # 400 n_iters: 300
inner_iters: 25 inner_iters: 25
cold_start_n_iters: null cold_start_n_iters: null
min_iters: 25 min_iters: 25
line_search_scale: [0.01, 0.3, 0.7,1.0] # # line_search_scale: [0.1, 0.3, 0.7,1.0] # #
fixed_iters: True fixed_iters: True
cost_convergence: 0.01 cost_convergence: 0.01
cost_delta_threshold: 1.0 cost_delta_threshold: 1.0

View File

@@ -54,10 +54,8 @@ cost:
run_weight: 0.00 #1 run_weight: 0.00 #1
bound_cfg: bound_cfg:
weight: [5000.0, 50000.0, 50000.0, 50000.0] # needs to be 3 values weight: [10000.0, 100000.0, 500.0, 500.0]
smooth_weight: [0.0,10000.0, 50.0, 0.0]
smooth_weight: [0.0,50000.0, 500.0, 0.0] # [vel, acc, jerk,]
run_weight_velocity: 0.0 run_weight_velocity: 0.0
run_weight_acceleration: 1.0 run_weight_acceleration: 1.0
run_weight_jerk: 1.0 run_weight_jerk: 1.0

View File

@@ -28,24 +28,24 @@ model:
cost: cost:
pose_cfg: pose_cfg:
vec_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] vec_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
weight: [200,4000,10,20] weight: [2000,10000,20,40]
#weight: [5000,500000,5,20]
vec_convergence: [0.0, 0.00] vec_convergence: [0.0, 0.00]
terminal: False terminal: False
use_metric: True use_metric: True
run_weight: 1.0 run_weight: 1.0
link_pose_cfg: link_pose_cfg:
vec_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] vec_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
weight: [200,4000,10,20] weight: [2000,10000,20,40]
vec_convergence: [0.00, 0.000] vec_convergence: [0.00, 0.000]
terminal: False terminal: False
use_metric: True use_metric: True
run_weight: 1.0
cspace_cfg: cspace_cfg:
weight: 0.000 weight: 0.000
bound_cfg: bound_cfg:
weight: 100.0 weight: 500.0
activation_distance: [0.1] activation_distance: [0.1]
null_space_weight: [1.0] null_space_weight: [1.0]
primitive_collision_cfg: primitive_collision_cfg:
@@ -62,7 +62,7 @@ lbfgs:
n_iters: 100 #60 n_iters: 100 #60
inner_iters: 25 inner_iters: 25
cold_start_n_iters: null cold_start_n_iters: null
min_iters: 25 min_iters: null
line_search_scale: [0.1, 0.3, 0.7, 1.0] line_search_scale: [0.1, 0.3, 0.7, 1.0]
fixed_iters: True fixed_iters: True
cost_convergence: 0.001 cost_convergence: 0.001

View File

@@ -33,7 +33,6 @@ cost:
vec_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] # orientation, position for all timesteps vec_weight: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] # orientation, position for all timesteps
run_vec_weight: [0.00,0.00,0.00,0.0,0.0,0.0] # running weight orientation, position run_vec_weight: [0.00,0.00,0.00,0.0,0.0,0.0] # running weight orientation, position
weight: [2000,50000.0,30,50] #[150.0, 2000.0, 30, 40] weight: [2000,50000.0,30,50] #[150.0, 2000.0, 30, 40]
vec_convergence: [0.0,0.0] # orientation, position, orientation metric activation, position metric activation vec_convergence: [0.0,0.0] # orientation, position, orientation metric activation, position metric activation
terminal: True terminal: True
run_weight: 1.0 run_weight: 1.0
@@ -45,17 +44,17 @@ cost:
weight: [2000,50000.0,30,50] #[150.0, 2000.0, 30, 40] weight: [2000,50000.0,30,50] #[150.0, 2000.0, 30, 40]
vec_convergence: [0.0,0.0] # orientation, position, orientation metric activation, position metric activation vec_convergence: [0.0,0.0] # orientation, position, orientation metric activation, position metric activation
terminal: True terminal: True
run_weight: 0.001 run_weight: 1.0
use_metric: True use_metric: True
cspace_cfg: cspace_cfg:
weight: 10000.0 weight: 1000.0
terminal: True terminal: True
run_weight: 0.00 #1 run_weight: 0.0
bound_cfg: bound_cfg:
weight: [5000.0, 50000.0, 50000.0,50000.0] # needs to be 3 values weight: [50000.0, 50000.0, 50000.0,50000.0]
smooth_weight: [0.0,10000.0,10.0, 0.0] # [vel, acc, jerk, alpha_v-not-used] smooth_weight: [0.0,10000.0,5.0, 0.0]
run_weight_velocity: 0.00 run_weight_velocity: 0.00
run_weight_acceleration: 1.0 run_weight_acceleration: 1.0
run_weight_jerk: 1.0 run_weight_jerk: 1.0
@@ -84,7 +83,7 @@ lbfgs:
inner_iters: 25 inner_iters: 25
cold_start_n_iters: null cold_start_n_iters: null
min_iters: 25 min_iters: 25
line_search_scale: [0.01,0.3,0.7,1.0] line_search_scale: [0.1,0.3,0.7,1.0]
fixed_iters: True fixed_iters: True
cost_convergence: 0.01 cost_convergence: 0.01
cost_delta_threshold: 2000.0 cost_delta_threshold: 2000.0

View File

@@ -38,7 +38,7 @@ cost:
use_metric: True use_metric: True
cspace_cfg: cspace_cfg:
weight: 500.0 weight: 1000.0
terminal: True terminal: True
run_weight: 1.0 run_weight: 1.0

View File

@@ -34,7 +34,7 @@ cost:
weight: [250.0, 5000.0, 20, 20] weight: [250.0, 5000.0, 20, 20]
vec_convergence: [0.0,0.0,1000.0,1000.0] vec_convergence: [0.0,0.0,1000.0,1000.0]
terminal: True terminal: True
run_weight: 1.0 run_weight: 0.0
use_metric: True use_metric: True
link_pose_cfg: link_pose_cfg:

View File

@@ -73,7 +73,7 @@ class CudaRobotGeneratorConfig:
collision_spheres: Union[None, str, Dict[str, Any]] = None collision_spheres: Union[None, str, Dict[str, Any]] = None
#: Radius buffer to add to collision spheres as padding. #: Radius buffer to add to collision spheres as padding.
collision_sphere_buffer: float = 0.0 collision_sphere_buffer: Union[float, Dict[str, float]] = 0.0
#: Compute jacobian of link poses. Currently not supported. #: Compute jacobian of link poses. Currently not supported.
compute_jacobian: bool = False compute_jacobian: bool = False
@@ -436,7 +436,6 @@ class CudaRobotGenerator(CudaRobotGeneratorConfig):
if self._bodies[0].link_name in link_names: if self._bodies[0].link_name in link_names:
store_link_map.append(chain_link_names.index(self._bodies[0].link_name)) store_link_map.append(chain_link_names.index(self._bodies[0].link_name))
ordered_link_names.append(self._bodies[0].link_name) ordered_link_names.append(self._bodies[0].link_name)
joint_offset_map.append(self._bodies[0].joint_offset)
# get joint types: # get joint types:
for i in range(1, len(self._bodies)): for i in range(1, len(self._bodies)):
body = self._bodies[i] body = self._bodies[i]
@@ -614,7 +613,6 @@ class CudaRobotGenerator(CudaRobotGeneratorConfig):
self.joint_names.remove(j) self.joint_names.remove(j)
self._n_dofs -= 1 self._n_dofs -= 1
self._active_joints.remove(i) self._active_joints.remove(i)
self._joint_map[self._joint_map < -1] = -1 self._joint_map[self._joint_map < -1] = -1
self._joint_map = self._joint_map.to(device=self.tensor_args.device) self._joint_map = self._joint_map.to(device=self.tensor_args.device)
self._joint_map_type = self._joint_map_type.to(device=self.tensor_args.device) self._joint_map_type = self._joint_map_type.to(device=self.tensor_args.device)
@@ -628,7 +626,7 @@ class CudaRobotGenerator(CudaRobotGeneratorConfig):
self, self,
collision_spheres: Dict, collision_spheres: Dict,
collision_link_names: List[str], collision_link_names: List[str],
collision_sphere_buffer: float = 0.0, collision_sphere_buffer: Union[float, Dict[str, float]] = 0.0,
): ):
""" """
@@ -643,6 +641,7 @@ class CudaRobotGenerator(CudaRobotGeneratorConfig):
# we store as [n_link, 7] # we store as [n_link, 7]
link_sphere_idx_map = [] link_sphere_idx_map = []
cpu_tensor_args = self.tensor_args.cpu() cpu_tensor_args = self.tensor_args.cpu()
self_collision_buffer = self.self_collision_buffer.copy()
with profiler.record_function("robot_generator/build_collision_spheres"): with profiler.record_function("robot_generator/build_collision_spheres"):
for j_idx, j in enumerate(collision_link_names): for j_idx, j in enumerate(collision_link_names):
# print(j_idx) # print(j_idx)
@@ -652,11 +651,22 @@ class CudaRobotGenerator(CudaRobotGeneratorConfig):
) )
# find link index in global map: # find link index in global map:
l_idx = self._name_to_idx_map[j] l_idx = self._name_to_idx_map[j]
offset_radius = 0.0
if isinstance(collision_sphere_buffer, float):
offset_radius = collision_sphere_buffer
elif j in collision_sphere_buffer:
offset_radius = collision_sphere_buffer[j]
if j in self_collision_buffer:
self_collision_buffer[j] -= offset_radius
else:
self_collision_buffer[j] = -offset_radius
for i in range(n_spheres): for i in range(n_spheres):
padded_radius = collision_spheres[j][i]["radius"] + offset_radius
if padded_radius <= 0.0 and padded_radius > -1.0:
padded_radius = 0.001
link_spheres[i, :] = tensor_sphere( link_spheres[i, :] = tensor_sphere(
collision_spheres[j][i]["center"], collision_spheres[j][i]["center"],
collision_spheres[j][i]["radius"], padded_radius,
tensor_args=cpu_tensor_args, tensor_args=cpu_tensor_args,
tensor=link_spheres[i, :], tensor=link_spheres[i, :],
) )
@@ -665,10 +675,6 @@ class CudaRobotGenerator(CudaRobotGeneratorConfig):
self.total_spheres += n_spheres self.total_spheres += n_spheres
self._link_spheres_tensor = torch.cat(coll_link_spheres, dim=0) self._link_spheres_tensor = torch.cat(coll_link_spheres, dim=0)
new_radius = self._link_spheres_tensor[..., 3] + collision_sphere_buffer
flag = torch.logical_and(new_radius > -1.0, new_radius <= 0.0)
new_radius[flag] = 0.001
self._link_spheres_tensor[:, 3] = new_radius
self._link_sphere_idx_map = torch.as_tensor( self._link_sphere_idx_map = torch.as_tensor(
link_sphere_idx_map, dtype=torch.int16, device=cpu_tensor_args.device link_sphere_idx_map, dtype=torch.int16, device=cpu_tensor_args.device
) )
@@ -696,9 +702,9 @@ class CudaRobotGenerator(CudaRobotGeneratorConfig):
link1_spheres_idx = torch.nonzero(self._link_sphere_idx_map == link1_idx) link1_spheres_idx = torch.nonzero(self._link_sphere_idx_map == link1_idx)
rad1 = self._link_spheres_tensor[link1_spheres_idx, 3] rad1 = self._link_spheres_tensor[link1_spheres_idx, 3]
if j not in self.self_collision_buffer.keys(): if j not in self_collision_buffer.keys():
self.self_collision_buffer[j] = 0.0 self_collision_buffer[j] = 0.0
c1 = self.self_collision_buffer[j] c1 = self_collision_buffer[j]
self.self_collision_offset[link1_spheres_idx] = c1 self.self_collision_offset[link1_spheres_idx] = c1
for _, i_name in enumerate(collision_link_names): for _, i_name in enumerate(collision_link_names):
if i_name == j or i_name in ignore_links: if i_name == j or i_name in ignore_links:
@@ -706,9 +712,9 @@ class CudaRobotGenerator(CudaRobotGeneratorConfig):
if i_name not in collision_link_names: if i_name not in collision_link_names:
log_error("Self Collision Link name not found in collision_link_names") log_error("Self Collision Link name not found in collision_link_names")
# find index of this link name: # find index of this link name:
if i_name not in self.self_collision_buffer.keys(): if i_name not in self_collision_buffer.keys():
self.self_collision_buffer[i_name] = 0.0 self_collision_buffer[i_name] = 0.0
c2 = self.self_collision_buffer[i_name] c2 = self_collision_buffer[i_name]
link2_idx = self._name_to_idx_map[i_name] link2_idx = self._name_to_idx_map[i_name]
# update collision distance between spheres from these two links: # update collision distance between spheres from these two links:
link2_spheres_idx = torch.nonzero(self._link_sphere_idx_map == link2_idx) link2_spheres_idx = torch.nonzero(self._link_sphere_idx_map == link2_idx)

View File

@@ -143,6 +143,10 @@ class CudaRobotModelConfig:
def cspace(self): def cspace(self):
return self.kinematics_config.cspace return self.kinematics_config.cspace
@property
def dof(self) -> int:
return self.kinematics_config.n_dof
class CudaRobotModel(CudaRobotModelConfig): class CudaRobotModel(CudaRobotModelConfig):
""" """
@@ -368,6 +372,10 @@ class CudaRobotModel(CudaRobotModelConfig):
def get_dof(self) -> int: def get_dof(self) -> int:
return self.kinematics_config.n_dof return self.kinematics_config.n_dof
@property
def dof(self) -> int:
return self.kinematics_config.n_dof
@property @property
def joint_names(self) -> List[str]: def joint_names(self) -> List[str]:
return self.kinematics_config.joint_names return self.kinematics_config.joint_names

View File

@@ -68,12 +68,12 @@ class UrdfKinematicsParser(KinematicsParser):
"velocity": joint.limit.velocity, "velocity": joint.limit.velocity,
} }
else: else:
log_warn("Converting continuous joint to revolute with limits[-10,10]") log_warn("Converting continuous joint to revolute with limits[-6.28,6.28]")
joint_type = "revolute" joint_type = "revolute"
joint_limits = { joint_limits = {
"effort": joint.limit.effort, "effort": joint.limit.effort,
"lower": -10, "lower": -3.14 * 2,
"upper": 10, "upper": 3.14 * 2,
"velocity": joint.limit.velocity, "velocity": joint.limit.velocity,
} }
return joint_limits, joint_type return joint_limits, joint_type
@@ -181,6 +181,7 @@ class UrdfKinematicsParser(KinematicsParser):
log_error("Joint type not supported") log_error("Joint type not supported")
if joint_axis[0] == -1 or joint_axis[1] == -1 or joint_axis[2] == -1: if joint_axis[0] == -1 or joint_axis[1] == -1 or joint_axis[2] == -1:
joint_offset[0] = -1.0 * joint_offset[0] joint_offset[0] = -1.0 * joint_offset[0]
joint_axis = [abs(x) for x in joint_axis]
body_params["joint_type"] = joint_type body_params["joint_type"] = joint_type
body_params["joint_offset"] = joint_offset body_params["joint_offset"] = joint_offset

View File

@@ -19,3 +19,11 @@
#define CHECK_FP8 defined(CUDA_VERSION) && CUDA_VERSION >= 11080 && TORCH_VERSION_MAJOR >= 2 && TORCH_VERSION_MINOR >= 2 #define CHECK_FP8 defined(CUDA_VERSION) && CUDA_VERSION >= 11080 && TORCH_VERSION_MAJOR >= 2 && TORCH_VERSION_MINOR >= 2
#define CHECK_INPUT_GUARD(x) CHECK_INPUT(x); const at::cuda::OptionalCUDAGuard guard(x.device()) #define CHECK_INPUT_GUARD(x) CHECK_INPUT(x); const at::cuda::OptionalCUDAGuard guard(x.device())
#if CHECK_FP8
#define FP8_TYPE_MACRO torch::kFloat8_e4m3fn
//constexpr const auto fp8_type = torch::kFloat8_e4m3fn;
#else
#define FP8_TYPE_MACRO torch::kHalf
//const constexpr auto fp8_type = torch::kHalf;
#endif

View File

@@ -1341,13 +1341,14 @@ std::vector<torch::Tensor>kin_fused_backward_16t(
cudaStream_t stream = at::cuda::getCurrentCUDAStream(); cudaStream_t stream = at::cuda::getCurrentCUDAStream();
assert(sparsity_opt); assert(sparsity_opt);
const bool parallel_write = true;
if (use_global_cumul) if (use_global_cumul)
{ {
if (n_joints < 16) if (n_joints < 16)
{ {
AT_DISPATCH_FLOATING_TYPES( AT_DISPATCH_FLOATING_TYPES(
grad_spheres.scalar_type(), "kin_fused_backward_16t", ([&] { grad_spheres.scalar_type(), "kin_fused_backward_16t", ([&] {
kin_fused_backward_kernel3<scalar_t, double, true, true, 16, true> kin_fused_backward_kernel3<scalar_t, double, true, true, 16, parallel_write>
<< < blocksPerGrid, threadsPerBlock, sharedMemSize, stream >> > ( << < blocksPerGrid, threadsPerBlock, sharedMemSize, stream >> > (
grad_out.data_ptr<float>(), grad_out.data_ptr<float>(),
grad_nlinks_pos.data_ptr<float>(), grad_nlinks_pos.data_ptr<float>(),
@@ -1371,7 +1372,7 @@ std::vector<torch::Tensor>kin_fused_backward_16t(
{ {
AT_DISPATCH_FLOATING_TYPES( AT_DISPATCH_FLOATING_TYPES(
grad_spheres.scalar_type(), "kin_fused_backward_16t", ([&] { grad_spheres.scalar_type(), "kin_fused_backward_16t", ([&] {
kin_fused_backward_kernel3<scalar_t, double, true, true, 64, true> kin_fused_backward_kernel3<scalar_t, double, true, true, 64, parallel_write>
<< < blocksPerGrid, threadsPerBlock, sharedMemSize, stream >> > ( << < blocksPerGrid, threadsPerBlock, sharedMemSize, stream >> > (
grad_out.data_ptr<float>(), grad_out.data_ptr<float>(),
grad_nlinks_pos.data_ptr<float>(), grad_nlinks_pos.data_ptr<float>(),
@@ -1395,7 +1396,7 @@ std::vector<torch::Tensor>kin_fused_backward_16t(
{ {
AT_DISPATCH_FLOATING_TYPES( AT_DISPATCH_FLOATING_TYPES(
grad_spheres.scalar_type(), "kin_fused_backward_16t", ([&] { grad_spheres.scalar_type(), "kin_fused_backward_16t", ([&] {
kin_fused_backward_kernel3<scalar_t, double, true, true, 128, true> kin_fused_backward_kernel3<scalar_t, double, true, true, 128, parallel_write>
<< < blocksPerGrid, threadsPerBlock, sharedMemSize, stream >> > ( << < blocksPerGrid, threadsPerBlock, sharedMemSize, stream >> > (
grad_out.data_ptr<float>(), grad_out.data_ptr<float>(),
grad_nlinks_pos.data_ptr<float>(), grad_nlinks_pos.data_ptr<float>(),
@@ -1423,7 +1424,7 @@ std::vector<torch::Tensor>kin_fused_backward_16t(
// //
AT_DISPATCH_FLOATING_TYPES( AT_DISPATCH_FLOATING_TYPES(
grad_spheres.scalar_type(), "kin_fused_backward_16t", ([&] { grad_spheres.scalar_type(), "kin_fused_backward_16t", ([&] {
kin_fused_backward_kernel3<scalar_t, double, false, true, 128, true> kin_fused_backward_kernel3<scalar_t, double, false, true, 128, parallel_write>
<< < blocksPerGrid, threadsPerBlock, sharedMemSize, stream >> > ( << < blocksPerGrid, threadsPerBlock, sharedMemSize, stream >> > (
grad_out.data_ptr<float>(), grad_out.data_ptr<float>(),
grad_nlinks_pos.data_ptr<float>(), grad_nlinks_pos.data_ptr<float>(),

View File

@@ -2746,13 +2746,8 @@ sphere_obb_clpt(const torch::Tensor sphere_position, // batch_size, 3
else else
{ {
#if CHECK_FP8
const auto fp8_type = torch::kFloat8_e4m3fn;
#else
const auto fp8_type = torch::kHalf;
#endif
// typename scalar_t, typename dist_scalar_t=float, bool BATCH_ENV_T=true, bool SCALE_METRIC=true, bool SUM_COLLISIONS=true, bool COMPUTE_ESDF=false // typename scalar_t, typename dist_scalar_t=float, bool BATCH_ENV_T=true, bool SCALE_METRIC=true, bool SUM_COLLISIONS=true, bool COMPUTE_ESDF=false
AT_DISPATCH_FLOATING_TYPES_AND2(torch::kBFloat16, fp8_type, AT_DISPATCH_FLOATING_TYPES_AND2(torch::kBFloat16, FP8_TYPE_MACRO,
distance.scalar_type(), "SphereObb_clpt", ([&]{ distance.scalar_type(), "SphereObb_clpt", ([&]{
auto distance_kernel = sphere_obb_distance_kernel<scalar_t, scalar_t, false, scale_metric, sum_collisions_,false>; auto distance_kernel = sphere_obb_distance_kernel<scalar_t, scalar_t, false, scale_metric, sum_collisions_,false>;
if (use_batch_env) if (use_batch_env)
@@ -3211,13 +3206,8 @@ sphere_voxel_clpt(const torch::Tensor sphere_position, // batch_size, 3
int blocksPerGrid = (bnh_spheres + threadsPerBlock - 1) / threadsPerBlock; int blocksPerGrid = (bnh_spheres + threadsPerBlock - 1) / threadsPerBlock;
#if CHECK_FP8
const auto fp8_type = torch::kFloat8_e4m3fn;
#else
const auto fp8_type = torch::kHalf;
#endif
AT_DISPATCH_FLOATING_TYPES_AND2(torch::kBFloat16, fp8_type, AT_DISPATCH_FLOATING_TYPES_AND2(torch::kBFloat16, FP8_TYPE_MACRO,
grid_features.scalar_type(), "SphereVoxel_clpt", ([&] grid_features.scalar_type(), "SphereVoxel_clpt", ([&]
{ {
@@ -3255,7 +3245,6 @@ sphere_voxel_clpt(const torch::Tensor sphere_position, // batch_size, 3
} }
} }
selected_kernel selected_kernel
<< < blocksPerGrid, threadsPerBlock, 0, stream >> > ( << < blocksPerGrid, threadsPerBlock, 0, stream >> > (
sphere_position.data_ptr<float>(), sphere_position.data_ptr<float>(),
@@ -3275,7 +3264,6 @@ sphere_voxel_clpt(const torch::Tensor sphere_position, // batch_size, 3
num_voxels, num_voxels,
batch_size, batch_size,
horizon, n_spheres, transform_back); horizon, n_spheres, transform_back);
})); }));
@@ -3346,13 +3334,7 @@ swept_sphere_voxel_clpt(const torch::Tensor sphere_position, // batch_size, 3
int blocksPerGrid = (bnh_spheres + threadsPerBlock - 1) / threadsPerBlock; int blocksPerGrid = (bnh_spheres + threadsPerBlock - 1) / threadsPerBlock;
#if CHECK_FP8 AT_DISPATCH_FLOATING_TYPES_AND2(torch::kBFloat16, FP8_TYPE_MACRO,
const auto fp8_type = torch::kFloat8_e4m3fn;
#else
const auto fp8_type = torch::kHalf;
#endif
AT_DISPATCH_FLOATING_TYPES_AND2(torch::kBFloat16, fp8_type,
grid_features.scalar_type(), "SphereVoxel_clpt", ([&] { grid_features.scalar_type(), "SphereVoxel_clpt", ([&] {
auto collision_kernel_n = swept_sphere_voxel_distance_jump_kernel<scalar_t, float, float, false, scale_metric, true, false, 100>; auto collision_kernel_n = swept_sphere_voxel_distance_jump_kernel<scalar_t, float, float, false, scale_metric, true, false, 100>;

View File

@@ -525,7 +525,7 @@ class SdfMeshWarpPy(torch.autograd.Function):
if env_query_idx is None: if env_query_idx is None:
use_batch_env = False use_batch_env = False
env_query_idx = n_env_mesh env_query_idx = n_env_mesh
requires_grad = query_spheres.requires_grad
wp.launch( wp.launch(
kernel=get_closest_pt_batch_env, kernel=get_closest_pt_batch_env,
dim=b * h * n, dim=b * h * n,
@@ -541,7 +541,7 @@ class SdfMeshWarpPy(torch.autograd.Function):
wp.from_torch(mesh_enable.view(-1), dtype=wp.uint8), wp.from_torch(mesh_enable.view(-1), dtype=wp.uint8),
wp.from_torch(n_env_mesh.view(-1), dtype=wp.int32), wp.from_torch(n_env_mesh.view(-1), dtype=wp.int32),
wp.from_torch(max_dist, dtype=wp.float32), wp.from_torch(max_dist, dtype=wp.float32),
query_spheres.requires_grad, requires_grad,
b, b,
h, h,
n, n,
@@ -608,6 +608,7 @@ class SweptSdfMeshWarpPy(torch.autograd.Function):
if env_query_idx is None: if env_query_idx is None:
use_batch_env = False use_batch_env = False
env_query_idx = n_env_mesh env_query_idx = n_env_mesh
requires_grad = query_spheres.requires_grad
wp.launch( wp.launch(
kernel=get_swept_closest_pt_batch_env, kernel=get_swept_closest_pt_batch_env,
@@ -625,7 +626,7 @@ class SweptSdfMeshWarpPy(torch.autograd.Function):
wp.from_torch(mesh_enable.view(-1), dtype=wp.uint8), wp.from_torch(mesh_enable.view(-1), dtype=wp.uint8),
wp.from_torch(n_env_mesh.view(-1), dtype=wp.int32), wp.from_torch(n_env_mesh.view(-1), dtype=wp.int32),
wp.from_torch(max_dist, dtype=wp.float32), wp.from_torch(max_dist, dtype=wp.float32),
query_spheres.requires_grad, requires_grad,
b, b,
h, h,
n, n,

View File

@@ -42,6 +42,7 @@ class WorldVoxelCollision(WorldMeshCollision):
and self.cache["voxel"] not in [None, 0] and self.cache["voxel"] not in [None, 0]
): ):
self._create_voxel_cache(self.cache["voxel"]) self._create_voxel_cache(self.cache["voxel"])
return super()._init_cache()
def _create_voxel_cache(self, voxel_cache: Dict[str, Any]): def _create_voxel_cache(self, voxel_cache: Dict[str, Any]):
n_layers = voxel_cache["layers"] n_layers = voxel_cache["layers"]
@@ -699,7 +700,6 @@ class WorldVoxelCollision(WorldMeshCollision):
- self.max_esdf_distance - self.max_esdf_distance
).to(dtype=self._voxel_tensor_list[3].dtype) ).to(dtype=self._voxel_tensor_list[3].dtype)
self._env_n_voxels[:] = 0 self._env_n_voxels[:] = 0
print(self._voxel_tensor_list)
def get_voxel_grid_shape(self, env_idx: int = 0, obs_idx: int = 0): def get_voxel_grid_shape(self, env_idx: int = 0, obs_idx: int = 0):
return self._voxel_tensor_list[3][env_idx, obs_idx].shape return self._voxel_tensor_list[3][env_idx, obs_idx].shape

View File

@@ -355,6 +355,8 @@ class WarpBoundSmoothFunction(torch.autograd.Function):
wp_device = wp.device_from_torch(vel.device) wp_device = wp.device_from_torch(vel.device)
# assert smooth_weight.shape[0] == 7 # assert smooth_weight.shape[0] == 7
b, h, dof = vel.shape b, h, dof = vel.shape
requires_grad = pos.requires_grad
wp.launch( wp.launch(
kernel=forward_bound_smooth_warp, kernel=forward_bound_smooth_warp,
dim=b * h * dof, dim=b * h * dof,
@@ -383,7 +385,7 @@ class WarpBoundSmoothFunction(torch.autograd.Function):
wp.from_torch(out_gv.view(-1), dtype=wp.float32), wp.from_torch(out_gv.view(-1), dtype=wp.float32),
wp.from_torch(out_ga.view(-1), dtype=wp.float32), wp.from_torch(out_ga.view(-1), dtype=wp.float32),
wp.from_torch(out_gj.view(-1), dtype=wp.float32), wp.from_torch(out_gj.view(-1), dtype=wp.float32),
pos.requires_grad, requires_grad,
b, b,
h, h,
dof, dof,
@@ -471,6 +473,7 @@ class WarpBoundFunction(torch.autograd.Function):
): ):
wp_device = wp.device_from_torch(vel.device) wp_device = wp.device_from_torch(vel.device)
b, h, dof = vel.shape b, h, dof = vel.shape
requires_grad = pos.requires_grad
wp.launch( wp.launch(
kernel=forward_bound_warp, kernel=forward_bound_warp,
dim=b * h * dof, dim=b * h * dof,
@@ -494,7 +497,7 @@ class WarpBoundFunction(torch.autograd.Function):
wp.from_torch(out_gv.view(-1), dtype=wp.float32), wp.from_torch(out_gv.view(-1), dtype=wp.float32),
wp.from_torch(out_ga.view(-1), dtype=wp.float32), wp.from_torch(out_ga.view(-1), dtype=wp.float32),
wp.from_torch(out_gj.view(-1), dtype=wp.float32), wp.from_torch(out_gj.view(-1), dtype=wp.float32),
pos.requires_grad, requires_grad,
b, b,
h, h,
dof, dof,
@@ -505,6 +508,7 @@ class WarpBoundFunction(torch.autograd.Function):
ctx.save_for_backward(out_gp, out_gv, out_ga, out_gj) ctx.save_for_backward(out_gp, out_gv, out_ga, out_gj)
# out_c = out_cost # out_c = out_cost
# out_c = torch.linalg.norm(out_cost, dim=-1) # out_c = torch.linalg.norm(out_cost, dim=-1)
out_c = torch.sum(out_cost, dim=-1) out_c = torch.sum(out_cost, dim=-1)
return out_c return out_c
@@ -569,11 +573,11 @@ class WarpBoundPosFunction(torch.autograd.Function):
): ):
wp_device = wp.device_from_torch(pos.device) wp_device = wp.device_from_torch(pos.device)
b, h, dof = pos.shape b, h, dof = pos.shape
requires_grad = pos.requires_grad
wp.launch( wp.launch(
kernel=forward_bound_pos_warp, kernel=forward_bound_pos_warp,
dim=b * h * dof, dim=b * h * dof,
inputs=[ inputs=[
# wp.from_torch(pos.detach().view(-1).contiguous(), dtype=wp.float32),
wp.from_torch(pos.detach().view(-1), dtype=wp.float32), wp.from_torch(pos.detach().view(-1), dtype=wp.float32),
wp.from_torch(retract_config.detach().view(-1), dtype=wp.float32), wp.from_torch(retract_config.detach().view(-1), dtype=wp.float32),
wp.from_torch(retract_idx.detach().view(-1), dtype=wp.int32), wp.from_torch(retract_idx.detach().view(-1), dtype=wp.int32),
@@ -584,7 +588,7 @@ class WarpBoundPosFunction(torch.autograd.Function):
wp.from_torch(vec_weight.view(-1), dtype=wp.float32), wp.from_torch(vec_weight.view(-1), dtype=wp.float32),
wp.from_torch(out_cost.view(-1), dtype=wp.float32), wp.from_torch(out_cost.view(-1), dtype=wp.float32),
wp.from_torch(out_gp.view(-1), dtype=wp.float32), wp.from_torch(out_gp.view(-1), dtype=wp.float32),
pos.requires_grad, requires_grad,
b, b,
h, h,
dof, dof,
@@ -685,23 +689,33 @@ def forward_bound_pos_warp(
c_total = n_w * error * error c_total = n_w * error * error
g_p = 2.0 * n_w * error g_p = 2.0 * n_w * error
# bound cost:
# if c_p < p_l:
# delta = p_l - c_p
# if (delta) > eta_p or eta_p == 0.0:
# c_total += w * (delta - 0.5 * eta_p)
# g_p += -w
# else:
# c_total += w * (0.5 / eta_p) * delta * delta
# g_p += -w * (1.0 / eta_p) * delta
# elif c_p > p_u:
# delta = c_p - p_u
# if (delta) > eta_p or eta_p == 0.0:
# c_total += w * (delta - 0.5 * eta_p)
# g_p += w
# else:
# c_total += w * (0.5 / eta_p) * delta * delta
# g_p += w * (1.0 / eta_p) * delta
# bound cost: # bound cost:
if c_p < p_l: if c_p < p_l:
delta = p_l - c_p delta = c_p - p_l
if (delta) > eta_p or eta_p == 0.0: c_total += w * delta * delta
c_total += w * (delta - 0.5 * eta_p) g_p += 2.0 * w * delta
g_p += -w
else:
c_total += w * (0.5 / eta_p) * delta * delta
g_p += -w * (1.0 / eta_p) * delta
elif c_p > p_u: elif c_p > p_u:
delta = c_p - p_u delta = c_p - p_u
if (delta) > eta_p or eta_p == 0.0: c_total += w * delta * delta
c_total += w * (delta - 0.5 * eta_p) g_p += 2.0 * w * delta
g_p += w
else:
c_total += w * (0.5 / eta_p) * delta * delta
g_p += w * (1.0 / eta_p) * delta
out_cost[b_addrs] = c_total out_cost[b_addrs] = c_total
@@ -811,73 +825,43 @@ def forward_bound_warp(
g_p = 2.0 * n_w * error g_p = 2.0 * n_w * error
# bound cost: # bound cost:
delta = 0.0
if c_p < p_l: if c_p < p_l:
delta = p_l - c_p delta = c_p - p_l
if (delta) > eta_p or eta_p == 0.0:
c_total += w * (delta - 0.5 * eta_p)
g_p += -w
else:
c_total += w * (0.5 / eta_p) * delta * delta
g_p += -w * (1.0 / eta_p) * delta
elif c_p > p_u: elif c_p > p_u:
delta = c_p - p_u delta = c_p - p_u
if (delta) > eta_p or eta_p == 0.0:
c_total += w * (delta - 0.5 * eta_p)
g_p += w
else:
c_total += w * (0.5 / eta_p) * delta * delta
g_p += w * (1.0 / eta_p) * delta
c_total += w * delta * delta
g_p += 2.0 * w * delta
# bound cost:
delta = 0.0
if c_v < v_l: if c_v < v_l:
delta = v_l - c_v delta = c_v - v_l
if (delta) > eta_v or eta_v == 0.0:
c_total += b_wv * (delta - 0.5 * eta_v)
g_v = -b_wv
else:
c_total += b_wv * (0.5 / eta_v) * delta * delta
g_v = -b_wv * (1.0 / eta_v) * delta
elif c_v > v_u: elif c_v > v_u:
delta = c_v - v_u delta = c_v - v_u
if (delta) > eta_v or eta_v == 0.0:
c_total += b_wv * (delta - 0.5 * eta_v) c_total += b_wv * delta * delta
g_v = b_wv g_v = 2.0 * b_wv * delta
else:
c_total += b_wv * (0.5 / eta_v) * delta * delta delta = 0.0
g_v = b_wv * (1.0 / eta_v) * delta
if c_a < a_l: if c_a < a_l:
delta = a_l - c_a delta = c_a - a_l
if (delta) > eta_a or eta_a == 0.0:
c_total += b_wa * (delta - 0.5 * eta_a)
g_a = -b_wa
else:
c_total += b_wa * (0.5 / eta_a) * delta * delta
g_a = -b_wa * (1.0 / eta_a) * delta
elif c_a > a_u: elif c_a > a_u:
delta = c_a - a_u delta = c_a - a_u
if (delta) > eta_a or eta_a == 0.0:
c_total += b_wa * (delta - 0.5 * eta_a)
g_a = b_wa
else:
c_total += b_wa * (0.5 / eta_a) * delta * delta
g_a = b_wa * (1.0 / eta_a) * delta
c_total += b_wa * delta * delta
g_a = b_wa * 2.0 * delta
delta = 0.0
if c_j < j_l: if c_j < j_l:
delta = j_l - c_j delta = c_j - j_l
if (delta) > eta_j or eta_j == 0.0:
c_total += b_wj * (delta - 0.5 * eta_j)
g_j = -b_wj
else:
c_total += b_wj * (0.5 / eta_j) * delta * delta
g_j = -b_wj * (1.0 / eta_j) * delta
elif c_j > j_u: elif c_j > j_u:
delta = c_j - j_u delta = c_j - j_u
if (delta) > eta_j or eta_j == 0.0:
c_total += b_wj * (delta - 0.5 * eta_j) c_total += b_wj * delta * delta
g_j = b_wj g_j = b_wj * delta * 2.0
else:
c_total += b_wj * (0.5 / eta_j) * delta * delta
g_j = b_wj * (1.0 / eta_j) * delta
out_cost[b_addrs] = c_total out_cost[b_addrs] = c_total
@@ -1031,75 +1015,45 @@ def forward_bound_smooth_warp(
g_p = 2.0 * n_w * error g_p = 2.0 * n_w * error
# bound cost: # bound cost:
# bound cost:
delta = 0.0
if c_p < p_l: if c_p < p_l:
delta = p_l - c_p delta = c_p - p_l
if (delta) > eta_p or eta_p == 0.0:
c_total += w * (delta - 0.5 * eta_p)
g_p += -w
else:
c_total += w * (0.5 / eta_p) * delta * delta
g_p += -w * (1.0 / eta_p) * delta
elif c_p > p_u: elif c_p > p_u:
delta = c_p - p_u delta = c_p - p_u
if (delta) > eta_p or eta_p == 0.0:
c_total += w * (delta - 0.5 * eta_p)
g_p += w
else:
c_total += w * (0.5 / eta_p) * delta * delta
g_p += w * (1.0 / eta_p) * delta
c_total += w * delta * delta
g_p += 2.0 * w * delta
# bound cost:
delta = 0.0
if c_v < v_l: if c_v < v_l:
delta = v_l - c_v delta = c_v - v_l
if (delta) > eta_v or eta_v == 0.0:
c_total += b_wv * (delta - 0.5 * eta_v)
g_v = -b_wv
else:
c_total += b_wv * (0.5 / eta_v) * delta * delta
g_v = -b_wv * (1.0 / eta_v) * delta
elif c_v > v_u: elif c_v > v_u:
delta = c_v - v_u delta = c_v - v_u
if (delta) > eta_v or eta_v == 0.0:
c_total += b_wv * (delta - 0.5 * eta_v) c_total += b_wv * delta * delta
g_v = b_wv g_v = 2.0 * b_wv * delta
else:
c_total += b_wv * (0.5 / eta_v) * delta * delta delta = 0.0
g_v = b_wv * (1.0 / eta_v) * delta
if c_a < a_l: if c_a < a_l:
delta = a_l - c_a delta = c_a - a_l
if (delta) > eta_a or eta_a == 0.0:
c_total += b_wa * (delta - 0.5 * eta_a)
g_a = -b_wa
else:
c_total += b_wa * (0.5 / eta_a) * delta * delta
g_a = -b_wa * (1.0 / eta_a) * delta
elif c_a > a_u: elif c_a > a_u:
delta = c_a - a_u delta = c_a - a_u
if (delta) > eta_a or eta_a == 0.0:
c_total += b_wa * (delta - 0.5 * eta_a) c_total += b_wa * delta * delta
g_a = b_wa g_a = b_wa * 2.0 * delta
else:
c_total += b_wa * (0.5 / eta_a) * delta * delta delta = 0.0
g_a = b_wa * (1.0 / eta_a) * delta
if c_j < j_l: if c_j < j_l:
delta = j_l - c_j delta = c_j - j_l
if (delta) > eta_j or eta_j == 0.0:
c_total += b_wj * (delta - 0.5 * eta_j)
g_j = -b_wj
else:
c_total += b_wj * (0.5 / eta_j) * delta * delta
g_j = -b_wj * (1.0 / eta_j) * delta
elif c_j > j_u: elif c_j > j_u:
delta = c_j - j_u delta = c_j - j_u
if (delta) > eta_j or eta_j == 0.0:
c_total += b_wj * (delta - 0.5 * eta_j)
g_j = b_wj
else:
c_total += b_wj * (0.5 / eta_j) * delta * delta
g_j = b_wj * (1.0 / eta_j) * delta
# g_v = -1.0 * g_v c_total += b_wj * delta * delta
# g_a = -1.0 * g_a g_j = b_wj * delta * 2.0
# g_j = - 1.0
# do l2 regularization for velocity: # do l2 regularization for velocity:
if r_wv < 1.0: if r_wv < 1.0:
s_v = w_v * r_wv * c_v * c_v s_v = w_v * r_wv * c_v * c_v

View File

@@ -128,10 +128,10 @@ def forward_l2_warp(
target_p = target[target_id] target_p = target[target_id]
error = c_p - target_p error = c_p - target_p
if r_w >= 1.0 and w > 100.0: # if r_w >= 1.0 and w > 100.0:
c_total = w * wp.log2(wp.cosh(50.0 * error)) # c_total = w * wp.log2(wp.cosh(10.0 * error))
g_p = w * 50.0 * wp.sinh(50.0 * error) / (wp.cosh(50.0 * error)) # g_p = w * 10.0 * wp.sinh(10.0 * error) / (wp.cosh(10.0 * error))
else: # else:
c_total = w * error * error c_total = w * error * error
g_p = 2.0 * w * error g_p = 2.0 * w * error
@@ -159,8 +159,7 @@ class L2DistFunction(torch.autograd.Function):
): ):
wp_device = wp.device_from_torch(pos.device) wp_device = wp.device_from_torch(pos.device)
b, h, dof = pos.shape b, h, dof = pos.shape
# print(target) requires_grad = pos.requires_grad
wp.launch( wp.launch(
kernel=forward_l2_warp, kernel=forward_l2_warp,
dim=b * h * dof, dim=b * h * dof,
@@ -173,7 +172,7 @@ class L2DistFunction(torch.autograd.Function):
wp.from_torch(vec_weight.view(-1), dtype=wp.float32), wp.from_torch(vec_weight.view(-1), dtype=wp.float32),
wp.from_torch(out_cost_v.view(-1), dtype=wp.float32), wp.from_torch(out_cost_v.view(-1), dtype=wp.float32),
wp.from_torch(out_gp.view(-1), dtype=wp.float32), wp.from_torch(out_gp.view(-1), dtype=wp.float32),
pos.requires_grad, requires_grad,
b, b,
h, h,
dof, dof,
@@ -181,11 +180,8 @@ class L2DistFunction(torch.autograd.Function):
device=wp_device, device=wp_device,
stream=wp.stream_from_torch(pos.device), stream=wp.stream_from_torch(pos.device),
) )
# cost = torch.linalg.norm(out_cost_v, dim=-1)
# if pos.requires_grad:
# out_gp = out_gp * torch.nan_to_num( 1.0/cost.unsqueeze(-1), 0.0)
cost = torch.sum(out_cost_v, dim=-1)
cost = torch.sum(out_cost_v, dim=-1)
ctx.save_for_backward(out_gp) ctx.save_for_backward(out_gp)
return cost return cost
@@ -277,7 +273,11 @@ class DistCost(CostBase, DistCostConfig):
self._run_weight_vec[:, :-1] *= self.run_weight self._run_weight_vec[:, :-1] *= self.run_weight
cost = self._run_weight_vec * dist cost = self._run_weight_vec * dist
if RETURN_GOAL_DIST: if RETURN_GOAL_DIST:
return cost, dist / self.weight dist_scale = torch.nan_to_num(
1.0 / torch.sqrt((self.weight * self._run_weight_vec)), 0.0
)
return cost, dist * dist_scale
return cost return cost
def forward_target_idx(self, goal_vec, current_vec, goal_idx, RETURN_GOAL_DIST=False): def forward_target_idx(self, goal_vec, current_vec, goal_idx, RETURN_GOAL_DIST=False):
@@ -292,7 +292,6 @@ class DistCost(CostBase, DistCostConfig):
self._run_weight_vec[:, :-1] *= self.run_weight self._run_weight_vec[:, :-1] *= self.run_weight
else: else:
raise NotImplementedError("terminal flag needs to be set to true") raise NotImplementedError("terminal flag needs to be set to true")
if self.dist_type == DistType.L2: if self.dist_type == DistType.L2:
# print(goal_idx.shape, goal_vec.shape) # print(goal_idx.shape, goal_vec.shape)
cost = L2DistFunction.apply( cost = L2DistFunction.apply(
@@ -306,11 +305,12 @@ class DistCost(CostBase, DistCostConfig):
self._out_cv_buffer, self._out_cv_buffer,
self._out_g_buffer, self._out_g_buffer,
) )
# cost = torch.linalg.norm(cost, dim=-1)
else: else:
raise NotImplementedError() raise NotImplementedError()
# print(cost.shape, cost[:,-1])
if RETURN_GOAL_DIST: if RETURN_GOAL_DIST:
return cost, (cost / torch.sqrt((self.weight * self._run_weight_vec))) dist_scale = torch.nan_to_num(
1.0 / torch.sqrt((self.weight * self._run_weight_vec)), 0.0
)
return cost, cost * dist_scale
return cost return cost

View File

@@ -105,9 +105,9 @@ class PoseCostMetric:
@classmethod @classmethod
def create_grasp_approach_metric( def create_grasp_approach_metric(
cls, cls,
offset_position: float = 0.5, offset_position: float = 0.1,
linear_axis: int = 2, linear_axis: int = 2,
tstep_fraction: float = 0.6, tstep_fraction: float = 0.8,
tensor_args: TensorDeviceType = TensorDeviceType(), tensor_args: TensorDeviceType = TensorDeviceType(),
) -> PoseCostMetric: ) -> PoseCostMetric:
"""Enables moving to a pregrasp and then locked orientation movement to final grasp. """Enables moving to a pregrasp and then locked orientation movement to final grasp.
@@ -203,7 +203,6 @@ class PoseCost(CostBase, PoseCostConfig):
self.offset_waypoint[:3].copy_(offset_rotation) self.offset_waypoint[:3].copy_(offset_rotation)
self.offset_tstep_fraction[:] = offset_tstep_fraction self.offset_tstep_fraction[:] = offset_tstep_fraction
if self._horizon <= 0: if self._horizon <= 0:
print(self.weight)
log_error( log_error(
"Updating offset waypoint is only possible after initializing motion gen" "Updating offset waypoint is only possible after initializing motion gen"
+ " run motion_gen.warmup() before adding offset_waypoint" + " run motion_gen.warmup() before adding offset_waypoint"

View File

@@ -823,7 +823,6 @@ class UsdHelper:
config_file["robot_cfg"]["kinematics"], tensor_args=tensor_args config_file["robot_cfg"]["kinematics"], tensor_args=tensor_args
) )
kin_model = CudaRobotModel(robot_cfg) kin_model = CudaRobotModel(robot_cfg)
m = kin_model.get_robot_link_meshes() m = kin_model.get_robot_link_meshes()
offsets = [x.pose for x in m] offsets = [x.pose for x in m]
robot_mesh_model = WorldConfig(mesh=m) robot_mesh_model = WorldConfig(mesh=m)

View File

@@ -8,3 +8,4 @@
# without an express license agreement from NVIDIA CORPORATION or # without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited. # its affiliates is strictly prohibited.
# #
""" Module contains high-level APIs for robotics applications. """

View File

@@ -8,3 +8,4 @@
# without an express license agreement from NVIDIA CORPORATION or # without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited. # its affiliates is strictly prohibited.
# #
""" Module contains commonly used high-level APIs for motion generation. """

View File

@@ -8,33 +8,116 @@
# without an express license agreement from NVIDIA CORPORATION or # without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited. # its affiliates is strictly prohibited.
# #
""" This modules contains heuristics for scoring trajectories. """
from __future__ import annotations
# Standard Library # Standard Library
from dataclasses import dataclass from dataclasses import dataclass
from typing import Optional from typing import Optional, Tuple, Union
# Third Party # Third Party
import torch import torch
import torch.autograd.profiler as profiler import torch.autograd.profiler as profiler
# CuRobo # CuRobo
from curobo.types.base import TensorDeviceType
from curobo.types.robot import JointState from curobo.types.robot import JointState
from curobo.types.tensor import T_DOF from curobo.types.tensor import T_DOF
from curobo.util.logger import log_error
from curobo.util.torch_utils import get_torch_jit_decorator from curobo.util.torch_utils import get_torch_jit_decorator
from curobo.util.trajectory import calculate_dt from curobo.util.trajectory import calculate_dt
@dataclass @dataclass
class TrajEvaluatorConfig: class TrajEvaluatorConfig:
max_acc: float = 15.0 """Configurable Parameters for Trajectory Evaluator."""
max_jerk: float = 500.0
#: Maximum acceleration for each joint.
max_acc: torch.Tensor
#: Maximum jerk for each joint.
max_jerk: torch.Tensor
#: Minimum allowed time step for trajectory. Trajectories with time step less than this
#: value will be rejected.
min_dt: torch.Tensor
#: Maximum allowed time step for trajectory. Trajectories with time step greater than this
#: value will be rejected.
max_dt: torch.Tensor
#: Weight to scale smoothness cost, total cost = path length + cost_weight * smoothness cost.
cost_weight: float = 0.01 cost_weight: float = 0.01
min_dt: float = 0.001
max_dt: float = 0.1 def __post_init__(self):
"""Checks if values are of correct type and converts them if possible."""
if not isinstance(self.max_acc, torch.Tensor):
log_error(
"max_acc should be a torch.Tensor, this was changed recently, use "
+ "TrajEvaluatorConfig.from_basic() to create TrajEvaluatorConfig object"
)
if not isinstance(self.max_jerk, torch.Tensor):
log_error(
"max_jerk should be a torch.Tensor, this was changed recently, use "
+ "TrajEvaluatorConfig.from_basic() to create TrajEvaluatorConfig object"
)
if not isinstance(self.min_dt, torch.Tensor):
self.min_dt = torch.as_tensor(
self.min_dt, device=self.max_acc.device, dtype=self.max_acc.dtype
)
if not isinstance(self.max_dt, torch.Tensor):
self.max_dt = torch.as_tensor(
self.max_dt, device=self.max_acc.device, dtype=self.max_acc.dtype
)
@staticmethod
def from_basic(
dof: int,
max_acc: float = 15.0,
max_jerk: float = 500.0,
cost_weight: float = 0.01,
min_dt: float = 0.001,
max_dt: float = 0.1,
tensor_args: TensorDeviceType = TensorDeviceType(),
) -> TrajEvaluatorConfig:
"""Creates TrajEvaluatorConfig object from basic parameters.
Args:
dof: number of active joints in the robot.
max_acc: maximum acceleration for all joints. Treats this as same for all joints.
max_jerk: maximum jerk for all joints. Treats this as same for all joints.
cost_weight: weight to scale smoothness cost.
min_dt: minimum allowed time step between waypoints of a trajectory.
max_dt: maximum allowed time step between waypoints of a trajectory.
tensor_args: device and dtype for the tensors.
Returns:
TrajEvaluatorConfig: Configured Parameters for Trajectory Evaluator.
"""
return TrajEvaluatorConfig(
max_acc=torch.full((dof,), max_acc, device=tensor_args.device, dtype=tensor_args.dtype),
max_jerk=torch.full(
(dof,), max_jerk, device=tensor_args.device, dtype=tensor_args.dtype
),
cost_weight=cost_weight,
min_dt=torch.as_tensor(min_dt, device=tensor_args.device, dtype=tensor_args.dtype),
max_dt=torch.as_tensor(max_dt, device=tensor_args.device, dtype=tensor_args.dtype),
)
@get_torch_jit_decorator() @get_torch_jit_decorator()
def compute_path_length(vel, traj_dt, cspace_distance_weight): def compute_path_length(vel, traj_dt, cspace_distance_weight) -> torch.Tensor:
"""JIT compatible function to compute path length.
Args:
vel: joint space velocity tensor of shape (batch, horizon, dof).
traj_dt: dt of trajectory tensor of shape (batch, horizon) or (1, horizon) or (1, 1).
cspace_distance_weight: weight tensor of shape (dof).
Returns:
torch.Tensor: path length tensor of shape (batch).
"""
pl = torch.sum( pl = torch.sum(
torch.sum(torch.abs(vel) * traj_dt.unsqueeze(-1) * cspace_distance_weight, dim=-1), dim=-1 torch.sum(torch.abs(vel) * traj_dt.unsqueeze(-1) * cspace_distance_weight, dim=-1), dim=-1
) )
@@ -42,19 +125,35 @@ def compute_path_length(vel, traj_dt, cspace_distance_weight):
@get_torch_jit_decorator() @get_torch_jit_decorator()
def compute_path_length_cost(vel, cspace_distance_weight): def compute_path_length_cost(vel, cspace_distance_weight) -> torch.Tensor:
"""JIT compatible function to compute path length cost without considering time step.
Args:
vel: joint space velocity tensor of shape (batch, horizon, dof).
cspace_distance_weight: weight tensor of shape (dof).
Returns:
torch.Tensor: path length cost tensor of shape (batch).
"""
pl = torch.sum(torch.sum(torch.abs(vel) * cspace_distance_weight, dim=-1), dim=-1) pl = torch.sum(torch.sum(torch.abs(vel) * cspace_distance_weight, dim=-1), dim=-1)
return pl return pl
@get_torch_jit_decorator() @get_torch_jit_decorator()
def smooth_cost(abs_acc, abs_jerk, opt_dt): def smooth_cost(abs_acc, abs_jerk, opt_dt) -> torch.Tensor:
# acc = torch.max(torch.max(abs_acc, dim=-1)[0], dim=-1)[0] """JIT compatible function to compute smoothness cost.
# jerk = torch.max(torch.max(abs_jerk, dim=-1)[0], dim=-1)[0]
Args:
abs_acc: absolute acceleration tensor of shape (batch, horizon, dof).
abs_jerk: absolute jerk tensor of shape (batch, horizon, dof).
opt_dt: optimal time step tensor of shape (batch).
Returns:
torch.Tensor: smoothness cost tensor of shape (batch).
"""
jerk = torch.mean(torch.max(abs_jerk, dim=-1)[0], dim=-1) jerk = torch.mean(torch.max(abs_jerk, dim=-1)[0], dim=-1)
mean_acc = torch.mean(torch.max(abs_acc, dim=-1)[0], dim=-1) # [0] mean_acc = torch.mean(torch.max(abs_acc, dim=-1)[0], dim=-1) # [0]
a = (jerk * 0.001) + 10.0 * opt_dt + (mean_acc * 0.01) a = (jerk * 0.001) + 10.0 * opt_dt + (mean_acc * 0.01)
# a = (jerk * 0.001) + 50.0 * opt_dt + (mean_acc * 0.01)
return a return a
@@ -65,24 +164,44 @@ def compute_smoothness(
acc: torch.Tensor, acc: torch.Tensor,
jerk: torch.Tensor, jerk: torch.Tensor,
max_vel: torch.Tensor, max_vel: torch.Tensor,
max_acc: float, max_acc: torch.Tensor,
max_jerk: float, max_jerk: torch.Tensor,
traj_dt: torch.Tensor, traj_dt: torch.Tensor,
min_dt: float, min_dt: float,
max_dt: float, max_dt: float,
): ) -> Tuple[torch.Tensor, torch.Tensor]:
"""JIT compatible function to compute smoothness.
Args:
vel: joint space velocity tensor of shape (batch, horizon, dof).
acc: joint space acceleration tensor of shape (batch, horizon, dof).
jerk: joint space jerk tensor of shape (batch, horizon, dof).
max_vel: maximum velocity limits, used to find scaling factor for dt that pushes at least
one joint to its limit, taking into account acceleration and jerk limits.
max_acc: maximum acceleration limits, used to find scaling factor for dt that pushes at least
one joint to its limit, taking into account velocity and jerk limits.
max_jerk: maximum jerk limits, used to find scaling factor for dt that pushes at least
one joint to its limit, taking into account velocity and acceleration limits.
traj_dt: dt of trajectory tensor of shape (batch, horizon) or (1, horizon)
min_dt: minimum delta time allowed between steps/waypoints in a trajectory.
max_dt: maximum delta time allowed between steps/waypoints in a trajectory.
Returns:
Tuple[torch.Tensor, torch.Tensor]: success label tensor of shape (batch) and
smoothness cost tensor of shape (batch)
"""
# compute scaled dt: # compute scaled dt:
# h = int(vel.shape[-2] / 2)
max_v_arr = torch.max(torch.abs(vel), dim=-2)[0] # output is batch, dof max_v_arr = torch.max(torch.abs(vel), dim=-2)[0] # output is batch, dof
scale_dt = (max_v_arr) / (max_vel.view(1, max_v_arr.shape[-1])) # batch,dof scale_dt = (max_v_arr) / (max_vel.view(1, max_v_arr.shape[-1])) # batch,dof
max_acc_arr = torch.max(torch.abs(acc), dim=-2)[0] # batch, dof max_acc_arr = torch.max(torch.abs(acc), dim=-2)[0] # batch, dof
max_acc = max_acc.view(1, max_acc_arr.shape[-1])
scale_dt_acc = torch.sqrt(torch.max(max_acc_arr / max_acc, dim=-1)[0]) # batch, 1 scale_dt_acc = torch.sqrt(torch.max(max_acc_arr / max_acc, dim=-1)[0]) # batch, 1
max_jerk_arr = torch.max(torch.abs(jerk), dim=-2)[0] # batch, dof max_jerk_arr = torch.max(torch.abs(jerk), dim=-2)[0] # batch, dof
max_jerk = max_jerk.view(1, max_jerk_arr.shape[-1])
scale_dt_jerk = torch.pow(torch.max(max_jerk_arr / max_jerk, dim=-1)[0], 1.0 / 3.0) # batch, 1 scale_dt_jerk = torch.pow(torch.max(max_jerk_arr / max_jerk, dim=-1)[0], 1.0 / 3.0) # batch, 1
dt_score_vel = torch.max(scale_dt, dim=-1)[0] # batch, 1 dt_score_vel = torch.max(scale_dt, dim=-1)[0] # batch, 1
@@ -102,25 +221,55 @@ def compute_smoothness(
# mean_jerk_val = torch.max(torch.mean(abs_jerk, dim=-1), dim=-1)[0] # mean_jerk_val = torch.max(torch.mean(abs_jerk, dim=-1), dim=-1)[0]
max_jerk_val = torch.max(torch.max(abs_jerk, dim=-1)[0], dim=-1)[0] max_jerk_val = torch.max(torch.max(abs_jerk, dim=-1)[0], dim=-1)[0]
acc_label = torch.logical_and(max_acc_val <= max_acc, max_jerk_val <= max_jerk) acc_label = torch.logical_and(max_acc_val <= max_acc, max_jerk_val <= max_jerk)
# print(max_acc_val, max_jerk_val, dt_score)
return (acc_label, smooth_cost(abs_acc, abs_jerk, dt_score)) return (acc_label, smooth_cost(abs_acc, abs_jerk, dt_score))
@get_torch_jit_decorator() @get_torch_jit_decorator()
def compute_smoothness_opt_dt( def compute_smoothness_opt_dt(
vel, acc, jerk, max_vel: torch.Tensor, max_acc: float, max_jerk: float, opt_dt: torch.Tensor vel,
): acc,
abs_acc = torch.abs(acc) jerk,
max_acc_val = torch.max(torch.max(abs_acc, dim=-1)[0], dim=-1)[0] max_vel: torch.Tensor,
abs_jerk = torch.abs(jerk) max_acc: torch.Tensor,
max_jerk_val = torch.max(torch.max(abs_jerk, dim=-1)[0], dim=-1)[0] max_jerk: torch.Tensor,
opt_dt: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""JIT compatible function to compute smoothness with pre-computed optimal time step.
acc_label = torch.logical_and(max_acc_val <= max_acc, max_jerk_val <= max_jerk) Args:
vel: joint space velocity tensor of shape (batch, horizon, dof), not used in this
implementation.
acc: joint space acceleration tensor of shape (batch, horizon, dof).
jerk: joint space jerk tensor of shape (batch, horizon, dof).
max_vel: maximum velocity limit, not used in this implementation.
max_acc: maximum acceleration limit, used to reject trajectories.
max_jerk: maximum jerk limit, used to reject trajectories.
opt_dt: optimal time step tensor of shape (batch).
Returns:
Tuple[torch.Tensor, torch.Tensor]: success label tensor of shape (batch) and
smoothness cost tensor of shape (batch).
"""
abs_acc = torch.abs(acc)
abs_jerk = torch.abs(jerk)
delta_acc = torch.min(torch.min(max_acc - abs_acc, dim=-1)[0], dim=-1)[0]
max_jerk = max_jerk.view(1, abs_jerk.shape[-1])
delta_jerk = torch.min(torch.min(max_jerk - abs_jerk, dim=-1)[0], dim=-1)[0]
acc_label = torch.logical_and(delta_acc >= 0.0, delta_jerk >= 0.0)
return acc_label, smooth_cost(abs_acc, abs_jerk, opt_dt) return acc_label, smooth_cost(abs_acc, abs_jerk, opt_dt)
class TrajEvaluator(TrajEvaluatorConfig): class TrajEvaluator(TrajEvaluatorConfig):
"""Trajectory Evaluator class that uses heuristics to score trajectories."""
def __init__(self, config: Optional[TrajEvaluatorConfig] = None): def __init__(self, config: Optional[TrajEvaluatorConfig] = None):
"""Initializes the TrajEvaluator object.
Args:
config: Configurable parameters for Trajectory Evaluator.
"""
if config is None: if config is None:
config = TrajEvaluatorConfig() config = TrajEvaluatorConfig()
super().__init__(**vars(config)) super().__init__(**vars(config))
@@ -128,17 +277,41 @@ class TrajEvaluator(TrajEvaluatorConfig):
def _compute_path_length( def _compute_path_length(
self, js: JointState, traj_dt: torch.Tensor, cspace_distance_weight: T_DOF self, js: JointState, traj_dt: torch.Tensor, cspace_distance_weight: T_DOF
): ):
"""Compute path length from joint velocities across trajectory and dt between them.
Args:
js: joint state object with velocity tensor.
traj_dt: time step tensor of shape (batch, horizon) or (1, horizon) or (1, 1).
cspace_distance_weight: weight tensor of shape (dof).
Returns:
torch.Tensor: path length tensor of shape (batch).
"""
path_length = compute_path_length(js.velocity, traj_dt, cspace_distance_weight) path_length = compute_path_length(js.velocity, traj_dt, cspace_distance_weight)
return path_length return path_length
def _check_smoothness(self, js: JointState, traj_dt: torch.Tensor, max_vel: torch.Tensor): def _check_smoothness(
self, js: JointState, traj_dt: torch.Tensor, max_vel: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Check smoothness of trajectory.
Args:
js: joint state object with velocity, acceleration and jerk tensors.
traj_dt: time step tensor of shape (batch, horizon) or (1, horizon) or (1, 1).
max_vel: maximum velocity limits, used to find scaling factor for dt that pushes at
least one joint to its limit, taking into account acceleration and jerk limits.
Returns:
Tuple[torch.Tensor, torch.Tensor]: success label tensor of shape (batch) and
smoothness cost tensor of shape (batch).
"""
if js.jerk is None: if js.jerk is None:
js.jerk = ( js.jerk = (
(torch.roll(js.acceleration, -1, -2) - js.acceleration) (torch.roll(js.acceleration, -1, -2) - js.acceleration)
* (1 / traj_dt).unsqueeze(-1) * (1 / traj_dt).unsqueeze(-1)
)[..., :-1, :] )[..., :-1, :]
acc_label, max_acc = compute_smoothness( smooth_success_label, smooth_cost = compute_smoothness(
js.velocity, js.velocity,
js.acceleration, js.acceleration,
js.jerk, js.jerk,
@@ -149,7 +322,7 @@ class TrajEvaluator(TrajEvaluatorConfig):
self.min_dt, self.min_dt,
self.max_dt, self.max_dt,
) )
return acc_label, max_acc return smooth_success_label, smooth_cost
@profiler.record_function("traj_evaluate_smoothness") @profiler.record_function("traj_evaluate_smoothness")
def evaluate( def evaluate(
@@ -158,7 +331,22 @@ class TrajEvaluator(TrajEvaluatorConfig):
traj_dt: torch.Tensor, traj_dt: torch.Tensor,
cspace_distance_weight: T_DOF, cspace_distance_weight: T_DOF,
max_vel: torch.Tensor, max_vel: torch.Tensor,
): ) -> Tuple[torch.Tensor, torch.Tensor]:
"""Evaluate trajectory based on smoothness and path length.
Args:
js: joint state object with velocity, acceleration and jerk tensors.
traj_dt: time step tensor of shape (batch, horizon) or (1, horizon) or (1, 1) or
(batch, 1).
cspace_distance_weight: weight tensor of shape (dof).
max_vel: maximum velocity limits, used to find scaling factor for dt that pushes at
least one joint to its limit, taking into account acceleration and jerk limits.
Returns:
Tuple[torch.Tensor, torch.Tensor]: success label tensor of shape (batch) and
total cost tensor of shape (batch) where total cost = (path length +
cost_weight * smoothness cost).
"""
label, cost = self._check_smoothness(js, traj_dt, max_vel) label, cost = self._check_smoothness(js, traj_dt, max_vel)
pl_cost = self._compute_path_length(js, traj_dt, cspace_distance_weight) pl_cost = self._compute_path_length(js, traj_dt, cspace_distance_weight)
return label, pl_cost + self.cost_weight * cost return label, pl_cost + self.cost_weight * cost
@@ -170,7 +358,21 @@ class TrajEvaluator(TrajEvaluatorConfig):
opt_dt: torch.Tensor, opt_dt: torch.Tensor,
cspace_distance_weight: T_DOF, cspace_distance_weight: T_DOF,
max_vel: torch.Tensor, max_vel: torch.Tensor,
): ) -> Tuple[torch.Tensor, torch.Tensor]:
"""Evaluate trajectory based on smoothness and path length with pre-computed optimal dt.
Args:
js: joint state object with velocity, acceleration and jerk tensors.
opt_dt: optimal time step tensor of shape (batch).
cspace_distance_weight: weight tensor of shape (dof).
max_vel: maximum velocity limits, used to find scaling factor for dt that pushes at
least one joint to its limit, taking into account acceleration and jerk limits.
Returns:
Tuple[torch.Tensor, torch.Tensor]: success label tensor of shape (batch) and
total cost tensor of shape (batch) where total cost = (path length +
cost_weight * smoothness cost).
"""
label, cost = compute_smoothness_opt_dt( label, cost = compute_smoothness_opt_dt(
js.velocity, js.acceleration, js.jerk, max_vel, self.max_acc, self.max_jerk, opt_dt js.velocity, js.acceleration, js.jerk, max_vel, self.max_acc, self.max_jerk, opt_dt
) )
@@ -185,7 +387,22 @@ class TrajEvaluator(TrajEvaluatorConfig):
cspace_distance_weight: T_DOF, cspace_distance_weight: T_DOF,
max_vel: torch.Tensor, max_vel: torch.Tensor,
skip_last_tstep: bool = False, skip_last_tstep: bool = False,
): ) -> Tuple[torch.Tensor, torch.Tensor]:
"""Evaluate trajectory by first computing velocity, acceleration and jerk from position.
Args:
js: joint state object with position tensor.
traj_dt: time step tensor of shape (batch, 1) or (1, 1).
cspace_distance_weight: weight tensor of shape (dof).
max_vel: maximum velocity limits, used to find scaling factor for dt that pushes at
least one joint to its limit, taking into account acceleration and jerk limits.
skip_last_tstep: flag to skip last time step in trajectory.
Returns:
Tuple[torch.Tensor, torch.Tensor]: success label tensor of shape (batch) and
total cost tensor of shape (batch) where total cost = (path length +
cost_weight * smoothness cost).
"""
js = js.calculate_fd_from_position(traj_dt) js = js.calculate_fd_from_position(traj_dt)
if skip_last_tstep: if skip_last_tstep:
js.position = js.position[..., :-1, :] js.position = js.position[..., :-1, :]
@@ -194,6 +411,3 @@ class TrajEvaluator(TrajEvaluatorConfig):
js.jerk = js.jerk[..., :-1, :] js.jerk = js.jerk[..., :-1, :]
return self.evaluate(js, traj_dt, cspace_distance_weight, max_vel) return self.evaluate(js, traj_dt, cspace_distance_weight, max_vel)
def calculate_dt(self, js: JointState, max_vel: torch.Tensor, raw_dt: float):
return calculate_dt(js.velocity, max_vel, raw_dt)

File diff suppressed because it is too large Load Diff

View File

@@ -9,6 +9,28 @@
# its affiliates is strictly prohibited. # its affiliates is strictly prohibited.
# #
"""
This module contains :meth:`MotionGen` class that provides a high-level interface for motion
generation. Motion Generation can take goals either as joint configurations
:meth:`MotionGen.plan_single_js` or as Cartesian poses :meth:`MotionGen.plan_single`. When Cartesian
pose is given as target, inverse kinematics is first done to generate seeds for trajectory
optimization. Motion generation fallback to using a graph planner when linear interpolated
trajectory optimization seeds are not successful. Reaching one Cartesian pose in a goalset is also
supported using :meth:`MotionGen.plan_goalset`. Batched planning in the same world environment (
:meth:`MotionGen.plan_batch`) and different world environments (:meth:`MotionGen.plan_batch_env`)
is also provided.
A motion generation request can be configured using :meth:`MotionGenPlanConfig`. The result
of motion generation is returned as a :meth:`MotionGenResult`.
.. raw:: html
<p>
<video autoplay="True" loop="True" muted="True" preload="auto" width="100%"><source src="../videos/ur10_real_timer.mp4" type="video/mp4"></video>
</p>
"""
from __future__ import annotations from __future__ import annotations
@@ -16,6 +38,7 @@ from __future__ import annotations
import math import math
import time import time
from dataclasses import dataclass from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, List, Optional, Union from typing import Any, Dict, List, Optional, Union
# Third Party # Third Party
@@ -129,16 +152,23 @@ class MotionGenConfig:
#: record compute ops as cuda graphs and replay recorded graphs for upto 10x faster execution. #: record compute ops as cuda graphs and replay recorded graphs for upto 10x faster execution.
use_cuda_graph: bool = True use_cuda_graph: bool = True
#: After 100 iterations of trajectory optimization, a new dt is computed that pushes the
#: velocity, acceleration, or jerk limits to the maximum. This new dt is then used with a
#: reduction :attr:`MotionGenPlanConfig.finetune_dt_scale` to run 200+ iterations of trajectory
#: optimization. If trajectory optimization fails with the new dt, the new dt is increased and
#: tried again until :attr:`MotionGenPlanConfig.finetune_attempts`.
optimize_dt: bool = True
@staticmethod @staticmethod
@profiler.record_function("motion_gen_config/load_from_robot_config") @profiler.record_function("motion_gen_config/load_from_robot_config")
def load_from_robot_config( def load_from_robot_config(
robot_cfg: Union[Union[str, Dict], RobotConfig], robot_cfg: Union[Union[str, Dict], RobotConfig],
world_model: Optional[Union[Union[str, Dict], WorldConfig]] = None, world_model: Optional[Union[Union[str, Dict], WorldConfig]] = None,
tensor_args: TensorDeviceType = TensorDeviceType(), tensor_args: TensorDeviceType = TensorDeviceType(),
num_ik_seeds: int = 30, num_ik_seeds: int = 32,
num_graph_seeds: int = 1, num_graph_seeds: int = 1,
num_trajopt_seeds: int = 12, num_trajopt_seeds: int = 4,
num_batch_ik_seeds: int = 30, num_batch_ik_seeds: int = 32,
num_batch_trajopt_seeds: int = 1, num_batch_trajopt_seeds: int = 1,
num_trajopt_noisy_seeds: int = 1, num_trajopt_noisy_seeds: int = 1,
position_threshold: float = 0.005, position_threshold: float = 0.005,
@@ -197,7 +227,7 @@ class MotionGenConfig:
smooth_weight: List[float] = None, smooth_weight: List[float] = None,
finetune_smooth_weight: Optional[List[float]] = None, finetune_smooth_weight: Optional[List[float]] = None,
state_finite_difference_mode: Optional[str] = None, state_finite_difference_mode: Optional[str] = None,
finetune_dt_scale: float = 0.95, finetune_dt_scale: float = 0.9,
maximum_trajectory_time: Optional[float] = None, maximum_trajectory_time: Optional[float] = None,
maximum_trajectory_dt: float = 0.1, maximum_trajectory_dt: float = 0.1,
velocity_scale: Optional[Union[List[float], float]] = None, velocity_scale: Optional[Union[List[float], float]] = None,
@@ -207,87 +237,18 @@ class MotionGenConfig:
project_pose_to_goal_frame: bool = True, project_pose_to_goal_frame: bool = True,
ik_seed: int = 1531, ik_seed: int = 1531,
graph_seed: int = 1531, graph_seed: int = 1531,
high_precision: bool = False,
): ):
"""Helper function to create configuration from robot and world configuration. if position_threshold <= 0.001:
high_precision = True
Args: if high_precision:
robot_cfg: Robot configuration to use for motion generation. finetune_trajopt_iters = (
world_model: World model to use for motion generation. Use None to disable world model. 300 if finetune_trajopt_iters is None else max(300, finetune_trajopt_iters)
tensor_args: Tensor device to use for motion generation. Use to change cuda device id. )
num_ik_seeds: Number of seeds to use in inverse kinematics (IK) optimization. grad_trajopt_iters = 200 if grad_trajopt_iters is None else max(200, grad_trajopt_iters)
num_graph_seeds: Number of graph paths to use as seed in trajectory optimization. position_threshold = min(position_threshold, 0.001)
num_trajopt_seeds: Number of seeds to use in trajectory optimization. rotation_threshold = min(rotation_threshold, 0.025)
num_batch_ik_seeds: Number of seeds to use in batch planning modes for IK. cspace_threshold = min(cspace_threshold, 0.01)
num_batch_trajopt_seeds: Number of seeds to use in batch planning modes for trajopt.
num_trajopt_noisy_seeds: Number of seeds to use for trajopt.
position_threshold: _description_
rotation_threshold: _description_
cspace_threshold: _description_
world_coll_checker: _description_
base_cfg_file: _description_
particle_ik_file: _description_
gradient_ik_file: _description_
graph_file: _description_
particle_trajopt_file: _description_
gradient_trajopt_file: _description_
finetune_trajopt_file: _description_
trajopt_tsteps: _description_
interpolation_steps: _description_
interpolation_dt: _description_
interpolation_type: _description_
use_cuda_graph: _description_
self_collision_check: _description_
self_collision_opt: _description_
grad_trajopt_iters: _description_
trajopt_seed_ratio: _description_
ik_opt_iters: _description_
ik_particle_opt: _description_
collision_checker_type: _description_
sync_cuda_time: _description_
trajopt_particle_opt: _description_
traj_evaluator_config: _description_
traj_evaluator: _description_
minimize_jerk: _description_
filter_robot_command: _description_
use_gradient_descent: _description_
collision_cache: _description_
n_collision_envs: _description_
ee_link_name: _description_
use_es_ik: _description_
use_es_trajopt: _description_
es_ik_learning_rate: _description_
es_trajopt_learning_rate: _description_
use_ik_fixed_samples: _description_
use_trajopt_fixed_samples: _description_
evaluate_interpolated_trajectory: _description_
partial_ik_iters: _description_
fixed_iters_trajopt: _description_
store_ik_debug: _description_
store_trajopt_debug: _description_
graph_trajopt_iters: _description_
collision_max_outside_distance: _description_
collision_activation_distance: _description_
trajopt_dt: _description_
js_trajopt_dt: _description_
js_trajopt_tsteps: _description_
trim_steps: _description_
store_debug_in_result: _description_
finetune_trajopt_iters: _description_
smooth_weight: _description_
finetune_smooth_weight: _description_
state_finite_difference_mode: _description_
finetune_dt_scale: _description_
maximum_trajectory_time: _description_
maximum_trajectory_dt: _description_
velocity_scale: _description_
acceleration_scale: _description_
jerk_scale: _description_
optimize_dt: _description_
Returns:
_description_
"""
init_warp(tensor_args=tensor_args) init_warp(tensor_args=tensor_args)
if js_trajopt_tsteps is not None: if js_trajopt_tsteps is not None:
log_warn("js_trajopt_tsteps is deprecated, use trajopt_tsteps instead.") log_warn("js_trajopt_tsteps is deprecated, use trajopt_tsteps instead.")
@@ -317,14 +278,6 @@ class MotionGenConfig:
): ):
maximum_trajectory_dt = (1.0 / min(velocity_scale)) * maximum_trajectory_dt maximum_trajectory_dt = (1.0 / min(velocity_scale)) * maximum_trajectory_dt
if traj_evaluator_config is None:
if maximum_trajectory_dt is not None:
max_dt = maximum_trajectory_dt
if maximum_trajectory_time is not None:
max_dt = maximum_trajectory_time / trajopt_tsteps
if acceleration_scale is not None:
max_dt = max_dt * (1.0 / np.sqrt(min(acceleration_scale)))
traj_evaluator_config = TrajEvaluatorConfig(min_dt=interpolation_dt, max_dt=max_dt)
if maximum_trajectory_dt is not None: if maximum_trajectory_dt is not None:
if trajopt_dt is None: if trajopt_dt is None:
trajopt_dt = maximum_trajectory_dt trajopt_dt = maximum_trajectory_dt
@@ -385,10 +338,20 @@ class MotionGenConfig:
robot_cfg["kinematics"]["cspace"]["velocity_scale"] = velocity_scale robot_cfg["kinematics"]["cspace"]["velocity_scale"] = velocity_scale
robot_cfg = RobotConfig.from_dict(robot_cfg, tensor_args) robot_cfg = RobotConfig.from_dict(robot_cfg, tensor_args)
traj_evaluator_config.max_acc = (
robot_cfg.kinematics.get_joint_limits().acceleration[1, 0].item() if traj_evaluator_config is None:
if maximum_trajectory_dt is not None:
max_dt = maximum_trajectory_dt
if maximum_trajectory_time is not None:
max_dt = maximum_trajectory_time / trajopt_tsteps
if acceleration_scale is not None:
max_dt = max_dt * (1.0 / np.sqrt(min(acceleration_scale)))
traj_evaluator_config = TrajEvaluatorConfig.from_basic(
min_dt=interpolation_dt, max_dt=max_dt, dof=robot_cfg.kinematics.dof
) )
traj_evaluator_config.max_jerk = robot_cfg.kinematics.get_joint_limits().jerk[1, 0].item() traj_evaluator_config.max_acc = robot_cfg.kinematics.get_joint_limits().acceleration[1]
traj_evaluator_config.max_jerk = robot_cfg.kinematics.get_joint_limits().jerk[1]
if isinstance(world_model, str): if isinstance(world_model, str):
world_model = load_yaml(join_path(get_world_configs_path(), world_model)) world_model = load_yaml(join_path(get_world_configs_path(), world_model))
@@ -439,6 +402,7 @@ class MotionGenConfig:
collision_activation_distance=collision_activation_distance, collision_activation_distance=collision_activation_distance,
project_pose_to_goal_frame=project_pose_to_goal_frame, project_pose_to_goal_frame=project_pose_to_goal_frame,
seed=ik_seed, seed=ik_seed,
high_precision=high_precision,
) )
ik_solver = IKSolver(ik_solver_cfg) ik_solver = IKSolver(ik_solver_cfg)
@@ -612,15 +576,175 @@ class MotionGenConfig:
interpolation_dt=interpolation_dt, interpolation_dt=interpolation_dt,
finetune_dt_scale=finetune_dt_scale, finetune_dt_scale=finetune_dt_scale,
use_cuda_graph=use_cuda_graph, use_cuda_graph=use_cuda_graph,
optimize_dt=optimize_dt,
) )
@dataclass
class MotionGenPlanConfig:
"""Configuration for querying motion generation."""
#: Use graph planner to generate collision-free seed for trajectory optimization.
enable_graph: bool = False
#: Enable trajectory optimization.
enable_opt: bool = True
#: Use neural network IK seed for solving inverse kinematics. Not implemented.
use_nn_ik_seed: bool = False
#: Run trajectory optimization only if graph planner is successful.
need_graph_success: bool = False
#: Maximum number of attempts allowed to solve the motion generation problem.
max_attempts: int = 60
#: Maximum time in seconds allowed to solve the motion generation problem.
timeout: float = 10.0
#: Number of failed attempts at which to fallback to a graph planner for obtaining trajectory
#: seeds.
enable_graph_attempt: Optional[int] = 3
#: Number of failed attempts at which to disable graph planner. This has not been found to be
#: useful.
disable_graph_attempt: Optional[int] = None
#: Number of IK attempts allowed before returning a failure. Set this to a low value (5) to
#: save compute time when an unreachable goal is given.
ik_fail_return: Optional[int] = None
#: Full IK solving takes 10% of the planning time. Setting this to True will run only 50
#: iterations of IK instead of 100 and then run trajecrtory optimization without checking if
#: IK is successful. This can reduce the planning time by 5% but generated solutions can
#: have larger motion time and path length. Leave this to False for most cases.
partial_ik_opt: bool = False
#: Number of seeds to use for solving inverse kinematics. Chanigng this will cause the existing
#: CUDAGraphs to be invalidated. Instead, set num_ik_seeds when creating
#: :meth:`MotionGenConfig`.
num_ik_seeds: Optional[int] = None
#: Number of seeds to use for graph planner. We found 4 to be a good number for most cases. The
#: default value is set when creating :meth:`MotionGenConfig` so leave this to None.
num_graph_seeds: Optional[int] = None
#: Number of seeds to use for trajectory optimization. We found 12 to be a good number for most
#: cases. The default value is set when creating :meth:`MotionGenConfig` so leave this to None.
num_trajopt_seeds: Optional[int] = None
#: Ratio of successful motion generation queries to total queries in batch planning mode. Motion
#: generation is queries upto :attr:`MotionGenPlanConfig.max_attempts` until this ratio is met.
success_ratio: float = 1
#: Return a failure if the query is invalid. Set this to False to debug a query that is invalid.
fail_on_invalid_query: bool = True
#: use start config as regularization for IK instead of
#: :meth:`curobo.types.robot.RobotConfig.kinematics.kinematics_config.retract_config`
use_start_state_as_retract: bool = True
#: Use a custom pose cost metric for trajectory optimization. This is useful for adding
#: additional constraints to motion generation, such as constraining the end-effector's motion
#: to a plane or a line or hold orientation while moving. This is also useful for only reaching
#: partial pose (e.g., only position). See :meth:`curobo.rollout.cost.pose_cost.PoseCostMetric`
#: for more details.
pose_cost_metric: Optional[PoseCostMetric] = None
#: Run finetuning trajectory optimization after running 100 iterations of
#: trajectory optimization. This will provide shorter and smoother trajectories. When
# :attr:`MotionGenConfig.optimize_dt` is True, this flag will also scale the trajectory
#: optimization by a new dt. Leave this to True for most cases. If you are not interested in
#: finding time-optimal solutions and only want to use motion generation as a feasibility check,
#: set this to False. Note that when set to False, the resulting trajectory is only guaranteed
#: to be collision-free and within joint limits. When False, it's not guaranteed to be smooth
#: and might not execute on a real robot.
enable_finetune_trajopt: bool = True
#: Run finetuning trajectory optimization across all trajectory optimization seeds. If this is
#: set to False, then only 1 successful seed per query is selected and finetuned. When False,
#: we have observed that the resulting trajectory is not as optimal as when set to True.
parallel_finetune: bool = True
#: Scale dt by this value before running finetuning trajectory optimization. This enables
#: trajectory optimization to find shorter paths and also account for smoothness over variable
#: length trajectories. This is only used when :attr:`MotionGenConfig.optimize_dt` is True.
finetune_dt_scale: Optional[float] = 0.85
#: Number of attempts to run finetuning trajectory optimization. Every attempt will increase the
#: :attr:`MotionGenPlanConfig.finetune_dt_scale` by
#: :attr:`MotionGenPlanConfig.finetune_dt_decay` as a path couldn't be found with the previous
#: smaller dt.
finetune_attempts: int = 5
#: Decay factor used to increase :attr:`MotionGenPlanConfig.finetune_dt_scale` when optimization
#: fails to find a solution. This is only used when :attr:`MotionGenConfig.optimize_dt` is True.
finetune_dt_decay: float = 1.025
def __post_init__(self):
if not self.enable_opt and not self.enable_graph:
log_error("Graph search and Optimization are both disabled, enable one")
def clone(self) -> MotionGenPlanConfig:
return MotionGenPlanConfig(
enable_graph=self.enable_graph,
enable_opt=self.enable_opt,
use_nn_ik_seed=self.use_nn_ik_seed,
need_graph_success=self.need_graph_success,
max_attempts=self.max_attempts,
timeout=self.timeout,
enable_graph_attempt=self.enable_graph_attempt,
disable_graph_attempt=self.disable_graph_attempt,
ik_fail_return=self.ik_fail_return,
partial_ik_opt=self.partial_ik_opt,
num_ik_seeds=self.num_ik_seeds,
num_graph_seeds=self.num_graph_seeds,
num_trajopt_seeds=self.num_trajopt_seeds,
success_ratio=self.success_ratio,
fail_on_invalid_query=self.fail_on_invalid_query,
enable_finetune_trajopt=self.enable_finetune_trajopt,
parallel_finetune=self.parallel_finetune,
use_start_state_as_retract=self.use_start_state_as_retract,
pose_cost_metric=(
None if self.pose_cost_metric is None else self.pose_cost_metric.clone()
),
finetune_dt_scale=self.finetune_dt_scale,
finetune_attempts=self.finetune_attempts,
)
class MotionGenStatus(Enum):
"""Status of motion generation query."""
#: Inverse kinematics failed to find a solution.
IK_FAIL = "IK Fail"
#: Graph planner failed to find a solution.
GRAPH_FAIL = "Graph Fail"
#: Trajectory optimization failed to find a solution.
TRAJOPT_FAIL = "TrajOpt Fail"
#: Finetune Trajectory optimization failed to find a solution.
FINETUNE_TRAJOPT_FAIL = "Finetune TrajOpt Fail"
#: Invalid query was given. The start state is either out of joint limits, in collision with
#: world, or in self-collision.
INVALID_QUERY = "Invalid Query"
#: Motion generation query was successful.
SUCCESS = "Success"
#: Motion generation was not attempted.
NOT_ATTEMPTED = "Not Attempted"
@dataclass @dataclass
class MotionGenResult: class MotionGenResult:
"""Result obtained from motion generation.""" """Result obtained from motion generation."""
#: success tensor with index referring to the batch index. #: success tensor with index referring to the batch index.
success: Optional[T_BValue_bool] = None success: Optional[torch.Tensor] = None
#: returns true if the start state is not satisfying constraints (e.g., in collision) #: returns true if the start state is not satisfying constraints (e.g., in collision)
valid_query: bool = True valid_query: bool = True
@@ -672,7 +796,7 @@ class MotionGenResult:
debug_info: Any = None debug_info: Any = None
#: status of motion generation query. returns [IK Fail, Graph Fail, TrajOpt Fail]. #: status of motion generation query. returns [IK Fail, Graph Fail, TrajOpt Fail].
status: Optional[str] = None status: Optional[Union[MotionGenStatus, str]] = None
#: number of attempts used before returning a solution. #: number of attempts used before returning a solution.
attempts: int = 1 attempts: int = 1
@@ -818,63 +942,6 @@ class MotionGenResult:
return self.optimized_dt * (self.optimized_plan.position.shape[-2] - 1 - 2 - 1) return self.optimized_dt * (self.optimized_plan.position.shape[-2] - 1 - 2 - 1)
@dataclass
class MotionGenPlanConfig:
enable_graph: bool = False
enable_opt: bool = True
use_nn_ik_seed: bool = False
need_graph_success: bool = False
max_attempts: int = 60
timeout: float = 10.0
enable_graph_attempt: Optional[int] = 3
disable_graph_attempt: Optional[int] = None
ik_fail_return: Optional[int] = None
partial_ik_opt: bool = False
num_ik_seeds: Optional[int] = None
num_graph_seeds: Optional[int] = None
num_trajopt_seeds: Optional[int] = None
success_ratio: float = 1
fail_on_invalid_query: bool = True
#: enables retiming trajectory optimization, useful for getting low jerk trajectories.
enable_finetune_trajopt: bool = True
parallel_finetune: bool = True
#: use start config as regularization
use_start_state_as_retract: bool = True
pose_cost_metric: Optional[PoseCostMetric] = None
def __post_init__(self):
if not self.enable_opt and not self.enable_graph:
log_error("Graph search and Optimization are both disabled, enable one")
def clone(self) -> MotionGenPlanConfig:
return MotionGenPlanConfig(
enable_graph=self.enable_graph,
enable_opt=self.enable_opt,
use_nn_ik_seed=self.use_nn_ik_seed,
need_graph_success=self.need_graph_success,
max_attempts=self.max_attempts,
timeout=self.timeout,
enable_graph_attempt=self.enable_graph_attempt,
disable_graph_attempt=self.disable_graph_attempt,
ik_fail_return=self.ik_fail_return,
partial_ik_opt=self.partial_ik_opt,
num_ik_seeds=self.num_ik_seeds,
num_graph_seeds=self.num_graph_seeds,
num_trajopt_seeds=self.num_trajopt_seeds,
success_ratio=self.success_ratio,
fail_on_invalid_query=self.fail_on_invalid_query,
enable_finetune_trajopt=self.enable_finetune_trajopt,
parallel_finetune=self.parallel_finetune,
use_start_state_as_retract=self.use_start_state_as_retract,
pose_cost_metric=(
None if self.pose_cost_metric is None else self.pose_cost_metric.clone()
),
)
class MotionGen(MotionGenConfig): class MotionGen(MotionGenConfig):
def __init__(self, config: MotionGenConfig): def __init__(self, config: MotionGenConfig):
super().__init__(**vars(config)) super().__init__(**vars(config))
@@ -1092,7 +1159,6 @@ class MotionGen(MotionGenConfig):
link_poses: List[Pose] = None, link_poses: List[Pose] = None,
): ):
start_time = time.time() start_time = time.time()
if plan_config.pose_cost_metric is not None: if plan_config.pose_cost_metric is not None:
valid_query = self.update_pose_cost_metric( valid_query = self.update_pose_cost_metric(
plan_config.pose_cost_metric, start_state, goal_pose plan_config.pose_cost_metric, start_state, goal_pose
@@ -1120,7 +1186,8 @@ class MotionGen(MotionGenConfig):
"trajopt_attempts": 0, "trajopt_attempts": 0,
} }
best_status = 0 # 0== None, 1==IK Fail, 2== Graph Fail, 3==Opt Fail best_status = 0 # 0== None, 1==IK Fail, 2== Graph Fail, 3==Opt Fail
if plan_config.finetune_dt_scale is None:
plan_config.finetune_dt_scale = self.finetune_dt_scale
for n in range(plan_config.max_attempts): for n in range(plan_config.max_attempts):
log_info("MG Iter: " + str(n)) log_info("MG Iter: " + str(n))
result = self._plan_from_solve_state( result = self._plan_from_solve_state(
@@ -1145,16 +1212,22 @@ class MotionGen(MotionGenConfig):
break break
if result.success[0].item(): if result.success[0].item():
break break
if result.status == "Finetune Opt Fail":
plan_config.finetune_dt_scale *= (
plan_config.finetune_dt_decay**plan_config.finetune_attempts
)
plan_config.finetune_dt_scale = min(plan_config.finetune_dt_scale, 1.25)
if plan_config.enable_graph_attempt is not None and ( if plan_config.enable_graph_attempt is not None and (
n >= plan_config.enable_graph_attempt - 1 n >= plan_config.enable_graph_attempt - 1
and result.status == "Opt Fail" and result.status in ["Opt Fail", "Finetune Opt Fail"]
and not plan_config.enable_graph and not plan_config.enable_graph
): ):
plan_config.enable_graph = True plan_config.enable_graph = True
plan_config.partial_ik_opt = False plan_config.partial_ik_opt = False
if plan_config.disable_graph_attempt is not None and ( if plan_config.disable_graph_attempt is not None and (
n >= plan_config.disable_graph_attempt - 1 n >= plan_config.disable_graph_attempt - 1
and result.status in ["Opt Fail", "Graph Fail"] and result.status in ["Opt Fail", "Graph Fail", "Finetune Opt Fail"]
and not force_graph and not force_graph
): ):
plan_config.enable_graph = False plan_config.enable_graph = False
@@ -1192,7 +1265,7 @@ class MotionGen(MotionGenConfig):
plan_config: MotionGenPlanConfig = MotionGenPlanConfig(), plan_config: MotionGenPlanConfig = MotionGenPlanConfig(),
): ):
start_time = time.time() start_time = time.time()
goal_pose = goal_pose.clone()
if plan_config.pose_cost_metric is not None: if plan_config.pose_cost_metric is not None:
valid_query = self.update_pose_cost_metric( valid_query = self.update_pose_cost_metric(
plan_config.pose_cost_metric, start_state, goal_pose plan_config.pose_cost_metric, start_state, goal_pose
@@ -1646,10 +1719,18 @@ class MotionGen(MotionGenConfig):
if plan_config.parallel_finetune: if plan_config.parallel_finetune:
opt_dt = torch.min(opt_dt[traj_result.success]) opt_dt = torch.min(opt_dt[traj_result.success])
seed_override = solve_state.num_trajopt_seeds * self.noisy_trajopt_seeds seed_override = solve_state.num_trajopt_seeds * self.noisy_trajopt_seeds
finetune_time = 0
for k in range(plan_config.finetune_attempts):
newton_iters = None
scaled_dt = torch.clamp( scaled_dt = torch.clamp(
opt_dt * self.finetune_dt_scale, opt_dt
* plan_config.finetune_dt_scale
* (plan_config.finetune_dt_decay ** (k)),
self.trajopt_solver.interpolation_dt, self.trajopt_solver.interpolation_dt,
) )
if self.optimize_dt:
self.finetune_trajopt_solver.update_solver_dt(scaled_dt.item()) self.finetune_trajopt_solver.update_solver_dt(scaled_dt.item())
traj_result = self._solve_trajopt_from_solve_state( traj_result = self._solve_trajopt_from_solve_state(
@@ -1658,7 +1739,15 @@ class MotionGen(MotionGenConfig):
seed_traj, seed_traj,
trajopt_instance=self.finetune_trajopt_solver, trajopt_instance=self.finetune_trajopt_solver,
num_seeds_override=seed_override, num_seeds_override=seed_override,
newton_iters=newton_iters,
) )
finetune_time += traj_result.solve_time
if torch.count_nonzero(traj_result.success) > 0:
break
seed_traj = traj_result.optimized_seeds.detach().clone()
newton_iters = 4
traj_result.solve_time = finetune_time
result.finetune_time = traj_result.solve_time result.finetune_time = traj_result.solve_time
@@ -1667,13 +1756,15 @@ class MotionGen(MotionGenConfig):
result.debug_info["finetune_trajopt_result"] = traj_result result.debug_info["finetune_trajopt_result"] = traj_result
elif plan_config.enable_finetune_trajopt: elif plan_config.enable_finetune_trajopt:
traj_result.success = traj_result.success[0:1] traj_result.success = traj_result.success[0:1]
# if torch.count_nonzero(result.success) == 0:
result.status = "Opt Fail"
result.solve_time += traj_result.solve_time + result.finetune_time result.solve_time += traj_result.solve_time + result.finetune_time
result.trajopt_time = traj_result.solve_time result.trajopt_time = traj_result.solve_time
result.trajopt_attempts = 1 result.trajopt_attempts = 1
result.success = traj_result.success result.success = traj_result.success
if torch.count_nonzero(result.success) == 0: if plan_config.enable_finetune_trajopt and torch.count_nonzero(result.success) == 0:
result.status = "Opt Fail" result.status = "Finetune Opt Fail"
result.interpolated_plan = traj_result.interpolated_solution.trim_trajectory( result.interpolated_plan = traj_result.interpolated_solution.trim_trajectory(
0, traj_result.path_buffer_last_tstep[0] 0, traj_result.path_buffer_last_tstep[0]
@@ -1696,15 +1787,17 @@ class MotionGen(MotionGenConfig):
) -> MotionGenResult: ) -> MotionGenResult:
trajopt_seed_traj = None trajopt_seed_traj = None
trajopt_seed_success = None trajopt_seed_success = None
trajopt_newton_iters = None trajopt_newton_iters = self.js_trajopt_solver.newton_iters
graph_success = 0 graph_success = 0
if len(start_state.shape) == 1: if len(start_state.shape) == 1:
log_error("Joint state should be not a vector (dof) should be (bxdof)") log_error("Joint state should be not a vector (dof) should be (bxdof)")
result = MotionGenResult(cspace_error=torch.zeros((1), device=self.tensor_args.device)) result = MotionGenResult(cspace_error=torch.zeros((1), device=self.tensor_args.device))
if self.store_debug_in_result:
result.debug_info = {}
# do graph search: # do graph search:
if plan_config.enable_graph: if plan_config.enable_graph and False:
start_config = torch.zeros( start_config = torch.zeros(
(solve_state.num_graph_seeds, self.js_trajopt_solver.dof), (solve_state.num_graph_seeds, self.js_trajopt_solver.dof),
device=self.tensor_args.device, device=self.tensor_args.device,
@@ -1782,7 +1875,6 @@ class MotionGen(MotionGenConfig):
# do trajopt: # do trajopt:
if plan_config.enable_opt: if plan_config.enable_opt:
with profiler.record_function("motion_gen/setup_trajopt_seeds"): with profiler.record_function("motion_gen/setup_trajopt_seeds"):
# self._trajopt_goal_config[:, :ik_success] = goal_config
goal = Goal( goal = Goal(
current_state=start_state, current_state=start_state,
@@ -1815,12 +1907,16 @@ class MotionGen(MotionGenConfig):
batch_mode=False, batch_mode=False,
seed_success=trajopt_seed_success, seed_success=trajopt_seed_success,
) )
trajopt_seed_traj = trajopt_seed_traj.view( trajopt_seed_traj = (
trajopt_seed_traj.view(
self.js_trajopt_solver.num_seeds * 1, self.js_trajopt_solver.num_seeds * 1,
1, 1,
self.trajopt_solver.action_horizon, self.trajopt_solver.action_horizon,
self._dof, self._dof,
).contiguous() )
.contiguous()
.clone()
)
if plan_config.enable_finetune_trajopt: if plan_config.enable_finetune_trajopt:
og_value = self.trajopt_solver.interpolation_type og_value = self.trajopt_solver.interpolation_type
self.js_trajopt_solver.interpolation_type = InterpolateType.LINEAR_CUDA self.js_trajopt_solver.interpolation_type = InterpolateType.LINEAR_CUDA
@@ -1830,8 +1926,8 @@ class MotionGen(MotionGenConfig):
goal, goal,
solve_state, solve_state,
trajopt_seed_traj, trajopt_seed_traj,
num_seeds_override=solve_state.num_trajopt_seeds * 1, num_seeds_override=solve_state.num_trajopt_seeds,
newton_iters=trajopt_newton_iters, newton_iters=trajopt_newton_iters + 2,
return_all_solutions=plan_config.enable_finetune_trajopt, return_all_solutions=plan_config.enable_finetune_trajopt,
trajopt_instance=self.js_trajopt_solver, trajopt_instance=self.js_trajopt_solver,
) )
@@ -1853,15 +1949,15 @@ class MotionGen(MotionGenConfig):
torch.max(traj_result.optimized_dt[traj_result.success]), torch.max(traj_result.optimized_dt[traj_result.success]),
self.trajopt_solver.interpolation_dt, self.trajopt_solver.interpolation_dt,
) )
og_dt = self.js_trajopt_solver.solver_dt og_dt = self.js_trajopt_solver.solver_dt.clone()
self.js_trajopt_solver.update_solver_dt(scaled_dt.item()) self.js_trajopt_solver.update_solver_dt(scaled_dt.item())
traj_result = self._solve_trajopt_from_solve_state( traj_result = self._solve_trajopt_from_solve_state(
goal, goal,
solve_state, solve_state,
seed_traj, seed_traj,
trajopt_instance=self.js_trajopt_solver, trajopt_instance=self.js_trajopt_solver,
num_seeds_override=solve_state.num_trajopt_seeds * self.noisy_trajopt_seeds, num_seeds_override=solve_state.num_trajopt_seeds,
newton_iters=trajopt_newton_iters + 4,
) )
self.js_trajopt_solver.update_solver_dt(og_dt) self.js_trajopt_solver.update_solver_dt(og_dt)
@@ -2281,6 +2377,7 @@ class MotionGen(MotionGenConfig):
joint_names=self.rollout_fn.joint_names, joint_names=self.rollout_fn.joint_names,
).repeat_seeds(batch) ).repeat_seeds(batch)
state = self.rollout_fn.compute_kinematics(start_state) state = self.rollout_fn.compute_kinematics(start_state)
link_poses = state.link_pose
if n_goalset == -1: if n_goalset == -1:
retract_pose = Pose(state.ee_pos_seq, quaternion=state.ee_quat_seq) retract_pose = Pose(state.ee_pos_seq, quaternion=state.ee_quat_seq)
@@ -2355,6 +2452,7 @@ class MotionGen(MotionGenConfig):
"graph_time": 0, "graph_time": 0,
"trajopt_time": 0, "trajopt_time": 0,
"trajopt_attempts": 0, "trajopt_attempts": 0,
"finetune_time": 0,
} }
result = None result = None
goal = Goal(goal_state=goal_state, current_state=start_state) goal = Goal(goal_state=goal_state, current_state=start_state)
@@ -2367,23 +2465,36 @@ class MotionGen(MotionGenConfig):
n_envs=1, n_envs=1,
n_goalset=1, n_goalset=1,
) )
force_graph = plan_config.enable_graph
for n in range(plan_config.max_attempts): for n in range(plan_config.max_attempts):
traj_result = self._plan_js_from_solve_state( result = self._plan_js_from_solve_state(
solve_state, start_state, goal_state, plan_config=plan_config solve_state, start_state, goal_state, plan_config=plan_config
) )
time_dict["trajopt_time"] += traj_result.solve_time time_dict["trajopt_time"] += result.solve_time
time_dict["graph_time"] += result.graph_time
time_dict["finetune_time"] += result.finetune_time
time_dict["trajopt_attempts"] = n time_dict["trajopt_attempts"] = n
if plan_config.enable_graph_attempt is not None and (
n >= plan_config.enable_graph_attempt - 1 and not plan_config.enable_graph
):
plan_config.enable_graph = True
if plan_config.disable_graph_attempt is not None and (
n >= plan_config.disable_graph_attempt - 1 and not force_graph
):
plan_config.enable_graph = False
if result is None: if result.success.item():
result = traj_result break
if not result.valid_query:
if traj_result.success.item():
break break
result.solve_time = time_dict["trajopt_time"] result.graph_time = time_dict["graph_time"]
result.finetune_time = time_dict["finetune_time"]
result.trajopt_time = time_dict["trajopt_time"]
result.solve_time = result.trajopt_time + result.graph_time + result.finetune_time
result.total_time = result.solve_time result.total_time = result.solve_time
if self.store_debug_in_result: result.attempts = n
result.debug_info = {"trajopt_result": traj_result}
return result return result

View File

@@ -31,17 +31,14 @@ from curobo.opt.particle.parallel_es import ParallelES, ParallelESConfig
from curobo.opt.particle.parallel_mppi import ParallelMPPI, ParallelMPPIConfig from curobo.opt.particle.parallel_mppi import ParallelMPPI, ParallelMPPIConfig
from curobo.rollout.arm_reacher import ArmReacher, ArmReacherConfig from curobo.rollout.arm_reacher import ArmReacher, ArmReacherConfig
from curobo.rollout.cost.pose_cost import PoseCostMetric from curobo.rollout.cost.pose_cost import PoseCostMetric
from curobo.rollout.dynamics_model.integration_utils import ( from curobo.rollout.dynamics_model.integration_utils import interpolate_kernel
action_interpolate_kernel,
interpolate_kernel,
)
from curobo.rollout.rollout_base import Goal, RolloutBase, RolloutMetrics from curobo.rollout.rollout_base import Goal, RolloutBase, RolloutMetrics
from curobo.types.base import TensorDeviceType from curobo.types.base import TensorDeviceType
from curobo.types.robot import JointState, RobotConfig from curobo.types.robot import JointState, RobotConfig
from curobo.types.tensor import T_BDOF, T_DOF, T_BValue_bool, T_BValue_float from curobo.types.tensor import T_BDOF, T_DOF, T_BValue_bool, T_BValue_float
from curobo.util.helpers import list_idx_if_not_none from curobo.util.helpers import list_idx_if_not_none
from curobo.util.logger import log_error, log_info, log_warn from curobo.util.logger import log_error, log_info, log_warn
from curobo.util.torch_utils import get_torch_jit_decorator, is_torch_compile_available from curobo.util.torch_utils import get_torch_jit_decorator
from curobo.util.trajectory import ( from curobo.util.trajectory import (
InterpolateType, InterpolateType,
calculate_dt_no_clamp, calculate_dt_no_clamp,
@@ -69,7 +66,7 @@ class TrajOptSolverConfig:
num_seeds: int = 1 num_seeds: int = 1
bias_node: Optional[T_DOF] = None bias_node: Optional[T_DOF] = None
interpolation_dt: float = 0.01 interpolation_dt: float = 0.01
traj_evaluator_config: TrajEvaluatorConfig = TrajEvaluatorConfig() traj_evaluator_config: Optional[TrajEvaluatorConfig] = None
traj_evaluator: Optional[TrajEvaluator] = None traj_evaluator: Optional[TrajEvaluator] = None
evaluate_interpolated_trajectory: bool = True evaluate_interpolated_trajectory: bool = True
cspace_threshold: float = 0.1 cspace_threshold: float = 0.1
@@ -109,7 +106,7 @@ class TrajOptSolverConfig:
seed_ratio: Dict[str, int] = {"linear": 1.0, "bias": 0.0, "start": 0.0, "end": 0.0}, seed_ratio: Dict[str, int] = {"linear": 1.0, "bias": 0.0, "start": 0.0, "end": 0.0},
use_particle_opt: bool = True, use_particle_opt: bool = True,
collision_checker_type: Optional[CollisionCheckerType] = CollisionCheckerType.MESH, collision_checker_type: Optional[CollisionCheckerType] = CollisionCheckerType.MESH,
traj_evaluator_config: TrajEvaluatorConfig = TrajEvaluatorConfig(), traj_evaluator_config: Optional[TrajEvaluatorConfig] = None,
traj_evaluator: Optional[TrajEvaluator] = None, traj_evaluator: Optional[TrajEvaluator] = None,
minimize_jerk: bool = True, minimize_jerk: bool = True,
use_gradient_descent: bool = False, use_gradient_descent: bool = False,
@@ -172,7 +169,6 @@ class TrajOptSolverConfig:
config_data["cost"]["pose_cfg"]["project_distance"] = project_pose_to_goal_frame config_data["cost"]["pose_cfg"]["project_distance"] = project_pose_to_goal_frame
grad_config_data["cost"]["pose_cfg"]["project_distance"] = project_pose_to_goal_frame grad_config_data["cost"]["pose_cfg"]["project_distance"] = project_pose_to_goal_frame
config_data["model"]["horizon"] = traj_tsteps config_data["model"]["horizon"] = traj_tsteps
grad_config_data["model"]["horizon"] = traj_tsteps grad_config_data["model"]["horizon"] = traj_tsteps
if minimize_jerk is not None: if minimize_jerk is not None:
@@ -325,6 +321,12 @@ class TrajOptSolverConfig:
sync_cuda_time=sync_cuda_time, sync_cuda_time=sync_cuda_time,
) )
trajopt = WrapBase(cfg) trajopt = WrapBase(cfg)
if traj_evaluator_config is None:
traj_evaluator_config = TrajEvaluatorConfig.from_basic(
min_dt=interpolation_dt,
dof=robot_cfg.kinematics.dof,
tensor_args=tensor_args,
)
trajopt_cfg = TrajOptSolverConfig( trajopt_cfg = TrajOptSolverConfig(
robot_config=robot_cfg, robot_config=robot_cfg,
rollout_fn=aux_rollout, rollout_fn=aux_rollout,
@@ -375,6 +377,7 @@ class TrajResult(Sequence):
raw_solution: Optional[JointState] = None raw_solution: Optional[JointState] = None
raw_action: Optional[torch.Tensor] = None raw_action: Optional[torch.Tensor] = None
goalset_index: Optional[torch.Tensor] = None goalset_index: Optional[torch.Tensor] = None
optimized_seeds: Optional[torch.Tensor] = None
def __getitem__(self, idx): def __getitem__(self, idx):
# position_error = rotation_error = cspace_error = path_buffer_last_tstep = None # position_error = rotation_error = cspace_error = path_buffer_last_tstep = None
@@ -405,6 +408,7 @@ class TrajResult(Sequence):
rotation_error=idx_vals[4], rotation_error=idx_vals[4],
cspace_error=idx_vals[5], cspace_error=idx_vals[5],
goalset_index=idx_vals[6], goalset_index=idx_vals[6],
optimized_seeds=self.optimized_seeds,
) )
def __len__(self): def __len__(self):
@@ -918,6 +922,7 @@ class TrajOptSolver(TrajOptSolverConfig):
raw_solution=result.action, raw_solution=result.action,
raw_action=result.raw_action, raw_action=result.raw_action,
goalset_index=result.metrics.goalset_index, goalset_index=result.metrics.goalset_index,
optimized_seeds=result.raw_action,
) )
else: else:
# get path length: # get path length:
@@ -938,8 +943,6 @@ class TrajOptSolver(TrajOptSolverConfig):
) )
with profiler.record_function("trajopt/best_select"): with profiler.record_function("trajopt/best_select"):
if True: # not get_torch_jit_decorator() == torch.jit.script:
# This only works if torch compile is available:
( (
idx, idx,
position_error, position_error,
@@ -972,45 +975,6 @@ class TrajOptSolver(TrajOptSolverConfig):
best_raw_action = result.raw_action[idx] best_raw_action = result.raw_action[idx]
interpolated_traj = interpolated_trajs[idx] interpolated_traj = interpolated_trajs[idx]
else:
success[~smooth_label] = False
# get the best solution:
if result.metrics.pose_error is not None:
convergence_error = result.metrics.pose_error[..., -1]
elif result.metrics.cspace_error is not None:
convergence_error = result.metrics.cspace_error[..., -1]
else:
raise ValueError(
"convergence check requires either goal_pose or goal_state"
)
running_cost = torch.mean(result.metrics.cost, dim=-1) * 0.0001
error = convergence_error + smooth_cost + running_cost
error[~success] += 10000.0
if batch_mode:
idx = torch.argmin(error.view(goal.batch, num_seeds), dim=-1)
idx = idx + num_seeds * self._col
last_tstep = [last_tstep[i] for i in idx]
success = success[idx]
else:
idx = torch.argmin(error, dim=0)
last_tstep = [last_tstep[idx.item()]]
success = success[idx : idx + 1]
best_act_seq = result.action[idx]
best_raw_action = result.raw_action[idx]
interpolated_traj = interpolated_trajs[idx]
goalset_index = position_error = rotation_error = cspace_error = None
if result.metrics.position_error is not None:
position_error = result.metrics.position_error[idx, -1]
if result.metrics.rotation_error is not None:
rotation_error = result.metrics.rotation_error[idx, -1]
if result.metrics.cspace_error is not None:
cspace_error = result.metrics.cspace_error[idx, -1]
if result.metrics.goalset_index is not None:
goalset_index = result.metrics.goalset_index[idx, -1]
opt_dt = opt_dt[idx]
if self.sync_cuda_time: if self.sync_cuda_time:
torch.cuda.synchronize() torch.cuda.synchronize()
if len(best_act_seq.shape) == 3: if len(best_act_seq.shape) == 3:
@@ -1046,6 +1010,7 @@ class TrajOptSolver(TrajOptSolverConfig):
raw_solution=best_act_seq, raw_solution=best_act_seq,
raw_action=best_raw_action, raw_action=best_raw_action,
goalset_index=goalset_index, goalset_index=goalset_index,
optimized_seeds=result.raw_action,
) )
return traj_result return traj_result
@@ -1307,6 +1272,10 @@ class TrajOptSolver(TrajOptSolverConfig):
if isinstance(rollout, ArmReacher) if isinstance(rollout, ArmReacher)
] ]
@property
def newton_iters(self):
return self._og_newton_iters
@get_torch_jit_decorator() @get_torch_jit_decorator()
def jit_feasible_success( def jit_feasible_success(

View File

@@ -8,22 +8,25 @@
# without an express license agreement from NVIDIA CORPORATION or # without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited. # its affiliates is strictly prohibited.
# #
""" Module contains custom types and dataclasses used across reacher solvers. """
from __future__ import annotations from __future__ import annotations
# Standard Library # Standard Library
from dataclasses import dataclass from dataclasses import dataclass
from enum import Enum from enum import Enum
from typing import Dict, List, Optional, Union from typing import Dict, List, Optional, Tuple, Union
# CuRobo # CuRobo
from curobo.rollout.rollout_base import Goal from curobo.rollout.rollout_base import Goal
from curobo.types.base import TensorDeviceType from curobo.types.base import TensorDeviceType
from curobo.types.math import Pose from curobo.types.math import Pose
from curobo.types.robot import JointState from curobo.types.robot import JointState
from curobo.types.tensor import T_BDOF
class ReacherSolveType(Enum): class ReacherSolveType(Enum):
# TODO: how to differentiate between goal pose and goal config? """Enum for different types of problems solved with reacher solvers."""
SINGLE = 0 SINGLE = 0
GOALSET = 1 GOALSET = 1
BATCH = 2 BATCH = 2
@@ -34,20 +37,46 @@ class ReacherSolveType(Enum):
@dataclass @dataclass
class ReacherSolveState: class ReacherSolveState:
"""Dataclass for storing the current problem type of a reacher solver."""
#: Type of problem solved by the reacher solver.
solve_type: ReacherSolveType solve_type: ReacherSolveType
#: Number of problems in the batch.
batch_size: int batch_size: int
#: Number of environments in the batch.
n_envs: int n_envs: int
#: Number of goals per problem. Only valid for goalset problems.
n_goalset: int = 1 n_goalset: int = 1
#: Flag to indicate if the problems use different world environments in the batch.
batch_env: bool = False batch_env: bool = False
#: Flag to indicate if the problems use different retract configurations in the batch.
batch_retract: bool = False batch_retract: bool = False
#: Flag to indicate if there is more than 1 problem to be solved.
batch_mode: bool = False batch_mode: bool = False
#: Number of seeds for each problem.
num_seeds: Optional[int] = None num_seeds: Optional[int] = None
#: Number of seeds for inverse kinematics problems.
num_ik_seeds: Optional[int] = None num_ik_seeds: Optional[int] = None
#: Number of seeds for graph search problems.
num_graph_seeds: Optional[int] = None num_graph_seeds: Optional[int] = None
#: Number of seeds for trajectory optimization problems.
num_trajopt_seeds: Optional[int] = None num_trajopt_seeds: Optional[int] = None
#: Number of seeds for model predictive control problems.
num_mpc_seeds: Optional[int] = None num_mpc_seeds: Optional[int] = None
def __post_init__(self): def __post_init__(self):
"""Post init method to set default flags based on input values."""
if self.n_envs == 1: if self.n_envs == 1:
self.batch_env = False self.batch_env = False
else: else:
@@ -63,7 +92,8 @@ class ReacherSolveState:
if self.num_seeds is None: if self.num_seeds is None:
self.num_seeds = self.num_mpc_seeds self.num_seeds = self.num_mpc_seeds
def clone(self): def clone(self) -> ReacherSolveState:
"""Method to create a deep copy of the current reacher solve state."""
return ReacherSolveState( return ReacherSolveState(
solve_type=self.solve_type, solve_type=self.solve_type,
n_envs=self.n_envs, n_envs=self.n_envs,
@@ -79,10 +109,12 @@ class ReacherSolveState:
num_mpc_seeds=self.num_mpc_seeds, num_mpc_seeds=self.num_mpc_seeds,
) )
def get_batch_size(self): def get_batch_size(self) -> int:
"""Method to get total number of optimization problems in the batch including seeds."""
return self.num_seeds * self.batch_size return self.num_seeds * self.batch_size
def get_ik_batch_size(self): def get_ik_batch_size(self) -> int:
"""Method to get total number of IK problems in the batch including seeds."""
return self.num_ik_seeds * self.batch_size return self.num_ik_seeds * self.batch_size
def create_goal_buffer( def create_goal_buffer(
@@ -92,8 +124,24 @@ class ReacherSolveState:
retract_config: Optional[T_BDOF] = None, retract_config: Optional[T_BDOF] = None,
link_poses: Optional[Dict[str, Pose]] = None, link_poses: Optional[Dict[str, Pose]] = None,
tensor_args: TensorDeviceType = TensorDeviceType(), tensor_args: TensorDeviceType = TensorDeviceType(),
): ) -> Goal:
# TODO: Refactor to account for num_ik_seeds or num_trajopt_seeds """Method to create a goal buffer from goal pose and other problem targets.
Args:
goal_pose: Pose to reach with the end effector.
goal_state: Joint configuration to reach. If None, the goal is to reach the pose.
retract_config: Joint configuration to use for L2 regularization. If None,
`retract_config` from robot configuration file is used. An alternative value is to
use the start state as the retract configuration.
link_poses: Dictionary of link poses to reach. This is only required for multi-link
pose reaching, where the goal is to reach multiple poses with different links.
tensor_args: Device and floating precision.
Returns:
Goal buffer with the goal pose, goal state, retract state, and link poses.
"""
# NOTE: Refactor to account for num_ik_seeds or num_trajopt_seeds
batch_retract = True batch_retract = True
if retract_config is None or retract_config.shape[0] != goal_pose.batch: if retract_config is None or retract_config.shape[0] != goal_pose.batch:
batch_retract = False batch_retract = False
@@ -122,7 +170,28 @@ class ReacherSolveState:
current_solve_state: Optional[ReacherSolveState] = None, current_solve_state: Optional[ReacherSolveState] = None,
current_goal_buffer: Optional[Goal] = None, current_goal_buffer: Optional[Goal] = None,
tensor_args: TensorDeviceType = TensorDeviceType(), tensor_args: TensorDeviceType = TensorDeviceType(),
): ) -> Tuple[ReacherSolveState, Goal, bool]:
"""Method to update the goal buffer with new goal pose and other problem targets.
Args:
goal_pose: Pose to reach with the end effector.
goal_state: Joint configuration to reach. If None, the goal is to reach the pose.
retract_config: Joint configuration to use for L2 regularization. If None,
`retract_config` from robot configuration file is used. An alternative value is to
use the start state as the retract configuration.
link_poses: Dictionary of link poses to reach. This is only required for multi-link
pose reaching, where the goal is to reach multiple poses with different links. To
use this,
:var:`curobo.cuda_robot_model.cuda_robot_model.CudaRobotModelConfig.link_names`
should have the link names to reach.
current_solve_state: Current reacher solve state.
current_goal_buffer: Current goal buffer.
tensor_args: Device and floating precision.
Returns:
Tuple of updated reacher solve state, goal buffer, and a flag indicating if the goal
buffer reference has changed which is useful to break existing CUDA Graphs.
"""
solve_state = self solve_state = self
# create goal buffer by comparing to existing solve type # create goal buffer by comparing to existing solve type
update_reference = False update_reference = False
@@ -167,7 +236,19 @@ class ReacherSolveState:
current_solve_state: Optional[ReacherSolveState] = None, current_solve_state: Optional[ReacherSolveState] = None,
current_goal_buffer: Optional[Goal] = None, current_goal_buffer: Optional[Goal] = None,
tensor_args: TensorDeviceType = TensorDeviceType(), tensor_args: TensorDeviceType = TensorDeviceType(),
): ) -> Tuple[ReacherSolveState, Goal, bool]:
"""Method to update the goal buffer with values from new Rollout goal.
Args:
goal: Rollout goal to update the goal buffer.
current_solve_state: Current reacher solve state.
current_goal_buffer: Current goal buffer.
tensor_args: Device and floating precision.
Returns:
Tuple of updated reacher solve state, goal buffer, and a flag indicating if the goal
buffer reference has changed which is useful to break existing CUDA Graphs.
"""
solve_state = self solve_state = self
update_reference = False update_reference = False
if ( if (
@@ -204,6 +285,8 @@ class ReacherSolveState:
@dataclass @dataclass
class MotionGenSolverState: class MotionGenSolverState:
"""Dataclass for storing the current state of a motion generation solver."""
solve_type: ReacherSolveType solve_type: ReacherSolveType
ik_solve_state: ReacherSolveState ik_solve_state: ReacherSolveState
trajopt_solve_state: ReacherSolveState trajopt_solve_state: ReacherSolveState
@@ -215,6 +298,22 @@ def get_padded_goalset(
current_goal_buffer: Goal, current_goal_buffer: Goal,
new_goal_pose: Pose, new_goal_pose: Pose,
) -> Union[Pose, None]: ) -> Union[Pose, None]:
"""Method to pad number of goals in goalset to match the cached goal buffer.
This allows for creating a goalset problem with large number of goals during the first call,
and subsequent calls can have fewer goals. This function will pad the new goalset with the
first goal to match the cached goal buffer's shape.
Args:
solve_state: New problem's solve state.
current_solve_state: Current solve state.
current_goal_buffer: Current goal buffer.
new_goal_pose: Padded goal pose to match the cached goal buffer's shape.
Returns:
Padded goal pose to match the cached goal buffer's shape. If the new goal can't be padded,
returns None.
"""
if ( if (
current_solve_state.solve_type == ReacherSolveType.GOALSET current_solve_state.solve_type == ReacherSolveType.GOALSET
and solve_state.solve_type == ReacherSolveType.SINGLE and solve_state.solve_type == ReacherSolveType.SINGLE

15
tests/conftest.py Normal file
View File

@@ -0,0 +1,15 @@
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
# Standard Library
import os
os.environ["CUROBO_TORCH_COMPILE_DISABLE"] = str(1)

View File

@@ -80,28 +80,25 @@ def test_motion_gen_lock_js_update():
kin_state = motion_gen_instance.compute_kinematics(start_state) kin_state = motion_gen_instance.compute_kinematics(start_state)
ee_pose = kin_state.ee_pose.clone() ee_pose = kin_state.ee_pose.clone()
# test motion gen: # test motion gen:
plan_start = start_state.clone() plan_start = start_state.clone()
plan_start.position[..., :-2] += 0.1 plan_start.position[..., :-2] += 0.1
result = motion_gen_instance.plan_single(plan_start, ee_pose) result = motion_gen_instance.plan_single(plan_start, ee_pose.clone())
assert result.success.item() assert result.success.item()
lock_js = {"base_x": 1.0, "base_y": 0.0, "base_z": 0.0} lock_js = {"base_x": 2.0, "base_y": 0.0, "base_z": 0.0}
motion_gen_instance.update_locked_joints(lock_js, robot_config) motion_gen_instance.update_locked_joints(lock_js, robot_config)
kin_state_new = motion_gen_instance.compute_kinematics(start_state) kin_state_new = motion_gen_instance.compute_kinematics(start_state)
ee_pose_shift = kin_state_new.ee_pose.clone() ee_pose_shift = kin_state_new.ee_pose.clone()
assert torch.norm(ee_pose.position[..., 0] - ee_pose_shift.position[..., 0]).item() == 1.0 assert 2 - torch.norm(ee_pose.position[..., 0] - ee_pose_shift.position[..., 0]).item() <= 1e-5
assert torch.norm(ee_pose.position[..., 1:] - ee_pose_shift.position[..., 1:]).item() == 0.0 assert torch.norm(ee_pose.position[..., 1:] - ee_pose_shift.position[..., 1:]).item() == 0.0
# test motion gen with new lock state: # test motion gen with new lock state:
result = motion_gen_instance.plan_single(plan_start, ee_pose_shift.clone())
result = motion_gen_instance.plan_single(plan_start, ee_pose_shift)
assert result.success.item() assert result.success.item()
result = motion_gen_instance.plan_single( result = motion_gen_instance.plan_single(
plan_start, ee_pose, MotionGenPlanConfig(max_attempts=3) plan_start, ee_pose.clone(), MotionGenPlanConfig(max_attempts=3)
) )
assert result.success.item() == False assert result.success.item() == False

View File

@@ -109,32 +109,35 @@ def test_batch_goalset_padded(motion_gen_batch):
# run goalset planning # run goalset planning
motion_gen.reset() motion_gen.reset()
retract_cfg = motion_gen.get_retract_config() retract_cfg = motion_gen.get_retract_config().clone()
state = motion_gen.compute_kinematics(JointState.from_position(retract_cfg.view(1, -1))) state = motion_gen.compute_kinematics(JointState.from_position(retract_cfg.view(1, -1)))
goal_pose = Pose( goal_pose = Pose(
state.ee_pos_seq.repeat(3 * 3, 1).view(3, -1, 3).contiguous(), state.ee_pos_seq.repeat(3 * 3, 1).view(3, -1, 3).contiguous(),
quaternion=state.ee_quat_seq.repeat(3 * 3, 1).view(3, -1, 4).contiguous(), quaternion=state.ee_quat_seq.repeat(3 * 3, 1).view(3, -1, 4).contiguous(),
) ).clone()
goal_pose.position[0, 1, 1] = 0.2 goal_pose.position[0, 1, 1] = 0.2
goal_pose.position[1, 0, 1] = 0.2 goal_pose.position[1, 0, 1] = 0.2
goal_pose.position[2, 1, 1] = 0.2 goal_pose.position[2, 1, 1] = 0.2
retract_cfg = motion_gen.get_retract_config().clone()
start_state = JointState.from_position(retract_cfg.view(1, -1) + 0.2).repeat_seeds(3) start_state = JointState.from_position(retract_cfg.view(1, -1) + 0.2).repeat_seeds(3)
m_config = MotionGenPlanConfig(False, True, max_attempts=10, enable_graph_attempt=20) m_config = MotionGenPlanConfig(enable_graph_attempt=100, max_attempts=2)
result = motion_gen.plan_batch_goalset(start_state, goal_pose, m_config.clone()) result = motion_gen.plan_batch_goalset(start_state, goal_pose.clone(), m_config.clone())
# get final solutions: # get final solutions:
assert torch.count_nonzero(result.success) == result.success.shape[0] assert torch.count_nonzero(result.success) == result.success.shape[0]
reached_state = motion_gen.compute_kinematics(result.optimized_plan.trim_trajectory(-1)) reached_state = motion_gen.compute_kinematics(
result.optimized_plan.trim_trajectory(-1).squeeze(1)
)
# #
goal_position = torch.cat( goal_position = torch.cat(
[ [
goal_pose.position[x, result.goalset_index[x], :].unsqueeze(0) goal_pose.position[x, result.goalset_index[x], :].clone().unsqueeze(0)
for x in range(len(result.goalset_index)) for x in range(len(result.goalset_index))
] ]
) )
@@ -161,7 +164,7 @@ def test_batch_goalset_padded(motion_gen_batch):
start_state = JointState.from_position(retract_cfg.view(1, -1) + 0.2).repeat_seeds(3) start_state = JointState.from_position(retract_cfg.view(1, -1) + 0.2).repeat_seeds(3)
result = motion_gen.plan_batch_goalset(start_state, goal_pose, m_config) result = motion_gen.plan_batch_goalset(start_state, goal_pose.clone(), m_config)
# get final solutions: # get final solutions:
assert torch.count_nonzero(result.success) == result.success.shape[0] assert torch.count_nonzero(result.success) == result.success.shape[0]
@@ -192,7 +195,7 @@ def test_batch_goalset_padded(motion_gen_batch):
goal_pose.position[1, 0] -= 0.1 goal_pose.position[1, 0] -= 0.1
result = motion_gen.plan_batch(start_state, goal_pose.clone(), m_config) result = motion_gen.plan_batch(start_state, goal_pose, m_config)
assert torch.count_nonzero(result.success) == 3 assert torch.count_nonzero(result.success) == 3
# get final solutions: # get final solutions:

View File

@@ -0,0 +1,44 @@
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#
# Third Party
import torch
# CuRobo
from curobo.types.robot import JointState
from curobo.wrap.reacher.motion_gen import MotionGen, MotionGenConfig, MotionGenPlanConfig
def test_motion_gen_plan_js():
world_file = "collision_table.yml"
robot_file = "ur5e.yml"
motion_gen_config = MotionGenConfig.load_from_robot_config(
robot_file,
world_file,
trajopt_tsteps=32,
use_cuda_graph=False,
num_trajopt_seeds=4,
fixed_iters_trajopt=True,
evaluate_interpolated_trajectory=True,
)
motion_gen = MotionGen(motion_gen_config)
motion_gen.warmup(warmup_js_trajopt=True)
retract_cfg = motion_gen.get_retract_config()
start_state = JointState.from_position(retract_cfg.view(1, -1).clone())
goal_state = JointState.from_position(retract_cfg.view(1, -1).clone())
goal_state.position[:] = torch.as_tensor(
[1.000, -2.2000, 1.9000, -1.3830, -1.5700, 0.0000], device=motion_gen.tensor_args.device
)
result = motion_gen.plan_single_js(start_state, goal_state, MotionGenPlanConfig(max_attempts=1))
assert result.success.item()

View File

@@ -42,11 +42,12 @@ def test_multi_pose_franka(b_size: int):
world_cfg, world_cfg,
rotation_threshold=0.05, rotation_threshold=0.05,
position_threshold=0.005, position_threshold=0.005,
num_seeds=30, num_seeds=16,
self_collision_check=True, self_collision_check=True,
self_collision_opt=True, self_collision_opt=True,
use_cuda_graph=True, use_cuda_graph=True,
tensor_args=tensor_args, tensor_args=tensor_args,
regularization=False,
) )
ik_solver = IKSolver(ik_config) ik_solver = IKSolver(ik_config)
@@ -60,3 +61,41 @@ def test_multi_pose_franka(b_size: int):
assert ( assert (
torch.count_nonzero(success).item() / b_size >= 0.9 torch.count_nonzero(success).item() / b_size >= 0.9
) # we check if atleast 90% are successful ) # we check if atleast 90% are successful
@pytest.mark.parametrize(
"b_size",
[1, 10, 100],
)
def test_multi_pose_hand(b_size: int):
tensor_args = TensorDeviceType()
world_file = "collision_table.yml"
robot_file = "iiwa_allegro.yml"
robot_data = load_yaml(join_path(get_robot_configs_path(), robot_file))["robot_cfg"]
robot_cfg = RobotConfig.from_dict(robot_data)
world_cfg = WorldConfig.from_dict(load_yaml(join_path(get_world_configs_path(), world_file)))
ik_config = IKSolverConfig.load_from_robot_config(
robot_cfg,
world_cfg,
rotation_threshold=0.05,
position_threshold=0.005,
num_seeds=16,
use_cuda_graph=True,
tensor_args=tensor_args,
regularization=False,
)
ik_solver = IKSolver(ik_config)
q_sample = ik_solver.sample_configs(b_size)
kin_state = ik_solver.fk(q_sample)
link_poses = kin_state.link_pose
goal = Pose(kin_state.ee_position, kin_state.ee_quaternion).clone()
result = ik_solver.solve_batch(goal, link_poses=link_poses)
success = result.success
assert (
torch.count_nonzero(success).item() / b_size >= 0.9
) # we check if atleast 90% are successful