add isaac sim 2023 support and dockerfiles

This commit is contained in:
Balakumar Sundaralingam
2023-11-04 09:32:30 -07:00
parent f2eb5f937a
commit 102c5d6ab2
41 changed files with 1284 additions and 622 deletions

View File

@@ -10,27 +10,5 @@ its affiliates is strictly prohibited.
-->
# Docker Instructions
## Running docker from NGC (Recommended)
1. `sh build_user_docker.sh $UID`
2. `sh start_docker_x86.sh` will start the docker
## Building your own docker image with CuRobo
1. Add default nvidia runtime to enable cuda compilation during docker build:
```
Edit/create the /etc/docker/daemon.json with content:
{
"runtimes": {
"nvidia": {
"path": "/usr/bin/nvidia-container-runtime",
"runtimeArgs": []
}
},
"default-runtime": "nvidia" # ADD this line (the above lines will already exist in your json file)
}
```
2. `sh pull_repos.sh`
3. `bash build_dev_docker.sh`
4. Change the docker image name in `user.dockerfile`
5. `sh build_user_docker.sh`
6. `sh start_docker_x86.sh` will start the docker
Check [Docker Development](https://curobo.org/source/getting_started/5_docker_development.html) for
instructions.

167
docker/aarch64.dockerfile Normal file
View File

@@ -0,0 +1,167 @@
##
## Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
##
## NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
## property and proprietary rights in and to this material, related
## documentation and any modifications thereto. Any use, reproduction,
## disclosure or distribution of this material and related documentation
## without an express license agreement from NVIDIA CORPORATION or
## its affiliates is strictly prohibited.
##

# Dockerfile for running CuRobo on aarch64 (Jetson / L4T) devices.
# Builds on the NVIDIA L4T PyTorch container, compiles ROS Noetic from source
# (no prebuilt Noetic debs exist for this platform), then builds warp, curobo,
# and optionally nvblox inside the image.
FROM nvcr.io/nvidia/l4t-pytorch:r35.1.0-pth1.13-py3 AS l4t_pytorch

# Base shell utilities needed by the rest of the build:
RUN apt-get update &&\
apt-get install -y sudo git bash unattended-upgrades glmark2 &&\
rm -rf /var/lib/apt/lists/*

# Deal with getting tons of debconf messages
# See: https://github.com/phusion/baseimage-docker/issues/58
RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections

# TODO: Don't hardcode timezone setting to Los_Angeles, pull from host computer
# Set timezone info
RUN apt-get update && apt-get install -y \
tzdata \
&& rm -rf /var/lib/apt/lists/* \
&& ln -fs /usr/share/zoneinfo/America/Los_Angeles /etc/localtime \
&& echo "America/Los_Angeles" > /etc/timezone \
&& dpkg-reconfigure -f noninteractive tzdata

# Install apt-get packages necessary for building, downloading, etc
# NOTE: Dockerfile best practices recommends having apt-get update
# and install commands in one line to avoid apt-get caching issues.
# https://docs.docker.com/develop/develop-images/dockerfile_best-practices/#run
RUN apt-get update && apt-get install -y \
curl \
lsb-core \
software-properties-common \
wget \
&& rm -rf /var/lib/apt/lists/*

# Newer git from the git-core PPA, plus compilers and dev headers used by the
# ROS-from-source and nvblox builds below:
RUN add-apt-repository -y ppa:git-core/ppa
RUN apt-get update && apt-get install -y \
build-essential \
cmake \
git \
git-lfs \
iputils-ping \
make \
openssh-server \
openssh-client \
libeigen3-dev \
libssl-dev \
python3-pip \
python3-ipdb \
python3-tk \
python3-wstool \
sudo git bash unattended-upgrades \
apt-utils \
terminator \
&& rm -rf /var/lib/apt/lists/*

ARG ROS_PKG=ros_base # desktop does not work
ENV ROS_DISTRO=noetic
ENV ROS_ROOT=/opt/ros/${ROS_DISTRO}
ENV ROS_PYTHON_VERSION=3
ENV DEBIAN_FRONTEND=noninteractive
WORKDIR /workspace

#
# add the ROS deb repo to the apt sources list
#
RUN apt-get update && \
apt-get install -y --no-install-recommends \
git \
cmake \
build-essential \
curl \
wget \
gnupg2 \
lsb-release \
ca-certificates \
&& rm -rf /var/lib/apt/lists/*
RUN sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main" > /etc/apt/sources.list.d/ros-latest.list'
RUN curl -s https://raw.githubusercontent.com/ros/rosdistro/master/ros.asc | apt-key add -

#
# install bootstrap dependencies
#
RUN apt-get update && \
apt-get install -y --no-install-recommends \
libpython3-dev \
python3-rosdep \
python3-rosinstall-generator \
python3-vcstool \
build-essential && \
rosdep init && \
rosdep update && \
rm -rf /var/lib/apt/lists/*

#
# download/build the ROS source
# (built from source because no prebuilt ${ROS_DISTRO} debs exist for L4T)
#
RUN mkdir ros_catkin_ws && \
cd ros_catkin_ws && \
rosinstall_generator ${ROS_PKG} vision_msgs --rosdistro ${ROS_DISTRO} --deps --tar > ${ROS_DISTRO}-${ROS_PKG}.rosinstall && \
mkdir src && \
vcs import --input ${ROS_DISTRO}-${ROS_PKG}.rosinstall ./src && \
apt-get update && \
rosdep install --from-paths ./src --ignore-packages-from-source --rosdistro ${ROS_DISTRO} --skip-keys python3-pykdl -y && \
python3 ./src/catkin/bin/catkin_make_isolated --install --install-space ${ROS_ROOT} -DCMAKE_BUILD_TYPE=Release && \
rm -rf /var/lib/apt/lists/*

# Python dependencies for curobo and the ROS tooling above:
RUN pip3 install trimesh \
numpy-quaternion \
networkx \
pyyaml \
rospkg \
rosdep \
empy

# warp (https://github.com/NVIDIA/warp) is built from source inside the image.
# Clone it into curobo/docker/pkgs before building this image:
# cd curobo/docker && mkdir pkgs && cd pkgs && git clone https://github.com/NVIDIA/warp.git
# (the compile step — python3 build_lib.py — runs in the RUN line below, not on the host)
#
# copy pkgs directory:
COPY pkgs /pkgs
# build and install warp:
#
RUN cd /pkgs/warp && python3 build_lib.py && pip3 install .
# install curobo:
RUN cd /pkgs && git clone https://github.com/NVlabs/curobo.git
RUN cd /pkgs/curobo && pip3 install . --no-build-isolation

# Optionally install nvblox:
# gtest is compiled from /usr/src because Ubuntu's libgtest-dev ships sources only.
RUN apt-get update && \
apt-get install -y libgoogle-glog-dev libgtest-dev curl libsqlite3-dev && \
cd /usr/src/googletest && cmake . && cmake --build . --target install && \
rm -rf /var/lib/apt/lists/*
RUN cd /pkgs && git clone https://github.com/valtsblukis/nvblox.git && \
cd nvblox && cd nvblox && mkdir build && cd build && \
cmake .. -DPRE_CXX11_ABI_LINKABLE=ON && \
make -j32 && \
make install
RUN cd /pkgs && git clone https://github.com/nvlabs/nvblox_torch.git && \
cd nvblox_torch && \
sh install.sh

View File

@@ -12,21 +12,35 @@
# This script will create a dev docker. Run this script by calling `bash build_dev_docker.sh`
# Make sure you have pulled all required repos into pkgs folder (see pull_repos.sh script)
# If you want to build a isaac sim docker, run this script with `bash build_dev_docker.sh isaac`
# Check architecture to build:
arch=`uname -m`
if [ ${arch} == "x86_64" ]; then
image_tag="x86"
isaac_sim_version=""
if [ $1 == "isaac_sim_2022.2.1" ]; then
echo "Building Isaac Sim docker"
dockerfile="isaac_sim.dockerfile"
image_tag="isaac_sim_2022.2.1"
isaac_sim_version="2022.2.1"
elif [ $1 == "isaac_sim_2023.1.0" ]; then
echo "Building Isaac Sim headless docker"
dockerfile="isaac_sim.dockerfile"
image_tag="isaac_sim_2023.1.0"
isaac_sim_version="2023.1.0"
elif [ ${arch} == "x86" ]; then
echo "Building for X86 Architecture"
dockerfile="x86.dockerfile"
image_tag="x86"
elif [ ${arch} = "aarch64" ]; then
echo "Building for ARM Architecture"
dockerfile="arm64.dockerfile"
dockerfile="aarch64.dockerfile"
image_tag="aarch64"
else
echo "Unknown Architecture, defaulting to " + ${arch}
dockerfile="x86.dockerfile"
echo "Unknown Architecture"
exit
fi
# build docker file:
@@ -43,4 +57,5 @@ fi
# }
#
echo "${dockerfile}"
docker build -t curobo_docker:latest -f ${dockerfile} .
docker build --build-arg ISAAC_SIM_VERSION=${isaac_sim_version} -t curobo_docker:${image_tag} -f ${dockerfile} .

View File

@@ -8,4 +8,8 @@
## without an express license agreement from NVIDIA CORPORATION or
## its affiliates is strictly prohibited.
##
docker build --build-arg USERNAME=$USER --no-cache --build-arg USER_ID=$1 --tag curobo_user_docker:latest -f user.dockerfile .
echo $1
echo $2
docker build --build-arg USERNAME=$USER --no-cache --build-arg USER_ID=$1 --build-arg IMAGE_TAG=$2 -f user.dockerfile --tag curobo_docker:user_$2 .

247
docker/isaac_sim.dockerfile Normal file
View File

@@ -0,0 +1,247 @@
##
## Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
##
## NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
## property and proprietary rights in and to this material, related
## documentation and any modifications thereto. Any use, reproduction,
## disclosure or distribution of this material and related documentation
## without an express license agreement from NVIDIA CORPORATION or
## its affiliates is strictly prohibited.
##

# Dockerfile that embeds NVIDIA Isaac Sim into a cudagl devel base image so
# that curobo's CUDA extensions can be compiled against Isaac Sim's bundled
# python (/isaac-sim/python.sh).

ARG DEBIAN_FRONTEND=noninteractive
ARG BASE_DIST=ubuntu20.04
ARG CUDA_VERSION=11.4.2
ARG ISAAC_SIM_VERSION=2022.2.1

# Stage that only provides the Isaac Sim files for the COPY --from steps below:
FROM nvcr.io/nvidia/isaac-sim:${ISAAC_SIM_VERSION} AS isaac-sim

FROM nvcr.io/nvidia/cudagl:${CUDA_VERSION}-devel-${BASE_DIST}
# this does not work for 2022.2.1
#FROM nvcr.io/nvidia/cuda:${CUDA_VERSION}-cudnn8-devel-${BASE_DIST}

# ARGs declared before FROM are NOT in scope inside a build stage unless
# redeclared here. Without this, ${ISAAC_SIM_VERSION} in ENV OMNI_SERVER below
# would expand to an empty string, and apt would run interactively.
ARG ISAAC_SIM_VERSION
ARG DEBIAN_FRONTEND=noninteractive

LABEL maintainer "User Name"

ARG VULKAN_SDK_VERSION=1.3.224.1

# Deal with getting tons of debconf messages
# See: https://github.com/phusion/baseimage-docker/issues/58
RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections

# add GL if using a cuda docker instead of cudagl:
#RUN apt-get update && apt-get install -y --no-install-recommends \
# pkg-config \
# libglvnd-dev \
# libgl1-mesa-dev \
# libegl1-mesa-dev \
# libgles2-mesa-dev && \
# rm -rf /var/lib/apt/lists/*

# Set timezone info and install build/development tooling in one layer:
RUN apt-get update && apt-get install -y \
tzdata \
software-properties-common \
&& rm -rf /var/lib/apt/lists/* \
&& ln -fs /usr/share/zoneinfo/America/Los_Angeles /etc/localtime \
&& echo "America/Los_Angeles" > /etc/timezone \
&& dpkg-reconfigure -f noninteractive tzdata \
&& add-apt-repository -y ppa:git-core/ppa \
&& apt-get update && apt-get install -y \
curl \
lsb-core \
wget \
build-essential \
cmake \
git \
git-lfs \
iputils-ping \
make \
openssh-server \
openssh-client \
libeigen3-dev \
libssl-dev \
python3-pip \
python3-ipdb \
python3-tk \
python3-wstool \
sudo git bash unattended-upgrades \
apt-utils \
terminator \
&& rm -rf /var/lib/apt/lists/*

# Runtime libraries required by Omniverse Kit / Isaac Sim:
# https://catalog.ngc.nvidia.com/orgs/nvidia/containers/cudagl
RUN apt-get update && apt-get install -y --no-install-recommends \
libatomic1 \
libegl1 \
libglu1-mesa \
libgomp1 \
libsm6 \
libxi6 \
libxrandr2 \
libxt6 \
libfreetype-dev \
libfontconfig1 \
openssl \
libssl1.1 \
wget \
vulkan-utils \
&& apt-get -y autoremove \
&& apt-get clean autoclean \
&& rm -rf /var/lib/apt/lists/*

# Download the Vulkan SDK and extract the headers, loaders, layers and binary utilities
RUN wget -q --show-progress \
--progress=bar:force:noscroll \
https://sdk.lunarg.com/sdk/download/${VULKAN_SDK_VERSION}/linux/vulkansdk-linux-x86_64-${VULKAN_SDK_VERSION}.tar.gz \
-O /tmp/vulkansdk-linux-x86_64-${VULKAN_SDK_VERSION}.tar.gz \
&& echo "Installing Vulkan SDK ${VULKAN_SDK_VERSION}" \
&& mkdir -p /opt/vulkan \
&& tar -xf /tmp/vulkansdk-linux-x86_64-${VULKAN_SDK_VERSION}.tar.gz -C /opt/vulkan \
&& mkdir -p /usr/local/include/ && cp -ra /opt/vulkan/${VULKAN_SDK_VERSION}/x86_64/include/* /usr/local/include/ \
&& mkdir -p /usr/local/lib && cp -ra /opt/vulkan/${VULKAN_SDK_VERSION}/x86_64/lib/* /usr/local/lib/ \
&& cp -a /opt/vulkan/${VULKAN_SDK_VERSION}/x86_64/lib/libVkLayer_*.so /usr/local/lib \
&& mkdir -p /usr/local/share/vulkan/explicit_layer.d \
&& cp /opt/vulkan/${VULKAN_SDK_VERSION}/x86_64/etc/vulkan/explicit_layer.d/VkLayer_*.json /usr/local/share/vulkan/explicit_layer.d \
&& mkdir -p /usr/local/share/vulkan/registry \
&& cp -a /opt/vulkan/${VULKAN_SDK_VERSION}/x86_64/share/vulkan/registry/* /usr/local/share/vulkan/registry \
&& cp -a /opt/vulkan/${VULKAN_SDK_VERSION}/x86_64/bin/* /usr/local/bin \
&& ldconfig \
&& rm /tmp/vulkansdk-linux-x86_64-${VULKAN_SDK_VERSION}.tar.gz && rm -rf /opt/vulkan

# Setup the required capabilities for the container runtime
ENV NVIDIA_VISIBLE_DEVICES=all NVIDIA_DRIVER_CAPABILITIES=all

# Open ports for live streaming (EXPOSE is documentation only; publish at run time)
EXPOSE 47995-48012/udp \
47995-48012/tcp \
49000-49007/udp \
49000-49007/tcp \
49100/tcp \
8011/tcp \
8012/tcp \
8211/tcp \
8899/tcp \
8891/tcp

ENV OMNI_SERVER=http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/${ISAAC_SIM_VERSION}
# ENV OMNI_SERVER omniverse://localhost/NVIDIA/Assets/Isaac/2022.1
# ENV OMNI_USER admin
# ENV OMNI_PASS admin
ENV MIN_DRIVER_VERSION=525.60.11

# Copy Isaac Sim files
COPY --from=isaac-sim /isaac-sim /isaac-sim
RUN mkdir -p /root/.nvidia-omniverse/config
COPY --from=isaac-sim /root/.nvidia-omniverse/config /root/.nvidia-omniverse/config
COPY --from=isaac-sim /etc/vulkan/icd.d/nvidia_icd.json /etc/vulkan/icd.d/nvidia_icd.json
# NOTE(review): this installs the ICD manifest as the implicit-layer manifest;
# the source path was likely meant to be
# /etc/vulkan/implicit_layer.d/nvidia_layers.json — confirm against the
# isaac-sim base image before changing.
COPY --from=isaac-sim /etc/vulkan/icd.d/nvidia_icd.json /etc/vulkan/implicit_layer.d/nvidia_layers.json

WORKDIR /isaac-sim
# CUDA architectures for which curobo's extensions are compiled:
ENV TORCH_CUDA_ARCH_LIST="7.0+PTX"

# create an alias for omniverse python
ENV omni_python='/isaac-sim/python.sh'
# NOTE(review): appends to /.bashrc at the filesystem root (not /root/.bashrc);
# confirm downstream user images actually source this file.
RUN echo "alias omni_python='/isaac-sim/python.sh'" >> /.bashrc

RUN $omni_python -m pip install "robometrics[evaluator] @ git+https://github.com/fishbotics/robometrics.git"

# if you want to use a different version of curobo, create folder as docker/pkgs and put your
# version of curobo there. Then uncomment below line and comment the next line that clones from
# github
# COPY pkgs /pkgs
RUN mkdir /pkgs && cd /pkgs && git clone https://github.com/NVlabs/curobo.git
RUN $omni_python -m pip install ninja wheel tomli
# Fix: the extras list must be one shell word with no internal space —
# the previous ".[dev, isaac_sim]" was split by the shell into two bogus
# pip arguments (".[dev," and "isaac_sim]").
RUN cd /pkgs/curobo && $omni_python -m pip install ".[dev,isaac_sim]" --no-build-isolation

# Optionally install nvblox:
RUN apt-get update && \
apt-get install -y curl tcl && \
rm -rf /var/lib/apt/lists/*
# install gflags and glog statically, instructions from: https://github.com/nvidia-isaac/nvblox/blob/public/docs/redistributable.md
RUN cd /pkgs && git clone https://github.com/sqlite/sqlite.git -b version-3.39.4 && \
cd /pkgs/sqlite && CFLAGS=-fPIC ./configure --prefix=/pkgs/sqlite/install/ && \
make && make install
RUN cd /pkgs && git clone https://github.com/google/glog.git -b v0.4.0 && \
cd glog && \
mkdir build && cd build && \
cmake .. -DCMAKE_POSITION_INDEPENDENT_CODE=ON \
-DCMAKE_INSTALL_PREFIX=/pkgs/glog/install/ \
-DWITH_GFLAGS=OFF -DBUILD_SHARED_LIBS=OFF \
&& make -j8 && make install
RUN cd /pkgs && git clone https://github.com/gflags/gflags.git -b v2.2.2 && \
cd gflags && \
mkdir build && cd build && \
cmake .. -DCMAKE_POSITION_INDEPENDENT_CODE=ON \
-DCMAKE_INSTALL_PREFIX=/pkgs/gflags/install/ \
-DGFLAGS_BUILD_STATIC_LIBS=ON -DGFLAGS=google \
&& make -j8 && make install
RUN cd /pkgs && git clone https://github.com/google/googletest.git -b v1.14.0 && \
cd googletest && mkdir build && cd build && cmake .. && make -j8 && make install
RUN cd /pkgs && git clone https://github.com/valtsblukis/nvblox.git
RUN cd /pkgs/nvblox/nvblox && mkdir build && cd build && \
cmake .. -DPRE_CXX11_ABI_LINKABLE=ON -DBUILD_REDISTRIBUTABLE=ON -DSQLITE3_BASE_PATH="/pkgs/sqlite/install/" -DGLOG_BASE_PATH="/pkgs/glog/install/" -DGFLAGS_BASE_PATH="/pkgs/gflags/install/" && \
make -j32 && \
make install

# install newer cmake and glog for pytorch:
# TODO: use libgoogle from source that was compiled instead.
RUN apt-get update && \
apt-get install -y libgoogle-glog-dev && \
rm -rf /var/lib/apt/lists/*
RUN cd /pkgs && wget https://cmake.org/files/v3.19/cmake-3.19.5.tar.gz && \
tar -xvzf cmake-3.19.5.tar.gz && \
apt update && apt install -y build-essential checkinstall zlib1g-dev libssl-dev && \
cd cmake-3.19.5 && ./bootstrap && \
make -j8 && \
make install && rm -rf /var/lib/apt/lists/*

# cuDNN matching the CUDA 11.4 base (needed by nvblox_torch):
ENV cudnn_version=8.2.4.15
ENV cuda_version=cuda11.4
RUN apt update && apt-get install -y libcudnn8=${cudnn_version}-1+${cuda_version} libcudnn8-dev=${cudnn_version}-1+${cuda_version} && \
rm -rf /var/lib/apt/lists/*
RUN cd /pkgs && git clone https://github.com/nvlabs/nvblox_torch.git && \
cd /pkgs/nvblox_torch && \
sh install_isaac_sim.sh $($omni_python -c 'import torch.utils; print(torch.utils.cmake_prefix_path)') && \
$omni_python -m pip install -e .

# install realsense for nvblox demos:
RUN $omni_python -m pip install pyrealsense2 opencv-python transforms3d

View File

@@ -0,0 +1,49 @@
#!/bin/bash
##
## Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
##
## NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
## property and proprietary rights in and to this material, related
## documentation and any modifications thereto. Any use, reproduction,
## disclosure or distribution of this material and related documentation
## without an express license agreement from NVIDIA CORPORATION or
## its affiliates is strictly prohibited.
##

# Start a development container for the given platform tag.
# Usage: bash start_dev_docker.sh [x86|aarch64]
# Fix: "$1" is quoted in the [ ] tests below — the unquoted form made bash
# error out ("unary operator expected") when no argument was supplied.
if [ -z "$1" ]; then
    echo "Usage: bash start_dev_docker.sh [x86|aarch64]"
    exit 1
fi

if [ "$1" == "x86" ]; then
docker run --rm -it \
--privileged \
-e NVIDIA_DISABLE_REQUIRE=1 \
-e NVIDIA_DRIVER_CAPABILITIES=all --device /dev/dri \
--mount type=bind,src=/home/$USER/code,target=/home/$USER/code \
--hostname ros1-docker \
--add-host ros1-docker:127.0.0.1 \
--gpus all \
--network host \
--env DISPLAY=unix$DISPLAY \
--volume /tmp/.X11-unix:/tmp/.X11-unix \
--volume /dev:/dev \
curobo_docker:user_$1
elif [ "$1" == "aarch64" ]; then
docker run --rm -it \
--runtime nvidia \
--hostname ros1-docker \
--add-host ros1-docker:127.0.0.1 \
--network host \
--gpus all \
--env ROS_HOSTNAME=localhost \
--env DISPLAY=$DISPLAY \
--volume /tmp/.X11-unix:/tmp/.X11-unix \
--volume /dev/input:/dev/input \
--mount type=bind,src=/home/$USER/code,target=/home/$USER/code \
curobo_docker:user_$1
elif [[ "$1" == *isaac_sim* ]] ; then
echo "Isaac Sim Dev Docker is not supported"
else
echo "Unknown docker"
fi

62
docker/start_docker.sh Normal file
View File

@@ -0,0 +1,62 @@
#!/bin/bash
##
## Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
##
## NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
## property and proprietary rights in and to this material, related
## documentation and any modifications thereto. Any use, reproduction,
## disclosure or distribution of this material and related documentation
## without an express license agreement from NVIDIA CORPORATION or
## its affiliates is strictly prohibited.
##

# Start a curobo container for the given image tag.
# Usage: bash start_docker.sh [x86|aarch64|isaac_sim_<version>]
# Fix: "$1" is quoted in the [ ] tests below — the unquoted form made bash
# error out ("unary operator expected") when no argument was supplied.
if [ -z "$1" ]; then
    echo "Usage: bash start_docker.sh [x86|aarch64|isaac_sim_<version>]"
    exit 1
fi

if [ "$1" == "x86" ]; then
docker run --rm -it \
--privileged \
-e NVIDIA_DISABLE_REQUIRE=1 \
-e NVIDIA_DRIVER_CAPABILITIES=all --device /dev/dri \
--hostname ros1-docker \
--add-host ros1-docker:127.0.0.1 \
--gpus all \
--network host \
--env DISPLAY=unix$DISPLAY \
--volume /tmp/.X11-unix:/tmp/.X11-unix \
--volume /dev:/dev \
curobo_docker:$1
elif [ "$1" == "aarch64" ]; then
docker run --rm -it \
--runtime nvidia \
--hostname ros1-docker \
--add-host ros1-docker:127.0.0.1 \
--network host \
--gpus all \
--env ROS_HOSTNAME=localhost \
--env DISPLAY=$DISPLAY \
--volume /tmp/.X11-unix:/tmp/.X11-unix \
--volume /dev/input:/dev/input \
curobo_docker:$1
elif [[ "$1" == *isaac_sim* ]] ; then
# Isaac Sim containers persist kit/shader caches under ~/docker/isaac-sim so
# they survive container restarts.
docker run --name container_$1 --entrypoint bash -it --gpus all -e "ACCEPT_EULA=Y" --rm --network=host \
--privileged \
-e "PRIVACY_CONSENT=Y" \
-v $HOME/.Xauthority:/root/.Xauthority \
-e DISPLAY \
-v ~/docker/isaac-sim/cache/kit:/isaac-sim/kit/cache:rw \
-v ~/docker/isaac-sim/cache/ov:/root/.cache/ov:rw \
-v ~/docker/isaac-sim/cache/pip:/root/.cache/pip:rw \
-v ~/docker/isaac-sim/cache/glcache:/root/.cache/nvidia/GLCache:rw \
-v ~/docker/isaac-sim/cache/computecache:/root/.nv/ComputeCache:rw \
-v ~/docker/isaac-sim/logs:/root/.nvidia-omniverse/logs:rw \
-v ~/docker/isaac-sim/data:/root/.local/share/ov/data:rw \
-v ~/docker/isaac-sim/documents:/root/Documents:rw \
--volume /dev:/dev \
curobo_docker:$1
else
echo "Unknown docker"
fi

View File

@@ -0,0 +1,22 @@
##
## Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
##
## NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
## property and proprietary rights in and to this material, related
## documentation and any modifications thereto. Any use, reproduction,
## disclosure or distribution of this material and related documentation
## without an express license agreement from NVIDIA CORPORATION or
## its affiliates is strictly prohibited.
##

# Launch an interactive, self-removing container from the aarch64 curobo
# image. X11 and input devices are forwarded so GUI tools work inside the
# container, and the user's code tree is bind-mounted at the same path.
IMAGE=curobo_docker:aarch64

docker run --rm -it \
--runtime nvidia \
--gpus all \
--network host \
--hostname ros1-docker \
--add-host ros1-docker:127.0.0.1 \
--env ROS_HOSTNAME=localhost \
--env DISPLAY=$DISPLAY \
--mount type=bind,src=/home/$USER/code,target=/home/$USER/code \
--volume /tmp/.X11-unix:/tmp/.X11-unix \
--volume /dev/input:/dev/input \
${IMAGE}

View File

@@ -0,0 +1,27 @@
##
## Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
##
## NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
## property and proprietary rights in and to this material, related
## documentation and any modifications thereto. Any use, reproduction,
## disclosure or distribution of this material and related documentation
## without an express license agreement from NVIDIA CORPORATION or
## its affiliates is strictly prohibited.
##

# Launch an Isaac Sim curobo container for the image tag given as $1
# (GUI-capable: X authority and DISPLAY are forwarded). Kit/shader/pip
# caches and logs are persisted under ~/docker/isaac-sim on the host so
# they survive container restarts.
docker run -it --rm \
--name container_$1 \
--entrypoint bash \
--gpus all \
--privileged \
--network=host \
-e "ACCEPT_EULA=Y" \
-e "PRIVACY_CONSENT=Y" \
-e DISPLAY \
-v $HOME/.Xauthority:/root/.Xauthority \
-v ~/docker/isaac-sim/cache/kit:/isaac-sim/kit/cache:rw \
-v ~/docker/isaac-sim/cache/ov:/root/.cache/ov:rw \
-v ~/docker/isaac-sim/cache/pip:/root/.cache/pip:rw \
-v ~/docker/isaac-sim/cache/glcache:/root/.cache/nvidia/GLCache:rw \
-v ~/docker/isaac-sim/cache/computecache:/root/.nv/ComputeCache:rw \
-v ~/docker/isaac-sim/logs:/root/.nvidia-omniverse/logs:rw \
-v ~/docker/isaac-sim/data:/root/.local/share/ov/data:rw \
-v ~/docker/isaac-sim/documents:/root/Documents:rw \
--volume /dev:/dev \
curobo_docker:$1

View File

@@ -0,0 +1,22 @@
##
## Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
##
## NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
## property and proprietary rights in and to this material, related
## documentation and any modifications thereto. Any use, reproduction,
## disclosure or distribution of this material and related documentation
## without an express license agreement from NVIDIA CORPORATION or
## its affiliates is strictly prohibited.
##

# Launch a headless Isaac Sim curobo container for the image tag given as $1
# (no X forwarding, no --privileged). Kit/shader/pip caches and logs are
# persisted under ~/docker/isaac-sim on the host so they survive restarts.
docker run -it --rm \
--name container_$1 \
--entrypoint bash \
--gpus all \
--network=host \
-e "ACCEPT_EULA=Y" \
-e "PRIVACY_CONSENT=Y" \
-v ~/docker/isaac-sim/cache/kit:/isaac-sim/kit/cache:rw \
-v ~/docker/isaac-sim/cache/ov:/root/.cache/ov:rw \
-v ~/docker/isaac-sim/cache/pip:/root/.cache/pip:rw \
-v ~/docker/isaac-sim/cache/glcache:/root/.cache/nvidia/GLCache:rw \
-v ~/docker/isaac-sim/cache/computecache:/root/.nv/ComputeCache:rw \
-v ~/docker/isaac-sim/logs:/root/.nvidia-omniverse/logs:rw \
-v ~/docker/isaac-sim/data:/root/.local/share/ov/data:rw \
-v ~/docker/isaac-sim/documents:/root/Documents:rw \
curobo_docker:$1

View File

@@ -8,8 +8,9 @@
## without an express license agreement from NVIDIA CORPORATION or
## its affiliates is strictly prohibited.
##
docker run --rm -it \
--privileged --mount type=bind,src=/home/$USER/code,target=/home/$USER/code \
--privileged \
-e NVIDIA_DISABLE_REQUIRE=1 \
-e NVIDIA_DRIVER_CAPABILITIES=all --device /dev/dri \
--hostname ros1-docker \
@@ -19,4 +20,4 @@ docker run --rm -it \
--env DISPLAY=unix$DISPLAY \
--volume /tmp/.X11-unix:/tmp/.X11-unix \
--volume /dev:/dev \
curobo_user_docker:latest
curobo_docker:x86

View File

@@ -10,7 +10,8 @@
##
# Check architecture and load:
FROM curobo_docker:latest
ARG IMAGE_TAG
FROM curobo_docker:${IMAGE_TAG}
# Set variables
ARG USERNAME
ARG USER_ID
@@ -24,11 +25,13 @@ RUN useradd -l -u $USER_ID -g users $USERNAME
RUN /sbin/adduser $USERNAME sudo
RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
# Set user
USER $USERNAME
WORKDIR /home/$USERNAME
ENV USER=$USERNAME
ENV PATH="${PATH}:/home/${USER}/.local/bin"
RUN echo 'completed'

View File

@@ -12,6 +12,11 @@ FROM nvcr.io/nvidia/pytorch:23.08-py3 AS torch_cuda_base
LABEL maintainer "User Name"
# Deal with getting tons of debconf messages
# See: https://github.com/phusion/baseimage-docker/issues/58
RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections
# add GL:
RUN apt-get update && apt-get install -y --no-install-recommends \
pkg-config \
@@ -24,16 +29,6 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
ENV NVIDIA_VISIBLE_DEVICES all
ENV NVIDIA_DRIVER_CAPABILITIES graphics,utility,compute
RUN apt-get update &&\
apt-get install -y sudo git bash unattended-upgrades glmark2 &&\
rm -rf /var/lib/apt/lists/*
# Deal with getting tons of debconf messages
# See: https://github.com/phusion/baseimage-docker/issues/58
RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections
# Set timezone info
RUN apt-get update && apt-get install -y \
tzdata \
@@ -42,9 +37,8 @@ RUN apt-get update && apt-get install -y \
&& ln -fs /usr/share/zoneinfo/America/Los_Angeles /etc/localtime \
&& echo "America/Los_Angeles" > /etc/timezone \
&& dpkg-reconfigure -f noninteractive tzdata \
&& add-apt-repository -y ppa:git-core/ppa
RUN apt-get update && apt-get install -y \
&& add-apt-repository -y ppa:git-core/ppa \
&& apt-get update && apt-get install -y \
curl \
lsb-core \
wget \
@@ -65,6 +59,7 @@ RUN apt-get update && apt-get install -y \
sudo git bash unattended-upgrades \
apt-utils \
terminator \
glmark2 \
&& rm -rf /var/lib/apt/lists/*
# push defaults to bashrc:
@@ -83,11 +78,43 @@ ENV TORCH_CUDA_ARCH_LIST "7.0+PTX"
ENV LD_LIBRARY_PATH="/usr/local/lib:${LD_LIBRARY_PATH}"
# copy pkgs directory: clone curobo into docker/pkgs folder.
COPY pkgs /pkgs
RUN pip install "robometrics[evaluator] @ git+https://github.com/fishbotics/robometrics.git"
# if you want to use a different version of curobo, create folder as docker/pkgs and put your
# version of curobo there. Then uncomment below line and comment the next line that clones from
# github
# COPY pkgs /pkgs
RUN mkdir /pkgs && cd /pkgs && git clone https://github.com/NVlabs/curobo.git
RUN cd /pkgs/curobo && pip3 install .[dev,usd] --no-build-isolation
WORKDIR /pkgs/curobo
# Optionally install nvblox:
# we require this environment variable to render images in unit test curobo/tests/nvblox_test.py
ENV PYOPENGL_PLATFORM=egl
# add this file to enable EGL for rendering
RUN echo '{"file_format_version": "1.0.0", "ICD": {"library_path": "libEGL_nvidia.so.0"}}' >> /usr/share/glvnd/egl_vendor.d/10_nvidia.json
RUN apt-get update && \
apt-get install -y libgoogle-glog-dev libgtest-dev curl libsqlite3-dev && \
cd /usr/src/googletest && cmake . && cmake --build . --target install && \
rm -rf /var/lib/apt/lists/*
RUN cd /pkgs && git clone https://github.com/valtsblukis/nvblox.git && \
cd nvblox && cd nvblox && mkdir build && cd build && \
cmake .. -DPRE_CXX11_ABI_LINKABLE=ON && \
make -j32 && \
make install
RUN cd /pkgs && git clone https://github.com/nvlabs/nvblox_torch.git && \
cd nvblox_torch && \
sh install.sh
RUN python -m pip install pyrealsense2 opencv-python transforms3d