update to 0.6.2

Balakumar Sundaralingam
2023-12-15 02:01:33 -08:00
parent d85ae41fba
commit 58958bbcce
105 changed files with 2514 additions and 934 deletions


@@ -107,7 +107,7 @@ RUN apt-get update && \
#
# download/build the ROS source
# Optionally download/build the ROS source
#
RUN mkdir ros_catkin_ws && \
cd ros_catkin_ws && \
@@ -139,12 +139,13 @@ COPY pkgs /pkgs
# install warp:
#
RUN cd /pkgs/warp && python3 build_lib.py && pip3 install .
RUN cd /pkgs/warp && pip3 install .
# install curobo:
RUN cd /pkgs && git clone https://github.com/NVlabs/curobo.git
ENV TORCH_CUDA_ARCH_LIST "7.0+PTX"
RUN cd /pkgs/curobo && pip3 install . --no-build-isolation


@@ -9,7 +9,6 @@
## its affiliates is strictly prohibited.
##
#@FROM nvcr.io/nvidia/pytorch:22.12-py3
FROM nvcr.io/nvidia/pytorch:23.08-py3 AS torch_cuda_base
RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections


@@ -10,16 +10,9 @@
## its affiliates is strictly prohibited.
##
input_arg=$1
USER_ID=$(id -g "$USER")
# This script will create a dev docker. Run this script by calling `bash build_dev_docker.sh`
# If you want to build a isaac sim docker, run this script with `bash build_dev_docker.sh isaac_sim_2022.2.1`
# Check architecture to build:
echo "deprecated, use build_docker.sh instead"
image_tag="x86"
isaac_sim_version=""
input_arg="$1"
if [ -z "$input_arg" ]; then
arch=$(uname -m)
@@ -33,42 +26,13 @@ if [ -z "$input_arg" ]; then
fi
fi
if [ "$input_arg" == "isaac_sim_2022.2.1" ]; then
echo "Building Isaac Sim docker"
dockerfile="isaac_sim.dockerfile"
image_tag="isaac_sim_2022.2.1"
isaac_sim_version="2022.2.1"
elif [ "$input_arg" == "isaac_sim_2023.1.0" ]; then
echo "Building Isaac Sim headless docker"
dockerfile="isaac_sim.dockerfile"
image_tag="isaac_sim_2023.1.0"
isaac_sim_version="2023.1.0"
elif [ "$input_arg" == "x86" ]; then
echo "Building for X86 Architecture"
dockerfile="x86.dockerfile"
image_tag="x86"
elif [ "$input_arg" = "aarch64" ]; then
echo "Building for ARM Architecture"
dockerfile="aarch64.dockerfile"
image_tag="aarch64"
else
echo "Unknown Architecture"
exit
user_dockerfile=user.dockerfile
if [[ $input_arg == *isaac_sim* ]] ; then
user_dockerfile=user_isaac_sim.dockerfile
fi
# build docker file:
# Make sure you enable nvidia runtime by:
# Edit/create the /etc/docker/daemon.json with content:
# {
# "runtimes": {
# "nvidia": {
# "path": "/usr/bin/nvidia-container-runtime",
# "runtimeArgs": []
# }
# },
# "default-runtime": "nvidia" # ADD this line (the above lines will already exist in your json file)
# }
#
echo "${dockerfile}"
echo $input_arg
echo $USER_ID
docker build --build-arg ISAAC_SIM_VERSION=${isaac_sim_version} -t curobo_docker:${image_tag} -f ${dockerfile} .
docker build --build-arg USERNAME=$USER --build-arg USER_ID=${USER_ID} --build-arg IMAGE_TAG=$input_arg -f $user_dockerfile --tag curobo_docker:user_$input_arg .
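The comment block above spells out the NVIDIA container runtime setup that the docker build step relies on. A minimal sketch of that configuration, assuming no /etc/docker/daemon.json exists yet (if one does, merge the "runtimes" and "default-runtime" keys into it rather than overwriting):

sudo tee /etc/docker/daemon.json > /dev/null <<'EOF'
{
    "runtimes": {
        "nvidia": {
            "path": "/usr/bin/nvidia-container-runtime",
            "runtimeArgs": []
        }
    },
    "default-runtime": "nvidia"
}
EOF
# restart the daemon so the default-runtime change takes effect
sudo systemctl restart docker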


@@ -12,7 +12,7 @@
# This script will create a dev docker. Run this script by calling `bash build_dev_docker.sh`
# If you want to build a isaac sim docker, run this script with `bash build_dev_docker.sh isaac_sim_2022.2.1`
# If you want to build a isaac sim docker, run this script with `bash build_dev_docker.sh isaac`
# Check architecture to build:
@@ -51,7 +51,7 @@ elif [ "$input_arg" = "aarch64" ]; then
dockerfile="aarch64.dockerfile"
image_tag="aarch64"
else
echo "Unknown Architecture"
echo "Unknown Argument. Please pass one of [x86, aarch64, isaac_sim_2022.2.1, isaac_sim_2023.1.0]"
exit
fi
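For reference, a usage sketch matching the updated error message above, assuming this hunk belongs to the build_docker.sh named in the deprecation notice earlier in this commit (the architecture is auto-detected from uname -m when the argument is omitted):

bash build_docker.sh                      # auto-detects x86 or aarch64
bash build_docker.sh isaac_sim_2023.1.0   # builds the Isaac Sim variant instead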

docker/build_user_docker.sh Executable file → Normal file

@@ -18,6 +18,7 @@ FROM nvcr.io/nvidia/isaac-sim:${ISAAC_SIM_VERSION} AS isaac-sim
FROM nvcr.io/nvidia/cudagl:${CUDA_VERSION}-devel-${BASE_DIST}
# this does not work for 2022.2.1
#$FROM nvcr.io/nvidia/cuda:${CUDA_VERSION}-cudnn8-devel-${BASE_DIST}
@@ -171,7 +172,7 @@ RUN mkdir /pkgs && cd /pkgs && git clone https://github.com/NVlabs/curobo.git
RUN $omni_python -m pip install ninja wheel tomli
RUN cd /pkgs/curobo && $omni_python -m pip install .[dev,isaac_sim] --no-build-isolation
RUN cd /pkgs/curobo && $omni_python -m pip install .[dev] --no-build-isolation
# Optionally install nvblox:
@@ -183,17 +184,32 @@ RUN apt-get update && \
# install gflags and glog statically, instructions from: https://github.com/nvidia-isaac/nvblox/blob/public/docs/redistributable.md
RUN cd /pkgs && wget https://cmake.org/files/v3.27/cmake-3.27.1.tar.gz && \
tar -xvzf cmake-3.27.1.tar.gz && \
apt update && apt install -y build-essential checkinstall zlib1g-dev libssl-dev && \
cd cmake-3.27.1 && ./bootstrap && \
make -j8 && \
make install && rm -rf /var/lib/apt/lists/*
ENV USE_CX11_ABI=0
ENV PRE_CX11_ABI=ON
RUN cd /pkgs && git clone https://github.com/sqlite/sqlite.git -b version-3.39.4 && \
cd /pkgs/sqlite && CFLAGS=-fPIC ./configure --prefix=/pkgs/sqlite/install/ && \
make && make install
RUN cd /pkgs && git clone https://github.com/google/glog.git -b v0.4.0 && \
RUN cd /pkgs && git clone https://github.com/google/glog.git -b v0.6.0 && \
cd glog && \
mkdir build && cd build && \
cmake .. -DCMAKE_POSITION_INDEPENDENT_CODE=ON \
-DCMAKE_INSTALL_PREFIX=/pkgs/glog/install/ \
-DWITH_GFLAGS=OFF -DBUILD_SHARED_LIBS=OFF \
-DWITH_GFLAGS=OFF -DWITH_GTEST=OFF -DBUILD_SHARED_LIBS=OFF -DCMAKE_CXX_FLAGS=-D_GLIBCXX_USE_CXX11_ABI=${USE_CX11_ABI} \
&& make -j8 && make install
@@ -202,39 +218,25 @@ RUN cd /pkgs && git clone https://github.com/gflags/gflags.git -b v2.2.2 && \
mkdir build && cd build && \
cmake .. -DCMAKE_POSITION_INDEPENDENT_CODE=ON \
-DCMAKE_INSTALL_PREFIX=/pkgs/gflags/install/ \
-DGFLAGS_BUILD_STATIC_LIBS=ON -DGFLAGS=google \
-DGFLAGS_BUILD_STATIC_LIBS=ON -DCMAKE_CXX_FLAGS=-D_GLIBCXX_USE_CXX11_ABI=${USE_CX11_ABI} \
&& make -j8 && make install
RUN cd /pkgs && git clone https://github.com/google/googletest.git -b v1.14.0 && \
cd googletest && mkdir build && cd build && cmake .. && make -j8 && make install
RUN cd /pkgs && git clone https://github.com/valtsblukis/nvblox.git
RUN cd /pkgs/nvblox/nvblox && mkdir build && cd build && \
cmake .. -DPRE_CXX11_ABI_LINKABLE=ON -DBUILD_REDISTRIBUTABLE=ON -DSQLITE3_BASE_PATH="/pkgs/sqlite/install/" -DGLOG_BASE_PATH="/pkgs/glog/install/" -DGFLAGS_BASE_PATH="/pkgs/gflags/install/" && \
RUN cd /pkgs && git clone https://github.com/valtsblukis/nvblox.git && cd /pkgs/nvblox/nvblox && \
mkdir build && cd build && \
cmake .. -DBUILD_REDISTRIBUTABLE=ON \
-DCMAKE_CXX_FLAGS=-D_GLIBCXX_USE_CXX11_ABI=${USE_CX11_ABI} -DPRE_CXX11_ABI_LINKABLE=${PRE_CX11_ABI} \
-DSQLITE3_BASE_PATH="/pkgs/sqlite/install/" -DGLOG_BASE_PATH="/pkgs/glog/install/" \
-DGFLAGS_BASE_PATH="/pkgs/gflags/install/" -DCMAKE_CUDA_FLAGS=-D_GLIBCXX_USE_CXX11_ABI=${USE_CX11_ABI} \
-DBUILD_TESTING=OFF && \
make -j32 && \
make install
# install newer cmake and glog for pytorch:
# TODO: use libgoogle from source that was compiled instead.
RUN apt-get update && \
apt-get install -y libgoogle-glog-dev && \
rm -rf /var/lib/apt/lists/*
RUN cd /pkgs && wget https://cmake.org/files/v3.19/cmake-3.19.5.tar.gz && \
tar -xvzf cmake-3.19.5.tar.gz && \
apt update && apt install -y build-essential checkinstall zlib1g-dev libssl-dev && \
cd cmake-3.19.5 && ./bootstrap && \
make -j8 && \
make install && rm -rf /var/lib/apt/lists/*
ENV cudnn_version=8.2.4.15
ENV cuda_version=cuda11.4
RUN apt update && apt-get install -y libcudnn8=${cudnn_version}-1+${cuda_version} libcudnn8-dev=${cudnn_version}-1+${cuda_version} && \
rm -rf /var/lib/apt/lists/*
# we also need libglog for pytorch:
RUN cd /pkgs/glog && \
mkdir build_isaac && cd build_isaac && \
cmake .. -DCMAKE_POSITION_INDEPENDENT_CODE=ON \
-DWITH_GFLAGS=OFF -DWITH_GTEST=OFF -DBUILD_SHARED_LIBS=OFF -DCMAKE_CXX_FLAGS=-D_GLIBCXX_USE_CXX11_ABI=${USE_CX11_ABI} \
&& make -j8 && make install
RUN cd /pkgs && git clone https://github.com/nvlabs/nvblox_torch.git && \
cd /pkgs/nvblox_torch && \


@@ -10,25 +10,24 @@
## its affiliates is strictly prohibited.
##
echo "deprecated, use start_user_docker.sh instead"
input_arg="$1"
input_arg=$1
if [ -z "$input_arg" ]; then
echo "Argument empty, trying to run based on architecture"
arch=$(uname -m)
if [ "$arch" == "x86_64" ]; then
arch=`uname -m`
if [ $arch == "x86_64" ]; then
input_arg="x86"
elif [ "$arch" == "arm64" ]; then
elif [ $arch == "arm64" ]; then
input_arg="aarch64"
elif [ "$arch" == "aarch64" ]; then
elif [ $arch == "aarch64" ]; then
input_arg="aarch64"
fi
fi
if [ "$input_arg" == "x86" ]; then
if [ $input_arg == "x86" ]; then
docker run --rm -it \
--privileged \
@@ -44,9 +43,10 @@ if [ "$input_arg" == "x86" ]; then
--volume /dev:/dev \
curobo_docker:user_$input_arg
elif [ "$input_arg" == "aarch64" ]; then
elif [ $input_arg == "aarch64" ]; then
docker run --rm -it \
--privileged \
--runtime nvidia \
--hostname ros1-docker \
--add-host ros1-docker:127.0.0.1 \
@@ -59,8 +59,38 @@ elif [ "$input_arg" == "aarch64" ]; then
--mount type=bind,src=/home/$USER/code,target=/home/$USER/code \
curobo_docker:user_$input_arg
elif [[ "$input_arg" == *isaac_sim* ]] ; then
echo "Isaac Sim User Docker is not supported"
elif [[ $input_arg == *isaac_sim* ]] ; then
echo "Isaac Sim Dev Docker is not supported"
mkdir -p ~/docker/isaac-sim ~/docker/isaac-sim/cache/kit \
~/docker/isaac-sim/cache/ov \
~/docker/isaac-sim/cache/pip \
~/docker/isaac-sim/cache/glcache \
~/docker/isaac-sim/cache/computecache \
~/docker/isaac-sim/logs \
~/docker/isaac-sim/data \
~/docker/isaac-sim/documents
docker run --name container_$input_arg -it --gpus all -e "ACCEPT_EULA=Y" --rm --network=host \
--privileged \
-e "PRIVACY_CONSENT=Y" \
-v $HOME/.Xauthority:/root/.Xauthority \
-e DISPLAY \
-v ~/docker/isaac-sim/cache/kit:/isaac-sim/kit/cache:rw \
-v ~/docker/isaac-sim/cache/ov:/root/.cache/ov:rw \
-v ~/docker/isaac-sim/cache/pip:/root/.cache/pip:rw \
-v ~/docker/isaac-sim/cache/glcache:/root/.cache/nvidia/GLCache:rw \
-v ~/docker/isaac-sim/cache/computecache:/root/.nv/ComputeCache:rw \
-v ~/docker/isaac-sim/logs:/root/.nvidia-omniverse/logs:rw \
-v ~/docker/isaac-sim/data:/root/.local/share/ov/data:rw \
-v ~/docker/isaac-sim/documents:/home/$USER/Documents:rw \
--volume /dev:/dev \
--mount type=bind,src=/home/$USER/code,target=/home/$USER/code \
curobo_docker:user_$input_arg
else
echo "Unknown docker"
fi


@@ -42,6 +42,7 @@ if [ "$input_arg" == "x86" ]; then
elif [ "$input_arg" == "aarch64" ]; then
docker run --rm -it \
--privileged \
--runtime nvidia \
--hostname ros1-docker \
--add-host ros1-docker:127.0.0.1 \

docker/start_docker_aarch64.sh Executable file → Normal file

@@ -1,3 +1,4 @@
#!/bin/bash
##
## Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
##


@@ -0,0 +1,22 @@
##
## Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
##
## NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
## property and proprietary rights in and to this material, related
## documentation and any modifications thereto. Any use, reproduction,
## disclosure or distribution of this material and related documentation
## without an express license agreement from NVIDIA CORPORATION or
## its affiliates is strictly prohibited.
##
docker run --rm -it \
--runtime nvidia \
--mount type=bind,src=/home/$USER/code,target=/home/$USER/code \
--hostname ros1-docker \
--add-host ros1-docker:127.0.0.1 \
--network host \
--gpus all \
--env ROS_HOSTNAME=localhost \
--env DISPLAY=$DISPLAY \
--volume /tmp/.X11-unix:/tmp/.X11-unix \
--volume /dev/input:/dev/input \
curobo_user_docker:latest

docker/start_docker_isaac_sim.sh Executable file → Normal file

@@ -1,3 +1,4 @@
#!/bin/bash
##
## Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
##

docker/start_docker_isaac_sim_headless.sh Executable file → Normal file

@@ -1,3 +1,4 @@
#!/bin/bash
##
## Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
##

docker/start_docker_x86.sh Executable file → Normal file

@@ -1,3 +1,4 @@
#!/bin/bash
##
## Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
##


@@ -0,0 +1,24 @@
##
## Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
##
## NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
## property and proprietary rights in and to this material, related
## documentation and any modifications thereto. Any use, reproduction,
## disclosure or distribution of this material and related documentation
## without an express license agreement from NVIDIA CORPORATION or
## its affiliates is strictly prohibited.
##
docker run --rm -it \
--privileged --mount type=bind,src=/home/$USER/code,target=/home/$USER/code \
-e NVIDIA_DISABLE_REQUIRE=1 \
-e NVIDIA_DRIVER_CAPABILITIES=all --device /dev/dri \
--hostname ros1-docker \
--add-host ros1-docker:127.0.0.1 \
--gpus all \
--network host \
--env ROS_MASTER_URI=http://127.0.0.1:11311 \
--env ROS_IP=127.0.0.1 \
--env DISPLAY=unix$DISPLAY \
--volume /tmp/.X11-unix:/tmp/.X11-unix \
--volume /dev/input:/dev/input \
curobo_user_docker:latest


@@ -11,23 +11,23 @@
##
input_arg="$1"
input_arg=$1
if [ -z "$input_arg" ]; then
echo "Argument empty, trying to run based on architecture"
arch=$(uname -m)
if [ "$arch" == "x86_64" ]; then
arch=`uname -m`
if [ $arch == "x86_64" ]; then
input_arg="x86"
elif [ "$arch" == "arm64" ]; then
elif [ $arch == "arm64" ]; then
input_arg="aarch64"
elif [ "$arch" == "aarch64" ]; then
elif [ $arch == "aarch64" ]; then
input_arg="aarch64"
fi
fi
if [ "$input_arg" == "x86" ]; then
if [ $input_arg == "x86" ]; then
docker run --rm -it \
--privileged \
@@ -41,9 +41,9 @@ if [ "$input_arg" == "x86" ]; then
--env DISPLAY=unix$DISPLAY \
--volume /tmp/.X11-unix:/tmp/.X11-unix \
--volume /dev:/dev \
curobo_docker:user_$1
curobo_docker:user_$input_arg
elif [ "$input_arg" == "aarch64" ]; then
elif [ $input_arg == "aarch64" ]; then
docker run --rm -it \
--runtime nvidia \
@@ -56,10 +56,23 @@ elif [ "$input_arg" == "aarch64" ]; then
--volume /tmp/.X11-unix:/tmp/.X11-unix \
--volume /dev/input:/dev/input \
--mount type=bind,src=/home/$USER/code,target=/home/$USER/code \
curobo_docker:user_$1
curobo_docker:user_$input_arg
elif [[ "$input_arg" == *isaac_sim* ]] ; then
echo "Isaac Sim User Docker is not supported"
elif [[ $input_arg == *isaac_sim* ]] ; then
echo "Isaac Sim Dev Docker is not supported"
else
echo "Unknown docker"
echo "Unknown docker, launching blindly"
docker run --rm -it \
--privileged \
-e NVIDIA_DISABLE_REQUIRE=1 \
-e NVIDIA_DRIVER_CAPABILITIES=all --device /dev/dri \
--mount type=bind,src=/home/$USER/code,target=/home/$USER/code \
--hostname ros1-docker \
--add-host ros1-docker:127.0.0.1 \
--gpus all \
--network host \
--env DISPLAY=unix$DISPLAY \
--volume /tmp/.X11-unix:/tmp/.X11-unix \
--volume /dev:/dev \
curobo_docker:user_$input_arg
fi


@@ -0,0 +1,70 @@
##
## Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
##
## NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
## property and proprietary rights in and to this material, related
## documentation and any modifications thereto. Any use, reproduction,
## disclosure or distribution of this material and related documentation
## without an express license agreement from NVIDIA CORPORATION or
## its affiliates is strictly prohibited.
##
# Check architecture and load:
ARG IMAGE_TAG
FROM curobo_docker:${IMAGE_TAG}
# Set variables
ARG USERNAME
ARG USER_ID
# Set environment variables
# Set up sudo user
#RUN /sbin/adduser --disabled-password --gecos '' --uid $USER_ID $USERNAME
RUN useradd -l -u $USER_ID -g users $USERNAME
RUN /sbin/adduser $USERNAME sudo
RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
RUN usermod -aG root $USERNAME
# change ownership of isaac sim folder if it exists:
RUN mkdir /isaac-sim/kit/cache && chown -R $USERNAME:users /isaac-sim/kit/cache
RUN chown $USERNAME:users /root && chown $USERNAME:users /isaac-sim
RUN mkdir /root/.nv && chown -R $USERNAME:users /root/.nv
RUN chown -R $USERNAME:users /root/.cache
# change permission for some exts:
RUN mkdir -p /isaac-sim/kit/logs/Kit/Isaac-Sim && chown -R $USERNAME:users /isaac-sim/kit/logs/Kit/Isaac-Sim
#RUN chown -R $USERNAME:users /root/.cache/pip
#RUN chown -R $USERNAME:users /root/.cache/nvidia/GLCache
#RUN chown -R $USERNAME:users /root/.local/share/ov
RUN mkdir /root/.nvidia-omniverse/logs && mkdir -p /home/$USERNAME/.nvidia-omniverse && cp -r /root/.nvidia-omniverse/* /home/$USERNAME/.nvidia-omniverse && chown -R $USERNAME:users /home/$USERNAME/.nvidia-omniverse
RUN chown -R $USERNAME:users /isaac-sim/exts/omni.isaac.synthetic_recorder/
RUN chown -R $USERNAME:users /isaac-sim/kit/exts/omni.gpu_foundation
RUN mkdir -p /home/$USERNAME/.cache && cp -r /root/.cache/* /home/$USERNAME/.cache && chown -R $USERNAME:users /home/$USERNAME/.cache
RUN mkdir -p /isaac-sim/kit/data/documents/Kit && mkdir -p /isaac-sim/kit/data/documents/Kit/apps/Isaac-Sim/scripts/ &&chown -R $USERNAME:users /isaac-sim/kit/data/documents/Kit /isaac-sim/kit/data/documents/Kit/apps/Isaac-Sim/scripts/
RUN mkdir -p /home/$USERNAME/.local
RUN echo "alias omni_python='/isaac-sim/python.sh'" >> /home/$USERNAME/.bashrc
RUN echo "alias python='/isaac-sim/python.sh'" >> /home/$USERNAME/.bashrc
RUN chown -R $USERNAME:users /home/$USERNAME
# /isaac-sim/kit/data
# /isaac-sim/kit/logs/Kit
# Set user
USER $USERNAME
WORKDIR /home/$USERNAME
ENV USER=$USERNAME
ENV PATH="${PATH}:/home/${USER}/.local/bin"
ENV SHELL /bin/bash
ENV OMNI_USER=admin
ENV OMNI_PASS=admin
RUN mkdir /root/Documents && chown -R $USERNAME:users /root/Documents
RUN echo 'completed'