Skip to content

Commit

Permalink
Create Dockerfile
Browse files — browse the repository at this point in the history
  • Loading branch information
yinfan98 authored Dec 6, 2023
1 parent 3908c0d commit 59198d1
Showing 1 changed file with 71 additions and 0 deletions.
71 changes: 71 additions & 0 deletions docker/Jetson/Jetpack4.6/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,71 @@
# JetPack 4.6 base (L4T r32.7.1) with PyTorch 1.10 / Python 3.6 preinstalled.
FROM nvcr.io/nvidia/l4t-pytorch:r32.7.1-pth1.10-py3

ARG MMDEPLOY_VERSION=main
# NVIDIA container runtime configuration: expose all GPUs and all driver
# capabilities. Fixed: the variable is NVIDIA_VISIBLE_DEVICES (plural);
# the singular form was silently ignored by the runtime. Also switched to
# the key=value ENV form (legacy space-separated form is deprecated).
ENV NVIDIA_VISIBLE_DEVICES=all
ENV NVIDIA_DRIVER_CAPABILITIES=all
# CUDA and TensorRT locations as shipped on JetPack 4.6.
ENV CUDA_HOME="/usr/local/cuda"
ENV PATH="/usr/local/cuda/bin:${PATH}"
ENV LD_LIBRARY_PATH="/usr/local/cuda/lib64:${LD_LIBRARY_PATH}"
ENV TENSORRT_DIR="/usr/include/aarch64-linux-gnu"

# Build-time only: silence debconf prompts. Fixed typo ("nointeractive" ->
# "noninteractive"); declared as ARG so it does not leak into the runtime env.
ARG DEBIAN_FRONTEND=noninteractive
ENV FORCE_CUDA="1"

USER root
WORKDIR /root/workspace

# Install system build dependencies. The apt-key call refreshes the expired
# Jetson repository signing key (42D5A192B819C5DA). The apt lists are removed
# in the same layer so they do not persist in the image (hadolint DL3009).
RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 42D5A192B819C5DA &&\
    apt-get update &&\
    apt-get install -y --no-install-recommends \
        libhdf5-100 \
        libhdf5-dev \
        libpng-dev \
        libspdlog-dev \
        libssl-dev \
        pkg-config \
        vim \
        wget &&\
    rm -rf /var/lib/apt/lists/*

# --no-cache-dir keeps the pip download cache out of the layer (DL3042).
RUN python3 -m pip install --no-cache-dir --upgrade pip
# install onnx (pinned; versioned-hdf5 needs the libhdf5 packages above)
RUN python3 -m pip install --no-cache-dir onnx==1.10 versioned-hdf5

# install onnxruntime
# The aarch64 onnxruntime-gpu wheel is not published on PyPI; it must be placed
# in the build context beforehand (originally fetched from NVIDIA's Jetson Zoo
# link preserved in the commented-out wget line below).
COPY onnxruntime_gpu-1.10.0-cp36-cp36m-linux_aarch64.whl /root/workspace
# RUN wget https://nvidia.box.com/shared/static/jy7nqva7l88mq9i8bw3g3sklzf4kcnn2.whl -O onnxruntime_gpu-1.10.0-cp36-cp36m-linux_aarch64.whl &&\
RUN python3 -m pip install --no-cache-dir onnxruntime_gpu-1.10.0-cp36-cp36m-linux_aarch64.whl

# install mmcv (2.x branch) from source with CUDA ops compiled.
# --depth 1 keeps the clone shallow; full git history is not needed to build.
RUN git clone --branch 2.x --depth 1 https://github.com/open-mmlab/mmcv.git
WORKDIR /root/workspace/mmcv
# Pinned opencv-python version known to provide aarch64/py36 wheels.
RUN python3 -m pip install --no-cache-dir opencv-python==4.5.4.60
# MMCV_WITH_OPS=1 compiles the C++/CUDA ops during the editable install.
RUN MMCV_WITH_OPS=1 python3 -m pip install --no-cache-dir -e .

# build ppl.cv (CUDA image-processing library used by the MMDeploy SDK).
# ppl.cv must be present in the build context.
COPY ppl.cv /root/workspace/ppl.cv
WORKDIR /root/workspace/ppl.cv
# Fixed: the previous `echo "export PPLCV_DIR=…" >> ~/.bashrc` had no effect —
# RUN steps execute via non-login `sh -c`, which never sources ~/.bashrc.
# ENV makes the variable visible to all later build steps and at runtime.
ENV PPLCV_DIR=/root/workspace/ppl.cv
RUN ./build.sh cuda

# build mmdeploy (sources must be present in the build context; the original
# git-clone alternative is preserved below).
COPY mmdeploy /root/workspace/mmdeploy
# RUN git clone --recursive -b $MMDEPLOY_VERSION --depth 1 https://github.com/open-mmlab/mmdeploy &&\
# build TRT custom op
WORKDIR /root/workspace/mmdeploy
# Fixed: -DTENSORRT_DIR previously received the literal string "TENSORRT_DIR";
# it must expand the ENV value so CMake can locate the TensorRT headers/libs.
RUN mkdir -p build && cd build &&\
    cmake .. \
        -DMMDEPLOY_TARGET_BACKENDS="trt" \
        -DTENSORRT_DIR=${TENSORRT_DIR} &&\
    make -j$(nproc) && make install
RUN python3 -m pip install --no-cache-dir --upgrade setuptools && python3 -m pip install --no-cache-dir -e .
# build the mmdeploy SDK (C++ runtime, Python API, examples) against TensorRT
# and the ppl.cv build produced above.
RUN mkdir -p build && cd build &&\
    cmake .. \
        -DMMDEPLOY_BUILD_SDK=ON \
        -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON \
        -DMMDEPLOY_BUILD_EXAMPLES=ON \
        -DMMDEPLOY_TARGET_DEVICES="cuda;cpu" \
        -DMMDEPLOY_TARGET_BACKENDS="trt" \
        -DTENSORRT_DIR=${TENSORRT_DIR} \
        -Dpplcv_DIR=/root/workspace/ppl.cv/cuda-build/install/lib/cmake/ppl \
        -DMMDEPLOY_CODEBASES=all && \
    make -j$(nproc) && make install

# Runtime environment for the built SDK.
# Fixed: ${BACKUP_LD_LIBRARY_PATH} was never defined anywhere, which silently
# dropped the CUDA library path configured earlier; append to the existing
# LD_LIBRARY_PATH instead.
ENV MMDeploy_DIR="/root/workspace/mmdeploy/build/install/lib/cmake/MMDeploy"
ENV LD_LIBRARY_PATH="/root/workspace/mmdeploy/build/lib:${LD_LIBRARY_PATH}"
ENV PATH="/root/workspace/mmdeploy/build/bin:${PATH}"
ENV PYTHONPATH="/root/workspace/mmdeploy:${PYTHONPATH}"
WORKDIR /root/workspace

0 comments on commit 59198d1

Please sign in to comment.