-
Notifications
You must be signed in to change notification settings - Fork 484
/
Dockerfile
67 lines (57 loc) · 2.1 KB
/
Dockerfile
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
#---
# name: nanosam
# group: vit
# depends: [pytorch, torch2trt, transformers]
# requires: '>=34.1.0'
# docs: docs.md
#---
ARG BASE_IMAGE
FROM ${BASE_IMAGE}

WORKDIR /opt

# 1. Install the dependencies
#
# PyTorch and torch2trt are provided by the base image
# (declared under "depends:" in the yaml header above).
# Shallow clone: only HEAD is needed, not the full history.
RUN git clone --depth=1 https://github.com/NVIDIA-AI-IOT/trt_pose && \
    cd trt_pose && \
    python3 setup.py develop --user

# 2. Install the NanoSAM Python package (editable/--user install from HEAD)
RUN git clone --depth=1 https://github.com/NVIDIA-AI-IOT/nanosam && \
    cd nanosam && \
    python3 setup.py develop --user

# 3. Build the TensorRT engine for the mask decoder
#
# timm is an import-time dependency of the nanosam tooling.
# --no-cache-dir keeps the pip download cache out of the image layer.
RUN pip3 install --no-cache-dir timm

# Alternative to the download below: re-export the mask-decoder ONNX
# from the MobileSAM checkpoint yourself:
#RUN cd /opt/nanosam && \
#    mkdir data && \
#    python3 -m nanosam.tools.export_sam_mask_decoder_onnx \
#        --model-type=vit_t \
#        --checkpoint=assets/mobile_sam.pt \
#        --output=data/mobile_sam_mask_decoder.onnx

# Fetch the pre-exported mask-decoder ONNX from NVIDIA's box.com share.
# NOTE(review): --no-check-certificate is presumably a workaround for the
# box.com redirect chain failing TLS verification on Jetson — confirm it is
# still needed; downloading without verification is otherwise undesirable.
RUN mkdir /opt/nanosam/data && \
    wget --quiet --show-progress --progress=bar:force:noscroll --no-check-certificate \
        https://nvidia.box.com/shared/static/ho09o7ohgp7lsqe0tcxqu5gs2ddojbis.onnx \
        -O /opt/nanosam/data/mobile_sam_mask_decoder.onnx

# Build the decoder engine with dynamic point-prompt shapes
# (1..10 points per query, each point = (x, y) with a label).
RUN cd /opt/nanosam && \
    /usr/src/tensorrt/bin/trtexec \
        --onnx=data/mobile_sam_mask_decoder.onnx \
        --saveEngine=data/mobile_sam_mask_decoder.engine \
        --minShapes=point_coords:1x1x2,point_labels:1x1 \
        --optShapes=point_coords:1x1x2,point_labels:1x1 \
        --maxShapes=point_coords:1x10x2,point_labels:1x10

# 4. Build the TensorRT engine for the NanoSAM image encoder
#
# gdown is only needed to fetch the ResNet18 encoder ONNX from Google Drive.
RUN pip3 install --no-cache-dir gdown && \
    cd /opt/nanosam/data/ && \
    gdown https://drive.google.com/uc?id=14-SsvoaTl-esC3JOzomHDnI9OGgdO2OR && \
    ls -lh && \
    cd /opt/nanosam/ && \
    /usr/src/tensorrt/bin/trtexec \
        --onnx=data/resnet18_image_encoder.onnx \
        --saveEngine=data/resnet18_image_encoder.engine \
        --fp16

# 5. Run the basic usage example as a smoke test of both engines
RUN pip3 install --no-cache-dir matplotlib
RUN cd /opt/nanosam/ && \
    python3 examples/basic_usage.py \
        --image_encoder=data/resnet18_image_encoder.engine \
        --mask_decoder=data/mobile_sam_mask_decoder.engine

COPY benchmark.py /opt/nanosam/