
Commit

pull
tuas-travis-ci committed May 5, 2024
1 parent 9816762 commit bdbd1f8
Showing 8 changed files with 62 additions and 3 deletions.
4 changes: 4 additions & 0 deletions .devcontainer/devcontainer.json
@@ -1,7 +1,11 @@
// See this page for reference of options: https://containers.dev/implementors/json_reference
{
"name": "Existing Dockerfile",
<<<<<<< Updated upstream
"image": "ghcr.io/tritonuas/obcpp:x86",
=======
"image": "ghcr.io/tritonuas/obcpp:jetson",
>>>>>>> Stashed changes
// enable when need to connect over USB to pixhawk
// also: need to run obcpp with sudo or add tuas user to dialout group with
// `sudo usermod -aG dialout tuas && newgrp && bash`
44 changes: 44 additions & 0 deletions docker/Dockerfile.jetson
@@ -1,4 +1,12 @@
<<<<<<< Updated upstream
FROM dustynv/l4t-pytorch:r36.2.0
=======
# FROM tritonuas/jetson-base:r36.2.0
FROM dustynv/l4t-pytorch:r36.2.0

# this base image came from dusty-nv/jetson-containers
# ./build.sh --name=tritonuas/jetson-base pytorch:2.1 torchvision opencv
>>>>>>> Stashed changes

ARG USERNAME=tuas USER_UID=1000 USER_GID=1000 DEBIAN_FRONTEND=noninteractive

@@ -50,6 +58,14 @@ RUN --mount=target=/var/lib/apt/lists,type=cache,sharing=locked \
libopenblas-dev \
ninja-build

# RUN sudo dpkg --remove libopencv-dev
# RUN sudo apt install -f
# RUN --mount=target=/var/lib/apt/lists,type=cache,sharing=locked \
# --mount=target=/var/cache/apt,type=cache,sharing=locked \
# rm -f /etc/apt/apt.conf.d/docker-clean \
# && apt-get update \
# && apt-get install -y libopencv-dev

RUN pip3 install typing-extensions PyYAML cpplint

RUN echo $USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME \
@@ -65,6 +81,7 @@ RUN git clone --depth 1 https://github.com/mavlink/MAVSDK.git --branch v2.9.1 --
# pull and build torchvision
# refer to this page for version compatibility with pytorch (libtorch) https://github.com/pytorch/pytorch/wiki/PyTorch-Versions
ARG TORCHVISION_VERSION=0.17.0
<<<<<<< Updated upstream
# Space separated list of CUDA architecture versions.
# The version numbers depend on the NVIDIA GPU model we're using and the installed CUDA version.
# For the Jetson Orin Nano with the "Ampere" architecture and CUDA 12.1 we can use version 8.6 (written as 86 in CUDA_ARCH_LIST).
@@ -94,16 +111,43 @@ RUN gdown 1VtBji-cWfetM5nXZwt55JuHPWPGahQOH -O ${ARENA_TAR_PATH}
RUN tar -xvzf ${ARENA_TAR_PATH}
WORKDIR ${ARENA_EXTRACTED_PATH}
RUN sh Arena_SDK_ARM64.conf
=======
#ARG TORCHVISION_INSTALL_DIR=/torchvision-tmp
WORKDIR ${TORCHVISION_INSTALL_DIR}
RUN wget "https://github.com/pytorch/vision/archive/refs/tags/v${TORCHVISION_VERSION}.zip" \
&& unzip "v${TORCHVISION_VERSION}.zip" \
&& cd vision-0.17.0 \
&& mkdir build \
&& cd build \
&& cmake -DWITH_CUDA=1 -DCUDA_HAS_FP16=1 -DCUDA_NO_HALF_OPERATORS=1 -DCUDA_NO_HALF_CONVERSIONS=1 -DCUDA_NO_HALF2_OPERATORS=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH="/usr/local/lib/python3.10/dist-packages/torch/share/cmake/Torch" .. \
&& make -j4 \
&& make install

# # Install g++10 and replace the older version. For some reason some c++ 20 features aren't working with g++9 even though
# # we have CMake configured to use c++ 20 https://stackoverflow.com/questions/69031073/why-am-i-missing-c20-headers-and-how-do-i-fix-this
# RUN apt-get update && apt-get install -y g++-10 gcc-10
# RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 10
# RUN update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-10 10
# RUN update-alternatives --set gcc /usr/bin/gcc-10
# RUN update-alternatives --set g++ /usr/bin/g++-10
>>>>>>> Stashed changes

WORKDIR /obcpp
COPY . .

RUN rm -rf /obcpp/build
WORKDIR /obcpp/build
ENV CMAKE_PREFIX_PATH="/usr/local/lib/python3.10/dist-packages/torch/share/cmake/Torch;/usr/local/share/cmake/TorchVision"
<<<<<<< Updated upstream
RUN GITHUB_ACTIONS=true cmake -DCMAKE_PREFIX_PATH="/usr/local/lib/python3.10/dist-packages/torch/share/cmake/Torch;/usr/local/share/cmake/TorchVision" -DCMAKE_MODULE_PATH="/usr/local/share/cmake/TorchVision" -DCMAKE_BUILD_TYPE="Release" ..

RUN ninja obcpp
=======
RUN GITHUB_ACTIONS=true cmake -DCMAKE_PREFIX_PATH="/usr/local/lib/python3.10/dist-packages/torch/share/cmake/Torch;/usr/local/share/cmake/TorchVision" -DCMAKE_MODULE_PATH="/usr/local/share/cmake/TorchVision" -DCMAKE_BUILD_TYPE="Release" -DCMAKE_JOB_POOLS="j=2" ..

# RUN make obcpp cuda_check load_torchvision_model VERBOSE=1
RUN ninja obcpp
>>>>>>> Stashed changes

# login as non-root user
# USER $USERNAME
2 changes: 2 additions & 0 deletions docker/Makefile
@@ -26,3 +26,5 @@ stop-jetson-pixhawk-compose:
run-jetson-cuda-check:
docker run -it --rm --runtime nvidia -i tritonuas/obcpp:nvidia /obcpp/build/bin/cuda_check

jetson-develop:
cd .. && docker run -it --net=host --runtime=nvidia --volume=./:/obcpp -i tritonuas/obcpp:jetson /bin/bash
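Usage note: the new jetson-develop target is presumably invoked from the docker/ directory as `make jetson-develop`; it assumes the tritonuas/obcpp:jetson image has already been built locally and bind-mounts the repository root into the container at /obcpp for an interactive shell.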
2 changes: 1 addition & 1 deletion docker/jetson-pixhawk-compose.yml
@@ -1,7 +1,7 @@
version: "3"
services:
obcpp:
image: tritonuas/obcpp:jetson
image: ghcr.io/tritonuas/obcpp:debug-jetson-docker-jetpack36
runtime: nvidia
network_mode: "host"
devices:
1 change: 1 addition & 0 deletions include/cv/saliency.hpp
@@ -36,6 +36,7 @@ class Saliency {
private:
std::string modelPath; // path to prediction model
torch::jit::script::Module module; // the loaded model
// c10::Device device; //
};

#endif // INCLUDE_CV_SALIENCY_HPP_
1 change: 1 addition & 0 deletions src/cv/CMakeLists.txt
@@ -14,6 +14,7 @@ set(FILES
set(LIB_DEPS
obcpp_protos
obcpp_utilities
obcpp_camera
)

add_library(${LIB_NAME} STATIC
8 changes: 6 additions & 2 deletions src/cv/saliency.cpp
@@ -34,15 +34,19 @@ std::vector<CroppedTarget> Saliency::salience(cv::Mat image) {
tensor = tensor.toType(c10::kFloat).div(255);
// swap axis
tensor = Saliency::transpose(tensor, { (2), (0), (1) });
auto input_to_net = ToInput(tensor);

c10::Device device = torch::cuda::is_available() ? torch::kCUDA : torch::kCPU; // eventually add device as member of Saliency
auto tensor_cuda = tensor.to(device);

auto input_to_net = ToInput(tensor_cuda);

/*
* forward() runs an inference on the input image using the provided model
* and returns predictions as List[Dict[Tensor]], where the fields of Dict
* that we want are : a) boxes (FloatTensor[N, 4]): the predicted boxes, and
* b) scores (Tensor[N]): the scores of each detection.
*/

// output is a tuple of (losses, detections)
auto output = module.forward(input_to_net);
c10::ivalue::Tuple& tuple = output.toTupleRef();
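The comment on the new device line ("eventually add device as member of Saliency") and the commented-out c10::Device device; field in saliency.hpp point at the intended follow-up. A minimal sketch of that refactor, assuming the existing Saliency(modelPath) constructor seen in the integration test; the constructor body and anything not already shown in saliency.hpp is an assumption, not part of this commit:

// Sketch only: one way the commented-out device member could be wired up,
// so the CUDA/CPU choice is made once instead of on every salience() call.
#include <string>

#include <torch/script.h>
#include <torch/cuda.h>

class Saliency {
 public:
    explicit Saliency(std::string modelPath)
        : modelPath(std::move(modelPath)),
          device(torch::cuda::is_available() ? torch::kCUDA : torch::kCPU) {
        module = torch::jit::load(this->modelPath);
        module.to(device);  // move the weights to the chosen device once
    }

 private:
    std::string modelPath;              // path to prediction model
    torch::jit::script::Module module;  // the loaded model
    c10::Device device;                 // chosen once at construction
};

salience() could then call tensor.to(device) directly and skip re-querying torch::cuda::is_available() for every frame.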
3 changes: 3 additions & 0 deletions tests/integration/cv_saliency.cpp
@@ -9,12 +9,14 @@

// expected arguments: <path-to-model> <path-to-image>
int main(int argc, const char* argv[]) {

if (argc != 3) {
std::cerr << "usage: example-app <path-to-model> <path-to-image>\n";
return -1;
}

// convert image to tensor

const char* modelPath = argv[1];
Saliency sal(modelPath);
const char* imgPath = argv[2];
@@ -36,6 +38,7 @@ int main(int argc, const char* argv[]) {
// cv::namedWindow("cropped targets", cv::WINDOW_FULLSCREEN);
// cv::imshow("cropped targets", img);
// cv::waitKey(0);

cv::imwrite("croppedTargets.jpg", img);
LOG_F(INFO, "saved croppedTargets.jpg to build/");
// testing: save input image to file path (cv::imsave?) with bounding boxes overlayed
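On the trailing comment in this test: OpenCV's save function is cv::imwrite (already used above), not cv::imsave, and the overlay step the comment describes could look roughly like the sketch below. The Box struct is a hypothetical stand-in for whatever bounding-box fields CroppedTarget exposes; it is not taken from this diff.

// Sketch only: overlay predicted boxes on the input image before saving.
#include <opencv2/opencv.hpp>
#include <string>
#include <vector>

struct Box { int x1, y1, x2, y2; };  // hypothetical stand-in for CroppedTarget's bbox

void saveAnnotated(cv::Mat img, const std::vector<Box>& boxes, const std::string& outPath) {
    for (const auto& b : boxes) {
        cv::rectangle(img, cv::Point(b.x1, b.y1), cv::Point(b.x2, b.y2),
                      cv::Scalar(0, 255, 0), 2);  // green box, 2 px thick
    }
    cv::imwrite(outPath, img);  // OpenCV's save call is imwrite, not "imsave"
}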
