From bdbd1f8e2e880fea94dec77f3ca01af0d15d2572 Mon Sep 17 00:00:00 2001 From: Triton UAS Date: Sat, 4 May 2024 17:46:09 -0700 Subject: [PATCH] pull --- .devcontainer/devcontainer.json | 4 +++ docker/Dockerfile.jetson | 44 +++++++++++++++++++++++++++++++ docker/Makefile | 2 ++ docker/jetson-pixhawk-compose.yml | 2 +- include/cv/saliency.hpp | 1 + src/cv/CMakeLists.txt | 1 + src/cv/saliency.cpp | 8 ++++-- tests/integration/cv_saliency.cpp | 3 +++ 8 files changed, 62 insertions(+), 3 deletions(-) diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 70a328ef..c35a35aa 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,7 +1,11 @@ // See this page for reference of options: https://containers.dev/implementors/json_reference { "name": "Existing Dockerfile", +<<<<<<< Updated upstream "image": "ghcr.io/tritonuas/obcpp:x86", +======= + "image": "ghcr.io/tritonuas/obcpp:jetson", +>>>>>>> Stashed changes // enable when need to connect over USB to pixhawk // also: need to run obcpp with sudo or add tuas user to dialout group with // `sudo usermod -aG dialout tuas && newgrp && bash` diff --git a/docker/Dockerfile.jetson b/docker/Dockerfile.jetson index 28ae7dc6..5d2103e3 100644 --- a/docker/Dockerfile.jetson +++ b/docker/Dockerfile.jetson @@ -1,4 +1,12 @@ +<<<<<<< Updated upstream FROM dustynv/l4t-pytorch:r36.2.0 +======= +# FROM tritonuas/jetson-base:r36.2.0 +FROM dustynv/l4t-pytorch:r36.2.0 + +# this base image came from dusty-nv/jetson-containers +# ./build.sh --name=tritonuas/jetson-base pytorch:2.1 torchvision opencv +>>>>>>> Stashed changes ARG USERNAME=tuas USER_UID=1000 USER_GID=1000 DEBIAN_FRONTEND=noninteractive @@ -50,6 +58,14 @@ RUN --mount=target=/var/lib/apt/lists,type=cache,sharing=locked \ libopenblas-dev \ ninja-build +# RUN sudo dpkg --remove libopencv-dev +# RUN sudo apt install -f +# RUN --mount=target=/var/lib/apt/lists,type=cache,sharing=locked \ +# 
--mount=target=/var/cache/apt,type=cache,sharing=locked \ +# rm -f /etc/apt/apt.conf.d/docker-clean \ +# && apt-get update \ +# && apt-get install -y libopencv-dev + RUN pip3 install typing-extensions PyYAML cpplint RUN echo $USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME \ @@ -65,6 +81,7 @@ RUN git clone --depth 1 https://github.com/mavlink/MAVSDK.git --branch v2.9.1 -- # pull and build torchvision # refer to this page for version compatibilty with pytorch (libtorch) https://github.com/pytorch/pytorch/wiki/PyTorch-Versions ARG TORCHVISION_VERSION=0.17.0 +<<<<<<< Updated upstream # Space separated list of CUDA architecture versions. # The version nubmers depend on the NVIDIA GPU model we're using and the installed CUDA version. # For the Jetson Orin Nano with the "Ampere" architecture and CUDA 12.1 we can use version 8.6 (written as 86 in CUDA_ARCH_LIST). @@ -94,6 +111,26 @@ RUN gdown 1VtBji-cWfetM5nXZwt55JuHPWPGahQOH -O ${ARENA_TAR_PATH} RUN tar -xvzf ${ARENA_TAR_PATH} WORKDIR ${ARENA_EXTRACTED_PATH} RUN sh Arena_SDK_ARM64.conf +======= +#ARG TORCHVISION_INSTALL_DIR=/torchvision-tmp +WORKDIR ${TORCHVISION_INSTALL_DIR} +RUN wget "https://github.com/pytorch/vision/archive/refs/tags/v${TORCHVISION_VERSION}.zip" \ + && unzip "v${TORCHVISION_VERSION}.zip" \ + && cd vision-0.17.0 \ + && mkdir build \ + && cd build \ + && cmake -DWITH_CUDA=1 -DCUDA_HAS_FP16=1 -DCUDA_NO_HALF_OPERATORS=1 -DCUDA_NO_HALF_CONVERSIONS=1 -DCUDA_NO_HALF2_OPERATORS=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH="/usr/local/lib/python3.10/dist-packages/torch/share/cmake/Torch" .. \ + && make -j4 \ + && make install + +# # Install g++10 and replace the older version. 
For some reason some c++ 20 features aren't working with g++9 even though +# # we have CMake configured to use c++ 20 https://stackoverflow.com/questions/69031073/why-am-i-missing-c20-headers-and-how-do-i-fix-this +# RUN apt-get update && apt-get install -y g++-10 gcc-10 +# RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 10 +# RUN update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-10 10 +# RUN update-alternatives --set gcc /usr/bin/gcc-10 +# RUN update-alternatives --set g++ /usr/bin/g++-10 +>>>>>>> Stashed changes WORKDIR /obcpp COPY . . @@ -101,9 +138,16 @@ COPY . . RUN rm -rf /obcpp/build WORKDIR /obcpp/build ENV CMAKE_PREFIX_PATH="/usr/local/lib/python3.10/dist-packages/torch/share/cmake/Torch;/usr/local/share/cmake/TorchVision" +<<<<<<< Updated upstream RUN GITHUB_ACTIONS=true cmake -DCMAKE_PREFIX_PATH="/usr/local/lib/python3.10/dist-packages/torch/share/cmake/Torch;/usr/local/share/cmake/TorchVision" -DCMAKE_MODULE_PATH="/usr/local/share/cmake/TorchVision" -DCMAKE_BUILD_TYPE="Release" .. RUN ninja obcpp +======= +RUN GITHUB_ACTIONS=true cmake -DCMAKE_PREFIX_PATH="/usr/local/lib/python3.10/dist-packages/torch/share/cmake/Torch;/usr/local/share/cmake/TorchVision" -DCMAKE_MODULE_PATH="/usr/local/share/cmake/TorchVision" -DCMAKE_BUILD_TYPE="Release" -DCMAKE_JOB_POOLS="j=2" .. + +# RUN make obcpp cuda_check load_torchvision_model VERBOSE=1 +RUN ninja obcpp +>>>>>>> Stashed changes # login as non-root user # USER $USERNAME diff --git a/docker/Makefile b/docker/Makefile index 92d977e9..1ee3ada6 100644 --- a/docker/Makefile +++ b/docker/Makefile @@ -26,3 +26,5 @@ stop-jetson-pixhawk-compose: run-jetson-cuda-check: docker run -it --rm --runtime nvidia -i tritonuas/obcpp:nvidia /obcpp/build/bin/cuda_check +jetson-develop: + cd .. 
&& docker run -it --net=host --runtime=nvidia --volume=./:/obcpp -i tritonuas/obcpp:jetson /bin/bash \ No newline at end of file diff --git a/docker/jetson-pixhawk-compose.yml b/docker/jetson-pixhawk-compose.yml index 1fe2e47d..3fa21267 100644 --- a/docker/jetson-pixhawk-compose.yml +++ b/docker/jetson-pixhawk-compose.yml @@ -1,7 +1,7 @@ version: "3" services: obcpp: - image: tritonuas/obcpp:jetson + image: ghcr.io/tritonuas/obcpp:debug-jetson-docker-jetpack36 runtime: nvidia network_mode: "host" devices: diff --git a/include/cv/saliency.hpp b/include/cv/saliency.hpp index fa3b0ab2..eddbaa53 100644 --- a/include/cv/saliency.hpp +++ b/include/cv/saliency.hpp @@ -36,6 +36,7 @@ class Saliency { private: std::string modelPath; // path to prediction model torch::jit::script::Module module; // the loaded model + // c10::Device device; // }; #endif // INCLUDE_CV_SALIENCY_HPP_ diff --git a/src/cv/CMakeLists.txt b/src/cv/CMakeLists.txt index d0fd517a..4d74106f 100644 --- a/src/cv/CMakeLists.txt +++ b/src/cv/CMakeLists.txt @@ -14,6 +14,7 @@ set(FILES set(LIB_DEPS obcpp_protos obcpp_utilities + obcpp_camera ) add_library(${LIB_NAME} STATIC diff --git a/src/cv/saliency.cpp b/src/cv/saliency.cpp index b57d0969..32fbb4a5 100644 --- a/src/cv/saliency.cpp +++ b/src/cv/saliency.cpp @@ -34,7 +34,11 @@ std::vector Saliency::salience(cv::Mat image) { tensor = tensor.toType(c10::kFloat).div(255); // swap axis tensor = Saliency::transpose(tensor, { (2), (0), (1) }); - auto input_to_net = ToInput(tensor); + + c10::Device device = torch::cuda::is_available() ? torch::kCUDA : torch::kCPU; // eventually add device as member of Saliency + auto tensor_cuda = tensor.to(device); + + auto input_to_net = ToInput(tensor_cuda); /* * forward() runs an inference on the input image using the provided model @@ -42,7 +46,7 @@ std::vector Saliency::salience(cv::Mat image) { * that we want are : a) boxes (FloatTensor[N, 4]): the predicted boxes, and * b) scores (Tensor[N]): the scores of each detection. 
*/ - + // output is a tuple of (losses, detections) auto output = module.forward(input_to_net); c10::ivalue::Tuple& tuple = output.toTupleRef(); diff --git a/tests/integration/cv_saliency.cpp b/tests/integration/cv_saliency.cpp index 59822d12..f3a5c14c 100644 --- a/tests/integration/cv_saliency.cpp +++ b/tests/integration/cv_saliency.cpp @@ -9,12 +9,14 @@ // expected arguments: int main(int argc, const char* argv[]) { + if (argc != 3) { std::cerr << "usage: example-app \n"; return -1; } // convert image to tensor + const char* modelPath = argv[1]; Saliency sal(modelPath); const char* imgPath = argv[2]; @@ -36,6 +38,7 @@ int main(int argc, const char* argv[]) { // cv::namedWindow("cropped targets", cv::WINDOW_FULLSCREEN); // cv::imshow("cropped targets", img); // cv::waitKey(0); + cv::imwrite("croppedTargets.jpg", img); LOG_F(INFO, "saved croppedTargets.jpg to build/"); // testing: save input image to file path (cv::imsave?) with bounding boxes overlayed