From 89eed5d2c4688d72369cda756acfe97d5eec1aee Mon Sep 17 00:00:00 2001
From: Hanxi Guo
Date: Thu, 17 Jun 2021 20:32:20 +0800
Subject: [PATCH] Add WebNN backend for OpenCV DNN Module

Update dnn.cpp
Update dnn.cpp
Update dnn.cpp
Update dnn.cpp
Add WebNN header files into OpenCV 3rd party files
Create webnn.hpp
update cmake
Complete README and add OpenCVDetectWebNN.cmake file
add webnn.cpp
Modify webnn.cpp
Can successfully compile the code for creating an MLContext
Update webnn.cpp
Update README.md
Update README.md
Update README.md
Update README.md
Update cmake files and update README.md
Update OpenCVDetectWebNN.cmake and README.md
Update OpenCVDetectWebNN.cmake
Fix OpenCVDetectWebNN.cmake and update README.md
Add source webnn_cpp.cpp and library libwebnn_proc.so
Update dnn.cpp
Update dnn.cpp
Update dnn.cpp
Update dnn.cpp
update dnn.cpp
update op_webnn
update op_webnn
Update op_webnn.hpp
update op_webnn.cpp & hpp
Update op_webnn.hpp
Update op_webnn
update the skeleton
Update op_webnn.cpp
Update op_webnn
Update op_webnn.cpp
Update op_webnn.cpp
Update op_webnn.hpp
update op_webnn
update op_webnn
Solved the problems of released variables.
Fixed the bugs in op_webnn.cpp
Implement op_webnn
Implement Relu by WebNN API
Update dnn.cpp for better test
Update elementwise_layers.cpp
Implement ReLU6
Update elementwise_layers.cpp
Implement SoftMax using WebNN API
Implement Reshape by WebNN API
Implement PermuteLayer by WebNN API
Implement PoolingLayer using WebNN API
Update pooling_layer.cpp
Update pooling_layer.cpp
Update pooling_layer.cpp
Update pooling_layer.cpp
Update pooling_layer.cpp
Update pooling_layer.cpp
Implement poolingLayer by WebNN API and add more detailed logs
Update dnn.cpp
Update dnn.cpp
Remove redundant code and add more logs for poolingLayer
Add more logs in the pooling layer implementation
Fix the indentation issue and resolve the compilation issue
Fix the build problems
Fix the build issue
Fix the build issue
Update dnn.cpp
Update dnn.cpp
---
 CMakeLists.txt                                |  17 +
 cmake/OpenCVDetectWebNN.cmake                 |  23 ++
 cmake/checks/webnn.cpp                        |  13 +
 cmake/templates/cvconfig.h.in                 |   3 +
 modules/dnn/CMakeLists.txt                    |  10 +-
 modules/dnn/include/opencv2/dnn/dnn.hpp       |   3 +
 modules/dnn/src/dnn.cpp                       | 308 ++++++++++++++++++
 modules/dnn/src/layers/elementwise_layers.cpp | 149 +++++++++
 modules/dnn/src/layers/permute_layer.cpp      |  16 +
 modules/dnn/src/layers/pooling_layer.cpp      | 120 +++++++
 modules/dnn/src/layers/reshape_layer.cpp      |  13 +
 modules/dnn/src/layers/softmax_layer.cpp      |  24 ++
 modules/dnn/src/op_webnn.cpp                  | 237 ++++++++++++++
 modules/dnn/src/op_webnn.hpp                  | 119 +++++++
 modules/dnn/src/webnn/README.md               |  11 +
 modules/dnn/test/test_common.hpp              |   3 +-
 modules/dnn/test/test_common.impl.hpp         |  15 +-
 samples/dnn/classification.cpp                |   3 +-
 18 files changed, 1083 insertions(+), 4 deletions(-)
 create mode 100644 cmake/OpenCVDetectWebNN.cmake
 create mode 100644 cmake/checks/webnn.cpp
 create mode 100644 modules/dnn/src/op_webnn.cpp
 create mode 100644 modules/dnn/src/op_webnn.hpp
 create mode 100644 modules/dnn/src/webnn/README.md

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 49abe017a5ee..f049e1a9e5dc 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -290,6 +290,9 @@ OCV_OPTION(WITH_INF_ENGINE "Include Intel Inference Engine support" OFF
 OCV_OPTION(WITH_NGRAPH "Include nGraph support" WITH_INF_ENGINE
   VISIBLE_IF TRUE
   VERIFY TARGET ngraph::ngraph)
+OCV_OPTION(WITH_WEBNN "Include WebNN support" OFF
+  VISIBLE_IF TRUE
+  VERIFY HAVE_WEBNN)
 OCV_OPTION(WITH_JASPER "Include JPEG2K support (Jasper)" ON
   VISIBLE_IF NOT IOS
   VERIFY
HAVE_JASPER)

@@ -784,6 +787,11 @@ if(WITH_VULKAN)
   include(cmake/OpenCVDetectVulkan.cmake)
 endif()
 
+# --- WebNN ---
+if(WITH_WEBNN)
+  include(cmake/OpenCVDetectWebNN.cmake)
+endif()
+
 # --- Inference Engine ---
 if(WITH_INF_ENGINE)
   include(cmake/OpenCVDetectInferenceEngine.cmake)
@@ -1600,6 +1608,15 @@ if(WITH_VULKAN OR HAVE_VULKAN)
   endif()
 endif()
 
+if(WITH_WEBNN OR HAVE_WEBNN)
+  status("")
+  status("  WebNN:" HAVE_WEBNN THEN "YES" ELSE "NO")
+  if(HAVE_WEBNN)
+    status("    Include path:" WEBNN_HEADER_DIRS THEN "${WEBNN_HEADER_DIRS}" ELSE "NO")
+    status("    Link libraries:" WEBNN_LIBRARIES THEN "${WEBNN_LIBRARIES}" ELSE "NO")
+  endif()
+endif()
+
 if(WITH_OPENCL OR HAVE_OPENCL)
   ocv_build_features_string(opencl_features
     IF HAVE_OPENCL_SVM THEN "SVM"
diff --git a/cmake/OpenCVDetectWebNN.cmake b/cmake/OpenCVDetectWebNN.cmake
new file mode 100644
index 000000000000..90e69c46f7ab
--- /dev/null
+++ b/cmake/OpenCVDetectWebNN.cmake
@@ -0,0 +1,23 @@
+ocv_clear_vars(HAVE_WEBNN)
+ocv_clear_vars(WEBNN_EMSDK)
+if(WITH_WEBNN)
+  set(WEBNN_HEADER_DIRS "$ENV{WEBNN_NATIVE_DIR}/gen/src/include")
+  set(WEBNN_INCLUDE_DIRS "$ENV{WEBNN_NATIVE_DIR}/../../src/include")
+  set(WEBNN_LIBRARIES "$ENV{WEBNN_NATIVE_DIR}/libwebnn_native.so;$ENV{WEBNN_NATIVE_DIR}/libwebnn_proc.so")
+endif()
+
+try_compile(VALID_WEBNN
+    "${OpenCV_BINARY_DIR}"
+    SOURCES "${OpenCV_SOURCE_DIR}/cmake/checks/webnn.cpp"
+            "$ENV{WEBNN_NATIVE_DIR}/gen/src/webnn/webnn_cpp.cpp"
+    CMAKE_FLAGS "-DINCLUDE_DIRECTORIES:STRING=${WEBNN_INCLUDE_DIRS}\;${WEBNN_HEADER_DIRS}"
+                "-DLINK_LIBRARIES:STRING=${WEBNN_LIBRARIES}"
+    OUTPUT_VARIABLE TRY_OUT
+)
+if(NOT ${VALID_WEBNN})
+  message(WARNING "Can't use WebNN-native")
+  return()
+endif()
+message(STATUS "Use WebNN-native")
+
+set(HAVE_WEBNN 1)
diff --git a/cmake/checks/webnn.cpp b/cmake/checks/webnn.cpp
new file mode 100644
index 000000000000..1a05f3569957
--- /dev/null
+++ b/cmake/checks/webnn.cpp
@@ -0,0 +1,13 @@
+#include <webnn/webnn.h>
+#include <webnn/webnn_cpp.h>
+#include <webnn/webnn_proc.h>
+#include <webnn_native/WebnnNative.h>
+
+
+int main(int /*argc*/, char** /*argv*/)
+{
+    WebnnProcTable backendProcs = webnn_native::GetProcs();
+    webnnProcSetProcs(&backendProcs);
+    ml::Context ml_context = ml::Context(webnn_native::CreateContext());
+    return 0;
+}
\ No newline at end of file
diff --git a/cmake/templates/cvconfig.h.in b/cmake/templates/cvconfig.h.in
index e79e1ec0a1bc..99ec4802d2cb 100644
--- a/cmake/templates/cvconfig.h.in
+++ b/cmake/templates/cvconfig.h.in
@@ -59,6 +59,9 @@
 /* Vulkan support */
 #cmakedefine HAVE_VULKAN
 
+/* WebNN support */
+#cmakedefine HAVE_WEBNN
+
 /* Define to 1 if you have the <inttypes.h> header file.
*/ #cmakedefine HAVE_INTTYPES_H 1 diff --git a/modules/dnn/CMakeLists.txt b/modules/dnn/CMakeLists.txt index 4c8129cbda1c..3ae87ef72edd 100644 --- a/modules/dnn/CMakeLists.txt +++ b/modules/dnn/CMakeLists.txt @@ -133,6 +133,14 @@ if(HAVE_TENGINE) list(APPEND libs -Wl,--whole-archive ${TENGINE_LIBRARIES} -Wl,--no-whole-archive) endif() +set(webnn_srcs "") +if(HAVE_WEBNN) + list(APPEND include_dirs ${WEBNN_HEADER_DIRS}) + list(APPEND include_dirs ${WEBNN_INCLUDE_DIRS}) + list(APPEND libs -Wl,--whole-archive ${WEBNN_LIBRARIES} -Wl,--no-whole-archive) + list(APPEND webnn_srcs $ENV{WEBNN_NATIVE_DIR}/gen/src/webnn/webnn_cpp.cpp) +endif() + ocv_module_include_directories(${include_dirs}) if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") ocv_append_source_files_cxx_compiler_options(fw_srcs "-Wno-suggest-override") # GCC @@ -162,7 +170,7 @@ if(HAVE_NGRAPH) list(APPEND dnn_runtime_libs ngraph::ngraph) endif() -ocv_glob_module_sources(${sources_options} SOURCES ${fw_srcs}) +ocv_glob_module_sources(${sources_options} SOURCES ${fw_srcs} ${webnn_srcs}) ocv_create_module(${libs} ${dnn_runtime_libs}) ocv_add_samples() ocv_add_accuracy_tests(${dnn_runtime_libs}) diff --git a/modules/dnn/include/opencv2/dnn/dnn.hpp b/modules/dnn/include/opencv2/dnn/dnn.hpp index 255b41de88a5..c7be7b8dc440 100644 --- a/modules/dnn/include/opencv2/dnn/dnn.hpp +++ b/modules/dnn/include/opencv2/dnn/dnn.hpp @@ -74,6 +74,7 @@ CV__DNN_INLINE_NS_BEGIN DNN_BACKEND_OPENCV, DNN_BACKEND_VKCOM, DNN_BACKEND_CUDA, + DNN_BACKEND_WEBNN, #ifdef __OPENCV_BUILD DNN_BACKEND_INFERENCE_ENGINE_NGRAPH = 1000000, // internal - use DNN_BACKEND_INFERENCE_ENGINE + setInferenceEngineBackendType() DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, // internal - use DNN_BACKEND_INFERENCE_ENGINE + setInferenceEngineBackendType() @@ -298,6 +299,8 @@ CV__DNN_INLINE_NS_BEGIN virtual Ptr initVkCom(const std::vector > &inputs); + virtual Ptr initWebnn(const std::vector > &inputs, const std::vector >& nodes); + /** * @brief Returns a CUDA backend node * diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp index 668cce8fa671..aaa425c5e7b0 100644 --- a/modules/dnn/src/dnn.cpp +++ b/modules/dnn/src/dnn.cpp @@ -45,6 +45,7 @@ #include "ie_ngraph.hpp" #include "op_vkcom.hpp" #include "op_cuda.hpp" +#include "op_webnn.hpp" #ifdef HAVE_CUDA #include "cuda4dnn/init.hpp" @@ -231,6 +232,13 @@ class BackendRegistry #endif #endif // HAVE_INF_ENGINE +#ifdef HAVE_WEBNN + if (haveWebnn()) + { + backends.push_back(std::make_pair(DNN_BACKEND_WEBNN, DNN_TARGET_CPU)); + } +#endif // HAVE_WEBNN + #ifdef HAVE_OPENCL if (cv::ocl::useOpenCL()) { @@ -1120,6 +1128,14 @@ static Ptr wrapMat(int backendId, int targetId, cv::Mat& m) return Ptr(new NgraphBackendWrapper(targetId, m)); #else CV_Error(Error::StsNotImplemented, "This OpenCV version is built without support of Inference Engine + nGraph"); +#endif + } + else if (backendId == DNN_BACKEND_WEBNN) + { +#ifdef HAVE_WEBNN + return Ptr(new WebnnBackendWrapper(targetId, m)); +#else + CV_Error(Error::StsNotImplemented, "This OpenCV version is built without support of WebNN"); #endif } else if (backendId == DNN_BACKEND_VKCOM) @@ -1263,6 +1279,12 @@ struct Net::Impl : public detail::NetImplBase { return wrapMat(preferableBackend, preferableTarget, host); } + else if (preferableBackend == DNN_BACKEND_WEBNN) + { +#ifdef HAVE_WEBNN + return wrapMat(preferableBackend, preferableTarget, host); +#endif + } else if (preferableBackend == DNN_BACKEND_VKCOM) { #ifdef HAVE_VULKAN @@ -1403,6 +1425,13 @@ struct Net::Impl : public detail::NetImplBase 
preferableTarget == DNN_TARGET_FPGA ); } +#endif +#ifdef HAVE_WEBNN + if (preferableBackend == DNN_BACKEND_WEBNN) + { + CV_Assert(preferableTarget == DNN_TARGET_CPU || + preferableTarget == DNN_TARGET_OPENCL); + } #endif CV_Assert(preferableBackend != DNN_BACKEND_VKCOM || preferableTarget == DNN_TARGET_VULKAN); @@ -1626,6 +1655,14 @@ struct Net::Impl : public detail::NetImplBase initNgraphBackend(blobsToKeep_); #else CV_Error(Error::StsNotImplemented, "This OpenCV version is built without support of Inference Engine + nGraph"); +#endif + } + else if (preferableBackend == DNN_BACKEND_WEBNN) + { +#ifdef HAVE_WEBNN + initWebnnBackend(blobsToKeep_); +#else + CV_Error(Error::StsNotImplemented, "This OpenCV version is built without support of WebNN"); #endif } else if (preferableBackend == DNN_BACKEND_VKCOM) @@ -2332,6 +2369,265 @@ struct Net::Impl : public detail::NetImplBase } #endif // HAVE_DNN_NGRAPH +#ifdef HAVE_WEBNN + void addWebnnOutputs(LayerData &ld) + { + CV_TRACE_FUNCTION(); + + Ptr layerNet; + auto it = ld.backendNodes.find(preferableBackend); + if (it != ld.backendNodes.end()) + { + Ptr node = it->second; + if (!node.empty()) + { + Ptr webnnNode = node.dynamicCast(); + CV_Assert(!webnnNode.empty()); CV_Assert(!webnnNode->net.empty()); + layerNet = webnnNode->net; + } + } + + for (int i = 0; i < ld.inputBlobsId.size(); ++i) + { + LayerData &inpLd = layers[ld.inputBlobsId[i].lid]; + Ptr inpNode = inpLd.backendNodes[preferableBackend]; + if (!inpNode.empty()) + { + Ptr webnnInpNode = inpNode.dynamicCast(); + CV_Assert(!webnnInpNode.empty()); CV_Assert(!webnnInpNode->net.empty()); + if (layerNet != webnnInpNode->net) + { + webnnInpNode->net->addOutput(webnnInpNode->name); + webnnInpNode->net->setUnconnectedNodes(webnnInpNode); + } + } + } + } + + void initWebnnBackend(const std::vector& blobsToKeep_) + { + CV_TRACE_FUNCTION(); + CV_Assert_N(preferableBackend == DNN_BACKEND_WEBNN, haveWebnn()); + + MapIdToLayerData::iterator it; + Ptr net; + + for (it = layers.begin(); it != layers.end(); ++it) + { + LayerData &ld = it->second; + if (ld.id == 0) + { + CV_Assert((netInputLayer->outNames.empty() && ld.outputBlobsWrappers.size() == 1) || + (netInputLayer->outNames.size() == ld.outputBlobsWrappers.size())); + for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i) + { + Ptr wrapper = ld.outputBlobsWrappers[i].dynamicCast(); + std::string outputName = netInputLayer->outNames.empty() ? ld.name : netInputLayer->outNames[i]; + outputName = ld.outputBlobsWrappers.size() > 1 ? (outputName + "." + std::to_string(i)) : outputName; + wrapper->name = outputName; + } + } + else + { + for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i) + { + Ptr wrapper = ld.outputBlobsWrappers[i].dynamicCast(); + std::string outputName = ld.outputBlobsWrappers.size() > 1 ? (ld.name + "." + std::to_string(i)) : ld.name; + wrapper->name = outputName; + } + } + } + + // Build WebNN networks from sets of layers that support this + // backend. Split a whole model on several WebNN networks if + // some of layers are not implemented. + for (it = layers.begin(); it != layers.end(); ++it) + { + LayerData &ld = it->second; + + if (ld.id == 0 && ld.skip) + continue; + + bool fused = ld.skip; + Ptr layer = ld.layerInstance; + if (!fused && !layer->supportBackend(preferableBackend)) + { + // For test use. when not using WebNN, the test case will fail + // with the following code. 
+ + CV_LOG_WARNING(NULL, "Layer " + ld.type + " name " + ld.name + " is unsupported by WebNN backend."); + + addWebnnOutputs(ld); + net = Ptr(); + layer->preferableTarget = DNN_TARGET_CPU; + + for (int i = 0; i < ld.inputBlobsId.size(); ++i) + { + LayerData &inpLd = layers[ld.inputBlobsId[i].lid]; + Ptr inpNode = inpLd.backendNodes[preferableBackend]; + if (!inpNode.empty()) { + Ptr webnnNode = inpNode.dynamicCast(); + CV_Assert(!webnnNode.empty()); + webnnNode->net->setUnconnectedNodes(webnnNode); + } + } + continue; + } + ld.skip = true; // Initially skip all WebNN supported layers. + + // Create a new network if one of inputs from different WebNN graph. + std::vector> inputNodes; + for (int i = 0; i < ld.inputBlobsId.size(); ++i) + { + // Layer_Test_ROIPooling.Accuracy has 2 inputs inpLD = 0, 0 -> has 4 inputNodes (input, rois, input, rois) + if (inputNodes.size() == ld.inputBlobsId.size()) { + break; + } + LayerData &inpLd = layers[ld.inputBlobsId[i].lid]; + Ptr inpNode = inpLd.backendNodes[preferableBackend]; + if (!inpNode.empty()) + { + Ptr webnnInpNode = inpNode.dynamicCast(); + CV_Assert(!webnnInpNode.empty()); CV_Assert(!webnnInpNode->net.empty()); + if (webnnInpNode->net == net && !fused) { + inputNodes.push_back(inpNode); + continue; + } + } + + if (net.empty()) { + net = Ptr(new WebnnNet()); + } + + if (!fused) { + std::vector inputNames; + std::vector inputs; + + auto curr_pos = inpLd.consumers.begin(); + auto compare = [&ld] (const LayerPin& lp) { return lp.lid == ld.id; }; + auto cons = curr_pos; + while ((cons = std::find_if(curr_pos, inpLd.consumers.end(), compare)) != + inpLd.consumers.end()) { + int cons_inp = cons->oid; + Ptr inpWrapper = inpLd.outputBlobsWrappers[cons_inp]. + dynamicCast(); + CV_Assert(!inpWrapper.empty()); + auto iter = std::find(inputNames.begin(), inputNames.end(), + inpWrapper->name); + if (iter == inputNames.end()) { + inputNames.push_back(inpWrapper->name); + inputs.push_back(inpLd.outputBlobs[cons_inp]); + } + curr_pos = cons + 1; + } + + auto inps = net->setInputs(inputs, inputNames); + for (auto& inp : inps) { + WebnnBackendNode* node = new WebnnBackendNode(inp); + node->net = net; + inputNodes.emplace_back(Ptr(node)); + } + } + } + + Ptr node; + if (!net.empty()) + { + if (fused) + { + bool inPlace = ld.inputBlobsId.size() == 1 && ld.outputBlobs.size() == 1 && + ld.inputBlobs[0]->data == ld.outputBlobs[0].data; + CV_Assert(inPlace); + node = layers[ld.inputBlobsId[0].lid].backendNodes[preferableBackend]; + ld.inputBlobsWrappers = layers[ld.inputBlobsId[0].lid].inputBlobsWrappers; + } + } + else { + net = Ptr(new WebnnNet()); + } + + if (!fused) + { + CV_Assert(ld.inputBlobsId.size() == inputNodes.size()); + for (int i = 0; i < ld.inputBlobsId.size(); ++i) + { + int lid = ld.inputBlobsId[i].lid; + int oid = ld.inputBlobsId[i].oid; + if (oid == 0 || lid == 0) + continue; + + auto webnnInpNode = inputNodes[i].dynamicCast(); + inputNodes[i] = Ptr(new WebnnBackendNode(webnnInpNode->operand)); + } + + if (layer->supportBackend(preferableBackend)) + { + node = layer->initWebnn(ld.inputBlobsWrappers, inputNodes); + for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i) + { + Ptr wrapper = ld.outputBlobsWrappers[i].dynamicCast(); + node.dynamicCast()->name = wrapper->name; + } + } + else + { + continue; + } + } + else if (node.empty()) + continue; + + ld.backendNodes[preferableBackend] = node; + + Ptr webnnNode = node.dynamicCast(); + CV_Assert(!webnnNode.empty()); + webnnNode->net = net; + + if (ld.consumers.empty()) { + // TF EAST_text_detection + 
webnnNode->net->setUnconnectedNodes(webnnNode); + } + for (const auto& pin : blobsToKeep_) + { + if (pin.lid == ld.id) + { + webnnNode->net->addOutput(webnnNode->name); + break; + } + } + net->addBlobs(ld.inputBlobsWrappers); + net->addBlobs(ld.outputBlobsWrappers); + addWebnnOutputs(ld); + } + + // Initialize all networks. + for (MapIdToLayerData::reverse_iterator it = layers.rbegin(); it != layers.rend(); ++it) + { + LayerData &ld = it->second; + auto iter = ld.backendNodes.find(preferableBackend); + if (iter == ld.backendNodes.end()) + continue; + + Ptr& node = iter->second; + if (node.empty()) + continue; + + Ptr webnnNode = node.dynamicCast(); + if (webnnNode.empty()) + continue; + + CV_Assert(!webnnNode->net.empty()); + + if (!webnnNode->net->isInitialized()) + { + webnnNode->net->setUnconnectedNodes(webnnNode); + webnnNode->net->createNet((Target)preferableTarget); + ld.skip = false; + } + } + } +#endif + void initVkComBackend() { CV_TRACE_FUNCTION(); @@ -3370,6 +3666,10 @@ struct Net::Impl : public detail::NetImplBase else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) { forwardNgraph(ld.outputBlobsWrappers, node, isAsync); + } + else if (preferableBackend == DNN_BACKEND_WEBNN) + { + forwardWebnn(ld.outputBlobsWrappers, node, isAsync); } else if (preferableBackend == DNN_BACKEND_VKCOM) { @@ -4487,6 +4787,7 @@ string Net::Impl::dump() case DNN_BACKEND_OPENCV: backend = "OCV/"; break; case DNN_BACKEND_VKCOM: backend = "VULKAN/"; break; case DNN_BACKEND_CUDA: backend = "CUDA/"; break; + case DNN_BACKEND_WEBNN: backend = "WEBNN/"; break; // don't use default: } out << "digraph G {\n"; @@ -5078,6 +5379,13 @@ Ptr Layer::initNgraph(const std::vector > & inp return Ptr(); } +Ptr Layer::initWebnn(const std::vector > & inputs, const std::vector >& nodes) +{ + CV_Error(Error::StsNotImplemented, "WebNN pipeline of " + type + + " layers is not defined."); + return Ptr(); +} + void Layer::applyHalideScheduler(Ptr& node, const std::vector &inputs, const std::vector &outputs, int targetId) const { diff --git a/modules/dnn/src/layers/elementwise_layers.cpp b/modules/dnn/src/layers/elementwise_layers.cpp index 9bb5be342f76..88dad02af7d0 100644 --- a/modules/dnn/src/layers/elementwise_layers.cpp +++ b/modules/dnn/src/layers/elementwise_layers.cpp @@ -47,9 +47,11 @@ #include "../op_inf_engine.hpp" #include "../ie_ngraph.hpp" #include "../op_vkcom.hpp" +#include "../op_webnn.hpp" #include #include +#include #ifdef HAVE_OPENCL #include "opencl_kernels_dnn.hpp" @@ -59,6 +61,7 @@ #include "../cuda4dnn/primitives/activation.hpp" using namespace cv::dnn::cuda4dnn; #endif +#include namespace cv { @@ -181,6 +184,17 @@ class ElementWiseLayer : public Func::Layer } #endif // HAVE_DNN_NGRAPH +#ifdef HAVE_WEBNN + virtual Ptr initWebnn(const std::vector >& inputs, const std::vector >& nodes) CV_OVERRIDE + { + Ptr node = nodes[0].dynamicCast(); + auto& webnnInpOperand = node->operand; + auto& webnnGraphBuilder = node->net->builder; + auto operand = func.initWebnnAPI(webnnGraphBuilder, webnnInpOperand); + return Ptr(new WebnnBackendNode(operand)); + } +#endif + virtual Ptr initVkCom(const std::vector >& inputs) CV_OVERRIDE { #ifdef HAVE_VULKAN @@ -306,6 +320,16 @@ struct ReLUFunctor : public BaseFunctor #ifdef HAVE_DNN_NGRAPH if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) return true; +#endif +#ifdef HAVE_WEBNN + if (backendId == DNN_BACKEND_WEBNN) { + // TODO: support PRELU + if (slope != 0) + { + CV_LOG_WARNING(NULL, "PRELU is not supported now."); + } + return slope == 0; + } #endif return 
backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || @@ -428,6 +452,13 @@ struct ReLUFunctor : public BaseFunctor } #endif // HAVE_DNN_NGRAPH +#ifdef HAVE_WEBNN + ml::Operand initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input) + { + return builder.Relu(input); + } +#endif + #ifdef HAVE_VULKAN std::shared_ptr initVkCom() { @@ -455,6 +486,7 @@ struct ReLU6Functor : public BaseFunctor return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == DNN_BACKEND_HALIDE || + backendId == DNN_BACKEND_WEBNN || backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH; } @@ -551,6 +583,33 @@ struct ReLU6Functor : public BaseFunctor } #endif // HAVE_DNN_NGRAPH + + +#ifdef HAVE_WEBNN + ml::Operand BuildConstant(const ml::GraphBuilder& builder, + const std::vector& dimensions, + const void* value, + size_t size, + ml::OperandType type) { + ml::OperandDescriptor desc; + desc.type = type; + desc.dimensions = dimensions.data(); + desc.dimensionsCount = (uint32_t)dimensions.size(); + ml::ArrayBufferView resource; + resource.buffer = const_cast(value); + resource.byteLength = size; + return builder.Constant(&desc, &resource); + } + + ml::Operand initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input) + { + ml::ClampOptions clampOptions; + clampOptions.minValue = BuildConstant(builder, {}, &minValue, 1 * sizeof(float), ml::OperandType::Float32); + clampOptions.maxValue = BuildConstant(builder, {}, &maxValue, 1 * sizeof(float), ml::OperandType::Float32); + return builder.Clamp(input, &clampOptions); + } +#endif + #ifdef HAVE_VULKAN std::shared_ptr initVkCom() { @@ -643,6 +702,15 @@ struct TanHFunctor : public BaseFunctor } #endif // HAVE_DNN_NGRAPH +#ifdef HAVE_WEBNN + ml::Operand initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input) + { + CV_Error(Error::StsNotImplemented, ""); + ml::Operand operand; + return operand; + } +#endif + #ifdef HAVE_VULKAN std::shared_ptr initVkCom() { @@ -735,6 +803,15 @@ struct SwishFunctor : public BaseFunctor } #endif // HAVE_DNN_NGRAPH +#ifdef HAVE_WEBNN + ml::Operand initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input) + { + CV_Error(Error::StsNotImplemented, ""); + ml::Operand operand; + return operand; + } +#endif + #ifdef HAVE_VULKAN std::shared_ptr initVkCom() { @@ -840,6 +917,15 @@ struct MishFunctor : public BaseFunctor } #endif // HAVE_DNN_NGRAPH +#ifdef HAVE_WEBNN + ml::Operand initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input) + { + CV_Error(Error::StsNotImplemented, ""); + ml::Operand operand; + return operand; + } +#endif + #ifdef HAVE_VULKAN std::shared_ptr initVkCom() { @@ -932,6 +1018,15 @@ struct SigmoidFunctor : public BaseFunctor } #endif // HAVE_DNN_NGRAPH +#ifdef HAVE_WEBNN + ml::Operand initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input) + { + CV_Error(Error::StsNotImplemented, ""); + ml::Operand operand; + return operand; + } +#endif + #ifdef HAVE_VULKAN std::shared_ptr initVkCom() { @@ -1024,6 +1119,15 @@ struct ELUFunctor : public BaseFunctor } #endif // HAVE_DNN_NGRAPH +#ifdef HAVE_WEBNN + ml::Operand initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input) + { + CV_Error(Error::StsNotImplemented, ""); + ml::Operand operand; + return operand; + } +#endif + #ifdef HAVE_VULKAN std::shared_ptr initVkCom() { @@ -1122,6 +1226,15 @@ struct AbsValFunctor : public BaseFunctor } #endif // HAVE_DNN_NGRAPH +#ifdef HAVE_WEBNN + ml::Operand 
initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input) + { + CV_Error(Error::StsNotImplemented, ""); + ml::Operand operand; + return operand; + } +#endif + #ifdef HAVE_VULKAN std::shared_ptr initVkCom() { @@ -1215,6 +1328,15 @@ struct BNLLFunctor : public BaseFunctor } #endif // HAVE_DNN_NGRAPH +#ifdef HAVE_WEBNN + ml::Operand initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input) + { + CV_Error(Error::StsNotImplemented, ""); + ml::Operand operand; + return operand; + } +#endif + #ifdef HAVE_VULKAN std::shared_ptr initVkCom() { @@ -1367,6 +1489,15 @@ struct PowerFunctor : public BaseFunctor } #endif // HAVE_DNN_NGRAPH +#ifdef HAVE_WEBNN + ml::Operand initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input) + { + CV_Error(Error::StsNotImplemented, ""); + ml::Operand operand; + return operand; + } +#endif + #ifdef HAVE_VULKAN std::shared_ptr initVkCom() { @@ -1507,6 +1638,15 @@ struct ExpFunctor : public BaseFunctor } #endif // HAVE_DNN_NGRAPH +#ifdef HAVE_WEBNN + ml::Operand initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input) + { + CV_Error(Error::StsNotImplemented, ""); + ml::Operand operand; + return operand; + } +#endif + #ifdef HAVE_VULKAN std::shared_ptr initVkCom() { @@ -1644,6 +1784,15 @@ struct ChannelsPReLUFunctor : public BaseFunctor } #endif // HAVE_DNN_NGRAPH +#ifdef HAVE_WEBNN + ml::Operand initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input) + { + CV_Error(Error::StsNotImplemented, ""); + ml::Operand operand; + return operand; + } +#endif + #ifdef HAVE_VULKAN std::shared_ptr initVkCom() { diff --git a/modules/dnn/src/layers/permute_layer.cpp b/modules/dnn/src/layers/permute_layer.cpp index c525c3f82f78..f950e2cff3c4 100644 --- a/modules/dnn/src/layers/permute_layer.cpp +++ b/modules/dnn/src/layers/permute_layer.cpp @@ -46,6 +46,7 @@ #include "../op_inf_engine.hpp" #include "../ie_ngraph.hpp" #include "../op_vkcom.hpp" +#include "../op_webnn.hpp" #include #include @@ -119,6 +120,7 @@ class PermuteLayerImpl CV_FINAL : public PermuteLayer #endif return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || + backendId == DNN_BACKEND_WEBNN || ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine()) || (backendId == DNN_BACKEND_VKCOM && haveVulkan()); } @@ -413,6 +415,20 @@ class PermuteLayerImpl CV_FINAL : public PermuteLayer } #endif // HAVE_DNN_NGRAPH +#ifdef HAVE_WEBNN + virtual Ptr initWebnn(const std::vector >& inputs, const std::vector >& nodes) CV_OVERRIDE + { + Ptr node = nodes[0].dynamicCast(); + auto& webnnInpOperand = node->operand; + auto& webnnGraphBuilder = node->net->builder; + std::vector permutation(_order.begin(), _order.end()); + ml::TransposeOptions options; + options.permutation = permutation.data(); + options.permutationCount = permutation.size(); + auto operand = webnnGraphBuilder.Transpose(webnnInpOperand, &options); + return Ptr(new WebnnBackendNode(operand)); + } +#endif #ifdef HAVE_CUDA Ptr initCUDA( diff --git a/modules/dnn/src/layers/pooling_layer.cpp b/modules/dnn/src/layers/pooling_layer.cpp index b8e2cfdf8f84..b5e42ee650c7 100644 --- a/modules/dnn/src/layers/pooling_layer.cpp +++ b/modules/dnn/src/layers/pooling_layer.cpp @@ -46,6 +46,7 @@ #include "../op_cuda.hpp" #include "../op_halide.hpp" #include "../op_inf_engine.hpp" +#include "../op_webnn.hpp" #ifdef HAVE_DNN_NGRAPH #include "../ie_ngraph.hpp" @@ -85,6 +86,7 @@ typedef int HALIDE_DIFF_T; #include 
"../cuda4dnn/primitives/max_unpooling.hpp" using namespace cv::dnn::cuda4dnn; #endif +#include namespace cv @@ -246,6 +248,51 @@ class PoolingLayerImpl CV_FINAL : public PoolingLayer (type == MAX || type == AVE); return false; } + else if (backendId == DNN_BACKEND_WEBNN) + { + if (kernel_size.empty() || kernel_size.size() == 2) + { + if (!haveWebnn()) + { + return false; + } + else + { + if (!ceilMode) + { + CV_LOG_WARNING(NULL, "ceilMode is not supported by WebNN backend."); + return false; + } + if (computeMaxIdx) + { + CV_LOG_WARNING(NULL, "Mask is not supported by WebNN backend."); + return false; + } + if (type != MAX && type != AVE) + { + if (type == STOCHASTIC) + { + CV_LOG_WARNING(NULL, "Stochastic Pooling is not supported by WebNN backend."); + } + if (type == SUM) + { + CV_LOG_WARNING(NULL, "Sum Pooling is not supported by WebNN backend."); + } + if (type == ROI) + { + CV_LOG_WARNING(NULL, "ROI Pooling is not supported by WebNN backend."); + } + if (type == PSROI) + { + CV_LOG_WARNING(NULL, "Position-sensitive ROI Pooling is not supported by WebNN backend."); + } + CV_LOG_WARNING(NULL, "WebNN backend only supports MaxPooling and AveragePooling currently."); + return false; + } + } + return true; + } + } return false; } @@ -607,6 +654,79 @@ class PoolingLayerImpl CV_FINAL : public PoolingLayer } #endif // HAVE_DNN_NGRAPH +#ifdef HAVE_WEBNN + struct Pool2dOptions { + public: + std::vector windowDimensions; + std::vector padding; + std::vector strides; + std::vector dilations; + ml::AutoPad autoPad = ml::AutoPad::Explicit; + ml::InputOperandLayout layout = ml::InputOperandLayout::Nchw; + + const ml::Pool2dOptions* AsPtr() { + if (!windowDimensions.empty()) { + mOptions.windowDimensionsCount = windowDimensions.size(); + mOptions.windowDimensions = windowDimensions.data(); + } + if (!padding.empty()) { + mOptions.paddingCount = padding.size(); + mOptions.padding = padding.data(); + } + if (!strides.empty()) { + mOptions.stridesCount = strides.size(); + mOptions.strides = strides.data(); + } + if (!dilations.empty()) { + mOptions.dilationsCount = dilations.size(); + mOptions.dilations = dilations.data(); + } + mOptions.layout = layout; + mOptions.autoPad = autoPad; + return &mOptions; + } + + private: + ml::Pool2dOptions mOptions; + }; + + virtual Ptr initWebnn(const std::vector >& inputs, const std::vector >& nodes) CV_OVERRIDE + { + std::cout << "Use WebNN Pooling Layer's Implementation." 
<< std::endl; + Ptr node = nodes[0].dynamicCast(); + auto& webnnInpOperand = node->operand; + auto& webnnGraphBuilder = node->net->builder; + Pool2dOptions options; + std::vector kernelSize(kernel_size.begin(), kernel_size.end()); + std::vector Strides(strides.begin(), strides.end()); + std::vector Padding; + if (padMode.empty()) { + Padding = {static_cast(pads_begin[0]), + static_cast(pads_end[0]), + static_cast(pads_begin[1]), + static_cast(pads_end[1])}; + } else if (padMode == "VALID") { + Padding = {0, 0, 0, 0}; + } else if (padMode == "SAME") { + options.autoPad = ml::AutoPad::SameUpper; + } + options.windowDimensions = kernelSize; + options.strides = Strides; + options.padding = Padding; + if (type == MAX) + { + auto operand = webnnGraphBuilder.MaxPool2d(webnnInpOperand, options.AsPtr()); + return Ptr(new WebnnBackendNode(operand)); + } + else if (type == AVE) + { + auto operand = webnnGraphBuilder.AveragePool2d(webnnInpOperand, options.AsPtr()); + return Ptr(new WebnnBackendNode(operand)); + } else { + CV_Error(Error::StsNotImplemented, "Unsupported pooling type"); + } + } +#endif // HAVE_WEBNN class PoolingInvoker : public ParallelLoopBody { diff --git a/modules/dnn/src/layers/reshape_layer.cpp b/modules/dnn/src/layers/reshape_layer.cpp index ab8f41c7b6dd..7c1829d4298e 100644 --- a/modules/dnn/src/layers/reshape_layer.cpp +++ b/modules/dnn/src/layers/reshape_layer.cpp @@ -45,6 +45,7 @@ #include "../op_cuda.hpp" #include "../op_inf_engine.hpp" #include "../ie_ngraph.hpp" +#include "../op_webnn.hpp" #include @@ -203,6 +204,7 @@ class ReshapeLayerImpl CV_FINAL : public ReshapeLayer { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || + backendId == DNN_BACKEND_WEBNN || ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine()); } @@ -330,6 +332,17 @@ class ReshapeLayerImpl CV_FINAL : public ReshapeLayer } #endif // HAVE_DNN_NGRAPH +#ifdef HAVE_WEBNN + virtual Ptr initWebnn(const std::vector >& inputs, const std::vector >& nodes) CV_OVERRIDE + { + Ptr node = nodes[0].dynamicCast(); + auto& webnnInpOperand = node->operand; + auto& webnnGraphBuilder = node->net->builder; + const std::vector out(outShapes[0].begin(), outShapes[0].end()); + auto operand = webnnGraphBuilder.Reshape(webnnInpOperand, out.data(), out.size()); + return Ptr(new WebnnBackendNode(operand)); + } +#endif #ifdef HAVE_CUDA Ptr initCUDA( diff --git a/modules/dnn/src/layers/softmax_layer.cpp b/modules/dnn/src/layers/softmax_layer.cpp index 546c1017add8..0f67eb39b674 100644 --- a/modules/dnn/src/layers/softmax_layer.cpp +++ b/modules/dnn/src/layers/softmax_layer.cpp @@ -47,9 +47,11 @@ #include "../op_inf_engine.hpp" #include "../ie_ngraph.hpp" #include "../op_vkcom.hpp" +#include "../op_webnn.hpp" #include #include +#include using std::max; #ifdef HAVE_OPENCL @@ -97,6 +99,16 @@ class SoftMaxLayerImpl CV_FINAL : public SoftmaxLayer virtual bool supportBackend(int backendId) CV_OVERRIDE { +#ifdef HAVE_WEBNN + if (backendId == DNN_BACKEND_WEBNN) { + // TODO: support logSoftMax + if (logSoftMax) + { + CV_LOG_WARNING(NULL, "logSoftMax is not supported by WebNN backend.") + } + return !logSoftMax; + } +#endif return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || (backendId == DNN_BACKEND_HALIDE && haveHalide() && axisRaw == 1) || @@ -374,6 +386,18 @@ class SoftMaxLayerImpl CV_FINAL : public SoftmaxLayer } #endif // HAVE_DNN_NGRAPH +#ifdef HAVE_WEBNN + virtual Ptr initWebnn(const std::vector >& inputs, const 
std::vector >& nodes) CV_OVERRIDE + { + Ptr node = nodes[0].dynamicCast(); + auto& webnnInpOperand = node->operand; + auto& webnnGraphBuilder = node->net->builder; + auto operand = webnnGraphBuilder.Softmax(webnnInpOperand); + return Ptr(new WebnnBackendNode(operand)); + } + +#endif + int64 getFLOPS(const std::vector &inputs, const std::vector &outputs) const CV_OVERRIDE { diff --git a/modules/dnn/src/op_webnn.cpp b/modules/dnn/src/op_webnn.cpp new file mode 100644 index 000000000000..7cc9c4289ece --- /dev/null +++ b/modules/dnn/src/op_webnn.cpp @@ -0,0 +1,237 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Copyright (C) 2018-2019, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. + +#include +#include "op_webnn.hpp" + +#include +#include + +#include "opencv2/core/utils/filesystem.hpp" +#include "opencv2/core/utils/filesystem.private.hpp" + +#include + +namespace cv { namespace dnn { + +#ifdef HAVE_WEBNN + +static std::string kDefaultInpLayerName = "opencv_webnn_empty_inp_layer_name"; + +template +static inline std::vector getShape(const Mat& mat) +{ + std::vector result(mat.dims); + for (int i = 0; i < mat.dims; i++) + result[i] = (T)mat.size[i]; + return result; +} + +static std::vector > +webnnWrappers(const std::vector >& ptrs) +{ + std::vector > wrappers(ptrs.size()); + for (int i = 0; i < ptrs.size(); ++i) + { + CV_Assert(!ptrs[i].empty()); + wrappers[i] = ptrs[i].dynamicCast(); + CV_Assert(!wrappers[i].empty()); + } + return wrappers; +} + +// WebnnNet +WebnnNet::WebnnNet() +{ + hasNetOwner = false; + device_name = "CPU"; + + WebnnProcTable backendProcs = webnn_native::GetProcs(); + webnnProcSetProcs(&backendProcs); + context = ml::Context(webnn_native::CreateContext()); + builder = ::ml::CreateGraphBuilder(context); + namedOperands = ::ml::CreateNamedOperands(); +} + +void WebnnNet::addOutput(const std::string& name) +{ + requestedOutputs.push_back(name); +} + +void WebnnNet::createNet(Target targetId) { + init(targetId); +} + +void WebnnNet::init(Target targetId) +{ + switch (targetId) + { + case DNN_TARGET_CPU: + device_name = "CPU"; + break; + case DNN_TARGET_OPENCL: + device_name = "GPU"; + break; + default: + CV_Error(Error::StsNotImplemented, "Unknown target"); + }; + + graph = builder.Build(namedOperands); + CV_Assert(graph!=nullptr); + isInit = true; +} + +std::vector WebnnNet::setInputs(const std::vector& inputs, + const std::vector& names) { + CV_Assert_N(inputs.size() == names.size()); + std::vector current_inp; + for (size_t i = 0; i < inputs.size(); i++) + { + auto& m = inputs[i]; + std::vector dimensions = getShape(m); + ml::OperandDescriptor descriptor; + descriptor.dimensions = dimensions.data(); + descriptor.dimensionsCount = dimensions.size(); + if (m.type() == CV_32F) + { + descriptor.type = ml::OperandType::Float32; + } + else + { + CV_Error(Error::StsNotImplemented, format("Unsupported data type %s", typeToString(m.type()).c_str())); + } + ml::Operand inputOperand = builder.Input(names[i].c_str(), &descriptor); + current_inp.push_back(std::move(inputOperand)); + } + inputNames = names; + return current_inp; +} + +void WebnnNet::setUnconnectedNodes(Ptr& node) { + outputNames.push_back(node->name); + namedOperands.Set(outputNames.back().c_str(), node->operand); +} + +bool WebnnNet::isInitialized() +{ + return isInit; +} + +void WebnnNet::reset() +{ 
+    allBlobs.clear();
+    isInit = false;
+}
+
+void WebnnNet::addBlobs(const std::vector<Ptr<BackendWrapper> >& ptrs)
+{
+    auto wrappers = webnnWrappers(ptrs);
+    for (const auto& wrapper : wrappers)
+    {
+        std::string name = wrapper->name;
+        name = name.empty() ? kDefaultInpLayerName : name;
+        allBlobs.insert({name, wrapper});
+    }
+}
+
+void WebnnNet::forward(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers, bool isAsync)
+{
+    CV_LOG_DEBUG(NULL, "WebnnNet::forward(" << (isAsync ? "async" : "sync") << ")");
+    ml::NamedInputs named_inputs = ::ml::CreateNamedInputs();
+    std::vector<ml::Input> inputs(inputNames.size());
+    for (int i = 0; i < inputNames.size(); ++i) {
+        const std::string& name = inputNames[i];
+        ml::Input& input = inputs[i];
+        auto blobIt = allBlobs.find(name);
+        CV_Assert(blobIt != allBlobs.end());
+        const Ptr<WebnnBackendWrapper> wrapper = blobIt->second;
+        input.resource.buffer = wrapper->host->data;
+        input.resource.byteLength = wrapper->size;
+        named_inputs.Set(name.c_str(), &input);
+    }
+    std::vector<Ptr<WebnnBackendWrapper> > outs = webnnWrappers(outBlobsWrappers);
+    ml::NamedOutputs named_outputs = ::ml::CreateNamedOutputs();
+    std::vector<ml::ArrayBufferView> outputs(outs.size());
+    for (int i = 0; i < outs.size(); ++i) {
+        const std::string& name = outs[i]->name;
+        ml::ArrayBufferView& output = outputs[i];
+        output.buffer = outs[i]->host->data;
+        output.byteLength = outs[i]->size;
+        named_outputs.Set(name.c_str(), &output);
+    }
+    ml::ComputeGraphStatus status = graph.Compute(named_inputs, named_outputs);
+    if (status != ::ml::ComputeGraphStatus::Success) {
+        CV_Error(Error::StsAssert, format("Failed to compute: %d", int(status)));
+    }
+}
+
+// WebnnBackendNode
+WebnnBackendNode::WebnnBackendNode(ml::Operand&& _operand)
+    : BackendNode(DNN_BACKEND_WEBNN), operand(std::move(_operand)) {}
+
+WebnnBackendNode::WebnnBackendNode(ml::Operand& _operand)
+    : BackendNode(DNN_BACKEND_WEBNN), operand(_operand) {}
+
+// WebnnBackendWrapper
+WebnnBackendWrapper::WebnnBackendWrapper(int targetId, cv::Mat& m)
+    : BackendWrapper(DNN_BACKEND_WEBNN, targetId)
+{
+    size = m.total() * m.elemSize();
+    // buffer.reset(new char[size]);
+    // std::memcpy(buffer.get(), m.data, size);
+    // dimensions = getShape<int32_t>(m);
+    // descriptor.dimensions = dimensions.data();
+    // descriptor.dimensionsCount = dimensions.size();
+    if (m.type() == CV_32F)
+    {
+        descriptor.type = ml::OperandType::Float32;
+    }
+    else
+    {
+        CV_Error(Error::StsNotImplemented, format("Unsupported data type %s", typeToString(m.type()).c_str()));
+    }
+    host = &m;
+}
+
+WebnnBackendWrapper::~WebnnBackendWrapper()
+{
+    // nothing
+}
+
+void WebnnBackendWrapper::copyToHost()
+{
+    CV_LOG_DEBUG(NULL, "WebnnBackendWrapper::copyToHost()");
+    //CV_Error(Error::StsNotImplemented, "");
+}
+
+void WebnnBackendWrapper::setHostDirty()
+{
+    CV_LOG_DEBUG(NULL, "WebnnBackendWrapper::setHostDirty()");
+    //CV_Error(Error::StsNotImplemented, "");
+}
+
+void forwardWebnn(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
+                  Ptr<BackendNode>& node, bool isAsync)
+{
+    CV_Assert(!node.empty());
+    Ptr<WebnnBackendNode> webnnNode = node.dynamicCast<WebnnBackendNode>();
+    CV_Assert(!webnnNode.empty());
+    webnnNode->net->forward(outBlobsWrappers, isAsync);
+}
+
+
+#else
+void forwardWebnn(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
+                  Ptr<BackendNode>& operand, bool isAsync)
+{
+    CV_Assert(false && "WebNN is not enabled in this OpenCV build");
+}
+
+#endif
+
+}
+}
\ No newline at end of file
diff --git a/modules/dnn/src/op_webnn.hpp b/modules/dnn/src/op_webnn.hpp
new file mode 100644
index 000000000000..5a33b4847113
--- /dev/null
+++ b/modules/dnn/src/op_webnn.hpp
@@ -0,0 +1,119 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+//
+// Copyright (C) 2018-2019, Intel Corporation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+
+#ifndef __OPENCV_DNN_OP_WEBNN_HPP__
+#define __OPENCV_DNN_OP_WEBNN_HPP__
+
+#include "opencv2/core/cvdef.h"
+#include "opencv2/core/cvstd.hpp"
+#include "opencv2/dnn.hpp"
+
+#ifdef HAVE_WEBNN
+
+#include <webnn/webnn.h>
+#include <webnn/webnn_cpp.h>
+#include <webnn/webnn_proc.h>
+#include <webnn_native/WebnnNative.h>
+
+#include <memory>
+#include <unordered_map>
+
+#endif // HAVE_WEBNN
+
+namespace cv { namespace dnn {
+
+constexpr bool haveWebnn() {
+#ifdef HAVE_WEBNN
+    return true;
+#else
+    return false;
+#endif
+}
+
+#ifdef HAVE_WEBNN
+
+class WebnnBackendNode;
+class WebnnBackendWrapper;
+
+class WebnnNet
+{
+public:
+    WebnnNet();
+
+    void addOutput(const std::string& name);
+
+    bool isInitialized();
+    void init(Target targetId);
+
+    void forward(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers, bool isAsync);
+
+    std::vector<ml::Operand> setInputs(const std::vector<cv::Mat>& inputs, const std::vector<std::string>& names);
+
+    void setUnconnectedNodes(Ptr<WebnnBackendNode>& node);
+    void addBlobs(const std::vector<Ptr<BackendWrapper> >& ptrs);
+
+    void createNet(Target targetId);
+    // void setNodePtr(std::shared_ptr* ptr);
+
+    void reset();
+
+    ml::GraphBuilder builder;
+    ml::Context context;
+    ml::Graph graph;
+
+    std::unordered_map<std::string, Ptr<WebnnBackendWrapper> > allBlobs;
+
+    bool hasNetOwner;
+    std::string device_name;
+    bool isInit = false;
+
+    std::vector<std::string> requestedOutputs;
+
+    std::vector<std::string> inputNames;
+    std::vector<std::string> outputNames;
+    ml::NamedOperands namedOperands;
+};
+
+class WebnnBackendNode : public BackendNode
+{
+public:
+    WebnnBackendNode(ml::Operand&& operand);
+    WebnnBackendNode(ml::Operand& operand);
+
+    std::string name;
+    ml::Operand operand;
+    Ptr<WebnnNet> net;
+};
+
+class WebnnBackendWrapper : public BackendWrapper
+{
+public:
+    WebnnBackendWrapper(int targetId, Mat& m);
+    ~WebnnBackendWrapper();
+
+    virtual void copyToHost() CV_OVERRIDE;
+    virtual void setHostDirty() CV_OVERRIDE;
+
+    std::string name;
+    Mat* host;
+    std::unique_ptr<char> buffer;
+    size_t size;
+    std::vector<int32_t> dimensions;
+    ml::OperandDescriptor descriptor;
+};
+
+#endif // HAVE_WEBNN
+
+void forwardWebnn(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
+                  Ptr<BackendNode>& node, bool isAsync);
+
+}} // namespace cv::dnn
+
+
+#endif // __OPENCV_DNN_OP_WEBNN_HPP__
diff --git a/modules/dnn/src/webnn/README.md b/modules/dnn/src/webnn/README.md
new file mode 100644
index 000000000000..4d6fe20c0c5b
--- /dev/null
+++ b/modules/dnn/src/webnn/README.md
@@ -0,0 +1,11 @@
+## Build Instructions
+
+### Build WebNN-native and set the environment variable
+
+Refer to [WebNN's build instructions](https://github.com/webmachinelearning/webnn-native) to complete the build of WebNN-native.
+
+Set the environment variable `WEBNN_NATIVE_DIR` to enable the native DNN_BACKEND_WEBNN build: `export WEBNN_NATIVE_DIR=${PATH_TO_WebNN}`. Make sure `WEBNN_NATIVE_DIR` points to the output directory of the webnn-native build (e.g. webnn-native/out/Release).
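+
+After building OpenCV with the backend enabled, a minimal sketch like the following can be used to check at runtime that WebNN support was compiled in (this snippet is illustrative only and is not part of this patch; `getAvailableTargets` is the existing OpenCV DNN API):
+
+```cpp
+#include <opencv2/dnn.hpp>
+#include <iostream>
+#include <vector>
+
+int main()
+{
+    // The WebNN backend registers a CPU target when HAVE_WEBNN is defined
+    // (see BackendRegistry in modules/dnn/src/dnn.cpp).
+    std::vector<cv::dnn::Target> targets =
+        cv::dnn::getAvailableTargets(cv::dnn::DNN_BACKEND_WEBNN);
+    std::cout << (targets.empty() ? "WebNN backend is NOT available"
+                                  : "WebNN backend is available") << std::endl;
+    return 0;
+}
+```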
+
+### Test native DNN_BACKEND_WEBNN backend
+
+Add `-DWITH_WEBNN=ON` to the CMake command line to build OpenCV with the WebNN backend, for example:
+`cmake -DWITH_WEBNN=ON ../opencv` (see the [Installation in Linux](https://docs.opencv.org/master/d7/d9f/tutorial_linux_install.html) tutorial).
\ No newline at end of file
diff --git a/modules/dnn/test/test_common.hpp b/modules/dnn/test/test_common.hpp
index 139f3d1671b1..f20aa507c100 100644
--- a/modules/dnn/test/test_common.hpp
+++ b/modules/dnn/test/test_common.hpp
@@ -135,7 +135,8 @@ testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTargets(
     bool withCpuOCV = true,
     bool withVkCom = true,
     bool withCUDA = true,
-    bool withNgraph = true
+    bool withNgraph = true,
+    bool withWebnn = true
 );
 
 testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTargetsIE();
diff --git a/modules/dnn/test/test_common.impl.hpp b/modules/dnn/test/test_common.impl.hpp
index 3d56e6f30875..c312474256f2 100644
--- a/modules/dnn/test/test_common.impl.hpp
+++ b/modules/dnn/test/test_common.impl.hpp
@@ -29,6 +29,7 @@ void PrintTo(const cv::dnn::Backend& v, std::ostream* os)
     case DNN_BACKEND_CUDA: *os << "CUDA"; return;
     case DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019: *os << "DLIE"; return;
     case DNN_BACKEND_INFERENCE_ENGINE_NGRAPH: *os << "NGRAPH"; return;
+    case DNN_BACKEND_WEBNN: *os << "WEBNN"; return;
     } // don't use "default:" to emit compiler warnings
     *os << "DNN_BACKEND_UNKNOWN(" << (int)v << ")";
 }
@@ -247,7 +248,8 @@ testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTargets(
     bool withCpuOCV /*= true*/,
     bool withVkCom /*= true*/,
     bool withCUDA /*= true*/,
-    bool withNgraph /*= true*/
+    bool withNgraph /*= true*/,
+    bool withWebnn /*= true*/
 )
 {
 #ifdef HAVE_INF_ENGINE
@@ -302,6 +304,17 @@
     }
 #endif
 
+#ifdef HAVE_WEBNN
+    if (withWebnn)
+    {
+        for (auto target : getAvailableTargets(DNN_BACKEND_WEBNN)) {
+            targets.push_back(make_tuple(DNN_BACKEND_WEBNN, target));
+        }
+    }
+#else
+    CV_UNUSED(withWebnn);
+#endif
+
     {
         available = getAvailableTargets(DNN_BACKEND_OPENCV);
         for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
diff --git a/samples/dnn/classification.cpp b/samples/dnn/classification.cpp
index 769d6874bed4..8af62e7e8b1f 100644
--- a/samples/dnn/classification.cpp
+++ b/samples/dnn/classification.cpp
@@ -24,7 +24,8 @@ std::string keys =
         "2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
         "3: OpenCV implementation, "
         "4: VKCOM, "
-        "5: CUDA },"
+        "5: CUDA, "
+        "6: WebNN },"
     "{ target | 0 | Choose one of target computation devices: "
         "0: CPU target (by default), "
        "1: OpenCL, "
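
The classification sample above exposes the new backend as option `6: WebNN` of its `backend` key. For completeness, selecting the backend directly from application code follows the usual DNN pattern; the sketch below is illustrative only (the model file name and input geometry are placeholders, not part of this patch):

```cpp
#include <opencv2/dnn.hpp>

int main()
{
    using namespace cv;
    using namespace cv::dnn;

    Net net = readNet("model.onnx");              // placeholder model file
    net.setPreferableBackend(DNN_BACKEND_WEBNN);  // backend added by this patch
    net.setPreferableTarget(DNN_TARGET_CPU);      // initWebnnBackend() accepts CPU or OpenCL targets

    Mat blob(std::vector<int>{1, 3, 224, 224}, CV_32F, Scalar(0));  // placeholder NCHW input
    net.setInput(blob);
    Mat prob = net.forward();                     // supported layers run through forwardWebnn()
    (void)prob;
    return 0;
}
```

Layers the backend does not implement fall back to the default CPU path; `initWebnnBackend()` splits the model into several WebNN graphs around them, as described in the dnn.cpp changes.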