From f5083c8ecb9e20ed1501c4b7db10ac0349d63f2d Mon Sep 17 00:00:00 2001
From: Ningxin Hu
Date: Tue, 13 Jul 2021 01:08:47 +0800
Subject: [PATCH] Implement Relu by WebNN API

Update dnn.cpp for better test
Update elementwise_layers.cpp
Implement ReLU6
Update elementwise_layers.cpp
Implement SoftMax using WebNN API
Implement Reshape by WebNN API
Implement PermuteLayer by WebNN API
Implement PoolingLayer using WebNN API
Update pooling_layer.cpp
Update pooling_layer.cpp
Update pooling_layer.cpp
Update pooling_layer.cpp
Update pooling_layer.cpp
Update pooling_layer.cpp
Implement poolingLayer by WebNN API and add more detailed logs
Update dnn.cpp
Update dnn.cpp
Remove redundant codes and add more logs for poolingLayer
Add more logs in the pooling layer implementation
Fix the indent issue and resolve the compiling issue
Fix the build problems
Fix the build issue
Fix the build issue
Update dnn.cpp
Update dnn.cpp
---
 modules/dnn/include/opencv2/dnn/dnn.hpp       |   2 +-
 modules/dnn/src/dnn.cpp                       |  14 +-
 modules/dnn/src/layers/elementwise_layers.cpp | 149 ++++++++++++++++++
 modules/dnn/src/layers/permute_layer.cpp      |  16 ++
 modules/dnn/src/layers/pooling_layer.cpp      | 120 ++++++++++++++
 modules/dnn/src/layers/reshape_layer.cpp      |  13 ++
 modules/dnn/src/layers/softmax_layer.cpp      |  24 +++
 modules/dnn/src/op_webnn.cpp                  |  76 +++++++--
 modules/dnn/src/op_webnn.hpp                  |  14 +-
 modules/dnn/src/webnn/README.md               |   2 +-
 samples/dnn/classification.cpp                |   3 +-
 11 files changed, 410 insertions(+), 23 deletions(-)

diff --git a/modules/dnn/include/opencv2/dnn/dnn.hpp b/modules/dnn/include/opencv2/dnn/dnn.hpp
index 31c2f48611c1..c7be7b8dc440 100644
--- a/modules/dnn/include/opencv2/dnn/dnn.hpp
+++ b/modules/dnn/include/opencv2/dnn/dnn.hpp
@@ -73,8 +73,8 @@ CV__DNN_INLINE_NS_BEGIN
                                           //!< @sa setInferenceEngineBackendType
         DNN_BACKEND_OPENCV,
         DNN_BACKEND_VKCOM,
-        DNN_BACKEND_WEBNN,
         DNN_BACKEND_CUDA,
+        DNN_BACKEND_WEBNN,
 #ifdef __OPENCV_BUILD
         DNN_BACKEND_INFERENCE_ENGINE_NGRAPH = 1000000,  // internal - use DNN_BACKEND_INFERENCE_ENGINE + setInferenceEngineBackendType()
         DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019,   // internal - use DNN_BACKEND_INFERENCE_ENGINE + setInferenceEngineBackendType()
diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp
index 9f49596df089..aaa425c5e7b0 100644
--- a/modules/dnn/src/dnn.cpp
+++ b/modules/dnn/src/dnn.cpp
@@ -236,7 +236,6 @@ class BackendRegistry
         if (haveWebnn())
         {
             backends.push_back(std::make_pair(DNN_BACKEND_WEBNN, DNN_TARGET_CPU));
-            backends.push_back(std::make_pair(DNN_BACKEND_WEBNN, DNN_TARGET_OPENCL));
         }
 #endif // HAVE_WEBNN
 
@@ -2453,6 +2452,11 @@ struct Net::Impl : public detail::NetImplBase
             Ptr<Layer> layer = ld.layerInstance;
             if (!fused && !layer->supportBackend(preferableBackend))
             {
+                // For test use. When WebNN does not support a layer,
+                // the test case will fail with the following code.
+
+                CV_LOG_WARNING(NULL, "Layer " + ld.type + " name " + ld.name + " is unsupported by WebNN backend.");
+
                 addWebnnOutputs(ld);
                 net = Ptr<WebnnNet>();
                 layer->preferableTarget = DNN_TARGET_CPU;
@@ -2469,7 +2473,7 @@ struct Net::Impl : public detail::NetImplBase
                 }
                 continue;
             }
-            ld.skip = true;  // Initially skip all WebNN supported layers.
+            ld.skip = true; // Initially skip all WebNN supported layers.
 
             // Create a new network if one of inputs from different WebNN graph.
             std::vector<Ptr<BackendNode> > inputNodes;
@@ -2519,7 +2523,9 @@ struct Net::Impl : public detail::NetImplBase
                     auto inps = net->setInputs(inputs, inputNames);
                     for (auto& inp : inps)
                     {
-                        inputNodes.emplace_back(Ptr<BackendNode>(new WebnnBackendNode(inp)));
+                        WebnnBackendNode* node = new WebnnBackendNode(inp);
+                        node->net = net;
+                        inputNodes.emplace_back(Ptr<BackendNode>(node));
                     }
                 }
             }
@@ -2619,8 +2625,8 @@ struct Net::Impl : public detail::NetImplBase
                 ld.skip = false;
             }
         }
-#endif
     }
+#endif
 
     void initVkComBackend()
     {
diff --git a/modules/dnn/src/layers/elementwise_layers.cpp b/modules/dnn/src/layers/elementwise_layers.cpp
index 9bb5be342f76..88dad02af7d0 100644
--- a/modules/dnn/src/layers/elementwise_layers.cpp
+++ b/modules/dnn/src/layers/elementwise_layers.cpp
@@ -47,9 +47,11 @@
 #include "../op_inf_engine.hpp"
 #include "../ie_ngraph.hpp"
 #include "../op_vkcom.hpp"
+#include "../op_webnn.hpp"
 
 #include
 #include
+#include
 
 #ifdef HAVE_OPENCL
 #include "opencl_kernels_dnn.hpp"
@@ -59,6 +61,7 @@
 #include "../cuda4dnn/primitives/activation.hpp"
 using namespace cv::dnn::cuda4dnn;
 #endif
+#include
 
 namespace cv
 {
@@ -181,6 +184,17 @@ class ElementWiseLayer : public Func::Layer
     }
 #endif  // HAVE_DNN_NGRAPH
 
+#ifdef HAVE_WEBNN
+    virtual Ptr<BackendNode> initWebnn(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
+    {
+        Ptr<WebnnBackendNode> node = nodes[0].dynamicCast<WebnnBackendNode>();
+        auto& webnnInpOperand = node->operand;
+        auto& webnnGraphBuilder = node->net->builder;
+        auto operand = func.initWebnnAPI(webnnGraphBuilder, webnnInpOperand);
+        return Ptr<BackendNode>(new WebnnBackendNode(operand));
+    }
+#endif
+
     virtual Ptr<BackendNode> initVkCom(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
     {
 #ifdef HAVE_VULKAN
@@ -306,6 +320,16 @@ struct ReLUFunctor : public BaseFunctor
 #ifdef HAVE_DNN_NGRAPH
         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
             return true;
+#endif
+#ifdef HAVE_WEBNN
+        if (backendId == DNN_BACKEND_WEBNN) {
+            // TODO: support PRELU
+            if (slope != 0)
+            {
+                CV_LOG_WARNING(NULL, "PRELU is not supported now.");
+            }
+            return slope == 0;
+        }
 #endif
         return backendId == DNN_BACKEND_OPENCV ||
                backendId == DNN_BACKEND_CUDA ||
@@ -428,6 +452,13 @@ struct ReLUFunctor : public BaseFunctor
     }
 #endif  // HAVE_DNN_NGRAPH
 
+#ifdef HAVE_WEBNN
+    ml::Operand initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input)
+    {
+        return builder.Relu(input);
+    }
+#endif
+
 #ifdef HAVE_VULKAN
     std::shared_ptr<vkcom::OpBase> initVkCom()
     {
@@ -455,6 +486,7 @@ struct ReLU6Functor : public BaseFunctor
         return backendId == DNN_BACKEND_OPENCV ||
                backendId == DNN_BACKEND_CUDA ||
                backendId == DNN_BACKEND_HALIDE ||
+               backendId == DNN_BACKEND_WEBNN ||
               backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
               backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
     }
@@ -551,6 +583,33 @@ struct ReLU6Functor : public BaseFunctor
     }
 #endif  // HAVE_DNN_NGRAPH
 
+
+#ifdef HAVE_WEBNN
+    ml::Operand BuildConstant(const ml::GraphBuilder& builder,
+                              const std::vector<int32_t>& dimensions,
+                              const void* value,
+                              size_t size,
+                              ml::OperandType type) {
+        ml::OperandDescriptor desc;
+        desc.type = type;
+        desc.dimensions = dimensions.data();
+        desc.dimensionsCount = (uint32_t)dimensions.size();
+        ml::ArrayBufferView resource;
+        resource.buffer = const_cast<void*>(value);
+        resource.byteLength = size;
+        return builder.Constant(&desc, &resource);
+    }
+
+    ml::Operand initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input)
+    {
+        ml::ClampOptions clampOptions;
+        clampOptions.minValue = BuildConstant(builder, {}, &minValue, 1 * sizeof(float), ml::OperandType::Float32);
+        clampOptions.maxValue = BuildConstant(builder, {}, &maxValue, 1 * sizeof(float),
+                                              ml::OperandType::Float32);
+        return builder.Clamp(input, &clampOptions);
+    }
+#endif
+
 #ifdef HAVE_VULKAN
     std::shared_ptr<vkcom::OpBase> initVkCom()
     {
@@ -643,6 +702,15 @@ struct TanHFunctor : public BaseFunctor
     }
 #endif  // HAVE_DNN_NGRAPH
 
+#ifdef HAVE_WEBNN
+    ml::Operand initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input)
+    {
+        CV_Error(Error::StsNotImplemented, "");
+        ml::Operand operand;
+        return operand;
+    }
+#endif
+
 #ifdef HAVE_VULKAN
     std::shared_ptr<vkcom::OpBase> initVkCom()
     {
@@ -735,6 +803,15 @@ struct SwishFunctor : public BaseFunctor
     }
 #endif  // HAVE_DNN_NGRAPH
 
+#ifdef HAVE_WEBNN
+    ml::Operand initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input)
+    {
+        CV_Error(Error::StsNotImplemented, "");
+        ml::Operand operand;
+        return operand;
+    }
+#endif
+
 #ifdef HAVE_VULKAN
     std::shared_ptr<vkcom::OpBase> initVkCom()
    {
@@ -840,6 +917,15 @@ struct MishFunctor : public BaseFunctor
     }
 #endif  // HAVE_DNN_NGRAPH
 
+#ifdef HAVE_WEBNN
+    ml::Operand initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input)
+    {
+        CV_Error(Error::StsNotImplemented, "");
+        ml::Operand operand;
+        return operand;
+    }
+#endif
+
 #ifdef HAVE_VULKAN
     std::shared_ptr<vkcom::OpBase> initVkCom()
     {
@@ -932,6 +1018,15 @@ struct SigmoidFunctor : public BaseFunctor
     }
 #endif  // HAVE_DNN_NGRAPH
 
+#ifdef HAVE_WEBNN
+    ml::Operand initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input)
+    {
+        CV_Error(Error::StsNotImplemented, "");
+        ml::Operand operand;
+        return operand;
+    }
+#endif
+
 #ifdef HAVE_VULKAN
     std::shared_ptr<vkcom::OpBase> initVkCom()
     {
@@ -1024,6 +1119,15 @@ struct ELUFunctor : public BaseFunctor
     }
 #endif  // HAVE_DNN_NGRAPH
 
+#ifdef HAVE_WEBNN
+    ml::Operand initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input)
+    {
+        CV_Error(Error::StsNotImplemented, "");
+        ml::Operand operand;
+        return operand;
+    }
+#endif
+
 #ifdef HAVE_VULKAN
     std::shared_ptr<vkcom::OpBase> initVkCom()
     {
@@ -1122,6 +1226,15 @@ struct AbsValFunctor : public BaseFunctor
     }
 #endif  // HAVE_DNN_NGRAPH
 
+#ifdef HAVE_WEBNN
+    ml::Operand initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input)
+    {
+        CV_Error(Error::StsNotImplemented, "");
+        ml::Operand operand;
+        return operand;
+    }
+#endif
+
 #ifdef HAVE_VULKAN
     std::shared_ptr<vkcom::OpBase> initVkCom()
     {
@@ -1215,6 +1328,15 @@ struct BNLLFunctor : public BaseFunctor
     }
 #endif  // HAVE_DNN_NGRAPH
 
+#ifdef HAVE_WEBNN
+    ml::Operand initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input)
+    {
+        CV_Error(Error::StsNotImplemented, "");
+        ml::Operand operand;
+        return operand;
+    }
+#endif
+
 #ifdef HAVE_VULKAN
     std::shared_ptr<vkcom::OpBase> initVkCom()
     {
@@ -1367,6 +1489,15 @@ struct PowerFunctor : public BaseFunctor
     }
 #endif  // HAVE_DNN_NGRAPH
 
+#ifdef HAVE_WEBNN
+    ml::Operand initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input)
+    {
+        CV_Error(Error::StsNotImplemented, "");
+        ml::Operand operand;
+        return operand;
+    }
+#endif
+
 #ifdef HAVE_VULKAN
     std::shared_ptr<vkcom::OpBase> initVkCom()
     {
@@ -1507,6 +1638,15 @@ struct ExpFunctor : public BaseFunctor
     }
 #endif  // HAVE_DNN_NGRAPH
 
+#ifdef HAVE_WEBNN
+    ml::Operand initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input)
+    {
+        CV_Error(Error::StsNotImplemented, "");
+        ml::Operand operand;
+        return operand;
+    }
+#endif
+
 #ifdef HAVE_VULKAN
     std::shared_ptr<vkcom::OpBase> initVkCom()
     {
@@ -1644,6 +1784,15 @@ struct ChannelsPReLUFunctor : public BaseFunctor
     }
 #endif  // HAVE_DNN_NGRAPH
 
+#ifdef HAVE_WEBNN
+    ml::Operand initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input)
+    {
+        CV_Error(Error::StsNotImplemented, "");
+        ml::Operand operand;
+        return operand;
+    }
+#endif
+
 #ifdef HAVE_VULKAN
     std::shared_ptr<vkcom::OpBase> initVkCom()
     {
diff --git a/modules/dnn/src/layers/permute_layer.cpp b/modules/dnn/src/layers/permute_layer.cpp
index c525c3f82f78..f950e2cff3c4 100644
--- a/modules/dnn/src/layers/permute_layer.cpp
+++ b/modules/dnn/src/layers/permute_layer.cpp
@@ -46,6 +46,7 @@
 #include "../op_inf_engine.hpp"
 #include "../ie_ngraph.hpp"
 #include "../op_vkcom.hpp"
+#include "../op_webnn.hpp"
 
 #include
 #include
@@ -119,6 +120,7 @@ class PermuteLayerImpl CV_FINAL : public PermuteLayer
 #endif
         return backendId == DNN_BACKEND_OPENCV ||
                backendId == DNN_BACKEND_CUDA ||
+               backendId == DNN_BACKEND_WEBNN ||
                ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine()) ||
                (backendId == DNN_BACKEND_VKCOM && haveVulkan());
     }
@@ -413,6 +415,20 @@ class PermuteLayerImpl CV_FINAL : public PermuteLayer
     }
 #endif  // HAVE_DNN_NGRAPH
 
+#ifdef HAVE_WEBNN
+    virtual Ptr<BackendNode> initWebnn(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
+    {
+        Ptr<WebnnBackendNode> node = nodes[0].dynamicCast<WebnnBackendNode>();
+        auto& webnnInpOperand = node->operand;
+        auto& webnnGraphBuilder = node->net->builder;
+        std::vector<int32_t> permutation(_order.begin(), _order.end());
+        ml::TransposeOptions options;
+        options.permutation = permutation.data();
+        options.permutationCount = permutation.size();
+        auto operand = webnnGraphBuilder.Transpose(webnnInpOperand, &options);
+        return Ptr<BackendNode>(new WebnnBackendNode(operand));
+    }
+#endif
 
 #ifdef HAVE_CUDA
     Ptr<BackendNode> initCUDA(
diff --git a/modules/dnn/src/layers/pooling_layer.cpp b/modules/dnn/src/layers/pooling_layer.cpp
index b8e2cfdf8f84..b5e42ee650c7 100644
--- a/modules/dnn/src/layers/pooling_layer.cpp
+++ b/modules/dnn/src/layers/pooling_layer.cpp
@@ -46,6 +46,7 @@
 #include "../op_cuda.hpp"
 #include "../op_halide.hpp"
 #include "../op_inf_engine.hpp"
+#include "../op_webnn.hpp"
 
 #ifdef HAVE_DNN_NGRAPH
 #include "../ie_ngraph.hpp"
@@ -85,6 +86,7 @@ typedef int HALIDE_DIFF_T;
 #include "../cuda4dnn/primitives/max_unpooling.hpp"
 using namespace cv::dnn::cuda4dnn;
 #endif
+#include
 
 
 namespace cv
@@ -246,6 +248,51 @@ class PoolingLayerImpl CV_FINAL : public PoolingLayer
                    (type == MAX || type == AVE);
             return false;
         }
+        else if (backendId == DNN_BACKEND_WEBNN)
+        {
+            if (kernel_size.empty() || kernel_size.size() == 2)
+            {
+                if (!haveWebnn())
+                {
+                    return false;
+                }
+                else
+                {
+                    if (!ceilMode)
+                    {
+                        CV_LOG_WARNING(NULL, "ceilMode is not supported by WebNN backend.");
+                        return false;
+                    }
+                    if (computeMaxIdx)
+                    {
+                        CV_LOG_WARNING(NULL, "Mask is not supported by WebNN backend.");
+                        return false;
+                    }
+                    if (type != MAX && type != AVE)
+                    {
+                        if (type == STOCHASTIC)
+                        {
+                            CV_LOG_WARNING(NULL, "Stochastic Pooling is not supported by WebNN backend.");
+                        }
+                        if (type == SUM)
+                        {
+                            CV_LOG_WARNING(NULL, "Sum Pooling is not supported by WebNN backend.");
+                        }
+                        if (type == ROI)
+                        {
+                            CV_LOG_WARNING(NULL, "ROI Pooling is not supported by WebNN backend.");
+                        }
+                        if (type == PSROI)
+                        {
+                            CV_LOG_WARNING(NULL, "Position-sensitive ROI Pooling is not supported by WebNN backend.");
+                        }
+                        CV_LOG_WARNING(NULL, "WebNN backend only supports MaxPooling and AveragePooling currently.");
+                        return false;
+                    }
+                }
+                return true;
+            }
+        }
         return false;
     }
@@ -607,6 +654,79 @@ class PoolingLayerImpl CV_FINAL : public PoolingLayer
     }
 #endif  // HAVE_DNN_NGRAPH
 
+#ifdef HAVE_WEBNN
+    struct Pool2dOptions {
+      public:
+        std::vector<int32_t> windowDimensions;
+        std::vector<int32_t> padding;
+        std::vector<int32_t> strides;
+        std::vector<int32_t> dilations;
+        ml::AutoPad autoPad = ml::AutoPad::Explicit;
+        ml::InputOperandLayout layout = ml::InputOperandLayout::Nchw;
+
+        const ml::Pool2dOptions* AsPtr() {
+            if (!windowDimensions.empty()) {
+                mOptions.windowDimensionsCount = windowDimensions.size();
+                mOptions.windowDimensions = windowDimensions.data();
+            }
+            if (!padding.empty()) {
+                mOptions.paddingCount = padding.size();
+                mOptions.padding = padding.data();
+            }
+            if (!strides.empty()) {
+                mOptions.stridesCount = strides.size();
+                mOptions.strides = strides.data();
+            }
+            if (!dilations.empty()) {
+                mOptions.dilationsCount = dilations.size();
+                mOptions.dilations = dilations.data();
+            }
+            mOptions.layout = layout;
+            mOptions.autoPad = autoPad;
+            return &mOptions;
+        }
+
+      private:
+        ml::Pool2dOptions mOptions;
+    };
+
+    virtual Ptr<BackendNode> initWebnn(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
+    {
+        std::cout << "Use WebNN Pooling Layer's Implementation." << std::endl;
+        Ptr<WebnnBackendNode> node = nodes[0].dynamicCast<WebnnBackendNode>();
+        auto& webnnInpOperand = node->operand;
+        auto& webnnGraphBuilder = node->net->builder;
+        Pool2dOptions options;
+        std::vector<int32_t> kernelSize(kernel_size.begin(), kernel_size.end());
+        std::vector<int32_t> Strides(strides.begin(), strides.end());
+        std::vector<int32_t> Padding;
+        if (padMode.empty()) {
+            Padding = {static_cast<int32_t>(pads_begin[0]),
+                       static_cast<int32_t>(pads_end[0]),
+                       static_cast<int32_t>(pads_begin[1]),
+                       static_cast<int32_t>(pads_end[1])};
+        } else if (padMode == "VALID") {
+            Padding = {0, 0, 0, 0};
+        } else if (padMode == "SAME") {
+            options.autoPad = ml::AutoPad::SameUpper;
+        }
+        options.windowDimensions = kernelSize;
+        options.strides = Strides;
+        options.padding = Padding;
+        if (type == MAX)
+        {
+            auto operand = webnnGraphBuilder.MaxPool2d(webnnInpOperand, options.AsPtr());
+            return Ptr<BackendNode>(new WebnnBackendNode(operand));
+        }
+        else if (type == AVE)
+        {
+            auto operand = webnnGraphBuilder.AveragePool2d(webnnInpOperand, options.AsPtr());
+            return Ptr<BackendNode>(new WebnnBackendNode(operand));
+        } else {
+            CV_Error(Error::StsNotImplemented, "Unsupported pooling type");
+        }
+    }
+#endif  // HAVE_WEBNN
 
     class PoolingInvoker : public ParallelLoopBody
     {
diff --git a/modules/dnn/src/layers/reshape_layer.cpp b/modules/dnn/src/layers/reshape_layer.cpp
index ab8f41c7b6dd..7c1829d4298e 100644
--- a/modules/dnn/src/layers/reshape_layer.cpp
+++ b/modules/dnn/src/layers/reshape_layer.cpp
@@ -45,6 +45,7 @@
 #include "../op_cuda.hpp"
 #include "../op_inf_engine.hpp"
 #include "../ie_ngraph.hpp"
+#include "../op_webnn.hpp"
 
 #include
 
@@ -203,6 +204,7 @@ class ReshapeLayerImpl CV_FINAL : public ReshapeLayer
     {
         return backendId == DNN_BACKEND_OPENCV ||
                backendId == DNN_BACKEND_CUDA ||
+               backendId == DNN_BACKEND_WEBNN ||
                ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine());
     }
@@ -330,6 +332,17 @@ class ReshapeLayerImpl CV_FINAL : public ReshapeLayer
     }
 #endif  // HAVE_DNN_NGRAPH
 
+#ifdef HAVE_WEBNN
+    virtual Ptr<BackendNode> initWebnn(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
+    {
+        Ptr<WebnnBackendNode> node = nodes[0].dynamicCast<WebnnBackendNode>();
+        auto& webnnInpOperand = node->operand;
+        auto& webnnGraphBuilder = node->net->builder;
+        const std::vector<int32_t> out(outShapes[0].begin(), outShapes[0].end());
+        auto operand = webnnGraphBuilder.Reshape(webnnInpOperand, out.data(), out.size());
+        return Ptr<BackendNode>(new WebnnBackendNode(operand));
+    }
+#endif
 
 #ifdef HAVE_CUDA
     Ptr<BackendNode> initCUDA(
diff --git a/modules/dnn/src/layers/softmax_layer.cpp b/modules/dnn/src/layers/softmax_layer.cpp
index 546c1017add8..0f67eb39b674 100644
--- a/modules/dnn/src/layers/softmax_layer.cpp
+++ b/modules/dnn/src/layers/softmax_layer.cpp
@@ -47,9 +47,11 @@
 #include "../op_inf_engine.hpp"
 #include "../ie_ngraph.hpp"
 #include "../op_vkcom.hpp"
+#include "../op_webnn.hpp"
 
 #include
 #include
+#include
 using std::max;
 
 #ifdef HAVE_OPENCL
@@ -97,6 +99,16 @@ class SoftMaxLayerImpl CV_FINAL : public SoftmaxLayer
 
     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
+#ifdef HAVE_WEBNN
+        if (backendId == DNN_BACKEND_WEBNN) {
+            // TODO: support logSoftMax
+            if (logSoftMax)
+            {
+                CV_LOG_WARNING(NULL, "logSoftMax is not supported by WebNN backend.");
+            }
+            return !logSoftMax;
+        }
+#endif
         return backendId == DNN_BACKEND_OPENCV ||
                backendId == DNN_BACKEND_CUDA ||
                (backendId == DNN_BACKEND_HALIDE && haveHalide() && axisRaw == 1) ||
@@ -374,6 +386,18 @@ class SoftMaxLayerImpl CV_FINAL : public SoftmaxLayer
     }
 #endif  // HAVE_DNN_NGRAPH
 
+#ifdef HAVE_WEBNN
+    virtual Ptr<BackendNode> initWebnn(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
+    {
+        Ptr<WebnnBackendNode> node = nodes[0].dynamicCast<WebnnBackendNode>();
+        auto& webnnInpOperand = node->operand;
+        auto& webnnGraphBuilder = node->net->builder;
+        auto operand = webnnGraphBuilder.Softmax(webnnInpOperand);
+        return Ptr<BackendNode>(new WebnnBackendNode(operand));
+    }
+
+#endif
+
     int64 getFLOPS(const std::vector<MatShape> &inputs,
                    const std::vector<MatShape> &outputs) const CV_OVERRIDE
     {
diff --git a/modules/dnn/src/op_webnn.cpp b/modules/dnn/src/op_webnn.cpp
index 640c1f3fdda1..7cc9c4289ece 100644
--- a/modules/dnn/src/op_webnn.cpp
+++ b/modules/dnn/src/op_webnn.cpp
@@ -49,6 +49,12 @@ WebnnNet::WebnnNet()
 {
     hasNetOwner = false;
     device_name = "CPU";
+
+    WebnnProcTable backendProcs = webnn_native::GetProcs();
+    webnnProcSetProcs(&backendProcs);
+    context = ml::Context(webnn_native::CreateContext());
+    builder = ::ml::CreateGraphBuilder(context);
+    namedOperands = ::ml::CreateNamedOperands();
 }
 
 void WebnnNet::addOutput(const std::string& name)
@@ -74,22 +80,40 @@ void WebnnNet::init(Target targetId)
         CV_Error(Error::StsNotImplemented, "Unknown target");
     };
 
-    CV_Error(Error::StsNotImplemented, "Create ml::Graph");
+    graph = builder.Build(namedOperands);
+    CV_Assert(graph != nullptr);
+
     isInit = true;
 }
 
 std::vector<ml::Operand> WebnnNet::setInputs(const std::vector<cv::Mat>& inputs,
-                                             const std::vector<std::string>& names) {
+                                              const std::vector<std::string>& names) {
     CV_Assert_N(inputs.size() == names.size());
     std::vector<ml::Operand> current_inp;
     for (size_t i = 0; i < inputs.size(); i++)
     {
-        CV_Error(Error::StsNotImplemented, "Create ml::Operand");
+        auto& m = inputs[i];
+        std::vector<int32_t> dimensions = getShape(m);
+        ml::OperandDescriptor descriptor;
+        descriptor.dimensions = dimensions.data();
+        descriptor.dimensionsCount = dimensions.size();
+        if (m.type() == CV_32F)
+        {
+            descriptor.type = ml::OperandType::Float32;
+        }
+        else
+        {
+            CV_Error(Error::StsNotImplemented, format("Unsupported data type %s", typeToString(m.type()).c_str()));
+        }
+        ml::Operand inputOperand = builder.Input(names[i].c_str(), &descriptor);
+        current_inp.push_back(std::move(inputOperand));
     }
+    inputNames = names;
     return current_inp;
 }
 
 void WebnnNet::setUnconnectedNodes(Ptr<WebnnBackendNode>& node) {
-    unconnectedNodes.push_back(node);
+    outputNames.push_back(node->name);
+    namedOperands.Set(outputNames.back().c_str(), node->operand);
 }
 
 bool WebnnNet::isInitialized()
@@ -117,7 +141,32 @@ void WebnnNet::addBlobs(const std::vector<cv::Ptr<BackendWrapper> >& ptrs)
 
 void WebnnNet::forward(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers, bool isAsync)
 {
     CV_LOG_DEBUG(NULL, "WebnnNet::forward(" << (isAsync ? "async" : "sync") << ")");
-    CV_Error(Error::StsNotImplemented, "Implement ml::Graph.compute");
+    ml::NamedInputs named_inputs = ::ml::CreateNamedInputs();
+    std::vector<ml::Input> inputs(inputNames.size());
+    for (int i = 0; i < inputNames.size(); ++i) {
+        const std::string& name = inputNames[i];
+        ml::Input& input = inputs[i];
+        auto blobIt = allBlobs.find(name);
+        CV_Assert(blobIt != allBlobs.end());
+        const Ptr<WebnnBackendWrapper> wrapper = blobIt->second;
+        input.resource.buffer = wrapper->host->data;
+        input.resource.byteLength = wrapper->size;
+        named_inputs.Set(name.c_str(), &input);
+    }
+    std::vector<Ptr<WebnnBackendWrapper> > outs = webnnWrappers(outBlobsWrappers);
+    ml::NamedOutputs named_outputs = ::ml::CreateNamedOutputs();
+    std::vector<ml::ArrayBufferView> outputs(outs.size());
+    for (int i = 0; i < outs.size(); ++i) {
+        const std::string& name = outs[i]->name;
+        ml::ArrayBufferView& output = outputs[i];
+        output.buffer = outs[i]->host->data;
+        output.byteLength = outs[i]->size;
+        named_outputs.Set(name.c_str(), &output);
+    }
+    ml::ComputeGraphStatus status = graph.Compute(named_inputs, named_outputs);
+    if (status != ::ml::ComputeGraphStatus::Success) {
+        CV_Error(Error::StsAssert, format("Failed to compute: %d", int(status)));
+    }
 }
 
 // WebnnBackendNode
@@ -128,21 +177,24 @@ WebnnBackendNode::WebnnBackendNode(ml::Operand& _operand)
     : BackendNode(DNN_BACKEND_WEBNN), operand(_operand) {}
 
 // WebnnBackendWrapper
-WebnnBackendWrapper::WebnnBackendWrapper(int targetId, const cv::Mat& m)
+WebnnBackendWrapper::WebnnBackendWrapper(int targetId, cv::Mat& m)
     : BackendWrapper(DNN_BACKEND_WEBNN, targetId)
 {
-    size_t dataSize = m.total() * m.elemSize();
-    buffer.reset(new char[dataSize]);
-    std::memcpy(buffer.get(), m.data, dataSize);
-    dimensions = getShape(m);
-    descriptor.dimensions = dimensions.data();
-    descriptor.dimensionsCount = dimensions.size();
+    size = m.total() * m.elemSize();
+    // buffer.reset(new char[size]);
+    // std::memcpy(buffer.get(), m.data, size);
+    // dimensions = getShape(m);
+    // descriptor.dimensions = dimensions.data();
+    // descriptor.dimensionsCount = dimensions.size();
     if (m.type() == CV_32F)
     {
         descriptor.type = ml::OperandType::Float32;
     }
     else
+    {
         CV_Error(Error::StsNotImplemented, format("Unsupported data type %s", typeToString(m.type()).c_str()));
+    }
+    host = &m;
 }
 
 WebnnBackendWrapper::~WebnnBackendWrapper()
diff --git a/modules/dnn/src/op_webnn.hpp b/modules/dnn/src/op_webnn.hpp
index b2263ab26673..5a33b4847113 100644
--- a/modules/dnn/src/op_webnn.hpp
+++ b/modules/dnn/src/op_webnn.hpp
@@ -20,6 +20,7 @@
 
 #include
 #include
+#include
 
 #endif  // HAVE_WEBNN
 
@@ -38,6 +39,8 @@ constexpr bool haveWebnn() {
 class WebnnBackendNode;
 class WebnnBackendWrapper;
 
+
+
 class WebnnNet
 {
 public:
@@ -60,7 +63,6 @@ class WebnnNet
 
     void reset();
 
-private:
     ml::GraphBuilder builder;
     ml::Context context;
     ml::Graph graph;
@@ -72,7 +74,10 @@ class WebnnNet
     bool isInit = false;
 
     std::vector<std::string> requestedOutputs;
-    std::vector<Ptr<WebnnBackendNode> > unconnectedNodes;
+
+    std::vector<std::string> inputNames;
+    std::vector<std::string> outputNames;
+    ml::NamedOperands namedOperands;
 };
 
 class WebnnBackendNode : public BackendNode
@@ -84,20 +89,21 @@ class WebnnBackendNode : public BackendNode
 
     std::string name;
     ml::Operand operand;
     Ptr<WebnnNet> net;
-    Ptr<dnn::Layer> cvLayer;
 };
 
 class WebnnBackendWrapper : public BackendWrapper
 {
 public:
-    WebnnBackendWrapper(int targetId, const Mat& m);
+    WebnnBackendWrapper(int targetId, Mat& m);
     ~WebnnBackendWrapper();
 
     virtual void copyToHost() CV_OVERRIDE;
     virtual void setHostDirty() CV_OVERRIDE;
 
     std::string name;
+    Mat* host;
     std::unique_ptr<char[]> buffer;
+    size_t size;
     std::vector<int32_t> dimensions;
     ml::OperandDescriptor descriptor;
 };
 
diff --git a/modules/dnn/src/webnn/README.md b/modules/dnn/src/webnn/README.md
index 8b5c7d06eb80..4d6fe20c0c5b 100644
--- a/modules/dnn/src/webnn/README.md
+++ b/modules/dnn/src/webnn/README.md
@@ -3,7 +3,7 @@
 ### Build WebNN-native and set the environment variable
 
 Refer to [WebNN's build instructions](https://github.com/webmachinelearning/webnn-native) to complete the build of WebNN-native.
- 
+
 Set environment variable `WEBNN_NATIVE_DIR` to enable native DNN_BACKEND_WEBNN build: `export WEBNN_NATIVE_DIR=${PATH_TO_WebNN}`. Please let `WEBNN_NATIVE_DIR` points the output directory of webnn-native build (e.g. webnn-native/out/Release).
 
 ### Test native DNN_BACKEND_WEBNN backend
diff --git a/samples/dnn/classification.cpp b/samples/dnn/classification.cpp
index 769d6874bed4..8af62e7e8b1f 100644
--- a/samples/dnn/classification.cpp
+++ b/samples/dnn/classification.cpp
@@ -24,7 +24,8 @@ std::string keys =
     "2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
     "3: OpenCV implementation, "
    "4: VKCOM, "
-    "5: CUDA },"
+    "5: CUDA, "
+    "6: WebNN }"
     "{ target         | 0 | Choose one of target computation devices: "
     "0: CPU target (by default), "
     "1: OpenCL, "
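
For reference, the snippet below (not part of the patch) sketches how an application would exercise the DNN_BACKEND_WEBNN enum value added above, mirroring the classification sample. It assumes OpenCV was built with HAVE_WEBNN; the model path, input image, input size, and scale factor are placeholders. Layers the WebNN backend does not support fall back to the default OpenCV path, as handled in the dnn.cpp changes.

```cpp
// Minimal usage sketch for the WebNN backend (placeholder model and image paths).
#include <opencv2/dnn.hpp>
#include <opencv2/imgcodecs.hpp>
#include <iostream>

int main()
{
    // Load any classification model readable by cv::dnn (path is a placeholder).
    cv::dnn::Net net = cv::dnn::readNet("model.onnx");

    // Select the backend/target registered by this patch (CPU only for WebNN here).
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_WEBNN);
    net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);

    cv::Mat img = cv::imread("image.jpg");  // placeholder input image
    CV_Assert(!img.empty());

    // Preprocessing values are placeholders; adjust to the chosen model.
    cv::Mat blob = cv::dnn::blobFromImage(img, 1.0 / 255, cv::Size(224, 224));
    net.setInput(blob);

    // Runs through WebnnNet::forward() for the WebNN-supported subgraphs.
    cv::Mat prob = net.forward();

    cv::Point classId;
    double confidence = 0.0;
    cv::minMaxLoc(prob.reshape(1, 1), nullptr, &confidence, nullptr, &classId);
    std::cout << "class " << classId.x << " confidence " << confidence << std::endl;
    return 0;
}
```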