
Commit

Implement op_webnn
huningxin authored and Hanxi Guo committed Aug 2, 2021
1 parent 9075157 commit eee8a28
Showing 9 changed files with 430 additions and 90 deletions.
9 changes: 9 additions & 0 deletions CMakeLists.txt
@@ -1608,6 +1608,15 @@ if(WITH_VULKAN OR HAVE_VULKAN)
endif()
endif()

if(WITH_WEBNN OR HAVE_WEBNN)
status("")
status(" WebNN:" HAVE_WEBNN THEN "YES" ELSE "NO")
if(HAVE_WEBNN)
status(" Include path:" WEBNN_HEADER_DIRS THEN "${WEBNN_HEADER_DIRS}" ELSE "NO")
status(" Link libraries:" WEBNN_LIBRARIES THEN "${WEBNN_LIBRARIES}" ELSE "NO")
endif()
endif()

if(WITH_OPENCL OR HAVE_OPENCL)
ocv_build_features_string(opencl_features
IF HAVE_OPENCL_SVM THEN "SVM"
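When WebNN is detected, the status() block added above shows up in the CMake configuration summary and, by extension, in cv::getBuildInformation(). A minimal sketch (not part of this commit) of checking at runtime whether the library reports WebNN in its build information:

    #include <iostream>
    #include <string>
    #include <opencv2/core.hpp>

    int main()
    {
        // getBuildInformation() returns the same configuration report that
        // CMake prints, including the "WebNN:" line added above when WebNN
        // was enabled at build time.
        const std::string info = cv::getBuildInformation();
        std::cout << (info.find("WebNN") != std::string::npos
                          ? "WebNN reported in build info"
                          : "WebNN not reported in build info")
                  << std::endl;
        return 0;
    }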
3 changes: 3 additions & 0 deletions cmake/templates/cvconfig.h.in
@@ -59,6 +59,9 @@
/* Vulkan support */
#cmakedefine HAVE_VULKAN

/* Webnn support */
#cmakedefine HAVE_WEBNN

/* Define to 1 if you have the <inttypes.h> header file. */
#cmakedefine HAVE_INTTYPES_H 1
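The HAVE_WEBNN #cmakedefine added above becomes a plain #define in the generated cvconfig.h, and the dnn.cpp changes further down wrap every WebNN code path in #ifdef HAVE_WEBNN. A trivial sketch of the same pattern, assuming a translation unit inside the OpenCV build where the generated cvconfig.h is on the include path:

    #include <cstdio>
    #include "cvconfig.h"

    void reportWebnnBuild()
    {
    #ifdef HAVE_WEBNN
        // Compiled only when CMake detected WebNN and defined HAVE_WEBNN.
        std::puts("built with WebNN support");
    #else
        std::puts("built without WebNN support");
    #endif
    }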

10 changes: 9 additions & 1 deletion modules/dnn/CMakeLists.txt
@@ -133,6 +133,14 @@ if(HAVE_TENGINE)
list(APPEND libs -Wl,--whole-archive ${TENGINE_LIBRARIES} -Wl,--no-whole-archive)
endif()

set(webnn_srcs "")
if(HAVE_WEBNN)
list(APPEND include_dirs ${WEBNN_HEADER_DIRS})
list(APPEND include_dirs ${WEBNN_INCLUDE_DIRS})
list(APPEND libs -Wl,--whole-archive ${WEBNN_LIBRARIES} -Wl,--no-whole-archive)
list(APPEND webnn_srcs $ENV{WEBNN_NATIVE_DIR}/gen/src/webnn/webnn_cpp.cpp)
endif()

ocv_module_include_directories(${include_dirs})
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
ocv_append_source_files_cxx_compiler_options(fw_srcs "-Wno-suggest-override") # GCC
@@ -162,7 +170,7 @@ if(HAVE_NGRAPH)
list(APPEND dnn_runtime_libs ngraph::ngraph)
endif()

ocv_glob_module_sources(${sources_options} SOURCES ${fw_srcs})
ocv_glob_module_sources(${sources_options} SOURCES ${fw_srcs} ${webnn_srcs})
ocv_create_module(${libs} ${dnn_runtime_libs})
ocv_add_samples()
ocv_add_accuracy_tests(${dnn_runtime_libs})
5 changes: 3 additions & 2 deletions modules/dnn/include/opencv2/dnn/dnn.hpp
@@ -95,8 +95,7 @@ CV__DNN_INLINE_NS_BEGIN
DNN_TARGET_FPGA, //!< FPGA device with CPU fallbacks using Inference Engine's Heterogeneous plugin.
DNN_TARGET_CUDA,
DNN_TARGET_CUDA_FP16,
DNN_TARGET_HDDL,
DNN_TARGET_GPU
DNN_TARGET_HDDL
};

CV_EXPORTS std::vector< std::pair<Backend, Target> > getAvailableBackends();
@@ -300,6 +299,8 @@ CV__DNN_INLINE_NS_BEGIN

virtual Ptr<BackendNode> initVkCom(const std::vector<Ptr<BackendWrapper> > &inputs);

virtual Ptr<BackendNode> initWebnn(const std::vector<Ptr<BackendWrapper> > &inputs, const std::vector<Ptr<BackendNode> >& nodes);
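This new virtual is the hook a layer implementation overrides to emit its WebNN node; the base-class version added at the bottom of dnn.cpp simply raises StsNotImplemented. A hedged sketch (not from this commit) of what an override could look like for a hypothetical custom layer; the WebnnBackendNode type and the actual graph-building calls live in the op_webnn sources, which are not shown on this page:

    #include <opencv2/dnn.hpp>
    using namespace cv;
    using namespace cv::dnn;

    // Hypothetical layer: only the override mechanics are illustrated.
    class MyCustomLayer CV_FINAL : public Layer
    {
    public:
        bool supportBackend(int backendId) CV_OVERRIDE
        {
            // Advertise WebNN support so initWebnn() is called during network setup.
            return backendId == DNN_BACKEND_WEBNN || backendId == DNN_BACKEND_OPENCV;
        }

        Ptr<BackendNode> initWebnn(const std::vector<Ptr<BackendWrapper> >& /*inputs*/,
                                   const std::vector<Ptr<BackendNode> >& /*nodes*/) CV_OVERRIDE
        {
            // A real override would build and return a WebnnBackendNode wrapping
            // this layer's WebNN operand; erroring out keeps the sketch honest
            // about the parts this page does not show.
            CV_Error(Error::StsNotImplemented, "sketch only");
        }
    };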

/**
* @brief Returns a CUDA backend node
*
268 changes: 258 additions & 10 deletions modules/dnn/src/dnn.cpp
@@ -233,8 +233,11 @@ class BackendRegistry
#endif // HAVE_INF_ENGINE

#ifdef HAVE_WEBNN
if (haveWebNN())
if (haveWebnn())
{
backends.push_back(std::make_pair(DNN_BACKEND_WEBNN, DNN_TARGET_CPU));
backends.push_back(std::make_pair(DNN_BACKEND_WEBNN, DNN_TARGET_OPENCL));
}
#endif // HAVE_WEBNN
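With the registration above, builds that have WebNN expose it through the getAvailableBackends() query declared in dnn.hpp. A small sketch (not part of the diff) that checks which WebNN targets were registered:

    #include <iostream>
    #include <opencv2/dnn.hpp>

    int main()
    {
        // getAvailableBackends() returns (backend, target) pairs; the two
        // push_back calls above register DNN_TARGET_CPU and DNN_TARGET_OPENCL
        // for DNN_BACKEND_WEBNN when haveWebnn() is true.
        for (const auto& bt : cv::dnn::getAvailableBackends())
        {
            if (bt.first == cv::dnn::DNN_BACKEND_WEBNN)
                std::cout << "WebNN registered for target " << bt.second << std::endl;
        }
        return 0;
    }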

#ifdef HAVE_OPENCL
@@ -1131,8 +1134,7 @@ static Ptr<BackendWrapper> wrapMat(int backendId, int targetId, cv::Mat& m)
else if (backendId == DNN_BACKEND_WEBNN)
{
#ifdef HAVE_WEBNN
CV_Assert(haveWebNN());
return Ptr<BackendWrapper>(new WebNNBackendWrapper(targetId, m));
return Ptr<BackendWrapper>(new WebnnBackendWrapper(targetId, m));
#else
CV_Error(Error::StsNotImplemented, "This OpenCV version is built without support of WebNN");
#endif
@@ -1429,7 +1431,7 @@ struct Net::Impl : public detail::NetImplBase
if (preferableBackend == DNN_BACKEND_WEBNN)
{
CV_Assert(preferableTarget == DNN_TARGET_CPU ||
preferableTarget == DNN_TARGET_GPU);
preferableTarget == DNN_TARGET_OPENCL);
}
#endif
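For applications, WebNN is selected like any other dnn backend, and the assert above restricts it to DNN_TARGET_CPU or DNN_TARGET_OPENCL. A minimal usage sketch (the model file name and input shape are placeholders, not from this commit):

    #include <vector>
    #include <opencv2/dnn.hpp>
    using namespace cv;
    using namespace cv::dnn;

    int main()
    {
        Net net = readNet("model.onnx");              // hypothetical model file
        net.setPreferableBackend(DNN_BACKEND_WEBNN);
        net.setPreferableTarget(DNN_TARGET_CPU);      // DNN_TARGET_OPENCL also passes the check above
        Mat blob(std::vector<int>{1, 3, 224, 224}, CV_32F, Scalar(0));
        net.setInput(blob);
        Mat out = net.forward();                      // dispatched through forwardWebnn() further down
        (void)out;
        return 0;
    }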
CV_Assert(preferableBackend != DNN_BACKEND_VKCOM ||
@@ -1659,7 +1661,7 @@ struct Net::Impl : public detail::NetImplBase
else if (preferableBackend == DNN_BACKEND_WEBNN)
{
#ifdef HAVE_WEBNN
initWebNNBackend(blobsToKeep_);
initWebnnBackend(blobsToKeep_);
#else
CV_Error(Error::StsNotImplemented, "This OpenCV version is built without support of WebNN");
#endif
@@ -2368,10 +2370,255 @@ struct Net::Impl : public detail::NetImplBase
}
#endif // HAVE_DNN_NGRAPH

void initWebNNBackend(const std::vector<LayerPin>& blobsToKeep_)
{
#ifdef HAVE_WEBNN
// to do
void addWebnnOutputs(LayerData &ld)
{
CV_TRACE_FUNCTION();

Ptr<WebnnNet> layerNet;
auto it = ld.backendNodes.find(preferableBackend);
if (it != ld.backendNodes.end())
{
Ptr<BackendNode> node = it->second;
if (!node.empty())
{
Ptr<WebnnBackendNode> webnnNode = node.dynamicCast<WebnnBackendNode>();
CV_Assert(!webnnNode.empty()); CV_Assert(!webnnNode->net.empty());
layerNet = webnnNode->net;
}
}

for (int i = 0; i < ld.inputBlobsId.size(); ++i)
{
LayerData &inpLd = layers[ld.inputBlobsId[i].lid];
Ptr<BackendNode> inpNode = inpLd.backendNodes[preferableBackend];
if (!inpNode.empty())
{
Ptr<WebnnBackendNode> webnnInpNode = inpNode.dynamicCast<WebnnBackendNode>();
CV_Assert(!webnnInpNode.empty()); CV_Assert(!webnnInpNode->net.empty());
if (layerNet != webnnInpNode->net)
{
webnnInpNode->net->addOutput(webnnInpNode->name);
webnnInpNode->net->setUnconnectedNodes(webnnInpNode);
}
}
}
}

void initWebnnBackend(const std::vector<LayerPin>& blobsToKeep_)
{
CV_TRACE_FUNCTION();
CV_Assert_N(preferableBackend == DNN_BACKEND_WEBNN, haveWebnn());

MapIdToLayerData::iterator it;
Ptr<WebnnNet> net;

for (it = layers.begin(); it != layers.end(); ++it)
{
LayerData &ld = it->second;
if (ld.id == 0)
{
CV_Assert((netInputLayer->outNames.empty() && ld.outputBlobsWrappers.size() == 1) ||
(netInputLayer->outNames.size() == ld.outputBlobsWrappers.size()));
for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
{
Ptr<WebnnBackendWrapper> wrapper = ld.outputBlobsWrappers[i].dynamicCast<WebnnBackendWrapper>();
std::string outputName = netInputLayer->outNames.empty() ? ld.name : netInputLayer->outNames[i];
outputName = ld.outputBlobsWrappers.size() > 1 ? (outputName + "." + std::to_string(i)) : outputName;
wrapper->name = outputName;
}
}
else
{
for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
{
Ptr<WebnnBackendWrapper> wrapper = ld.outputBlobsWrappers[i].dynamicCast<WebnnBackendWrapper>();
std::string outputName = ld.outputBlobsWrappers.size() > 1 ? (ld.name + "." + std::to_string(i)) : ld.name;
wrapper->name = outputName;
}
}
}

// Build WebNN networks from sets of layers that support this
// backend. Split the whole model into several WebNN networks if
// some of the layers are not implemented.
for (it = layers.begin(); it != layers.end(); ++it)
{
LayerData &ld = it->second;

if (ld.id == 0 && ld.skip)
continue;

bool fused = ld.skip;
Ptr<Layer> layer = ld.layerInstance;
if (!fused && !layer->supportBackend(preferableBackend))
{
addWebnnOutputs(ld);
net = Ptr<WebnnNet>();
layer->preferableTarget = DNN_TARGET_CPU;

for (int i = 0; i < ld.inputBlobsId.size(); ++i)
{
LayerData &inpLd = layers[ld.inputBlobsId[i].lid];
Ptr<BackendNode> inpNode = inpLd.backendNodes[preferableBackend];
if (!inpNode.empty()) {
Ptr<WebnnBackendNode> webnnNode = inpNode.dynamicCast<WebnnBackendNode>();
CV_Assert(!webnnNode.empty());
webnnNode->net->setUnconnectedNodes(webnnNode);
}
}
continue;
}
ld.skip = true; // Initially skip all WebNN supported layers.

// Create a new network if one of the inputs comes from a different WebNN graph.
std::vector<Ptr<BackendNode>> inputNodes;
for (int i = 0; i < ld.inputBlobsId.size(); ++i)
{
// Layer_Test_ROIPooling.Accuracy has 2 inputs inpLD = 0, 0 -> has 4 inputNodes (input, rois, input, rois)
if (inputNodes.size() == ld.inputBlobsId.size()) {
break;
}
LayerData &inpLd = layers[ld.inputBlobsId[i].lid];
Ptr<BackendNode> inpNode = inpLd.backendNodes[preferableBackend];
if (!inpNode.empty())
{
Ptr<WebnnBackendNode> webnnInpNode = inpNode.dynamicCast<WebnnBackendNode>();
CV_Assert(!webnnInpNode.empty()); CV_Assert(!webnnInpNode->net.empty());
if (webnnInpNode->net == net && !fused) {
inputNodes.push_back(inpNode);
continue;
}
}

if (net.empty()) {
net = Ptr<WebnnNet>(new WebnnNet());
}

if (!fused) {
std::vector<std::string> inputNames;
std::vector<cv::Mat> inputs;

auto curr_pos = inpLd.consumers.begin();
auto compare = [&ld] (const LayerPin& lp) { return lp.lid == ld.id; };
auto cons = curr_pos;
while ((cons = std::find_if(curr_pos, inpLd.consumers.end(), compare)) !=
inpLd.consumers.end()) {
int cons_inp = cons->oid;
Ptr<WebnnBackendWrapper> inpWrapper = inpLd.outputBlobsWrappers[cons_inp].
dynamicCast<WebnnBackendWrapper>();
CV_Assert(!inpWrapper.empty());
auto iter = std::find(inputNames.begin(), inputNames.end(),
inpWrapper->name);
if (iter == inputNames.end()) {
inputNames.push_back(inpWrapper->name);
inputs.push_back(inpLd.outputBlobs[cons_inp]);
}
curr_pos = cons + 1;
}

auto inps = net->setInputs(inputs, inputNames);
for (auto& inp : inps) {
inputNodes.emplace_back(Ptr<BackendNode>(new WebnnBackendNode(inp)));
}
}
}

Ptr<BackendNode> node;
if (!net.empty())
{
if (fused)
{
bool inPlace = ld.inputBlobsId.size() == 1 && ld.outputBlobs.size() == 1 &&
ld.inputBlobs[0]->data == ld.outputBlobs[0].data;
CV_Assert(inPlace);
node = layers[ld.inputBlobsId[0].lid].backendNodes[preferableBackend];
ld.inputBlobsWrappers = layers[ld.inputBlobsId[0].lid].inputBlobsWrappers;
}
}
else {
net = Ptr<WebnnNet>(new WebnnNet());
}

if (!fused)
{
CV_Assert(ld.inputBlobsId.size() == inputNodes.size());
for (int i = 0; i < ld.inputBlobsId.size(); ++i)
{
int lid = ld.inputBlobsId[i].lid;
int oid = ld.inputBlobsId[i].oid;
if (oid == 0 || lid == 0)
continue;

auto webnnInpNode = inputNodes[i].dynamicCast<WebnnBackendNode>();
inputNodes[i] = Ptr<BackendNode>(new WebnnBackendNode(webnnInpNode->operand));
}

if (layer->supportBackend(preferableBackend))
{
node = layer->initWebnn(ld.inputBlobsWrappers, inputNodes);
for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
{
Ptr<WebnnBackendWrapper> wrapper = ld.outputBlobsWrappers[i].dynamicCast<WebnnBackendWrapper>();
node.dynamicCast<WebnnBackendNode>()->name = wrapper->name;
}
}
else
{
continue;
}
}
else if (node.empty())
continue;

ld.backendNodes[preferableBackend] = node;

Ptr<WebnnBackendNode> webnnNode = node.dynamicCast<WebnnBackendNode>();
CV_Assert(!webnnNode.empty());
webnnNode->net = net;

if (ld.consumers.empty()) {
// TF EAST_text_detection
webnnNode->net->setUnconnectedNodes(webnnNode);
}
for (const auto& pin : blobsToKeep_)
{
if (pin.lid == ld.id)
{
webnnNode->net->addOutput(webnnNode->name);
break;
}
}
net->addBlobs(ld.inputBlobsWrappers);
net->addBlobs(ld.outputBlobsWrappers);
addWebnnOutputs(ld);
}

// Initialize all networks.
for (MapIdToLayerData::reverse_iterator it = layers.rbegin(); it != layers.rend(); ++it)
{
LayerData &ld = it->second;
auto iter = ld.backendNodes.find(preferableBackend);
if (iter == ld.backendNodes.end())
continue;

Ptr<BackendNode>& node = iter->second;
if (node.empty())
continue;

Ptr<WebnnBackendNode> webnnNode = node.dynamicCast<WebnnBackendNode>();
if (webnnNode.empty())
continue;

CV_Assert(!webnnNode->net.empty());

if (!webnnNode->net->isInitialized())
{
webnnNode->net->setUnconnectedNodes(webnnNode);
webnnNode->net->createNet((Target)preferableTarget);
ld.skip = false;
}
}
#endif
}

@@ -3416,7 +3663,7 @@ struct Net::Impl : public detail::NetImplBase
}
else if (preferableBackend == DNN_BACKEND_WEBNN)
{
forwardWebNN(ld.outputBlobsWrappers, node, isAsync);
forwardWebnn(ld.outputBlobsWrappers, node, isAsync);
}
else if (preferableBackend == DNN_BACKEND_VKCOM)
{
@@ -4534,6 +4781,7 @@ string Net::Impl::dump()
case DNN_BACKEND_OPENCV: backend = "OCV/"; break;
case DNN_BACKEND_VKCOM: backend = "VULKAN/"; break;
case DNN_BACKEND_CUDA: backend = "CUDA/"; break;
case DNN_BACKEND_WEBNN: backend = "WEBNN/"; break;
// don't use default:
}
out << "digraph G {\n";
@@ -5125,7 +5373,7 @@ Ptr<BackendNode> Layer::initNgraph(const std::vector<Ptr<BackendWrapper> > & inputs,
return Ptr<BackendNode>();
}

Ptr<BackendNode> Layer::initWebNN(const std::vector<Ptr<BackendWrapper> > & inputs, const std::vector<Ptr<BackendNode> >& nodes)
Ptr<BackendNode> Layer::initWebnn(const std::vector<Ptr<BackendWrapper> > & inputs, const std::vector<Ptr<BackendNode> >& nodes)
{
CV_Error(Error::StsNotImplemented, "WebNN pipeline of " + type +
" layers is not defined.");
(Diffs for the remaining changed files were not loaded on this page.)
