compilation.cpp
// Copyright (C) 2018-2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "gtest/gtest.h"
#include "openvino/frontend/manager.hpp"
#include "openvino/openvino.hpp"
#include "openvino/runtime/exec_model_info.hpp"
#include "tf_utils.hpp"
#include "utils.hpp"
using namespace ov::frontend::tensorflow::tests;
class CompileModelsTests : public ::testing::Test {};
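// Note: convert_model() used throughout is a helper from the test utility
// headers included above; it reads a TensorFlow .pbtxt model from the test
// data directory and converts it to an ov::Model.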
#ifdef OPENVINO_ARCH_ARM64
// Ticket: 122666
TEST_F(CompileModelsTests, DISABLED_NgramCompilation) {
#else
TEST_F(CompileModelsTests, NgramCompilation) {
#endif
    ov::Core core;
    auto model = convert_model("model_ngram/model_ngram.pbtxt");
    ov::CompiledModel compiled_model = core.compile_model(model, "CPU");
    const auto runtime_model = compiled_model.get_runtime_model();
    // A Convert node is inserted for the CPU plugin under API 2.0
    EXPECT_EQ(runtime_model->get_ordered_ops().size(), 5);
    EXPECT_EQ(runtime_model->get_parameters().size(), 2);
    EXPECT_EQ(runtime_model->get_results().size(), 1);
}
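// The two tests below repeatedly count executed ops by their LAYER_TYPE
// runtime attribute. A minimal sketch of that pattern as a reusable helper
// (illustrative only, not part of the original suite; it assumes every node
// in the runtime model exposes ov::exec_model_info::LAYER_TYPE in its
// rt_info, and that <algorithm> is available transitively, as the tests
// themselves already rely on for std::count_if):
[[maybe_unused]] static size_t count_ops_of_type(const std::shared_ptr<const ov::Model>& runtime_model,
                                                 const std::string& layer_type) {
    const auto ops = runtime_model->get_ops();
    return static_cast<size_t>(std::count_if(ops.begin(), ops.end(), [&](const std::shared_ptr<ov::Node>& node) {
        return node->get_rt_info().at(ov::exec_model_info::LAYER_TYPE).as<std::string>() == layer_type;
    }));
}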
#ifdef OPENVINO_ARCH_ARM64
// Ticket: CVS-122396
TEST_F(CompileModelsTests, DISABLED_ModelWithSplitConvConcat)
#else
TEST_F(CompileModelsTests, ModelWithSplitConvConcat)
#endif
{
    auto model = convert_model("split_conv_concat/split_conv_concat.pbtxt");
    ov::Core core;
    ov::CompiledModel compiled_model = core.compile_model(model, "CPU");
    const auto runtime_model = compiled_model.get_runtime_model();
    auto get_layer_type = [](const std::shared_ptr<ov::Node>& node) {
        return node->get_rt_info().at(ov::exec_model_info::LAYER_TYPE).as<std::string>();
    };
    const auto ops = runtime_model->get_ops();
    // Split and Concat are expected to be fused away, leaving only the two Convolutions
    EXPECT_EQ(0, std::count_if(ops.begin(), ops.end(), [&](const std::shared_ptr<ov::Node>& node) {
        return get_layer_type(node) == "Split";
    }));
    EXPECT_EQ(2, std::count_if(ops.begin(), ops.end(), [&](const std::shared_ptr<ov::Node>& node) {
        return get_layer_type(node) == "Convolution";
    }));
    EXPECT_EQ(0, std::count_if(ops.begin(), ops.end(), [&](const std::shared_ptr<ov::Node>& node) {
        return get_layer_type(node) == "Concat";
    }));
}

TEST_F(CompileModelsTests, ModelWithShapeOf) {
    auto model = convert_model("shapeof_slice_abs/shapeof_slice_abs.pbtxt");
    ov::Core core;
    // Pin inference precision to f32 so the op counts below are not affected
    // by precision-conversion nodes
    core.set_property("CPU", ov::hint::inference_precision(ov::element::f32));
    ov::CompiledModel compiled_model = core.compile_model(model, "CPU");
    const auto runtime_model = compiled_model.get_runtime_model();
    auto get_layer_type = [](const std::shared_ptr<ov::Node>& node) {
        return node->get_rt_info().at(ov::exec_model_info::LAYER_TYPE).as<std::string>();
    };
    const auto ops = runtime_model->get_ops();
    // Only one Input, one Eltwise, and one Output remain
    EXPECT_EQ(3, ops.size());
    // ShapeOf is constant-folded
    EXPECT_EQ(0, std::count_if(ops.begin(), ops.end(), [&](const std::shared_ptr<ov::Node>& node) {
        return get_layer_type(node) == "ShapeOf";
    }));
    // Slice is eliminated
    EXPECT_EQ(0, std::count_if(ops.begin(), ops.end(), [&](const std::shared_ptr<ov::Node>& node) {
        return get_layer_type(node) == "StridedSlice";
    }));
}
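// Usage note (a sketch, assuming a standard OpenVINO build): once the
// TensorFlow frontend test target is built, these cases can be selected with
// the usual GoogleTest filter, e.g.
//   ./ov_tensorflow_frontend_tests --gtest_filter='CompileModelsTests.*'
// The binary name is an assumption and may differ per build configuration.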