diff --git a/components/percona_telemetry/data_provider.h b/components/percona_telemetry/data_provider.h
index 2d017a25218f..e66082f24e83 100644
--- a/components/percona_telemetry/data_provider.h
+++ b/components/percona_telemetry/data_provider.h
@@ -61,7 +61,7 @@ class DataProvider {
       SERVICE_TYPE(mysql_command_thread) & command_thread_service,
       Logger &logger);
 
-  ~DataProvider() = default;
+  virtual ~DataProvider() = default;
 
   DataProvider(const DataProvider &rhs) = delete;
   DataProvider(DataProvider &&rhs) = delete;
@@ -73,9 +73,9 @@ class DataProvider {
   std::string get_report();
 
  private:
-  bool do_query(const std::string &query, QueryResult *result,
-                unsigned int *err_no = nullptr,
-                bool suppress_query_error_log = false);
+  virtual bool do_query(const std::string &query, QueryResult *result,
+                        unsigned int *err_no = nullptr,
+                        bool suppress_query_error_log = false);
   bool collect_db_instance_id_info(rapidjson::Document *document);
   bool collect_product_version_info(rapidjson::Document *document);
   bool collect_plugins_info(rapidjson::Document *document);
diff --git a/mysql-test/suite/component_percona_telemetry/r/delete_obsolete_file.result b/mysql-test/suite/component_percona_telemetry/r/delete_obsolete_file.result
new file mode 100644
index 000000000000..73a3a53f4848
--- /dev/null
+++ b/mysql-test/suite/component_percona_telemetry/r/delete_obsolete_file.result
@@ -0,0 +1,6 @@
+CALL mtr.add_suppression("Component percona_telemetry reported: 'Skipping file deletion this_file_should_not_be_removed'");
+# restart:--percona_telemetry.grace_interval=30 --percona_telemetry.scrape_interval=30 --percona_telemetry.history_keep_interval=80 --percona_telemetry.telemetry_root_dir=
+File with a name not conforming to the pattern should still be there
+1
+Obsolete file should be removed
+0
diff --git a/mysql-test/suite/component_percona_telemetry/r/invalid_path.result b/mysql-test/suite/component_percona_telemetry/r/invalid_path.result
new file mode 100644
index 000000000000..f6614c3768bb
--- /dev/null
+++ b/mysql-test/suite/component_percona_telemetry/r/invalid_path.result
@@ -0,0 +1,6 @@
+# restart:--percona_telemetry.grace_interval=30 --percona_telemetry.scrape_interval=30 --percona_telemetry.history_keep_interval=80 --percona_telemetry.telemetry_root_dir=
+Server should still be alive
+CALL mtr.add_suppression("Component percona_telemetry reported: 'Problem during telemetry file write: filesystem error: directory iterator cannot open directory: No such file or directory");
+include/assert_grep.inc [Percona Telemetry Component warns about nonexistent directory]
+Telemetry root dir should contain 1 file
+1
diff --git a/mysql-test/suite/component_percona_telemetry/r/telemetry_disable.result b/mysql-test/suite/component_percona_telemetry/r/telemetry_disable.result
new file mode 100644
index 000000000000..5821a1dc75f7
--- /dev/null
+++ b/mysql-test/suite/component_percona_telemetry/r/telemetry_disable.result
@@ -0,0 +1,11 @@
+include/assert.inc [Percona Telemetry Component should be installed]
+UNINSTALL COMPONENT 'file://component_percona_telemetry';
+include/assert.inc [Percona Telemetry Component should not be installed]
+# restart:
+include/assert.inc [Percona Telemetry Component should be installed after server restart]
+# restart:--percona-telemetry-disable=1
+include/assert.inc [Percona Telemetry Component should not be installed when server is started with --percona-telemetry-disable=1]
+INSTALL COMPONENT 'file://component_percona_telemetry';
+include/assert.inc [It should be possible to manually install Percona Telemetry Component]
+# restart:--percona-telemetry-disable=1
+include/assert.inc [Percona Telemetry Component should not be installed when server is started with --percona-telemetry-disable=1]
diff --git a/mysql-test/suite/component_percona_telemetry/r/telemetry_file_creation.result b/mysql-test/suite/component_percona_telemetry/r/telemetry_file_creation.result
new file mode 100644
index 000000000000..85c421c8c2a8
--- /dev/null
+++ b/mysql-test/suite/component_percona_telemetry/r/telemetry_file_creation.result
@@ -0,0 +1,11 @@
+# restart:--percona_telemetry.grace_interval=30 --percona_telemetry.scrape_interval=30 --percona_telemetry.history_keep_interval=80 --percona_telemetry.telemetry_root_dir=
+Time passed: 10.0000. Still in grace_interval. Telemetry root dir should contain 0 files
+0
+Time passed: 40.0000. Time after grace_interval: 10.0000. Telemetry root dir should contain 1 file
+1
+Time passed: 70.0000. Time after grace_interval: 40.0000. Telemetry root dir should contain 2 files
+2
+Time passed: 100.0000. Time after grace_interval: 70.0000. Telemetry root dir should contain 3 files
+3
+Time passed: 130.0000. Time after grace_interval: 100.0000. Telemetry root dir should still contain 3 files
+3
diff --git a/mysql-test/suite/component_percona_telemetry/t/delete_obsolete_file.test b/mysql-test/suite/component_percona_telemetry/t/delete_obsolete_file.test
new file mode 100644
index 000000000000..aa661a6cd401
--- /dev/null
+++ b/mysql-test/suite/component_percona_telemetry/t/delete_obsolete_file.test
@@ -0,0 +1,40 @@
+# Test that obsolete files from other servers are removed,
+# but files with names not conforming to the expected pattern are not.
+
+--source include/have_percona_telemetry.inc
+--source include/force_restart.inc
+
+CALL mtr.add_suppression("Component percona_telemetry reported: 'Skipping file deletion this_file_should_not_be_removed'");
+
+--let $telemetry_root_dir = $MYSQL_TMP_DIR/telemetry_dir
+--let $grace_interval = 30
+--let $scrape_interval = 30
+--let $history_keep_interval = 80
+
+--let $obsolete_file_name = 313671600-1af5d44c-81f9-4083-807d-e71ca7914f92.json
+--let $untouchable_file_name = this_file_should_not_be_removed.json
+--let $untouchable_file = $telemetry_root_dir/$untouchable_file_name
+--let $obsolete_file = $telemetry_root_dir/$obsolete_file_name
+
+--mkdir $telemetry_root_dir
+--exec touch $untouchable_file
+--exec touch $obsolete_file
+
+# restart the server with custom telemetry file path and timeouts
+--let $restart_parameters = "restart:--percona_telemetry.grace_interval=$grace_interval --percona_telemetry.scrape_interval=$scrape_interval --percona_telemetry.history_keep_interval=$history_keep_interval --percona_telemetry.telemetry_root_dir=$telemetry_root_dir"
+--replace_regex /telemetry_root_dir=.*telemetry_dir/telemetry_root_dir=/
+--source include/restart_mysqld.inc
+
+# Wait for percona_telemetry.grace_interval to pass (+10sec)
+--let $timeout = `select $grace_interval + 10`
+--sleep $timeout
+
+--echo File with a name not conforming to the pattern should still be there
+--file_exists $untouchable_file
+--exec ls -1 $telemetry_root_dir | grep $untouchable_file_name | wc -l
+
+--echo Obsolete file should be removed
+--exec ls -1 $telemetry_root_dir | grep $obsolete_file_name | wc -l
+
+# cleanup
+--force-rmdir $telemetry_root_dir
diff --git a/mysql-test/suite/component_percona_telemetry/t/invalid_path.test b/mysql-test/suite/component_percona_telemetry/t/invalid_path.test
new file mode 100644
index 000000000000..e2c56b3d707e
--- /dev/null
+++ b/mysql-test/suite/component_percona_telemetry/t/invalid_path.test
@@ -0,0 +1,42 @@
+# Test that Percona Telemetry Component doesn't do any harm to the server when it is not possible
+# to store the telemetry file
+
+--source include/have_percona_telemetry.inc
+--source include/force_restart.inc
+
+--let $telemetry_root_dir = $MYSQL_TMP_DIR/telemetry_dir
+--let $grace_interval = 30
+--let $scrape_interval = 30
+--let $history_keep_interval = 80
+
+
+# restart the server with custom telemetry file path and timeouts
+--let $restart_parameters = "restart:--percona_telemetry.grace_interval=$grace_interval --percona_telemetry.scrape_interval=$scrape_interval --percona_telemetry.history_keep_interval=$history_keep_interval --percona_telemetry.telemetry_root_dir=$telemetry_root_dir"
+--replace_regex /telemetry_root_dir=.*telemetry_dir/telemetry_root_dir=/
+--source include/restart_mysqld.inc
+
+# Wait for more than grace_interval. The component should try to create the telemetry file, but as the target dir
+# doesn't exist, it will fail. It should complain with a warning in the log, but continue.
+--let $timeout = `select $grace_interval + 20`
+--sleep $timeout
+--echo Server should still be alive
+
+--let $warning_message = Component percona_telemetry reported: 'Problem during telemetry file write: filesystem error: directory iterator cannot open directory: No such file or directory
+--eval CALL mtr.add_suppression("$warning_message")
+--let $assert_file = $MYSQLTEST_VARDIR/log/mysqld.1.err
+--let $assert_select = $warning_message
+--let $assert_count=1
+--let $assert_text = Percona Telemetry Component warns about nonexistent directory
+--source include/assert_grep.inc
+
+
+# Now create the telemetry dir and wait for scrape_interval. Telemetry file should be created
+--mkdir $telemetry_root_dir
+
+--let $timeout = $scrape_interval
+--sleep $timeout
+--echo Telemetry root dir should contain 1 file
+--exec ls -1 $telemetry_root_dir | wc -l
+
+# cleanup
+--force-rmdir $telemetry_root_dir
diff --git a/mysql-test/suite/component_percona_telemetry/t/telemetry_disable.test b/mysql-test/suite/component_percona_telemetry/t/telemetry_disable.test
new file mode 100644
index 000000000000..ab1f94c27c06
--- /dev/null
+++ b/mysql-test/suite/component_percona_telemetry/t/telemetry_disable.test
@@ -0,0 +1,50 @@
+# Component enable/disable
+
+--source include/have_percona_telemetry.inc
+--source include/force_restart.inc
+
+
+# Telemetry is enabled. It is already checked by have_percona_telemetry.inc
+--let $assert_text = Percona Telemetry Component should be installed
+--let $assert_cond = [SELECT COUNT(*) FROM mysql.component WHERE component_urn = "file://component_percona_telemetry"] = 1
+--source include/assert.inc
+
+# Component can be unloaded
+UNINSTALL COMPONENT 'file://component_percona_telemetry';
+
+--let $assert_text = Percona Telemetry Component should not be installed
+--let $assert_cond = [SELECT COUNT(*) FROM mysql.component WHERE component_urn = "file://component_percona_telemetry"] = 0
+--source include/assert.inc
+
+# Restart the server. Percona Telemetry Component should be installed again
+--let $restart_parameters = "restart:"
+--source include/restart_mysqld.inc
+
+--let $assert_text = Percona Telemetry Component should be installed after server restart
+--let $assert_cond = [SELECT COUNT(*) FROM mysql.component WHERE component_urn = "file://component_percona_telemetry"] = 1
+--source include/assert.inc
+
+# Now restart the server with --percona-telemetry-disable=1. Component should not be loaded
+--let $restart_parameters = "restart:--percona-telemetry-disable=1"
+--source include/restart_mysqld.inc
+
+--let $assert_text = Percona Telemetry Component should not be installed when server is started with --percona-telemetry-disable=1
+--let $assert_cond = [SELECT COUNT(*) FROM mysql.component WHERE component_urn = "file://component_percona_telemetry"] = 0
+--source include/assert.inc
+
+# Component can be loaded
+INSTALL COMPONENT 'file://component_percona_telemetry';
+
+--let $assert_text = It should be possible to manually install Percona Telemetry Component
+--let $assert_cond = [SELECT COUNT(*) FROM mysql.component WHERE component_urn = "file://component_percona_telemetry"] = 1
+--source include/assert.inc
+
+# ... but after a restart with --percona-telemetry-disable=1 it should not be there
+# Now restart the server with --percona-telemetry-disable=1. Component should not be loaded
+--let $restart_parameters = "restart:--percona-telemetry-disable=1"
+--source include/restart_mysqld.inc
+
+--let $assert_text = Percona Telemetry Component should not be installed when server is started with --percona-telemetry-disable=1
+--let $assert_cond = [SELECT COUNT(*) FROM mysql.component WHERE component_urn = "file://component_percona_telemetry"] = 0
+--source include/assert.inc
+
diff --git a/mysql-test/suite/component_percona_telemetry/t/telemetry_file_creation.test b/mysql-test/suite/component_percona_telemetry/t/telemetry_file_creation.test
new file mode 100644
index 000000000000..f3797ce103a9
--- /dev/null
+++ b/mysql-test/suite/component_percona_telemetry/t/telemetry_file_creation.test
@@ -0,0 +1,76 @@
+# Test the telemetry file creation and cleanup
+
+--source include/have_percona_telemetry.inc
+--source include/force_restart.inc
+
+--let $telemetry_root_dir = $MYSQL_TMP_DIR/telemetry_dir
+--let $grace_interval = 30
+--let $scrape_interval = 30
+--let $history_keep_interval = 80
+
+--mkdir $telemetry_root_dir
+
+# restart the server with custom telemetry file path and timeouts
+--let $restart_parameters = "restart:--percona_telemetry.grace_interval=$grace_interval --percona_telemetry.scrape_interval=$scrape_interval --percona_telemetry.history_keep_interval=$history_keep_interval --percona_telemetry.telemetry_root_dir=$telemetry_root_dir"
+--replace_regex /telemetry_root_dir=.*telemetry_dir/telemetry_root_dir=/
+--source include/restart_mysqld.inc
+
+# time = 0
+--let $time_passed = 0
+--let $time_after_grace_interval=0
+# Right after the server start, due to percona_telemetry.grace_interval, the telemetry file should not be created
+--let $timeout = `select $grace_interval / 3`
+--sleep $timeout
+--let $time_passed = `select $time_passed + $timeout`
+
+### now we are within grace_interval
+--echo Time passed: $time_passed. Still in grace_interval. Telemetry root dir should contain 0 files
+--exec ls -1 $telemetry_root_dir | wc -l
+
+
+
+# Wait for percona_telemetry.grace_interval to pass (+10sec) and check if telemetry file was created
+--let $timeout = `select $grace_interval - $time_passed + 10`
+--sleep $timeout
+--let $time_passed = `select $time_passed + $timeout`
+--let $time_after_grace_interval = `select $time_passed - $grace_interval`
+
+### now we are in 1st scrape interval
+--echo Time passed: $time_passed. Time after grace_interval: $time_after_grace_interval. Telemetry root dir should contain 1 file
+--exec ls -1 $telemetry_root_dir | wc -l
+
+
+
+# Wait a bit more. New telemetry file should be created
+--let $timeout = $scrape_interval
+--sleep $timeout
+--let $time_passed = `select $time_passed + $timeout`
+--let $time_after_grace_interval = `select $time_passed - $grace_interval`
+
+### now we are in 2nd scrape interval
+--echo Time passed: $time_passed. Time after grace_interval: $time_after_grace_interval. Telemetry root dir should contain 2 files
+--exec ls -1 $telemetry_root_dir | wc -l
+
+
+# Wait a bit more. New telemetry file should be created
+--let $timeout = $scrape_interval
+--sleep $timeout
+--let $time_passed = `select $time_passed + $timeout`
+--let $time_after_grace_interval = `select $time_passed - $grace_interval`
+
+### now we are in 3rd scrape interval
+--echo Time passed: $time_passed. Time after grace_interval: $time_after_grace_interval. Telemetry root dir should contain 3 files
+--exec ls -1 $telemetry_root_dir | wc -l
+
+
+# Due to history_keep_interval, new files should be created, but old ones should be deleted.
+--let $timeout = $scrape_interval
+--sleep $timeout
+--let $time_passed = `select $time_passed + $timeout`
+--let $time_after_grace_interval = `select $time_passed - $grace_interval`
+
+--echo Time passed: $time_passed. Time after grace_interval: $time_after_grace_interval. Telemetry root dir should still contain 3 files
+--exec ls -1 $telemetry_root_dir | wc -l
+
+# cleanup
+--force-rmdir $telemetry_root_dir
diff --git a/unittest/gunit/CMakeLists.txt b/unittest/gunit/CMakeLists.txt
index 76bb1e809c36..8f2177d8bdd4 100644
--- a/unittest/gunit/CMakeLists.txt
+++ b/unittest/gunit/CMakeLists.txt
@@ -457,6 +457,7 @@ ADD_SUBDIRECTORY(innodb)
 ADD_SUBDIRECTORY(components/mysql_server)
 ADD_SUBDIRECTORY(components/keyring_common)
 ADD_SUBDIRECTORY(components/keyring_vault)
+ADD_SUBDIRECTORY(components/percona_telemetry)
 ADD_SUBDIRECTORY(xplugin)
 ADD_SUBDIRECTORY(group_replication)
 ADD_SUBDIRECTORY(libmysqlgcs)
diff --git a/unittest/gunit/components/percona_telemetry/CMakeLists.txt b/unittest/gunit/components/percona_telemetry/CMakeLists.txt
new file mode 100644
index 000000000000..7fe5a01e7bc2
--- /dev/null
+++ b/unittest/gunit/components/percona_telemetry/CMakeLists.txt
@@ -0,0 +1,43 @@
+# Copyright (c) 2024 Percona LLC and/or its affiliates. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; version 2 of
+# the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+MY_CHECK_CXX_COMPILER_WARNING("-Wno-unused-local-typedefs" HAS_WARN_FLAG)
+IF(HAS_WARN_FLAG)
+  STRING_APPEND(CMAKE_CXX_FLAGS " ${HAS_WARN_FLAG}")
+ENDIF()
+
+INCLUDE_DIRECTORIES(SYSTEM
+  ${GMOCK_INCLUDE_DIRS}
+)
+
+SET(PERCONA_TELEMETRY_COMPONENT_SRC
+  ${CMAKE_SOURCE_DIR}/components/percona_telemetry/data_provider.cc
+)
+
+SET(LOCAL_MOCK_SRC
+  logger.cc
+)
+
+# Add tests
+SET(TESTS
+  data_provider
+  )
+
+FOREACH(test ${TESTS})
+  MYSQL_ADD_EXECUTABLE(${test}-t ${PERCONA_TELEMETRY_COMPONENT_SRC} ${LOCAL_MOCK_SRC} ${test}-t.cc ADD_TEST ${test} LINK_LIBRARIES ${PERCONA_TELEMETRY_COMPONENT_LIBS} DEPENDENCIES component_percona_telemetry)
+  TARGET_LINK_LIBRARIES(${test}-t mysys gunit_small)
+ENDFOREACH()
+
diff --git a/unittest/gunit/components/percona_telemetry/data_provider-t.cc b/unittest/gunit/components/percona_telemetry/data_provider-t.cc
new file mode 100644
index 000000000000..c6e64987157f
--- /dev/null
+++ b/unittest/gunit/components/percona_telemetry/data_provider-t.cc
@@ -0,0 +1,439 @@
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+#define private public
+#include "components/percona_telemetry/data_provider.h"
+#include "components/percona_telemetry/logger.h"
+#undef private
+
+using ::testing::_;
+using ::testing::A;
+using ::testing::DoAll;
+using ::testing::Eq;
+using ::testing::Invoke;
+using ::testing::Return;
+using ::testing::SetArgPointee;
+using ::testing::WithArg;
+
+SERVICE_TYPE_NO_CONST(mysql_command_factory) command_factory;
+SERVICE_TYPE_NO_CONST(mysql_command_options) command_options;
+SERVICE_TYPE_NO_CONST(mysql_command_query) command_query;
+SERVICE_TYPE_NO_CONST(mysql_command_query_result) command_query_result;
+SERVICE_TYPE_NO_CONST(mysql_command_field_info) command_field_info;
+SERVICE_TYPE_NO_CONST(mysql_command_error_info) command_error_info;
+SERVICE_TYPE_NO_CONST(mysql_command_thread) command_thread;
+
+SERVICE_TYPE_NO_CONST(log_builtins) log_builtins_srv;
+SERVICE_TYPE_NO_CONST(log_builtins_string) log_builtins_string_srv;
+Logger logger(log_builtins_srv, log_builtins_string_srv);
+
+namespace data_provider_unittests {
+
+class MockDataProvider : public DataProvider {
+ public:
+  MockDataProvider()
+      : DataProvider(command_factory, command_options, command_query,
+                     command_query_result, command_field_info,
+                     command_error_info, command_thread, logger) {}
+  MOCK_METHOD(bool, do_query,
+              (const std::string &query, QueryResult *result,
+               unsigned int *err_no, bool suppress_query_error_log),
+              (override));
+};
+
+class DataProviderTest : public ::testing::Test {
+ protected:
+  virtual void SetUp() {}
+  virtual void TearDown() {}
+};
+
+TEST_F(DataProviderTest, Sanity_test) { EXPECT_EQ(1, 1); }
+
+TEST_F(DataProviderTest, get_database_instance_id_test) {
+  MockDataProvider dataProvider;
+  const std::string expected_id("expected_id");
+  Row row{expected_id};
+
+  EXPECT_CALL(dataProvider, do_query(Eq(std::string("SELECT @@server_uuid")),
+                                     A<QueryResult *>(), _, _))
+      .Times(1)
+      .WillOnce(DoAll(
+          WithArg<1>(Invoke([&row](QueryResult *qr) { qr->push_back(row); })),
+          Return(false)));
+
+  EXPECT_EQ(dataProvider.get_database_instance_id(), expected_id);
+}
+
+TEST_F(DataProviderTest, get_database_instance_id_queries_only_once_test) {
+  MockDataProvider dataProvider;
+  const std::string expected_id("expected_id");
+  Row row{expected_id};
+
+  EXPECT_CALL(dataProvider, do_query(_, A<QueryResult *>(), _, _))
+      .Times(1)
+      .WillOnce(DoAll(
+          WithArg<1>(Invoke([&row](QueryResult *qr) { qr->push_back(row); })),
+          Return(false)));
+
+  EXPECT_EQ(dataProvider.get_database_instance_id(), expected_id);
+  // If we call it again, it should use the cached value.
+  // If it calls do_query() again, the above Times(1) expectation will fail
+  EXPECT_EQ(dataProvider.get_database_instance_id(), expected_id);
+}
+
+TEST_F(DataProviderTest, get_database_instance_id_do_query_fails_test) {
+  MockDataProvider dataProvider;
+  const std::string expected_id("expected_id");
+  Row row{expected_id};
+
+  EXPECT_CALL(dataProvider, do_query(_, A<QueryResult *>(), _, _))
+      .Times(2)
+      .WillOnce(Return(true))
+      .WillOnce(DoAll(
+          WithArg<1>(Invoke([&row](QueryResult *qr) { qr->push_back(row); })),
+          Return(false)));
+
+  // do_query fails for the 1st time. An empty string is expected as the result
+  EXPECT_EQ(dataProvider.get_database_instance_id(), "");
+  // If we call it again, it should call do_query() again, as the previous
+  // attempt failed.
+  EXPECT_EQ(dataProvider.get_database_instance_id(), expected_id);
+}
+
+// Helper function for metrics reported as a single value
+static void collect_single_value_common(
+    const std::string &expected_query, const std::string &expected_json_key,
+    std::function<bool(DataProvider *, rapidjson::Document *)> fn,
+    bool should_cache_result = false) {
+  MockDataProvider dataProvider;
+  const std::string expected_val("expected_val");
+  Row row{expected_val};
+
+  EXPECT_CALL(dataProvider,
+              do_query(Eq(expected_query), A<QueryResult *>(), _, _))
+      .Times(should_cache_result ? 1 : 2)
+      .WillRepeatedly(DoAll(
+          WithArg<1>(Invoke([&row](QueryResult *qr) { qr->push_back(row); })),
+          Return(false)));
+
+  // Collect into a fresh JSON document and verify the reported value
+  rapidjson::Document document(rapidjson::Type::kObjectType);
+  EXPECT_FALSE(fn(&dataProvider, &document));
+
+  EXPECT_TRUE(document.HasMember(expected_json_key.c_str()));
+  auto iter = document.FindMember(expected_json_key.c_str());
+  EXPECT_STREQ(iter->value.GetString(), expected_val.c_str());
+
+  // The Times() expectation above takes care of the caching check
+  rapidjson::Document document2(rapidjson::Type::kObjectType);
+  EXPECT_FALSE(fn(&dataProvider, &document2));
+}
+
+// Helper function for metrics reported as an array
+static void collect_array_info_common(
+    const std::string &expected_query, const std::string &expected_json_key,
+    std::function<bool(DataProvider *, rapidjson::Document *)> fn) {
+  MockDataProvider dataProvider;
+  std::string Item_A("Item_A");
+  std::string Item_B("Item_B");
+  Row row1{Item_A};
+  Row row2{Item_B};
+
+  EXPECT_CALL(dataProvider,
+              do_query(Eq(expected_query), A<QueryResult *>(), _, _))
+      .Times(2)
+      .WillRepeatedly(DoAll(WithArg<1>(Invoke([&row1, &row2](QueryResult *qr) {
+                              qr->push_back(row1);
+                              qr->push_back(row2);
+                            })),
+                            Return(false)));
+
+  rapidjson::Document document(rapidjson::Type::kObjectType);
+  EXPECT_FALSE(fn(&dataProvider, &document));
+
+  EXPECT_TRUE(document.HasMember(expected_json_key.c_str()));
+  auto iter = document.FindMember(expected_json_key.c_str());
+  EXPECT_TRUE(iter->value.IsArray());
+  EXPECT_EQ(iter->value.GetArray().Size(), 2);
+
+  bool item_a_found = false;
+  bool item_b_found = false;
+
+  if (Item_A.compare(iter->value.GetArray()[0].GetString()) == 0 ||
+      Item_A.compare(iter->value.GetArray()[1].GetString()) == 0) {
+    item_a_found = true;
+  }
+  if (Item_B.compare(iter->value.GetArray()[0].GetString()) == 0 ||
+      Item_B.compare(iter->value.GetArray()[1].GetString()) == 0) {
+    item_b_found = true;
+  }
+
+  EXPECT_TRUE(item_a_found && item_b_found);
+
+  // Let's do it once again to ensure that values are not cached
+  rapidjson::Document document2(rapidjson::Type::kObjectType);
+  EXPECT_FALSE(fn(&dataProvider, &document2));
+}
+
+TEST_F(DataProviderTest, collect_db_instance_id_info_test) {
+  const std::string query("SELECT @@server_uuid");
+  const std::string expected_json_key("db_instance_id");
+  collect_single_value_common(query, expected_json_key,
+                              &MockDataProvider::collect_db_instance_id_info,
+                              true);
+}
+
+TEST_F(DataProviderTest, collect_product_version_info_test) {
+  MockDataProvider dataProvider;
+  Row row{"1.2.3.4", "This is version comment"};
+
+  EXPECT_CALL(dataProvider,
+              do_query(Eq(std::string("SELECT @@VERSION, @@VERSION_COMMENT")),
+                       A<QueryResult *>(), _, _))
+      .Times(1)
+      .WillOnce(DoAll(
+          WithArg<1>(Invoke([&row](QueryResult *qr) { qr->push_back(row); })),
+          Return(false)));
+
+  rapidjson::Document document(rapidjson::Type::kObjectType);
+  EXPECT_FALSE(dataProvider.collect_product_version_info(&document));
+
+  EXPECT_TRUE(document.HasMember("pillar_version"));
+  auto iter = document.FindMember("pillar_version");
+  EXPECT_STREQ(iter->value.GetString(), "1.2.3.4");
+
+  // The 2nd call will use the cached version; do_query() won't be called
+  rapidjson::Document document2(rapidjson::Type::kObjectType);
+  EXPECT_FALSE(dataProvider.collect_product_version_info(&document2));
+}
+
+TEST_F(DataProviderTest, collect_product_version_info_pro_test) {
+  MockDataProvider dataProvider;
+  Row row{"1.2.3.4", "This is Pro build version comment"};
+
+  EXPECT_CALL(dataProvider,
+              do_query(Eq(std::string("SELECT @@VERSION, @@VERSION_COMMENT")),
+                       A<QueryResult *>(), _, _))
+      .Times(1)
+      .WillOnce(DoAll(
+          WithArg<1>(Invoke([&row](QueryResult *qr) { qr->push_back(row); })),
+          Return(false)));
+
+  rapidjson::Document document(rapidjson::Type::kObjectType);
+  EXPECT_FALSE(dataProvider.collect_product_version_info(&document));
+
+  EXPECT_TRUE(document.HasMember("pillar_version"));
+  auto iter = document.FindMember("pillar_version");
+  EXPECT_STREQ(iter->value.GetString(), "1.2.3.4-pro");
+}
+
+TEST_F(DataProviderTest, collect_plugins_info_test) {
+  const std::string query(
+      std::string("SELECT PLUGIN_NAME FROM information_schema.plugins WHERE "
+                  "PLUGIN_STATUS='ACTIVE'"));
+  const std::string expected_json_key("active_plugins");
+  collect_array_info_common(query, expected_json_key,
+                            &MockDataProvider::collect_plugins_info);
+}
+
+TEST_F(DataProviderTest, collect_components_info_test) {
+  const std::string query(
+      std::string("SELECT component_urn FROM mysql.component"));
+  const std::string expected_json_key("active_components");
+  collect_array_info_common(query, expected_json_key,
+                            &MockDataProvider::collect_components_info);
+}
+
+TEST_F(DataProviderTest, collect_uptime_info_test) {
+  MockDataProvider dataProvider;
+  Row row{"Uptime", "12345"};
+
+  EXPECT_CALL(dataProvider,
+              do_query(Eq(std::string("SHOW GLOBAL STATUS LIKE 'Uptime'")),
+                       A<QueryResult *>(), _, _))
+      .Times(1)
+      .WillOnce(DoAll(
+          WithArg<1>(Invoke([&row](QueryResult *qr) { qr->push_back(row); })),
+          Return(false)));
+
+  rapidjson::Document document(rapidjson::Type::kObjectType);
+  EXPECT_FALSE(dataProvider.collect_uptime_info(&document));
+
+  EXPECT_TRUE(document.HasMember("uptime"));
+  auto iter = document.FindMember("uptime");
+  EXPECT_STREQ(iter->value.GetString(), "12345");
+}
+
+TEST_F(DataProviderTest, collect_dbs_number_info_test) {
+  const std::string query(std::string(
+      "SELECT COUNT(*) FROM information_schema.SCHEMATA WHERE SCHEMA_NAME NOT "
+      "IN('mysql', 'information_schema', 'performance_schema', 'sys')"));
+  const std::string expected_json_key("databases_count");
+  collect_single_value_common(query, expected_json_key,
+                              &MockDataProvider::collect_dbs_number_info);
+}
+
+TEST_F(DataProviderTest, collect_dbs_size_info_test) {
+  const std::string query(std::string(
+      "SELECT IFNULL(ROUND(SUM(data_length + index_length), 1), '0') "
+      "size_MB FROM information_schema.tables WHERE table_schema NOT "
+      "IN('mysql', 'information_schema', 'performance_schema', 'sys')"));
+  const std::string expected_json_key("databases_size");
+  collect_single_value_common(query, expected_json_key,
+                              &MockDataProvider::collect_dbs_size_info);
+}
+
+TEST_F(DataProviderTest, collect_se_usage_info_test) {
+  const std::string query(
+      std::string("SELECT DISTINCT ENGINE FROM information_schema.tables WHERE "
+                  "table_schema NOT IN('mysql', 'information_schema', "
+                  "'performance_schema', 'sys');"));
+  const std::string expected_json_key("se_engines_in_use");
+  collect_array_info_common(query, expected_json_key,
+                            &MockDataProvider::collect_se_usage_info);
+}
+
+TEST_F(DataProviderTest, collect_group_replication_info_test) {
+  MockDataProvider dataProvider;
+
+  const std::string query1(std::string(
+      "SELECT MEMBER_ROLE, @@global.group_replication_group_name, "
+      "@@global.group_replication_single_primary_mode FROM "
+      "performance_schema.replication_group_members WHERE MEMBER_STATE != "
+      "'OFFLINE' AND MEMBER_ID='expected_id'"));
+
+  const std::string role_val("role_val");
+  const std::string db_id_val("db_id_val");
+  const std::string single_primary_mode_val("single_primary_mode_val");
+  Row row1{role_val, db_id_val, single_primary_mode_val};
+
+  EXPECT_CALL(dataProvider, do_query(Eq(query1), A<QueryResult *>(), _, _))
+      .Times(1)
+      .WillOnce(DoAll(WithArg<1>(Invoke([&row1](QueryResult *qr) {
+                        qr->clear();
+                        qr->push_back(row1);
+                      })),
+                      Return(false)));
+
+  const std::string query2(std::string(
+      "SELECT COUNT(*) FROM performance_schema.replication_group_members"));
+
+  const std::string count_val("count_val");
+  Row row2{count_val};
+
+  EXPECT_CALL(dataProvider, do_query(Eq(query2), A<QueryResult *>(), _, _))
+      .Times(1)
+      .WillOnce(DoAll(WithArg<1>(Invoke([&row2](QueryResult *qr) {
+                        qr->clear();
+                        qr->push_back(row2);
+                      })),
+                      Return(false)));
+
+  const std::string expected_id("expected_id");
+  Row row3{expected_id};
+
+  EXPECT_CALL(dataProvider, do_query(Eq(std::string("SELECT @@server_uuid")),
+                                     A<QueryResult *>(), _, _))
+      .Times(1)
+      .WillOnce(DoAll(WithArg<1>(Invoke([&row3](QueryResult *qr) {
+                        qr->clear();
+                        qr->push_back(row3);
+                      })),
+                      Return(false)));
+
+  rapidjson::Document document(rapidjson::Type::kObjectType);
+  EXPECT_FALSE(dataProvider.collect_group_replication_info(&document));
+
+  EXPECT_TRUE(document.HasMember("group_replication_info"));
+  auto gr_iter = document.FindMember("group_replication_info");
+
+  EXPECT_TRUE(gr_iter->value.HasMember("role"));
+  auto iter = gr_iter->value.FindMember("role");
+  EXPECT_STREQ(iter->value.GetString(), role_val.c_str());
+
+  EXPECT_TRUE(gr_iter->value.HasMember("single_primary_mode"));
+  iter = gr_iter->value.FindMember("single_primary_mode");
+  EXPECT_STREQ(iter->value.GetString(), single_primary_mode_val.c_str());
+
+  EXPECT_TRUE(gr_iter->value.HasMember("group_size"));
+  iter = gr_iter->value.FindMember("group_size");
+  EXPECT_STREQ(iter->value.GetString(), count_val.c_str());
+}
+
+TEST_F(DataProviderTest, collect_async_replication_info_test) {
+  MockDataProvider dataProvider;
+
+  EXPECT_CALL(dataProvider, do_query(Eq(std::string("SHOW REPLICAS")),
+                                     A<QueryResult *>(), _, _))
+      .Times(1)
+      .WillOnce(DoAll(WithArg<1>(Invoke([](QueryResult *qr) {
+                        qr->clear();
+                        qr->push_back(Row{"foo"});
+                      })),
+                      Return(false)));
+
+  EXPECT_CALL(dataProvider, do_query(Eq(std::string("SHOW REPLICA STATUS")),
+                                     A<QueryResult *>(), _, _))
+      .Times(1)
+      .WillOnce(DoAll(WithArg<1>(Invoke([](QueryResult *qr) {
+                        qr->clear();
+                        qr->push_back(Row{"bar"});
+                      })),
+                      Return(false)));
+
+  // Check that we try the new naming first
+  EXPECT_CALL(
+      dataProvider,
+      do_query(Eq(std::string("SELECT @@global.rpl_semi_sync_source_enabled")),
+               A<QueryResult *>(), _, _))
+      .Times(1)
+      .WillOnce(Return(true));
+
+  // then return the actual result when the old naming is used
+  EXPECT_CALL(
+      dataProvider,
+      do_query(Eq(std::string("SELECT @@global.rpl_semi_sync_master_enabled")),
+               A<QueryResult *>(), _, _))
+      .Times(1)
+      .WillOnce(DoAll(WithArg<1>(Invoke([](QueryResult *qr) {
+                        qr->clear();
+                        qr->push_back(Row{"1"});
+                      })),
+                      Return(false)));
+
+  // The same, but for the replica
+  EXPECT_CALL(
+      dataProvider,
+      do_query(Eq(std::string("SELECT @@global.rpl_semi_sync_replica_enabled")),
+               A<QueryResult *>(), _, _))
+      .Times(1)
+      .WillOnce(Return(true));
+
+  // then return the actual result when the old naming is used
+  EXPECT_CALL(
+      dataProvider,
+      do_query(Eq(std::string("SELECT @@global.rpl_semi_sync_slave_enabled")),
+               A<QueryResult *>(), _, _))
+      .Times(1)
+      .WillOnce(DoAll(WithArg<1>(Invoke([](QueryResult *qr) {
+                        qr->clear();
+                        qr->push_back(Row{"0"});  // but we are not semi-sync
+                      })),
+                      Return(false)));
+
+  rapidjson::Document document(rapidjson::Type::kObjectType);
+  EXPECT_FALSE(dataProvider.collect_async_replication_info(&document));
+
+  EXPECT_TRUE(document.HasMember("replication_info"));
+  auto ri_iter = document.FindMember("replication_info");
+
+  EXPECT_TRUE(ri_iter->value.HasMember("is_semisync_source"));
+  auto iter = ri_iter->value.FindMember("is_semisync_source");
+  EXPECT_STREQ(iter->value.GetString(), "1");
+
+  EXPECT_TRUE(ri_iter->value.HasMember("is_replica"));
+  iter = ri_iter->value.FindMember("is_replica");
+  EXPECT_STREQ(iter->value.GetString(), "1");
+}
+
+} // namespace data_provider_unittests
\ No newline at end of file
diff --git a/unittest/gunit/components/percona_telemetry/logger.cc b/unittest/gunit/components/percona_telemetry/logger.cc
new file mode 100644
index 000000000000..761e9c444f6c
--- /dev/null
+++ b/unittest/gunit/components/percona_telemetry/logger.cc
@@ -0,0 +1,8 @@
+#include <components/percona_telemetry/logger.h>
+
+Logger::Logger(SERVICE_TYPE(log_builtins) &,
+               SERVICE_TYPE(log_builtins_string) &, loglevel) {}
+
+void Logger::info(const char *, ...) {}
+void Logger::warning(const char *, ...) {}
+void Logger::error(const char *, ...) {}