From 44c25dd78a05378e004fea76a4e2e7009be08992 Mon Sep 17 00:00:00 2001 From: Ryoji Kurosawa Date: Tue, 11 Jun 2024 19:05:19 +0900 Subject: [PATCH] style: avoid using CRLF for text files --- CMakeLists.txt | 228 ++-- bench/README.md | 266 ++-- cmake/CompileOptions.cmake | 102 +- cmake/FindTBB.cmake | 56 +- include/base_node.h | 612 ++++----- include/border_helper.h | 672 ++++----- include/border_node.h | 1208 ++++++++--------- include/common_helper.h | 174 +-- include/cpu.h | 20 +- include/epoch.h | 50 +- include/garbage_collection.h | 318 ++--- include/interior_helper.h | 370 ++--- include/interior_node.h | 744 +++++----- include/kvs.h | 536 ++++---- include/link_or_value.h | 392 +++--- include/manager_thread.h | 200 +-- include/permutation.h | 478 +++---- include/scan_helper.h | 878 ++++++------ include/scheme.h | 422 +++--- include/thread_info.h | 124 +- include/thread_info_table.h | 218 +-- include/version.h | 788 +++++------ .../multi_thread_delete_100_key_test.cpp | 538 ++++---- .../multi_thread_delete_100k_key_test.cpp | 394 +++--- .../multi_thread_delete_10_key_test.cpp | 532 ++++---- .../delete/multi_thread_delete_1_key_test.cpp | 162 +-- .../multi_thread_delete_1m_key_test.cpp | 394 +++--- .../multi_thread_delete_200_key_test.cpp | 432 +++--- .../multi_thread_delete_20_key_test.cpp | 536 ++++---- .../multi_thread_delete_one_border_test.cpp | 710 +++++----- .../multi_thread_delete_two_border_test.cpp | 680 +++++----- .../put/multi_thread_put_100k_key_test.cpp | 322 ++--- .../multi_thread_put_many_interior_test.cpp | 344 ++--- .../put/multi_thread_put_one_border_test.cpp | 336 ++--- ...read_put_one_interior_many_border_test.cpp | 332 ++--- ...hread_put_one_interior_two_border_test.cpp | 336 ++--- .../put/multi_thread_put_two_border_test.cpp | 332 ++--- test/multi_thread/put/readme.md | 40 +- ...i_thread_put_delete_many_interior_test.cpp | 242 ++-- ...ulti_thread_put_delete_one_border_test.cpp | 506 +++---- ...t_delete_one_interior_many_border_test.cpp | 424 +++--- .../multi_thread_put_delete_test.cpp | 334 ++--- ...ulti_thread_put_delete_two_border_test.cpp | 1156 ++++++++-------- test/multi_thread/put_delete/readme.md | 44 +- ...read_put_delete_get_many_interior_test.cpp | 474 +++---- ..._thread_put_delete_get_one_border_test.cpp | 440 +++--- ...lete_get_one_interior_many_border_test.cpp | 272 ++-- ...elete_get_one_interior_two_border_test.cpp | 464 +++---- .../multi_thread_put_delete_get_test.cpp | 296 ++-- ..._thread_put_delete_get_two_border_test.cpp | 468 +++---- test/multi_thread/put_delete_get/readme.md | 44 +- ...ead_put_delete_scan_many_interior_test.cpp | 626 ++++----- ...thread_put_delete_scan_one_border_test.cpp | 560 ++++---- ...read_put_delete_scan_one_interior_test.cpp | 318 ++--- ...lete_scan_one_interior_two_border_test.cpp | 548 ++++---- .../multi_thread_put_delete_scan_test.cpp | 302 ++--- ...thread_put_delete_scan_two_border_test.cpp | 534 ++++---- test/multi_thread/put_delete_scan/readme.md | 56 +- test/multi_thread/readme.md | 24 +- test/put_get/readme.md | 30 +- test/scan/readme.md | 44 +- 61 files changed, 11741 insertions(+), 11741 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 9a32c32..66e746b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,114 +1,114 @@ - -cmake_minimum_required(VERSION 3.10) - -project(yakushima - VERSION 1.0.0 - DESCRIPTION "yakushima" - LANGUAGES CXX) - -list(APPEND CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake") - -option(BUILD_BENCHMARK "Build benchmark programs" ON) -option(BUILD_TESTS "Build test programs" 
ON) -option(BUILD_DOCUMENTS "Build documents" ON) -option(BUILD_ONLY_WD_TEST "Build only working directory about test for dev" OFF) - -option(ENABLE_SANITIZER "enable sanitizer on debug build" ON) -option(ENABLE_UB_SANITIZER "enable undefined behavior sanitizer on debug build" ON) -option(ENABLE_COVERAGE "enable coverage on debug build" OFF) -option(ENABLE_JEMALLOC "use jemalloc for bench/malloc.cc as memory allocator." OFF) -option(PERFORMANCE_TOOLS "Enable tooling to measure performance" OFF) - -find_package(Doxygen REQUIRED) -find_package(Threads REQUIRED) -find_package(gflags REQUIRED) -find_package(glog REQUIRED) -find_package(TBB - COMPONENTS tbb tbbmalloc tbbmalloc_proxy - CONFIG QUIET -) -if (DEFINED TBB_CONFIG) - set(tbb_prefix "TBB::") -else() - find_package(TBB REQUIRED) - set(tbb_prefix "") -endif() -if(PERFORMANCE_TOOLS) - find_package(performance-tools REQUIRED) -endif() -# for logging -find_package(Boost COMPONENTS filesystem) - -include(GNUInstallDirs) -include(CompileOptions) -include(Tests) -include(CMakePackageConfigHelpers) - -# for ctest -if (BUILD_TESTS) - enable_testing() -endif () - -set(export_name "yakushima") -set(package_name "yakushima") - -add_library(yakushima INTERFACE) - -target_include_directories(yakushima - INTERFACE - $ - $ -) - -install( - TARGETS - yakushima - EXPORT - ${export_name} -) - -install( - DIRECTORY include/ - DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/${export_name}/yakushima/include" - COMPONENT Development - FILES_MATCHING PATTERN "*.h" -) - -configure_file( - ${CMAKE_CURRENT_SOURCE_DIR}/cmake/Config.cmake.in - ${CMAKE_CURRENT_BINARY_DIR}/${package_name}-config.cmake - @ONLY -) - -write_basic_package_version_file( - "${CMAKE_CURRENT_BINARY_DIR}/${package_name}-config-version.cmake" - COMPATIBILITY SameMajorVersion -) - -install( - FILES - ${CMAKE_CURRENT_BINARY_DIR}/${package_name}-config.cmake - ${CMAKE_CURRENT_BINARY_DIR}/${package_name}-config-version.cmake - DESTINATION - ${CMAKE_INSTALL_LIBDIR}/cmake/${package_name} -) - -install( - EXPORT ${package_name} - FILE ${package_name}-targets.cmake - DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/${package_name} -) - -add_subdirectory(third_party) - -if (BUILD_BENCHMARK) - add_subdirectory(bench) -endif () - -if (BUILD_TESTS) - add_subdirectory(test) -endif () - -if (BUILD_DOCUMENTS) - add_subdirectory(doxygen) -endif () + +cmake_minimum_required(VERSION 3.10) + +project(yakushima + VERSION 1.0.0 + DESCRIPTION "yakushima" + LANGUAGES CXX) + +list(APPEND CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake") + +option(BUILD_BENCHMARK "Build benchmark programs" ON) +option(BUILD_TESTS "Build test programs" ON) +option(BUILD_DOCUMENTS "Build documents" ON) +option(BUILD_ONLY_WD_TEST "Build only working directory about test for dev" OFF) + +option(ENABLE_SANITIZER "enable sanitizer on debug build" ON) +option(ENABLE_UB_SANITIZER "enable undefined behavior sanitizer on debug build" ON) +option(ENABLE_COVERAGE "enable coverage on debug build" OFF) +option(ENABLE_JEMALLOC "use jemalloc for bench/malloc.cc as memory allocator." 
OFF) +option(PERFORMANCE_TOOLS "Enable tooling to measure performance" OFF) + +find_package(Doxygen REQUIRED) +find_package(Threads REQUIRED) +find_package(gflags REQUIRED) +find_package(glog REQUIRED) +find_package(TBB + COMPONENTS tbb tbbmalloc tbbmalloc_proxy + CONFIG QUIET +) +if (DEFINED TBB_CONFIG) + set(tbb_prefix "TBB::") +else() + find_package(TBB REQUIRED) + set(tbb_prefix "") +endif() +if(PERFORMANCE_TOOLS) + find_package(performance-tools REQUIRED) +endif() +# for logging +find_package(Boost COMPONENTS filesystem) + +include(GNUInstallDirs) +include(CompileOptions) +include(Tests) +include(CMakePackageConfigHelpers) + +# for ctest +if (BUILD_TESTS) + enable_testing() +endif () + +set(export_name "yakushima") +set(package_name "yakushima") + +add_library(yakushima INTERFACE) + +target_include_directories(yakushima + INTERFACE + $ + $ +) + +install( + TARGETS + yakushima + EXPORT + ${export_name} +) + +install( + DIRECTORY include/ + DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/${export_name}/yakushima/include" + COMPONENT Development + FILES_MATCHING PATTERN "*.h" +) + +configure_file( + ${CMAKE_CURRENT_SOURCE_DIR}/cmake/Config.cmake.in + ${CMAKE_CURRENT_BINARY_DIR}/${package_name}-config.cmake + @ONLY +) + +write_basic_package_version_file( + "${CMAKE_CURRENT_BINARY_DIR}/${package_name}-config-version.cmake" + COMPATIBILITY SameMajorVersion +) + +install( + FILES + ${CMAKE_CURRENT_BINARY_DIR}/${package_name}-config.cmake + ${CMAKE_CURRENT_BINARY_DIR}/${package_name}-config-version.cmake + DESTINATION + ${CMAKE_INSTALL_LIBDIR}/cmake/${package_name} +) + +install( + EXPORT ${package_name} + FILE ${package_name}-targets.cmake + DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/${package_name} +) + +add_subdirectory(third_party) + +if (BUILD_BENCHMARK) + add_subdirectory(bench) +endif () + +if (BUILD_TESTS) + add_subdirectory(test) +endif () + +if (BUILD_DOCUMENTS) + add_subdirectory(doxygen) +endif () diff --git a/bench/README.md b/bench/README.md index 6e71be7..36defb3 100644 --- a/bench/README.md +++ b/bench/README.md @@ -1,133 +1,133 @@ -# yakushima benchmark - -Benchmarking of yakushima and malloc. - -## Preparation - -Please do release-build. -If you do benchmarking of yakushima, -you should also build some high performance memory allocator (ex. jemalloc) to avoid contentions against heap memory. - -``` shell -cd [/path/to/project_root] -mkdir build_release -cd build_release -cmake -G Ninja -DCMAKE_BUILD_TYPE=Release .. -ninja -``` - -## Running - -``` shell -cd [/path/to/project_root] -cd build_release/bench -LD_PRELOAD=[/path/to/some memory allocator lib] ./yakushima_bench -LD_PRELOAD=[/path/to/some memory allocator lib] ./malloc_bench -``` - -### note : If you don't use a high performance memory allocator, heap memory contention may result in poor performance - -## `yakushima_bench` : Available options - -* `-duration` - + This is experimental time [seconds]. - + default : `3` -* `-initial_record` - + This is the number of initial key-values for get / remove benchmarking. - + note: remove performance is about larger than 553k ops / thread / sec. So - you should set very large initial_record at remove benchmark. - + default : `1000` -* `-get_skew` - + This is the access zipf skew for get benchmarking. - + default : `0.0` -* `-instruction` - + This is the selection of benchmarking. - + default : `get` - + Please use `get`, `put`, `scan`, or `remove`. -* `-range_of_scan` - + Number of elements of range scan. - + default : `1000` - + Please use `scan`. 
-* `-thread` - + This is the number of worker threads. - + default : `1` -* `-value_size` - + This is the size of value which is of key-value. - + default : `8` - + Please set very small size if you want to check sharpness of parallel logic. Otherwise, if you want to check - - the realistic performance, you should set appropriate size. - -## `malloc_bench` : Available options - -* `-alloc_size` - + Memory allocation size for bench. - + default : `4` -* `-duration` - + This is experimental time [seconds]. - + default : `3` -* `-thread` - + This is the number of worker threads. - + default : `1` - -## `yakushima_bench` : Example - -* Get benchmark. - + duration : default : `3` - + initial_record : `1000000` - + get_skew : default : `0.0` - + instruction : default : `get` - + thread : `200` - + value_size : default : `8` - -``` shell -LD_PRELOAD=[/path/to/some memory allocator lib] ./yakushima_bench -initial_record 1000000 -thread 200 -``` - -* Scan benchmark. - + duration : default : `3` - + initial_record : `1000000` - + get_skew : default : `0.0` - + instruction : `scan` - + range_of_scan : `1000` (Use default value) - + thread : `200` - + value_size : default : `8` - -``` shell -LD_PRELOAD=[/path/to/some memory allocator lib] ./yakushima_bench -initial_record 1000000 -thread 200 -``` - -* Remove benchmark. - + duration : default : `3` - + initial_record : `1000000` - + get_skew : default : `0.0` - + instruction : `remove` - + thread : `200` - + value_size : default : `8` - -``` shell -LD_PRELOAD=[/path/to/some memory allocator lib] ./yakushima_bench -initial_record 1000000 -thread 200 -instruction remove -``` - -* Put benchmark. - + duration : default : `3` - + [unused] initial_record : default - + [unused] get_skew : default - + instruction : `put` - + thread : `200` - + value_size : default : `8` - -``` shell -LD_PRELOAD=[/path/to/some memory allocator lib] ./yakushima_bench -instruction put -thread 200 -``` - -## `malloc_bench` : Example - -* benchmark. - + alloc_size : `1000` - + duration : `10` - + thread : `224` - -``` shell -LD_PRELOAD=[/path/to/some memory allocator lib] ./malloc_bench -alloc_size 1000 -duration 10 -thread 224 -``` +# yakushima benchmark + +Benchmarking of yakushima and malloc. + +## Preparation + +Please do release-build. +If you do benchmarking of yakushima, +you should also build some high performance memory allocator (ex. jemalloc) to avoid contentions against heap memory. + +``` shell +cd [/path/to/project_root] +mkdir build_release +cd build_release +cmake -G Ninja -DCMAKE_BUILD_TYPE=Release .. +ninja +``` + +## Running + +``` shell +cd [/path/to/project_root] +cd build_release/bench +LD_PRELOAD=[/path/to/some memory allocator lib] ./yakushima_bench +LD_PRELOAD=[/path/to/some memory allocator lib] ./malloc_bench +``` + +### note : If you don't use a high performance memory allocator, heap memory contention may result in poor performance + +## `yakushima_bench` : Available options + +* `-duration` + + This is experimental time [seconds]. + + default : `3` +* `-initial_record` + + This is the number of initial key-values for get / remove benchmarking. + + note: remove performance is about larger than 553k ops / thread / sec. So + you should set very large initial_record at remove benchmark. + + default : `1000` +* `-get_skew` + + This is the access zipf skew for get benchmarking. + + default : `0.0` +* `-instruction` + + This is the selection of benchmarking. + + default : `get` + + Please use `get`, `put`, `scan`, or `remove`. 
+* `-range_of_scan` + + Number of elements of range scan. + + default : `1000` + + Please use `scan`. +* `-thread` + + This is the number of worker threads. + + default : `1` +* `-value_size` + + This is the size of value which is of key-value. + + default : `8` + + Please set very small size if you want to check sharpness of parallel logic. Otherwise, if you want to check + + the realistic performance, you should set appropriate size. + +## `malloc_bench` : Available options + +* `-alloc_size` + + Memory allocation size for bench. + + default : `4` +* `-duration` + + This is experimental time [seconds]. + + default : `3` +* `-thread` + + This is the number of worker threads. + + default : `1` + +## `yakushima_bench` : Example + +* Get benchmark. + + duration : default : `3` + + initial_record : `1000000` + + get_skew : default : `0.0` + + instruction : default : `get` + + thread : `200` + + value_size : default : `8` + +``` shell +LD_PRELOAD=[/path/to/some memory allocator lib] ./yakushima_bench -initial_record 1000000 -thread 200 +``` + +* Scan benchmark. + + duration : default : `3` + + initial_record : `1000000` + + get_skew : default : `0.0` + + instruction : `scan` + + range_of_scan : `1000` (Use default value) + + thread : `200` + + value_size : default : `8` + +``` shell +LD_PRELOAD=[/path/to/some memory allocator lib] ./yakushima_bench -initial_record 1000000 -thread 200 +``` + +* Remove benchmark. + + duration : default : `3` + + initial_record : `1000000` + + get_skew : default : `0.0` + + instruction : `remove` + + thread : `200` + + value_size : default : `8` + +``` shell +LD_PRELOAD=[/path/to/some memory allocator lib] ./yakushima_bench -initial_record 1000000 -thread 200 -instruction remove +``` + +* Put benchmark. + + duration : default : `3` + + [unused] initial_record : default + + [unused] get_skew : default + + instruction : `put` + + thread : `200` + + value_size : default : `8` + +``` shell +LD_PRELOAD=[/path/to/some memory allocator lib] ./yakushima_bench -instruction put -thread 200 +``` + +## `malloc_bench` : Example + +* benchmark. 
+ + alloc_size : `1000` + + duration : `10` + + thread : `224` + +``` shell +LD_PRELOAD=[/path/to/some memory allocator lib] ./malloc_bench -alloc_size 1000 -duration 10 -thread 224 +``` diff --git a/cmake/CompileOptions.cmake b/cmake/CompileOptions.cmake index 0921b3c..adca40e 100644 --- a/cmake/CompileOptions.cmake +++ b/cmake/CompileOptions.cmake @@ -1,51 +1,51 @@ -set(CMAKE_CXX_STANDARD 17) -set(CMAKE_CXX_EXTENSIONS OFF) -set(CMAKE_CXX_STANDARD_REQUIRED ON) - -set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fno-omit-frame-pointer") -set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -fno-omit-frame-pointer") - -set(sanitizers "address") -if (ENABLE_UB_SANITIZER) - set(sanitizers "${sanitizers},undefined") -endif () - -if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - #do nothing for gcc -elseif (CMAKE_CXX_COMPILER_ID MATCHES "^(Clang|AppleClang)$") - set(sanitizers "${sanitizers},nullability") -else () - message(FATAL_ERROR "unsupported compiler ${CMAKE_CXX_COMPILER_ID}") -endif () - -if (ENABLE_SANITIZER) - set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fsanitize=${sanitizers}") - set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fno-sanitize=alignment") - set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fno-sanitize-recover=${sanitizers}") -endif () -if (ENABLE_COVERAGE) - set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} --coverage") -endif () - -cmake_host_system_information(RESULT cores QUERY NUMBER_OF_LOGICAL_CORES) - -add_definitions(-D YAKUSHIMA_EPOCH_TIME=40) - -if (NOT DEFINED YAKUSHIMA_MAX_PARALLEL_SESSIONS) - add_definitions(-D YAKUSHIMA_MAX_PARALLEL_SESSIONS=${cores}) - message("YAKUSHIMA_MAX_PARALLEL_SESSIONS is default (${cores})") -else () - add_definitions(-D YAKUSHIMA_MAX_PARALLEL_SESSIONS=${YAKUSHIMA_MAX_PARALLEL_SESSIONS}) - message("YAKUSHIMA_MAX_PARALLEL_SESSIONS is ${YAKUSHIMA_MAX_PARALLEL_SESSIONS}") -endif () - -add_definitions(-D YAKUSHIMA_LINUX) - -if (ENABLE_JEMALLOC) - add_definitions(-D ENABLE_JEMALLOC) -endif () - -function(set_compile_options target_name) - target_compile_options(${target_name} - PRIVATE -Wall -Wextra -Werror) -endfunction(set_compile_options) +set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_EXTENSIONS OFF) +set(CMAKE_CXX_STANDARD_REQUIRED ON) + +set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fno-omit-frame-pointer") +set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -fno-omit-frame-pointer") + +set(sanitizers "address") +if (ENABLE_UB_SANITIZER) + set(sanitizers "${sanitizers},undefined") +endif () + +if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + #do nothing for gcc +elseif (CMAKE_CXX_COMPILER_ID MATCHES "^(Clang|AppleClang)$") + set(sanitizers "${sanitizers},nullability") +else () + message(FATAL_ERROR "unsupported compiler ${CMAKE_CXX_COMPILER_ID}") +endif () + +if (ENABLE_SANITIZER) + set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fsanitize=${sanitizers}") + set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fno-sanitize=alignment") + set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fno-sanitize-recover=${sanitizers}") +endif () +if (ENABLE_COVERAGE) + set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} --coverage") +endif () + +cmake_host_system_information(RESULT cores QUERY NUMBER_OF_LOGICAL_CORES) + +add_definitions(-D YAKUSHIMA_EPOCH_TIME=40) + +if (NOT DEFINED YAKUSHIMA_MAX_PARALLEL_SESSIONS) + add_definitions(-D YAKUSHIMA_MAX_PARALLEL_SESSIONS=${cores}) + message("YAKUSHIMA_MAX_PARALLEL_SESSIONS is default (${cores})") +else () + add_definitions(-D 
YAKUSHIMA_MAX_PARALLEL_SESSIONS=${YAKUSHIMA_MAX_PARALLEL_SESSIONS}) + message("YAKUSHIMA_MAX_PARALLEL_SESSIONS is ${YAKUSHIMA_MAX_PARALLEL_SESSIONS}") +endif () + +add_definitions(-D YAKUSHIMA_LINUX) + +if (ENABLE_JEMALLOC) + add_definitions(-D ENABLE_JEMALLOC) +endif () + +function(set_compile_options target_name) + target_compile_options(${target_name} + PRIVATE -Wall -Wextra -Werror) +endfunction(set_compile_options) diff --git a/cmake/FindTBB.cmake b/cmake/FindTBB.cmake index acc74dd..ee9d123 100644 --- a/cmake/FindTBB.cmake +++ b/cmake/FindTBB.cmake @@ -1,29 +1,29 @@ -if (TARGET tbb) - return() -endif () - -find_library(TBB_LIBRARY_FILE NAMES tbb) -find_path(TBB_INCLUDE_DIR NAMES tbb/tbb.h) -include(FindPackageHandleStandardArgs) -find_package_handle_standard_args(TBB DEFAULT_MSG - TBB_LIBRARY_FILE - TBB_INCLUDE_DIR) - -if (TBB_FOUND) - add_library(tbb SHARED IMPORTED) - set_target_properties(tbb PROPERTIES - IMPORTED_LOCATION "${TBB_LIBRARY_FILE}" - INTERFACE_INCLUDE_DIRECTORIES "${TBB_INCLUDE_DIR}") - - ############################################# - # Work-around for TBB with clang-tidy - # https://www.threadingbuildingblocks.org/docs/help/reference/environment/feature_macros.html - # https://www.threadingbuildingblocks.org/docs/help/reference/appendices/known_issues/linux_os.html - ############################################# - if (NOT TBB_USE_GLIBCXX_VERSION) - string(REPLACE "." "0" TBB_USE_GLIBCXX_VERSION ${CMAKE_CXX_COMPILER_VERSION}) - add_definitions(-DTBB_USE_GLIBCXX_VERSION=${TBB_USE_GLIBCXX_VERSION}) - endif () -endif () -unset(TBB_LIBRARY_FILE CACHE) +if (TARGET tbb) + return() +endif () + +find_library(TBB_LIBRARY_FILE NAMES tbb) +find_path(TBB_INCLUDE_DIR NAMES tbb/tbb.h) +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(TBB DEFAULT_MSG + TBB_LIBRARY_FILE + TBB_INCLUDE_DIR) + +if (TBB_FOUND) + add_library(tbb SHARED IMPORTED) + set_target_properties(tbb PROPERTIES + IMPORTED_LOCATION "${TBB_LIBRARY_FILE}" + INTERFACE_INCLUDE_DIRECTORIES "${TBB_INCLUDE_DIR}") + + ############################################# + # Work-around for TBB with clang-tidy + # https://www.threadingbuildingblocks.org/docs/help/reference/environment/feature_macros.html + # https://www.threadingbuildingblocks.org/docs/help/reference/appendices/known_issues/linux_os.html + ############################################# + if (NOT TBB_USE_GLIBCXX_VERSION) + string(REPLACE "." "0" TBB_USE_GLIBCXX_VERSION ${CMAKE_CXX_COMPILER_VERSION}) + add_definitions(-DTBB_USE_GLIBCXX_VERSION=${TBB_USE_GLIBCXX_VERSION}) + endif () +endif () +unset(TBB_LIBRARY_FILE CACHE) unset(TBB_INCLUDE_DIR CACHE) \ No newline at end of file diff --git a/include/base_node.h b/include/base_node.h index 05cfbdd..ab8c315 100644 --- a/include/base_node.h +++ b/include/base_node.h @@ -1,307 +1,307 @@ -/** - * @file base_node.h - */ - -#pragma once - -#include -#include -#include - -#include "atomic_wrapper.h" -#include "cpu.h" -#include "scheme.h" -#include "version.h" - -#include "glog/logging.h" - -namespace yakushima { - -class base_node { // NOLINT -public: - class key_tuple { - public: - key_tuple() = default; - - key_tuple(key_slice_type slice, key_length_type length) - : key_slice_(slice), key_length_(length) {} - - bool operator<(const key_tuple& r) const { - if (key_length_ == 0) { return true; } - if (r.key_length_ == 0) { return false; } - int ret = memcmp(&key_slice_, &r.key_slice_, - key_length_ < r.key_length_ ? 
key_length_ - : r.key_length_); - if (ret < 0) { return true; } - if (ret == 0) { return key_length_ < r.key_length_; } - return false; - } - - bool operator==(const key_tuple& r) const { - return key_slice_ == r.key_slice_ && key_length_ == r.key_length_; - } - - [[nodiscard]] key_length_type get_key_length() const { - return key_length_; - } - - [[nodiscard]] key_slice_type get_key_slice() const { - return key_slice_; - } - - void set_key_length(const key_length_type length) { - key_length_ = length; - } - - void set_key_slice(const key_slice_type slice) { key_slice_ = slice; } - - private: - key_slice_type key_slice_{0}; - key_length_type key_length_{0}; - }; - - virtual ~base_node() = default; // NOLINT - - void atomic_set_version_root(const bool tf) { - version_.atomic_set_root(tf); - } - - /** - * A virtual function is defined because It wants to distinguish the children class of - * the contents by using polymorphism. So this function is pure virtual function. - */ - virtual status destroy() = 0; - - /** - * @details display function for analysis and debug. - */ - virtual void display() = 0; - - /** - * @brief Collect the memory usage of this partial tree. - * - * @param level the level of this node in the tree. - * @param mem_stat the stack of memory usage for each level. - */ - virtual void mem_usage(std::size_t level, - memory_usage_stack& mem_stat) const = 0; - - void display_base() { - std::cout << "base_node::display_base" << std::endl; - version_.display(); - std::cout << "parent_ : " << get_parent() << std::endl; - for (std::size_t i = 0; i < key_slice_length; ++i) { - std::cout << "key_slice_[" << i - << "] : " << std::to_string(get_key_slice_at(i)) - << std::endl; - std::cout << "key_length_[" << i - << "] : " << std::to_string(get_key_length_at(i)) - << std::endl; - } - } - - [[nodiscard]] const std::array& - get_key_length_ref() const { - return key_length_; - } - - [[nodiscard]] key_length_type - get_key_length_at(const std::size_t index) const { - return key_length_.at(index); - } - - [[nodiscard]] const std::array& - get_key_slice_ref() const { - return key_slice_; - } - - [[nodiscard]] key_slice_type - get_key_slice_at(const std::size_t index) const { - return key_slice_.at(index); - } - - [[maybe_unused]] [[nodiscard]] bool get_lock() const { - return version_.get_locked(); - } - - [[nodiscard]] node_version64_body get_stable_version() const { - return version_.get_stable_version(); - } - - [[nodiscard]] base_node* get_parent() const { - return loadAcquireN(parent_); - } - - [[nodiscard]] node_version64_body get_version() const { - return version_.get_body(); - } - - [[nodiscard]] node_version64* get_version_ptr() { return &version_; } - - [[nodiscard]] bool get_version_border() const { - return version_.get_border(); - } - - [[nodiscard]] bool get_version_deleted() const { - return version_.get_deleted(); - } - - [[nodiscard]] bool get_version_root() const { return version_.get_root(); } - - [[nodiscard]] node_version64_body::vinsert_delete_type - get_version_vinsert_delete() const { - return version_.get_vinsert_delete(); - } - - [[nodiscard]] node_version64_body::vsplit_type get_version_vsplit() const { - return version_.get_vsplit(); - } - - void init_base() { - version_.init(); - set_parent(nullptr); - key_slice_.fill(0); - key_length_.fill(0); - } - - /** - * @details init at @a pos as position. - * @param[in] pos This is a position (index) to be initialized. 
- */ - void init_base(const std::size_t pos) { set_key(pos, 0, 0); } - - [[maybe_unused]] void init_base_member_range(const std::size_t start) { - for (std::size_t i = start; i < key_slice_length; ++i) { - set_key(i, 0, 0); - } - } - - /** - * @brief It locks this node. - * @pre It didn't lock by myself. - * @return void - */ - void lock() { version_.lock(); } - - /** - * @pre This function is called by split. - */ - [[nodiscard]] base_node* lock_parent() const { - base_node* p = get_parent(); - for (;;) { - if (p == nullptr) { return nullptr; } - p->lock(); - base_node* check = get_parent(); - if (p == check) { return p; } - p->version_unlock(); - p = check; - } - } - - [[maybe_unused]] void move_key_to_base_range(base_node* const right, - const std::size_t start) { - for (auto i = start; i < key_slice_length; ++i) { - right->set_key(i - start, get_key_slice_at(i), - get_key_length_at(i)); - set_key(i, 0, 0); - } - } - - void set_key(const std::size_t index, const key_slice_type key_slice, - const key_length_type key_length) { - set_key_slice_at(index, key_slice); - set_key_length_at(index, key_length); - } - - void set_key_length_at(const std::size_t index, - const key_length_type length) { - storeReleaseN(key_length_.at(index), length); - } - - void set_key_slice_at(const std::size_t index, - const key_slice_type key_slice) { - storeReleaseN(key_slice_.at(index), key_slice); - } - - void set_parent(base_node* const new_parent) { - storeReleaseN(parent_, new_parent); - } - - [[maybe_unused]] void - set_version(const node_version64_body nv) { // this function is used. - version_.set_body(nv); - } - - void set_version_border(const bool tf) { version_.atomic_set_border(tf); } - - void set_version_deleted(const bool tf) { version_.atomic_set_deleted(tf); } - - void set_version_inserting_deleting(const bool tf) { - version_.atomic_set_inserting_deleting(tf); - } - - void set_version_root(const bool tf) { version_.atomic_set_root(tf); } - - [[maybe_unused]] void set_version_splitting(const bool tf) { - version_.atomic_set_splitting(tf); - } - - void shift_left_base_member(const std::size_t start_pos, - const std::size_t shift_size) { - memmove(&key_slice_.at(start_pos - shift_size), - &key_slice_.at(start_pos), - sizeof(key_slice_type) * (key_slice_length - start_pos)); - memmove(&key_length_.at(start_pos - shift_size), - &key_length_.at(start_pos), - sizeof(key_length_type) * (key_slice_length - start_pos)); - } - - void shift_right_base_member(const std::size_t start, - const std::size_t shift_size) { - memmove(&key_slice_.at(start + shift_size), &key_slice_.at(start), - sizeof(key_slice_type) * - (key_slice_length - start - shift_size)); - memmove(&key_length_.at(start + shift_size), &key_length_.at(start), - sizeof(key_length_type) * - (key_slice_length - start - shift_size)); - } - - /** - * @brief It unlocks this node. - * @pre This node was already locked. - * @return void - */ - void version_unlock() { version_.unlock(); } - - [[maybe_unused]] void version_atomic_inc_vinsert() { - version_.atomic_inc_vinsert(); - } - -private: - /** - * @attention This variable is read/written concurrently. - */ - std::array key_slice_{}; - /** - * @attention This member is protected by its parent's lock. - * In the original paper, Fig 2 tells that parent's type is interior_node*, - * however, at Fig 1, parent's type is interior_node or border_node both - * interior's view and border's view. - * This variable is read/written concurrently. 
- */ - base_node* parent_{nullptr}; - /** - * @attention This variable is read/written concurrently. - */ - node_version64 version_{}; - /** - * @attention This variable is read/written concurrently. - * @details This is used for distinguishing the identity of link or value and same - * slices. For example, key 1 : \0, key 2 : \0\0, ... , key 8 : \0\0\0\0\0\0\0\0. These - * keys have same key_slices (0) but different key_length. If the length is more than 8, - * the lv points out to next layer. - */ - std::array key_length_{}; -}; - +/** + * @file base_node.h + */ + +#pragma once + +#include +#include +#include + +#include "atomic_wrapper.h" +#include "cpu.h" +#include "scheme.h" +#include "version.h" + +#include "glog/logging.h" + +namespace yakushima { + +class base_node { // NOLINT +public: + class key_tuple { + public: + key_tuple() = default; + + key_tuple(key_slice_type slice, key_length_type length) + : key_slice_(slice), key_length_(length) {} + + bool operator<(const key_tuple& r) const { + if (key_length_ == 0) { return true; } + if (r.key_length_ == 0) { return false; } + int ret = memcmp(&key_slice_, &r.key_slice_, + key_length_ < r.key_length_ ? key_length_ + : r.key_length_); + if (ret < 0) { return true; } + if (ret == 0) { return key_length_ < r.key_length_; } + return false; + } + + bool operator==(const key_tuple& r) const { + return key_slice_ == r.key_slice_ && key_length_ == r.key_length_; + } + + [[nodiscard]] key_length_type get_key_length() const { + return key_length_; + } + + [[nodiscard]] key_slice_type get_key_slice() const { + return key_slice_; + } + + void set_key_length(const key_length_type length) { + key_length_ = length; + } + + void set_key_slice(const key_slice_type slice) { key_slice_ = slice; } + + private: + key_slice_type key_slice_{0}; + key_length_type key_length_{0}; + }; + + virtual ~base_node() = default; // NOLINT + + void atomic_set_version_root(const bool tf) { + version_.atomic_set_root(tf); + } + + /** + * A virtual function is defined because It wants to distinguish the children class of + * the contents by using polymorphism. So this function is pure virtual function. + */ + virtual status destroy() = 0; + + /** + * @details display function for analysis and debug. + */ + virtual void display() = 0; + + /** + * @brief Collect the memory usage of this partial tree. + * + * @param level the level of this node in the tree. + * @param mem_stat the stack of memory usage for each level. 
+ */ + virtual void mem_usage(std::size_t level, + memory_usage_stack& mem_stat) const = 0; + + void display_base() { + std::cout << "base_node::display_base" << std::endl; + version_.display(); + std::cout << "parent_ : " << get_parent() << std::endl; + for (std::size_t i = 0; i < key_slice_length; ++i) { + std::cout << "key_slice_[" << i + << "] : " << std::to_string(get_key_slice_at(i)) + << std::endl; + std::cout << "key_length_[" << i + << "] : " << std::to_string(get_key_length_at(i)) + << std::endl; + } + } + + [[nodiscard]] const std::array& + get_key_length_ref() const { + return key_length_; + } + + [[nodiscard]] key_length_type + get_key_length_at(const std::size_t index) const { + return key_length_.at(index); + } + + [[nodiscard]] const std::array& + get_key_slice_ref() const { + return key_slice_; + } + + [[nodiscard]] key_slice_type + get_key_slice_at(const std::size_t index) const { + return key_slice_.at(index); + } + + [[maybe_unused]] [[nodiscard]] bool get_lock() const { + return version_.get_locked(); + } + + [[nodiscard]] node_version64_body get_stable_version() const { + return version_.get_stable_version(); + } + + [[nodiscard]] base_node* get_parent() const { + return loadAcquireN(parent_); + } + + [[nodiscard]] node_version64_body get_version() const { + return version_.get_body(); + } + + [[nodiscard]] node_version64* get_version_ptr() { return &version_; } + + [[nodiscard]] bool get_version_border() const { + return version_.get_border(); + } + + [[nodiscard]] bool get_version_deleted() const { + return version_.get_deleted(); + } + + [[nodiscard]] bool get_version_root() const { return version_.get_root(); } + + [[nodiscard]] node_version64_body::vinsert_delete_type + get_version_vinsert_delete() const { + return version_.get_vinsert_delete(); + } + + [[nodiscard]] node_version64_body::vsplit_type get_version_vsplit() const { + return version_.get_vsplit(); + } + + void init_base() { + version_.init(); + set_parent(nullptr); + key_slice_.fill(0); + key_length_.fill(0); + } + + /** + * @details init at @a pos as position. + * @param[in] pos This is a position (index) to be initialized. + */ + void init_base(const std::size_t pos) { set_key(pos, 0, 0); } + + [[maybe_unused]] void init_base_member_range(const std::size_t start) { + for (std::size_t i = start; i < key_slice_length; ++i) { + set_key(i, 0, 0); + } + } + + /** + * @brief It locks this node. + * @pre It didn't lock by myself. + * @return void + */ + void lock() { version_.lock(); } + + /** + * @pre This function is called by split. 
+ */ + [[nodiscard]] base_node* lock_parent() const { + base_node* p = get_parent(); + for (;;) { + if (p == nullptr) { return nullptr; } + p->lock(); + base_node* check = get_parent(); + if (p == check) { return p; } + p->version_unlock(); + p = check; + } + } + + [[maybe_unused]] void move_key_to_base_range(base_node* const right, + const std::size_t start) { + for (auto i = start; i < key_slice_length; ++i) { + right->set_key(i - start, get_key_slice_at(i), + get_key_length_at(i)); + set_key(i, 0, 0); + } + } + + void set_key(const std::size_t index, const key_slice_type key_slice, + const key_length_type key_length) { + set_key_slice_at(index, key_slice); + set_key_length_at(index, key_length); + } + + void set_key_length_at(const std::size_t index, + const key_length_type length) { + storeReleaseN(key_length_.at(index), length); + } + + void set_key_slice_at(const std::size_t index, + const key_slice_type key_slice) { + storeReleaseN(key_slice_.at(index), key_slice); + } + + void set_parent(base_node* const new_parent) { + storeReleaseN(parent_, new_parent); + } + + [[maybe_unused]] void + set_version(const node_version64_body nv) { // this function is used. + version_.set_body(nv); + } + + void set_version_border(const bool tf) { version_.atomic_set_border(tf); } + + void set_version_deleted(const bool tf) { version_.atomic_set_deleted(tf); } + + void set_version_inserting_deleting(const bool tf) { + version_.atomic_set_inserting_deleting(tf); + } + + void set_version_root(const bool tf) { version_.atomic_set_root(tf); } + + [[maybe_unused]] void set_version_splitting(const bool tf) { + version_.atomic_set_splitting(tf); + } + + void shift_left_base_member(const std::size_t start_pos, + const std::size_t shift_size) { + memmove(&key_slice_.at(start_pos - shift_size), + &key_slice_.at(start_pos), + sizeof(key_slice_type) * (key_slice_length - start_pos)); + memmove(&key_length_.at(start_pos - shift_size), + &key_length_.at(start_pos), + sizeof(key_length_type) * (key_slice_length - start_pos)); + } + + void shift_right_base_member(const std::size_t start, + const std::size_t shift_size) { + memmove(&key_slice_.at(start + shift_size), &key_slice_.at(start), + sizeof(key_slice_type) * + (key_slice_length - start - shift_size)); + memmove(&key_length_.at(start + shift_size), &key_length_.at(start), + sizeof(key_length_type) * + (key_slice_length - start - shift_size)); + } + + /** + * @brief It unlocks this node. + * @pre This node was already locked. + * @return void + */ + void version_unlock() { version_.unlock(); } + + [[maybe_unused]] void version_atomic_inc_vinsert() { + version_.atomic_inc_vinsert(); + } + +private: + /** + * @attention This variable is read/written concurrently. + */ + std::array key_slice_{}; + /** + * @attention This member is protected by its parent's lock. + * In the original paper, Fig 2 tells that parent's type is interior_node*, + * however, at Fig 1, parent's type is interior_node or border_node both + * interior's view and border's view. + * This variable is read/written concurrently. + */ + base_node* parent_{nullptr}; + /** + * @attention This variable is read/written concurrently. + */ + node_version64 version_{}; + /** + * @attention This variable is read/written concurrently. + * @details This is used for distinguishing the identity of link or value and same + * slices. For example, key 1 : \0, key 2 : \0\0, ... , key 8 : \0\0\0\0\0\0\0\0. These + * keys have same key_slices (0) but different key_length. 
If the length is more than 8, + * the lv points out to next layer. + */ + std::array key_length_{}; +}; + } // namespace yakushima \ No newline at end of file diff --git a/include/border_helper.h b/include/border_helper.h index 0a734d6..d57fb12 100644 --- a/include/border_helper.h +++ b/include/border_helper.h @@ -1,336 +1,336 @@ -/** - * @file border_helper.h - * @details Declare functions that could not be member functions of the class for - * dependency resolution. - */ - -#pragma once - -#include -#include -#include -#include - -#include "base_node.h" -#include "interior_helper.h" -#include "link_or_value.h" -#include "log.h" -#include "tree_instance.h" - -#include "glog/logging.h" - -namespace yakushima { - -/** - * forward declaration. - */ -template -static void interior_split(tree_instance* ti, interior_node* interior, - base_node* child_node, - key_slice_type pivot_slice, // NOLINT - key_length_type pivot_length); - -/** - * @pre It already locked this node. - * @details border node split. - * @param[in] border - * @param[in] key_view - * @param[in] value_ptr - * @param[out] created_value_ptr The pointer to created value in yakushima. - * @param[in] value_length - * @param[in] value_align - * @param[out] inserted_node_version_ptr - * @param[in] rank - */ -template -static void -border_split(tree_instance* ti, border_node* border, std::string_view key_view, - void* value_ptr, - void** created_value_ptr, // NOLINT - value_length_type value_length, value_align_type value_align, - node_version64** inserted_node_version_ptr, std::size_t rank); - -/** - * Start impl. - */ - -/** - * @details This may be called at split function. - * It creates new interior node as parents of this border_node and @a higher_border node. - * After that, it inserts based on @a key_view, @a value_ptr, ... (args). - * @param[in] left - * @param[in] right This is a higher border_node as result of split for this node. - * @param[out] lock_list This is unused because the border nodes is not-full as result of - * split. - * @param[out] new_parent This is a new parents. - * The insert_lv function needs lock_list as an argument, so it is passed in spite of not - * using. - */ - -template -static void create_interior_parent_of_border(border_node* const left, - border_node* const right, - interior_node** const new_parent) { - left->set_version_root(false); - right->set_version_root(false); - /** - * create a new interior node p with children n, n' - */ - auto ni = new interior_node(); // NOLINT - ni->init_interior(); - ni->set_version_root(true); - ni->set_version_inserting_deleting(true); - ni->lock(); - /** - * process base node members - */ - ni->set_key(0, right->get_key_slice_at(0), right->get_key_length_at(0)); - /** - * process interior node members - */ - ni->set_child_at(0, left); - ni->set_child_at(1, right); - ni->n_keys_increment(); - /** - * release interior parent to global. - */ - left->set_parent(ni); - right->set_parent(ni); - *new_parent = ni; -} - -/** - * @pre It already locked @a border. - * @details This function is also called when creating a new layer when 8 bytes-key - * collides at a border node. At that time, the original value is moved to the new layer. - * This function does not use a template declaration because its pointer is retrieved with - * void *. 
- * @param[in] border - * @param[in] key_view - * @param[in] new_value - * @param[out] created_value_ptr - * @param[out] inserted_node_version_ptr - * @param[in] rank - */ - -template -static void insert_lv(tree_instance* ti, border_node* const border, - std::string_view key_view, value* new_value, - void** const created_value_ptr, - node_version64** inserted_node_version_ptr, - std::size_t rank) { - border->set_version_inserting_deleting(true); - std::size_t cnk = border->get_permutation_cnk(); - if (cnk == 0) { - // this must be root && border node - if (!border->get_version_root()) { - LOG(ERROR) << log_location_prefix - << "programming error. ti->load_root_ptr(): " - << ti->load_root_ptr() - << ", this border node: " << border; - } - border->set_version_deleted(false); - } - if (cnk == key_slice_length) { - /** - * It needs splitting - */ - border_split( - ti, border, key_view, new_value, created_value_ptr, - inserted_node_version_ptr, rank); - } else { - /** - * Insert into this nodes. - */ - if (inserted_node_version_ptr != nullptr) { - *inserted_node_version_ptr = border->get_version_ptr(); - } - border->insert_lv_at(border->get_permutation().get_empty_slot(), - key_view, new_value, created_value_ptr, rank); - border->version_unlock(); - } -} - -template -static void border_split(tree_instance* ti, border_node* const border, - std::string_view key_view, value* new_value, - void** const created_value_ptr, - node_version64** inserted_node_version_ptr, - [[maybe_unused]] std::size_t rank) { - // update inserted_node_version_ptr - if (inserted_node_version_ptr != nullptr) { - *inserted_node_version_ptr = border->get_version_ptr(); - } - - border->set_version_splitting(true); - border_node* new_border = new border_node(); // NOLINT - new_border->init_border(); - new_border->set_next(border->get_next()); - new_border->set_prev(border); - - /** - * new border is initially locked - */ - new_border->set_version(border->get_version()); - border->set_next(new_border); - if (new_border->get_next() != nullptr) { - /** - * The prev of border next can be updated if it posesses the border lock. - */ - new_border->get_next()->set_prev(new_border); - } - /** - * split - * If the fan-out is odd, keep more than half to improve the performance. - */ - std::size_t remaining_size = key_slice_length / 2 + 1; - - std::size_t index_ctr(0); - for (std::size_t i = remaining_size; i < key_slice_length; ++i) { - /** - * move base_node members to new nodes - */ - std::size_t src_index{border->get_permutation().get_index_of_rank( - remaining_size)}; // this is tricky. - new_border->set_key_slice_at(index_ctr, - border->get_key_slice_at(src_index)); - new_border->set_key_length_at(index_ctr, - border->get_key_length_at(src_index)); - new_border->set_lv(index_ctr, border->get_lv_at(src_index)); - base_node* nl = border->get_lv_at(src_index)->get_next_layer(); - if (nl != nullptr) { nl->set_parent(new_border); } - ++index_ctr; - border->init_border(src_index); - border->get_permutation().delete_rank( - remaining_size); // this is tricky. - border->get_permutation().dec_key_num(); - } - /** - * fix permutations - */ - new_border->get_permutation().split_dest(key_slice_length - remaining_size); - - /** - * The insert process we wanted to do before we split. - * key_slice must be initialized to 0. 
- */ - key_slice_type key_slice{0}; - key_length_type key_length{0}; // NOLINT - if (key_view.size() > sizeof(key_slice_type)) { - memcpy(&key_slice, key_view.data(), sizeof(key_slice_type)); - key_length = sizeof(key_slice_type) + 1; - } else { - if (!key_view.empty()) { - memcpy(&key_slice, key_view.data(), key_view.size()); - } - key_length = static_cast(key_view.size()); - } - int ret_memcmp{ - memcmp(&key_slice, &new_border->get_key_slice_ref().at(0), - (key_length > sizeof(key_slice_type) && - new_border->get_key_length_at(0) > sizeof(key_slice_type)) - ? sizeof(key_slice_type) - : key_length < new_border->get_key_length_at(0) - ? key_length - : new_border->get_key_length_at(0))}; - if (key_length == 0 || // definitely - ret_memcmp < 0 || // smaller than front of new border node - (ret_memcmp == 0 && key_length < new_border->get_key_length_at(0)) || - // same string to the front of new border node and smaller string. - (ret_memcmp == 0 && rank < remaining_size) - // null string can't compare but rank is smaller then that. - ) { - /** - * insert to lower border node. - */ - border->insert_lv_at(border->get_permutation().get_empty_slot(), - key_view, new_value, created_value_ptr, rank); - } else { - /** - * insert to higher border node. - */ - new_border->insert_lv_at(new_border->get_permutation().get_empty_slot(), - key_view, new_value, created_value_ptr, - rank - remaining_size); - } - - base_node* p = border->lock_parent(); - if (p == nullptr) { -#ifndef NDEBUG - if (ti->load_root_ptr() != border) { - LOG(ERROR) << log_location_prefix; - } -#endif - /** - * create interior as parents and insert k. - * The disappearance of the parent node may have made this node the root node in - * parallel. It cares in below function. - */ - create_interior_parent_of_border( - border, new_border, - reinterpret_cast(&p)); // NOLINT - border->version_unlock(); - new_border->version_unlock(); - ti->store_root_ptr(p); - p->version_unlock(); - return; - } - -#ifndef NDEBUG - if (p != border->get_parent()) { LOG(ERROR) << log_location_prefix; } -#endif - - if (p->get_version_border()) { - /** - * parent is border node. - * The old border node which is before this split was root of the some layer. - * So it creates new interior nodes in the layer and insert its interior pointer - * to the (parent) border node. - * attention : The parent border node had this border node as one of the next_layer - * before the split. The pointer is exchanged for a new parent interior node. - */ - auto* pb = dynamic_cast(p); - interior_node* pi{}; - create_interior_parent_of_border( - border, new_border, &pi); - border->version_unlock(); - new_border->version_unlock(); - pi->set_parent(p); - pi->version_unlock(); - link_or_value* lv = pb->get_lv(border); - lv->set_next_layer(pi); - p->version_unlock(); - return; - } - /** - * parent is interior node. - */ -#ifndef NDEBUG - if (p->get_version_deleted()) { LOG(ERROR) << log_location_prefix; } -#endif - auto* pi = dynamic_cast(p); - border->set_version_root(false); - new_border->set_version_root(false); - border->version_unlock(); - new_border->version_unlock(); - if (pi->get_n_keys() == key_slice_length) { - /** - * interior full case, it splits and inserts. - */ - interior_split( - ti, pi, reinterpret_cast(new_border), // NOLINT - std::make_pair(new_border->get_key_slice_at(0), - new_border->get_key_length_at(0))); - return; - } - /** - * interior not-full case, it inserts. 
- */ - new_border->set_parent(pi); - pi->template insert( - new_border, std::make_pair(new_border->get_key_slice_at(0), - new_border->get_key_length_at(0))); - pi->version_unlock(); -} - -} // namespace yakushima +/** + * @file border_helper.h + * @details Declare functions that could not be member functions of the class for + * dependency resolution. + */ + +#pragma once + +#include +#include +#include +#include + +#include "base_node.h" +#include "interior_helper.h" +#include "link_or_value.h" +#include "log.h" +#include "tree_instance.h" + +#include "glog/logging.h" + +namespace yakushima { + +/** + * forward declaration. + */ +template +static void interior_split(tree_instance* ti, interior_node* interior, + base_node* child_node, + key_slice_type pivot_slice, // NOLINT + key_length_type pivot_length); + +/** + * @pre It already locked this node. + * @details border node split. + * @param[in] border + * @param[in] key_view + * @param[in] value_ptr + * @param[out] created_value_ptr The pointer to created value in yakushima. + * @param[in] value_length + * @param[in] value_align + * @param[out] inserted_node_version_ptr + * @param[in] rank + */ +template +static void +border_split(tree_instance* ti, border_node* border, std::string_view key_view, + void* value_ptr, + void** created_value_ptr, // NOLINT + value_length_type value_length, value_align_type value_align, + node_version64** inserted_node_version_ptr, std::size_t rank); + +/** + * Start impl. + */ + +/** + * @details This may be called at split function. + * It creates new interior node as parents of this border_node and @a higher_border node. + * After that, it inserts based on @a key_view, @a value_ptr, ... (args). + * @param[in] left + * @param[in] right This is a higher border_node as result of split for this node. + * @param[out] lock_list This is unused because the border nodes is not-full as result of + * split. + * @param[out] new_parent This is a new parents. + * The insert_lv function needs lock_list as an argument, so it is passed in spite of not + * using. + */ + +template +static void create_interior_parent_of_border(border_node* const left, + border_node* const right, + interior_node** const new_parent) { + left->set_version_root(false); + right->set_version_root(false); + /** + * create a new interior node p with children n, n' + */ + auto ni = new interior_node(); // NOLINT + ni->init_interior(); + ni->set_version_root(true); + ni->set_version_inserting_deleting(true); + ni->lock(); + /** + * process base node members + */ + ni->set_key(0, right->get_key_slice_at(0), right->get_key_length_at(0)); + /** + * process interior node members + */ + ni->set_child_at(0, left); + ni->set_child_at(1, right); + ni->n_keys_increment(); + /** + * release interior parent to global. + */ + left->set_parent(ni); + right->set_parent(ni); + *new_parent = ni; +} + +/** + * @pre It already locked @a border. + * @details This function is also called when creating a new layer when 8 bytes-key + * collides at a border node. At that time, the original value is moved to the new layer. + * This function does not use a template declaration because its pointer is retrieved with + * void *. 
+ * @param[in] border + * @param[in] key_view + * @param[in] new_value + * @param[out] created_value_ptr + * @param[out] inserted_node_version_ptr + * @param[in] rank + */ + +template +static void insert_lv(tree_instance* ti, border_node* const border, + std::string_view key_view, value* new_value, + void** const created_value_ptr, + node_version64** inserted_node_version_ptr, + std::size_t rank) { + border->set_version_inserting_deleting(true); + std::size_t cnk = border->get_permutation_cnk(); + if (cnk == 0) { + // this must be root && border node + if (!border->get_version_root()) { + LOG(ERROR) << log_location_prefix + << "programming error. ti->load_root_ptr(): " + << ti->load_root_ptr() + << ", this border node: " << border; + } + border->set_version_deleted(false); + } + if (cnk == key_slice_length) { + /** + * It needs splitting + */ + border_split( + ti, border, key_view, new_value, created_value_ptr, + inserted_node_version_ptr, rank); + } else { + /** + * Insert into this nodes. + */ + if (inserted_node_version_ptr != nullptr) { + *inserted_node_version_ptr = border->get_version_ptr(); + } + border->insert_lv_at(border->get_permutation().get_empty_slot(), + key_view, new_value, created_value_ptr, rank); + border->version_unlock(); + } +} + +template +static void border_split(tree_instance* ti, border_node* const border, + std::string_view key_view, value* new_value, + void** const created_value_ptr, + node_version64** inserted_node_version_ptr, + [[maybe_unused]] std::size_t rank) { + // update inserted_node_version_ptr + if (inserted_node_version_ptr != nullptr) { + *inserted_node_version_ptr = border->get_version_ptr(); + } + + border->set_version_splitting(true); + border_node* new_border = new border_node(); // NOLINT + new_border->init_border(); + new_border->set_next(border->get_next()); + new_border->set_prev(border); + + /** + * new border is initially locked + */ + new_border->set_version(border->get_version()); + border->set_next(new_border); + if (new_border->get_next() != nullptr) { + /** + * The prev of border next can be updated if it posesses the border lock. + */ + new_border->get_next()->set_prev(new_border); + } + /** + * split + * If the fan-out is odd, keep more than half to improve the performance. + */ + std::size_t remaining_size = key_slice_length / 2 + 1; + + std::size_t index_ctr(0); + for (std::size_t i = remaining_size; i < key_slice_length; ++i) { + /** + * move base_node members to new nodes + */ + std::size_t src_index{border->get_permutation().get_index_of_rank( + remaining_size)}; // this is tricky. + new_border->set_key_slice_at(index_ctr, + border->get_key_slice_at(src_index)); + new_border->set_key_length_at(index_ctr, + border->get_key_length_at(src_index)); + new_border->set_lv(index_ctr, border->get_lv_at(src_index)); + base_node* nl = border->get_lv_at(src_index)->get_next_layer(); + if (nl != nullptr) { nl->set_parent(new_border); } + ++index_ctr; + border->init_border(src_index); + border->get_permutation().delete_rank( + remaining_size); // this is tricky. + border->get_permutation().dec_key_num(); + } + /** + * fix permutations + */ + new_border->get_permutation().split_dest(key_slice_length - remaining_size); + + /** + * The insert process we wanted to do before we split. + * key_slice must be initialized to 0. 
+ */ + key_slice_type key_slice{0}; + key_length_type key_length{0}; // NOLINT + if (key_view.size() > sizeof(key_slice_type)) { + memcpy(&key_slice, key_view.data(), sizeof(key_slice_type)); + key_length = sizeof(key_slice_type) + 1; + } else { + if (!key_view.empty()) { + memcpy(&key_slice, key_view.data(), key_view.size()); + } + key_length = static_cast(key_view.size()); + } + int ret_memcmp{ + memcmp(&key_slice, &new_border->get_key_slice_ref().at(0), + (key_length > sizeof(key_slice_type) && + new_border->get_key_length_at(0) > sizeof(key_slice_type)) + ? sizeof(key_slice_type) + : key_length < new_border->get_key_length_at(0) + ? key_length + : new_border->get_key_length_at(0))}; + if (key_length == 0 || // definitely + ret_memcmp < 0 || // smaller than front of new border node + (ret_memcmp == 0 && key_length < new_border->get_key_length_at(0)) || + // same string to the front of new border node and smaller string. + (ret_memcmp == 0 && rank < remaining_size) + // null string can't compare but rank is smaller then that. + ) { + /** + * insert to lower border node. + */ + border->insert_lv_at(border->get_permutation().get_empty_slot(), + key_view, new_value, created_value_ptr, rank); + } else { + /** + * insert to higher border node. + */ + new_border->insert_lv_at(new_border->get_permutation().get_empty_slot(), + key_view, new_value, created_value_ptr, + rank - remaining_size); + } + + base_node* p = border->lock_parent(); + if (p == nullptr) { +#ifndef NDEBUG + if (ti->load_root_ptr() != border) { + LOG(ERROR) << log_location_prefix; + } +#endif + /** + * create interior as parents and insert k. + * The disappearance of the parent node may have made this node the root node in + * parallel. It cares in below function. + */ + create_interior_parent_of_border( + border, new_border, + reinterpret_cast(&p)); // NOLINT + border->version_unlock(); + new_border->version_unlock(); + ti->store_root_ptr(p); + p->version_unlock(); + return; + } + +#ifndef NDEBUG + if (p != border->get_parent()) { LOG(ERROR) << log_location_prefix; } +#endif + + if (p->get_version_border()) { + /** + * parent is border node. + * The old border node which is before this split was root of the some layer. + * So it creates new interior nodes in the layer and insert its interior pointer + * to the (parent) border node. + * attention : The parent border node had this border node as one of the next_layer + * before the split. The pointer is exchanged for a new parent interior node. + */ + auto* pb = dynamic_cast(p); + interior_node* pi{}; + create_interior_parent_of_border( + border, new_border, &pi); + border->version_unlock(); + new_border->version_unlock(); + pi->set_parent(p); + pi->version_unlock(); + link_or_value* lv = pb->get_lv(border); + lv->set_next_layer(pi); + p->version_unlock(); + return; + } + /** + * parent is interior node. + */ +#ifndef NDEBUG + if (p->get_version_deleted()) { LOG(ERROR) << log_location_prefix; } +#endif + auto* pi = dynamic_cast(p); + border->set_version_root(false); + new_border->set_version_root(false); + border->version_unlock(); + new_border->version_unlock(); + if (pi->get_n_keys() == key_slice_length) { + /** + * interior full case, it splits and inserts. + */ + interior_split( + ti, pi, reinterpret_cast(new_border), // NOLINT + std::make_pair(new_border->get_key_slice_at(0), + new_border->get_key_length_at(0))); + return; + } + /** + * interior not-full case, it inserts. 
+ */ + new_border->set_parent(pi); + pi->template insert( + new_border, std::make_pair(new_border->get_key_slice_at(0), + new_border->get_key_length_at(0))); + pi->version_unlock(); +} + +} // namespace yakushima diff --git a/include/border_node.h b/include/border_node.h index c428472..24448ff 100644 --- a/include/border_node.h +++ b/include/border_node.h @@ -1,604 +1,604 @@ -/** - * @file border_node.h - */ - -#pragma once - -#include -#include -#include -#include - -#include "atomic_wrapper.h" -#include "garbage_collection.h" -#include "interior_node.h" -#include "link_or_value.h" -#include "permutation.h" -#include "thread_info.h" - -#include "glog/logging.h" - -namespace yakushima { - -using std::cout; -using std::endl; - -class alignas(CACHE_LINE_SIZE) border_node final : public base_node { // NOLINT -public: - ~border_node() override {} // NOLINT - - /** - * @pre This function is called by delete_of function. - * @details delete the key-value corresponding to @a pos as position. - * @param[in] rank - * @param[in] pos The position of being deleted. - * @param[in] target_is_value - */ - void delete_at(Token token, const std::size_t rank, const std::size_t pos, - const bool target_is_value) { - auto* ti = reinterpret_cast(token); // NOLINT - if (target_is_value) { - value* vp = lv_.at(pos).get_value(); - if (value::is_value_ptr(vp)) { - // it is value ptr (not inline value) - value::remove_delete_flag(vp); - auto [v_ptr, v_len, v_align] = value::get_gc_info(vp); - ti->get_gc_info().push_value_container( - {ti->get_begin_epoch(), v_ptr, v_len, v_align}); - /** - * clear for preventing heap use after free by reference of - * need_delete - */ - lv_.at(pos).init_lv(); - } - } - - // rearrange permutation - permutation_.delete_rank(rank); - permutation_.dec_key_num(); - } - - /** - * @pre There is a lv which points to @a child. - * @details Delete operation on the element matching @a child. - * @param[in] token - * @param[in] child - */ - void delete_of(Token token, tree_instance* ti, base_node* const child) { - std::size_t cnk = get_permutation_cnk(); - for (std::size_t i = 0; i < cnk; ++i) { - std::size_t index = permutation_.get_index_of_rank(i); - if (child == lv_.at(index).get_next_layer()) { - delete_of(token, ti, get_key_slice_at(index), - get_key_length_at(index)); // NOLINT - return; - } - } - // unreachable points. - LOG(ERROR) << log_location_prefix; - } - - /** - * @brief release all heap objects and clean up. - */ - status destroy() override { - std::size_t cnk = get_permutation_cnk(); - std::vector th_vc; - for (std::size_t i = 0; i < cnk; ++i) { - // living link or value - std::size_t index = permutation_.get_index_of_rank(i); - // cleanup process - auto process = [this](std::size_t i) { lv_.at(i).destroy(); }; - if (lv_.at(index).get_next_layer() != nullptr) { - // has some layer, considering parallel - if (destroy_manager::check_room()) { - th_vc.emplace_back(process, index); - } else { - process(index); - } - } else { - // not some layer, not considering parallel - process(index); - } - } - for (auto&& th : th_vc) { - th.join(); - destroy_manager::return_room(); - } - - return status::OK_DESTROY_BORDER; - } - - /** - * @pre - * This border node was already locked by caller. - * This function is called by remove func. - * The key-value corresponding to @a key_slice and @a key_length exists in this node. - * @details delete value corresponding to @a key_slice and @a key_length - * @param[in] token - * @param[in] key_slice The key slice of key-value. 
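border_split and the border_node code that follows lean on the permutation object to translate a key's rank (its sorted position) into the physical slot where the key slice and link_or_value live, so slots never move on insert or delete. The vector-based stand-in below only sketches those semantics under that assumption; the real permutation is a packed word updated atomically and is defined in permutation.h, not reproduced here.

#include <cstddef>
#include <vector>

// Illustrative stand-in for the rank -> slot indirection used above.
class permutation_sketch {
public:
    // slot index of the i-th smallest key
    std::size_t get_index_of_rank(std::size_t rank) const { return order_.at(rank); }

    // register slot `index` as holding the key with rank `rank`
    void insert(std::size_t rank, std::size_t index) {
        order_.insert(order_.begin() + static_cast<std::ptrdiff_t>(rank), index);
    }

    // drop the key with rank `rank`; the slot itself stays untouched for reuse
    void delete_rank(std::size_t rank) {
        order_.erase(order_.begin() + static_cast<std::ptrdiff_t>(rank));
    }

    std::size_t get_cnk() const { return order_.size(); } // number of live keys

private:
    std::vector<std::size_t> order_; // order_[rank] == slot index
};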
- * @param[in] key_slice_length The @a key_slice length. - * @param[in] target_is_value - */ - template - void delete_of(Token token, tree_instance* ti, - const key_slice_type key_slice, - const key_length_type key_slice_length) { - // past: delete is treated to increment vinsert counter. - //set_version_inserting_deleting(true); - /** - * find position. - */ - std::size_t cnk = get_permutation_cnk(); - for (std::size_t i = 0; i < cnk; ++i) { - std::size_t index = permutation_.get_index_of_rank(i); - if ((key_slice_length == 0 && get_key_length_at(index) == 0) || - (key_slice_length == get_key_length_at(index) && - memcmp(&key_slice, &get_key_slice_ref().at(index), - sizeof(key_slice_type)) == 0)) { - delete_at(token, i, index, target_is_value); - if (cnk == 1) { // attention : this cnk is before delete_at; - set_version_deleted(true); - if (ti->load_root_ptr() != this) { - // root && deleted node is treated as special. This isn't. - set_version_root(false); - } - - /** - * After this delete operation, this border node is empty. - */ - retry_prev_lock: - border_node* prev = get_prev(); - if (prev != nullptr) { - prev->lock(); - if (prev->get_version_deleted() || prev != get_prev()) { - prev->version_unlock(); - goto retry_prev_lock; // NOLINT - } else { - prev->set_next(get_next()); - if (get_next() != nullptr) { - get_next()->set_prev(prev); - } - prev->version_unlock(); - } - } else if (get_next() != nullptr) { - get_next()->set_prev(nullptr); - } - /** - * lock order is next to prev and lower to higher. - */ - base_node* pn = lock_parent(); - if (pn == nullptr) { - //ti->store_root_ptr(nullptr); - // remain empty deleted root node. - version_unlock(); - return; - } - /** - * This node has parent node, so this must not be root. - * note: Including relation of parent-child is - * border-border. - */ - set_version_root(false); - version_unlock(); - if (pn->get_version_border()) { - dynamic_cast(pn)->delete_of(token, ti, - this); - } else { - dynamic_cast(pn) - ->delete_of(token, ti, this); - } - auto* tinfo = - reinterpret_cast(token); // NOLINT - tinfo->get_gc_info().push_node_container( - {tinfo->get_begin_epoch(), this}); - } else { - version_unlock(); - } - return; - } - } - /** - * unreachable. - */ - LOG(ERROR) << log_location_prefix - << ", deleted: " << get_version_deleted() - << ", is root: " << get_version_root(); - } - - /** - * @details display function for analysis and debug. - */ - void display() override { - display_base(); - cout << "border_node::display" << endl; - permutation_.display(); - for (std::size_t i = 0; i < get_permutation_cnk(); ++i) { - lv_.at(i).display(); - } - cout << "next : " << get_next() << endl; - } - - /** - * @brief Collect the memory usage of this partial tree. - * - * @param level the level of this node in the tree. - * @param mem_stat the stack of memory usage for each level. - */ - void mem_usage(std::size_t level, - memory_usage_stack& mem_stat) const override { - if (mem_stat.size() <= level) { mem_stat.emplace_back(0, 0, 0); } - auto& [node_num, used, reserved] = mem_stat.at(level); - - const std::size_t cnk = get_permutation_cnk(); - ++node_num; - reserved += sizeof(border_node); - used += sizeof(border_node) - - ((key_slice_length - cnk) * sizeof(link_or_value)); - - for (std::size_t i = 0; i < cnk; ++i) { - std::size_t index = permutation_.get_index_of_rank(i); - lv_.at(index).mem_usage(level, mem_stat); - } - } - - /** - * @post It is necessary for the caller to verify whether the extraction is appropriate. 
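When the deletion above empties a border node, the node is unlinked from its doubly linked sibling list: the predecessor is locked first, and the lock is retried whenever the predecessor was deleted or replaced between the read and the acquisition (the retry_prev_lock loop). A minimal sketch of that retry rule follows; Node and std::mutex are stand-ins for border_node and its version lock, so this is only an illustration, not the library's code.

#include <atomic>
#include <mutex>

struct Node {
    std::mutex mtx;
    std::atomic<bool> deleted{false};
    std::atomic<Node*> prev{nullptr};
    std::atomic<Node*> next{nullptr};
};

// Unlink `self` from its siblings, re-validating the predecessor under its lock.
void unlink_from_siblings(Node* self) {
    for (;;) {
        Node* prev = self->prev.load(std::memory_order_acquire);
        if (prev == nullptr) {
            // no predecessor: only fix the successor's back pointer
            if (Node* next = self->next.load(std::memory_order_acquire)) {
                next->prev.store(nullptr, std::memory_order_release);
            }
            return;
        }
        prev->mtx.lock();
        // retry if prev died or the link moved before we got the lock
        if (prev->deleted.load(std::memory_order_acquire) ||
            prev != self->prev.load(std::memory_order_acquire)) {
            prev->mtx.unlock();
            continue;
        }
        Node* next = self->next.load(std::memory_order_acquire);
        prev->next.store(next, std::memory_order_release);
        if (next != nullptr) { next->prev.store(prev, std::memory_order_release); }
        prev->mtx.unlock();
        return;
    }
}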
- * @param[out] next_layers - * @attention layers are stored in ascending order. - * @return - */ - [[maybe_unused]] void - get_all_next_layer(std::vector& next_layers) { - next_layers.clear(); - std::size_t cnk = permutation_.get_cnk(); - for (std::size_t i = 0; i < cnk; ++i) { - link_or_value* lv = get_lv_at(permutation_.get_index_of_rank(i)); - base_node* nl = lv->get_next_layer(); - if (nl != nullptr) { next_layers.emplace_back(nl); } - } - } - - [[maybe_unused]] [[nodiscard]] std::array& - get_lv() { - return lv_; - } - - /** - * @details Find link_or_value element whose next_layer is the same as @a next_layer of - * the argument. - * @pre Executor has lock of this node. There is always a lv that points to a pointer - * given as an argument. - * @param[in] next_layer - * @return link_or_value* - */ - [[maybe_unused]] [[nodiscard]] link_or_value* - get_lv(base_node* const next_layer) { - for (std::size_t i = 0; i < key_slice_length; ++i) { - if (lv_.at(i).get_next_layer() == next_layer) { return &lv_.at(i); } - } - /** - * unreachable point. - */ - LOG(ERROR) << log_location_prefix; - return nullptr; - } - - [[nodiscard]] link_or_value* get_lv_at(const std::size_t index) { - return &lv_.at(index); - } - - /** - * @brief - * - * @param key_slice - * @param key_length - * @pre Caller (put) must lock this node. - * @return std::size_t - */ - std::size_t compute_rank_if_insert(const key_slice_type key_slice, - const key_length_type key_length) { - permutation perm{permutation_.get_body()}; - std::size_t cnk = perm.get_cnk(); - for (std::size_t i = 0; i < cnk; ++i) { - std::size_t index = perm.get_index_of_rank(i); - key_slice_type target_key_slice = get_key_slice_at(index); - key_length_type target_key_len = get_key_length_at(index); - if (key_length == 0 && target_key_len == 0) { - LOG(ERROR) << log_location_prefix << "unexpected path"; - return 0; - } - // not zero key - auto ret = memcmp(&key_slice, &target_key_slice, - sizeof(key_slice_type)); - if (ret == 0) { - if ((key_length > sizeof(key_slice_type) && - target_key_len > sizeof(key_slice_type)) || - key_length == target_key_len) { - LOG(ERROR) << log_location_prefix << "unexpected path"; - return 0; - } - if (key_length < target_key_len) { return i; } - } else if (ret < 0) { - return i; - break; - } - } - - return cnk; - } - - /** - * @attention This function must not be called with locking of this node. Because - * this function executes get_stable_version and it waits own (lock-holder) - * infinitely. - * @param[in] key_slice - * @param[in] key_length - * @param[out] stable_v the stable version which is at atomically fetching lv. - * @param[out] lv_pos - * @return - */ - [[nodiscard]] link_or_value* get_lv_of(const key_slice_type key_slice, - const key_length_type key_length, - node_version64_body& stable_v, - std::size_t& lv_pos) { - node_version64_body v = get_stable_version(); - for (;;) { - /** - * It loads cnk atomically by get_cnk func. 
- */ - permutation perm{permutation_.get_body()}; - std::size_t cnk = perm.get_cnk(); - link_or_value* ret_lv{nullptr}; - for (std::size_t i = 0; i < cnk; ++i) { - bool suc{false}; - std::size_t index = perm.get_index_of_rank(i); - key_slice_type target_key_slice = get_key_slice_at(index); - key_length_type target_key_len = get_key_length_at(index); - if (key_length == 0 && target_key_len == 0) { - suc = true; - } else { - auto ret = memcmp(&key_slice, &target_key_slice, - sizeof(key_slice_type)); - if (ret == 0) { - if ((key_length > sizeof(key_slice_type) && - target_key_len > sizeof(key_slice_type)) || - key_length == target_key_len) { - suc = true; - } else if (key_length < target_key_len) { - break; - } - } else if (ret < 0) { - break; - } - } - - if (suc) { - ret_lv = get_lv_at(index); - lv_pos = index; - break; - } - } - node_version64_body v_check = get_stable_version(); - if (v == v_check) { - stable_v = v; - return ret_lv; - } - v = v_check; - } - } - - [[nodiscard]] link_or_value* - get_lv_of_without_lock(const key_slice_type key_slice, - const key_length_type key_length) { - /** - * It loads cnk atomically by get_cnk func. - */ - permutation perm{permutation_.get_body()}; - std::size_t cnk = perm.get_cnk(); - link_or_value* ret_lv{nullptr}; - for (std::size_t i = 0; i < cnk; ++i) { - bool suc{false}; - std::size_t index = perm.get_index_of_rank(i); - key_slice_type target_key_slice = get_key_slice_at(index); - key_length_type target_key_len = get_key_length_at(index); - if (key_length == 0 && target_key_len == 0) { - suc = true; - } else { - auto ret = memcmp(&key_slice, &target_key_slice, - sizeof(key_slice_type)); - if (ret == 0) { - if ((key_length > sizeof(key_slice_type) && - target_key_len > sizeof(key_slice_type)) || - key_length == target_key_len) { - suc = true; - } else if (key_length < target_key_len) { - break; - } - } else if (ret < 0) { - break; - } - } - - if (suc) { ret_lv = get_lv_at(index); } - } - return ret_lv; - } - - border_node* get_next() { return loadAcquireN(next_); } - - permutation& get_permutation() { return permutation_; } - - [[nodiscard]] std::uint8_t get_permutation_cnk() const { - return permutation_.get_cnk(); - } - - [[maybe_unused]] [[nodiscard]] std::size_t - get_permutation_lowest_key_pos() const { - return permutation_.get_lowest_key_pos(); - } - - border_node* get_prev() { return loadAcquireN(prev_); } - - void init_border() { - init_base(); - init_border_member_range(0); - set_version_root(true); - set_version_border(true); - permutation_.init(); - set_next(nullptr); - set_prev(nullptr); - } - - /** - * @details init at @a pos as position. - * @param[in] pos This is a position (index) to be initialized. - */ - void init_border(const std::size_t pos) { - init_base(pos); - lv_.at(pos).init_lv(); - } - - /** - * @pre This function is called by put function. - * @pre @a arg_value_length is divisible by sizeof( @a ValueType ). - * @pre This function can not be called for updating existing nodes. - * @pre If this function is used for node creation, link after set because - * set function does not execute lock function. - * @details This function inits border node by using arguments. - * @param[in] key_view - * @param[in] new_value - * @param[out] created_value_ptr The pointer to created value in yakushima. - * @param[in] root is the root node of the layer. 
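get_lv_of above is an optimistic reader: it takes a stable version, scans the permutation and slots, then re-reads the version and only trusts the result if nothing changed in between, retrying otherwise. The snippet below isolates that protocol on a deliberately tiny record; the single even/odd version word is a simplification of node_version64, which also carries split, delete and border bits, and the type names are illustrative.

#include <atomic>
#include <cstdint>

struct versioned_slot {
    std::atomic<std::uint64_t> version{0}; // even: stable, odd: a writer holds the lock
    std::atomic<int> payload{0};           // stand-in for the data read under validation
};

// spin until no writer is in progress and return that stable version
std::uint64_t stable_version(const versioned_slot& s) {
    for (;;) {
        std::uint64_t v = s.version.load(std::memory_order_acquire);
        if ((v & 1U) == 0U) { return v; }
    }
}

// read the payload and accept it only if the version did not move underneath us
int optimistic_read(const versioned_slot& s) {
    for (;;) {
        std::uint64_t before = stable_version(s);
        int result = s.payload.load(std::memory_order_relaxed);
        std::atomic_thread_fence(std::memory_order_acquire); // order read before re-check
        if (s.version.load(std::memory_order_relaxed) == before) { return result; }
        // a writer intervened between the two version reads: retry
    }
}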
- */ - template - void init_border(std::string_view key_view, value* new_value, - ValueType** const created_value_ptr, const bool root) { - init_border(); - set_version_root(root); - get_version_ptr()->atomic_inc_vinsert(); - insert_lv_at(get_permutation().get_empty_slot(), key_view, new_value, - reinterpret_cast(created_value_ptr), // NOLINT - 0); - } - - void init_border_member_range(const std::size_t start) { - for (auto i = start; i < lv_.size(); ++i) { lv_.at(i).init_lv(); } - } - - /** - * @pre It already locked this node. - * @param[in] index - * @param[in] key_view - * @param[in] value_ptr - * @param[out] created_value_ptr - * @param[in] arg_value_length - * @param[in] value_align - * @param[in] rank - */ - void insert_lv_at(const std::size_t index, std::string_view key_view, - value* new_value, void** const created_value_ptr, - const std::size_t rank) { - /** - * attention: key_slice must be initialized to 0. - * If key_view.size() is smaller than sizeof(key_slice_type), - * it is not possible to update the whole key_slice object with memcpy. - * It is possible that undefined values may remain from initialization. - */ - key_slice_type key_slice(0); - if (key_view.size() > sizeof(key_slice_type)) { - /** - * Create multiple border nodes. - */ - memcpy(&key_slice, key_view.data(), sizeof(key_slice_type)); - set_key_slice_at(index, key_slice); - /** - * You only need to know that it is 8 bytes or more. If it is - * stored obediently, key_length_type must be a large size type. - */ - set_key_length_at(index, sizeof(key_slice_type) + 1); - border_node* next_layer_border = new border_node(); // NOLINT - key_view.remove_prefix(sizeof(key_slice_type)); - /** - * attention: next_layer_border is the root of next layer. - */ - next_layer_border->init_border(key_view, new_value, - created_value_ptr, true); - next_layer_border->set_parent(this); - set_lv_next_layer(index, next_layer_border); - } else { - // set key - memcpy(&key_slice, key_view.data(), key_view.size()); - set_key_slice_at(index, key_slice); - set_key_length_at(index, - static_cast(key_view.size())); - // set value - set_lv_value(index, new_value, created_value_ptr); - } - permutation_.inc_key_num(); - permutation_.insert(rank, index); - } - - void permutation_rearrange() { - permutation_.rearrange(get_key_slice_ref(), get_key_length_ref()); - } - - [[maybe_unused]] void set_permutation_cnk(const std::uint8_t n) { - permutation_.set_cnk(n); - } - - /** - * @pre This is called by split process. - * @param index - * @param nlv - */ - void set_lv(const std::size_t index, link_or_value* const nlv) { - lv_.at(index).set(nlv); - } - - /** - * @brief set value to link_or_value. - * @param[in] index todo write - * @param[in] new_value todo write - * @param[out] created_value_ptr todo write - */ - void set_lv_value(const std::size_t index, value* new_value, - void** const created_value_ptr) { - lv_.at(index).set_value(new_value, created_value_ptr); - } - - void set_lv_next_layer(const std::size_t index, - base_node* const next_layer) { - lv_.at(index).set_next_layer(next_layer); - } - - void set_next(border_node* const nnext) { storeReleaseN(next_, nnext); } - - void set_prev(border_node* const prev) { storeReleaseN(prev_, prev); } - - void shift_left_border_member(const std::size_t start_pos, - const std::size_t shift_size) { - memmove(get_lv_at(start_pos - shift_size), get_lv_at(start_pos), - sizeof(link_or_value) * (key_slice_length - start_pos)); - } - -private: - // first member of base_node is aligned along with cache line size. 
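insert_lv_at above encodes at most one 8-byte slice of the key per layer: a stored length of sizeof(key_slice_type) + 1 means "eight bytes or more", and the remaining suffix becomes the key of a border node in the next layer. The helper below restates just that encoding step; encode_one_layer and layer_encoding are illustrative names rather than yakushima API, and the types assume the usual 8-byte slice and 1-byte length.

#include <cstdint>
#include <cstring>
#include <string_view>

using key_slice_type = std::uint64_t;   // assumed 8-byte slice, as above
using key_length_type = std::uint8_t;

struct layer_encoding {
    key_slice_type slice{0};       // zero-initialized so short keys compare cleanly
    key_length_type length{0};
    std::string_view rest;         // suffix pushed down to the next layer (may be empty)
};

// Produce the slice/length pair stored in one border node for `key`.
layer_encoding encode_one_layer(std::string_view key) {
    layer_encoding e;
    if (key.size() > sizeof(key_slice_type)) {
        std::memcpy(&e.slice, key.data(), sizeof(key_slice_type));
        e.length = sizeof(key_slice_type) + 1;        // marks "longer than one slice"
        e.rest = key.substr(sizeof(key_slice_type));  // handled by the next layer
    } else {
        if (!key.empty()) { std::memcpy(&e.slice, key.data(), key.size()); }
        e.length = static_cast<key_length_type>(key.size());
    }
    return e;
}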
- /** - * @attention This variable is read/written concurrently. - */ - permutation permutation_{}; - /** - * @attention This variable is read/written concurrently. - */ - std::array lv_{}; - /** - * @attention This is protected by its previous sibling's lock. - */ - border_node* prev_{nullptr}; - /** - * @attention This variable is read/written concurrently. - */ - border_node* next_{nullptr}; -}; -} // namespace yakushima +/** + * @file border_node.h + */ + +#pragma once + +#include +#include +#include +#include + +#include "atomic_wrapper.h" +#include "garbage_collection.h" +#include "interior_node.h" +#include "link_or_value.h" +#include "permutation.h" +#include "thread_info.h" + +#include "glog/logging.h" + +namespace yakushima { + +using std::cout; +using std::endl; + +class alignas(CACHE_LINE_SIZE) border_node final : public base_node { // NOLINT +public: + ~border_node() override {} // NOLINT + + /** + * @pre This function is called by delete_of function. + * @details delete the key-value corresponding to @a pos as position. + * @param[in] rank + * @param[in] pos The position of being deleted. + * @param[in] target_is_value + */ + void delete_at(Token token, const std::size_t rank, const std::size_t pos, + const bool target_is_value) { + auto* ti = reinterpret_cast(token); // NOLINT + if (target_is_value) { + value* vp = lv_.at(pos).get_value(); + if (value::is_value_ptr(vp)) { + // it is value ptr (not inline value) + value::remove_delete_flag(vp); + auto [v_ptr, v_len, v_align] = value::get_gc_info(vp); + ti->get_gc_info().push_value_container( + {ti->get_begin_epoch(), v_ptr, v_len, v_align}); + /** + * clear for preventing heap use after free by reference of + * need_delete + */ + lv_.at(pos).init_lv(); + } + } + + // rearrange permutation + permutation_.delete_rank(rank); + permutation_.dec_key_num(); + } + + /** + * @pre There is a lv which points to @a child. + * @details Delete operation on the element matching @a child. + * @param[in] token + * @param[in] child + */ + void delete_of(Token token, tree_instance* ti, base_node* const child) { + std::size_t cnk = get_permutation_cnk(); + for (std::size_t i = 0; i < cnk; ++i) { + std::size_t index = permutation_.get_index_of_rank(i); + if (child == lv_.at(index).get_next_layer()) { + delete_of(token, ti, get_key_slice_at(index), + get_key_length_at(index)); // NOLINT + return; + } + } + // unreachable points. + LOG(ERROR) << log_location_prefix; + } + + /** + * @brief release all heap objects and clean up. + */ + status destroy() override { + std::size_t cnk = get_permutation_cnk(); + std::vector th_vc; + for (std::size_t i = 0; i < cnk; ++i) { + // living link or value + std::size_t index = permutation_.get_index_of_rank(i); + // cleanup process + auto process = [this](std::size_t i) { lv_.at(i).destroy(); }; + if (lv_.at(index).get_next_layer() != nullptr) { + // has some layer, considering parallel + if (destroy_manager::check_room()) { + th_vc.emplace_back(process, index); + } else { + process(index); + } + } else { + // not some layer, not considering parallel + process(index); + } + } + for (auto&& th : th_vc) { + th.join(); + destroy_manager::return_room(); + } + + return status::OK_DESTROY_BORDER; + } + + /** + * @pre + * This border node was already locked by caller. + * This function is called by remove func. + * The key-value corresponding to @a key_slice and @a key_length exists in this node. 
+ * @details delete value corresponding to @a key_slice and @a key_length + * @param[in] token + * @param[in] key_slice The key slice of key-value. + * @param[in] key_slice_length The @a key_slice length. + * @param[in] target_is_value + */ + template + void delete_of(Token token, tree_instance* ti, + const key_slice_type key_slice, + const key_length_type key_slice_length) { + // past: delete is treated to increment vinsert counter. + //set_version_inserting_deleting(true); + /** + * find position. + */ + std::size_t cnk = get_permutation_cnk(); + for (std::size_t i = 0; i < cnk; ++i) { + std::size_t index = permutation_.get_index_of_rank(i); + if ((key_slice_length == 0 && get_key_length_at(index) == 0) || + (key_slice_length == get_key_length_at(index) && + memcmp(&key_slice, &get_key_slice_ref().at(index), + sizeof(key_slice_type)) == 0)) { + delete_at(token, i, index, target_is_value); + if (cnk == 1) { // attention : this cnk is before delete_at; + set_version_deleted(true); + if (ti->load_root_ptr() != this) { + // root && deleted node is treated as special. This isn't. + set_version_root(false); + } + + /** + * After this delete operation, this border node is empty. + */ + retry_prev_lock: + border_node* prev = get_prev(); + if (prev != nullptr) { + prev->lock(); + if (prev->get_version_deleted() || prev != get_prev()) { + prev->version_unlock(); + goto retry_prev_lock; // NOLINT + } else { + prev->set_next(get_next()); + if (get_next() != nullptr) { + get_next()->set_prev(prev); + } + prev->version_unlock(); + } + } else if (get_next() != nullptr) { + get_next()->set_prev(nullptr); + } + /** + * lock order is next to prev and lower to higher. + */ + base_node* pn = lock_parent(); + if (pn == nullptr) { + //ti->store_root_ptr(nullptr); + // remain empty deleted root node. + version_unlock(); + return; + } + /** + * This node has parent node, so this must not be root. + * note: Including relation of parent-child is + * border-border. + */ + set_version_root(false); + version_unlock(); + if (pn->get_version_border()) { + dynamic_cast(pn)->delete_of(token, ti, + this); + } else { + dynamic_cast(pn) + ->delete_of(token, ti, this); + } + auto* tinfo = + reinterpret_cast(token); // NOLINT + tinfo->get_gc_info().push_node_container( + {tinfo->get_begin_epoch(), this}); + } else { + version_unlock(); + } + return; + } + } + /** + * unreachable. + */ + LOG(ERROR) << log_location_prefix + << ", deleted: " << get_version_deleted() + << ", is root: " << get_version_root(); + } + + /** + * @details display function for analysis and debug. + */ + void display() override { + display_base(); + cout << "border_node::display" << endl; + permutation_.display(); + for (std::size_t i = 0; i < get_permutation_cnk(); ++i) { + lv_.at(i).display(); + } + cout << "next : " << get_next() << endl; + } + + /** + * @brief Collect the memory usage of this partial tree. + * + * @param level the level of this node in the tree. + * @param mem_stat the stack of memory usage for each level. 
+ */ + void mem_usage(std::size_t level, + memory_usage_stack& mem_stat) const override { + if (mem_stat.size() <= level) { mem_stat.emplace_back(0, 0, 0); } + auto& [node_num, used, reserved] = mem_stat.at(level); + + const std::size_t cnk = get_permutation_cnk(); + ++node_num; + reserved += sizeof(border_node); + used += sizeof(border_node) - + ((key_slice_length - cnk) * sizeof(link_or_value)); + + for (std::size_t i = 0; i < cnk; ++i) { + std::size_t index = permutation_.get_index_of_rank(i); + lv_.at(index).mem_usage(level, mem_stat); + } + } + + /** + * @post It is necessary for the caller to verify whether the extraction is appropriate. + * @param[out] next_layers + * @attention layers are stored in ascending order. + * @return + */ + [[maybe_unused]] void + get_all_next_layer(std::vector& next_layers) { + next_layers.clear(); + std::size_t cnk = permutation_.get_cnk(); + for (std::size_t i = 0; i < cnk; ++i) { + link_or_value* lv = get_lv_at(permutation_.get_index_of_rank(i)); + base_node* nl = lv->get_next_layer(); + if (nl != nullptr) { next_layers.emplace_back(nl); } + } + } + + [[maybe_unused]] [[nodiscard]] std::array& + get_lv() { + return lv_; + } + + /** + * @details Find link_or_value element whose next_layer is the same as @a next_layer of + * the argument. + * @pre Executor has lock of this node. There is always a lv that points to a pointer + * given as an argument. + * @param[in] next_layer + * @return link_or_value* + */ + [[maybe_unused]] [[nodiscard]] link_or_value* + get_lv(base_node* const next_layer) { + for (std::size_t i = 0; i < key_slice_length; ++i) { + if (lv_.at(i).get_next_layer() == next_layer) { return &lv_.at(i); } + } + /** + * unreachable point. + */ + LOG(ERROR) << log_location_prefix; + return nullptr; + } + + [[nodiscard]] link_or_value* get_lv_at(const std::size_t index) { + return &lv_.at(index); + } + + /** + * @brief + * + * @param key_slice + * @param key_length + * @pre Caller (put) must lock this node. + * @return std::size_t + */ + std::size_t compute_rank_if_insert(const key_slice_type key_slice, + const key_length_type key_length) { + permutation perm{permutation_.get_body()}; + std::size_t cnk = perm.get_cnk(); + for (std::size_t i = 0; i < cnk; ++i) { + std::size_t index = perm.get_index_of_rank(i); + key_slice_type target_key_slice = get_key_slice_at(index); + key_length_type target_key_len = get_key_length_at(index); + if (key_length == 0 && target_key_len == 0) { + LOG(ERROR) << log_location_prefix << "unexpected path"; + return 0; + } + // not zero key + auto ret = memcmp(&key_slice, &target_key_slice, + sizeof(key_slice_type)); + if (ret == 0) { + if ((key_length > sizeof(key_slice_type) && + target_key_len > sizeof(key_slice_type)) || + key_length == target_key_len) { + LOG(ERROR) << log_location_prefix << "unexpected path"; + return 0; + } + if (key_length < target_key_len) { return i; } + } else if (ret < 0) { + return i; + break; + } + } + + return cnk; + } + + /** + * @attention This function must not be called with locking of this node. Because + * this function executes get_stable_version and it waits own (lock-holder) + * infinitely. + * @param[in] key_slice + * @param[in] key_length + * @param[out] stable_v the stable version which is at atomically fetching lv. 
+ * @param[out] lv_pos + * @return + */ + [[nodiscard]] link_or_value* get_lv_of(const key_slice_type key_slice, + const key_length_type key_length, + node_version64_body& stable_v, + std::size_t& lv_pos) { + node_version64_body v = get_stable_version(); + for (;;) { + /** + * It loads cnk atomically by get_cnk func. + */ + permutation perm{permutation_.get_body()}; + std::size_t cnk = perm.get_cnk(); + link_or_value* ret_lv{nullptr}; + for (std::size_t i = 0; i < cnk; ++i) { + bool suc{false}; + std::size_t index = perm.get_index_of_rank(i); + key_slice_type target_key_slice = get_key_slice_at(index); + key_length_type target_key_len = get_key_length_at(index); + if (key_length == 0 && target_key_len == 0) { + suc = true; + } else { + auto ret = memcmp(&key_slice, &target_key_slice, + sizeof(key_slice_type)); + if (ret == 0) { + if ((key_length > sizeof(key_slice_type) && + target_key_len > sizeof(key_slice_type)) || + key_length == target_key_len) { + suc = true; + } else if (key_length < target_key_len) { + break; + } + } else if (ret < 0) { + break; + } + } + + if (suc) { + ret_lv = get_lv_at(index); + lv_pos = index; + break; + } + } + node_version64_body v_check = get_stable_version(); + if (v == v_check) { + stable_v = v; + return ret_lv; + } + v = v_check; + } + } + + [[nodiscard]] link_or_value* + get_lv_of_without_lock(const key_slice_type key_slice, + const key_length_type key_length) { + /** + * It loads cnk atomically by get_cnk func. + */ + permutation perm{permutation_.get_body()}; + std::size_t cnk = perm.get_cnk(); + link_or_value* ret_lv{nullptr}; + for (std::size_t i = 0; i < cnk; ++i) { + bool suc{false}; + std::size_t index = perm.get_index_of_rank(i); + key_slice_type target_key_slice = get_key_slice_at(index); + key_length_type target_key_len = get_key_length_at(index); + if (key_length == 0 && target_key_len == 0) { + suc = true; + } else { + auto ret = memcmp(&key_slice, &target_key_slice, + sizeof(key_slice_type)); + if (ret == 0) { + if ((key_length > sizeof(key_slice_type) && + target_key_len > sizeof(key_slice_type)) || + key_length == target_key_len) { + suc = true; + } else if (key_length < target_key_len) { + break; + } + } else if (ret < 0) { + break; + } + } + + if (suc) { ret_lv = get_lv_at(index); } + } + return ret_lv; + } + + border_node* get_next() { return loadAcquireN(next_); } + + permutation& get_permutation() { return permutation_; } + + [[nodiscard]] std::uint8_t get_permutation_cnk() const { + return permutation_.get_cnk(); + } + + [[maybe_unused]] [[nodiscard]] std::size_t + get_permutation_lowest_key_pos() const { + return permutation_.get_lowest_key_pos(); + } + + border_node* get_prev() { return loadAcquireN(prev_); } + + void init_border() { + init_base(); + init_border_member_range(0); + set_version_root(true); + set_version_border(true); + permutation_.init(); + set_next(nullptr); + set_prev(nullptr); + } + + /** + * @details init at @a pos as position. + * @param[in] pos This is a position (index) to be initialized. + */ + void init_border(const std::size_t pos) { + init_base(pos); + lv_.at(pos).init_lv(); + } + + /** + * @pre This function is called by put function. + * @pre @a arg_value_length is divisible by sizeof( @a ValueType ). + * @pre This function can not be called for updating existing nodes. + * @pre If this function is used for node creation, link after set because + * set function does not execute lock function. + * @details This function inits border node by using arguments. 
+ * @param[in] key_view + * @param[in] new_value + * @param[out] created_value_ptr The pointer to created value in yakushima. + * @param[in] root is the root node of the layer. + */ + template + void init_border(std::string_view key_view, value* new_value, + ValueType** const created_value_ptr, const bool root) { + init_border(); + set_version_root(root); + get_version_ptr()->atomic_inc_vinsert(); + insert_lv_at(get_permutation().get_empty_slot(), key_view, new_value, + reinterpret_cast(created_value_ptr), // NOLINT + 0); + } + + void init_border_member_range(const std::size_t start) { + for (auto i = start; i < lv_.size(); ++i) { lv_.at(i).init_lv(); } + } + + /** + * @pre It already locked this node. + * @param[in] index + * @param[in] key_view + * @param[in] value_ptr + * @param[out] created_value_ptr + * @param[in] arg_value_length + * @param[in] value_align + * @param[in] rank + */ + void insert_lv_at(const std::size_t index, std::string_view key_view, + value* new_value, void** const created_value_ptr, + const std::size_t rank) { + /** + * attention: key_slice must be initialized to 0. + * If key_view.size() is smaller than sizeof(key_slice_type), + * it is not possible to update the whole key_slice object with memcpy. + * It is possible that undefined values may remain from initialization. + */ + key_slice_type key_slice(0); + if (key_view.size() > sizeof(key_slice_type)) { + /** + * Create multiple border nodes. + */ + memcpy(&key_slice, key_view.data(), sizeof(key_slice_type)); + set_key_slice_at(index, key_slice); + /** + * You only need to know that it is 8 bytes or more. If it is + * stored obediently, key_length_type must be a large size type. + */ + set_key_length_at(index, sizeof(key_slice_type) + 1); + border_node* next_layer_border = new border_node(); // NOLINT + key_view.remove_prefix(sizeof(key_slice_type)); + /** + * attention: next_layer_border is the root of next layer. + */ + next_layer_border->init_border(key_view, new_value, + created_value_ptr, true); + next_layer_border->set_parent(this); + set_lv_next_layer(index, next_layer_border); + } else { + // set key + memcpy(&key_slice, key_view.data(), key_view.size()); + set_key_slice_at(index, key_slice); + set_key_length_at(index, + static_cast(key_view.size())); + // set value + set_lv_value(index, new_value, created_value_ptr); + } + permutation_.inc_key_num(); + permutation_.insert(rank, index); + } + + void permutation_rearrange() { + permutation_.rearrange(get_key_slice_ref(), get_key_length_ref()); + } + + [[maybe_unused]] void set_permutation_cnk(const std::uint8_t n) { + permutation_.set_cnk(n); + } + + /** + * @pre This is called by split process. + * @param index + * @param nlv + */ + void set_lv(const std::size_t index, link_or_value* const nlv) { + lv_.at(index).set(nlv); + } + + /** + * @brief set value to link_or_value. 
+ * @param[in] index todo write + * @param[in] new_value todo write + * @param[out] created_value_ptr todo write + */ + void set_lv_value(const std::size_t index, value* new_value, + void** const created_value_ptr) { + lv_.at(index).set_value(new_value, created_value_ptr); + } + + void set_lv_next_layer(const std::size_t index, + base_node* const next_layer) { + lv_.at(index).set_next_layer(next_layer); + } + + void set_next(border_node* const nnext) { storeReleaseN(next_, nnext); } + + void set_prev(border_node* const prev) { storeReleaseN(prev_, prev); } + + void shift_left_border_member(const std::size_t start_pos, + const std::size_t shift_size) { + memmove(get_lv_at(start_pos - shift_size), get_lv_at(start_pos), + sizeof(link_or_value) * (key_slice_length - start_pos)); + } + +private: + // first member of base_node is aligned along with cache line size. + /** + * @attention This variable is read/written concurrently. + */ + permutation permutation_{}; + /** + * @attention This variable is read/written concurrently. + */ + std::array lv_{}; + /** + * @attention This is protected by its previous sibling's lock. + */ + border_node* prev_{nullptr}; + /** + * @attention This variable is read/written concurrently. + */ + border_node* next_{nullptr}; +}; +} // namespace yakushima diff --git a/include/common_helper.h b/include/common_helper.h index 62f6952..8394e95 100644 --- a/include/common_helper.h +++ b/include/common_helper.h @@ -1,88 +1,88 @@ -/** - * @file common_helper.h - */ - -#pragma once - -#include "border_node.h" -#include "interior_node.h" -#include "log.h" -#include "version.h" - -#include "glog/logging.h" - -namespace yakushima { - -/** - * - * @details It finds border node by using arguments @a root, @a key_slice. - * If the @a root is not the root of some layer, this function finds root nodes of the - * layer, then finds border node by using retry label. - * @param[in] root - * @param[in] key_slice - * @param[in] key_slice_length - * @param[out] special_status - * @return std::tuple - * node_version64_body is stable version of base_node*. - */ -static std::tuple -find_border(base_node* const root, const key_slice_type key_slice, - const key_length_type key_slice_length, status& special_status) { - special_status = status::OK; -retry: - if (root == nullptr) { - LOG(ERROR) << log_location_prefix << "find_border: root: " << root - << ", key_slice: " << key_slice - << ", key_slice_length: " << key_slice_length - << ", special_status: " << special_status; - // if special status is ok, it is just after called. - // if special status is warn, it is just after retry one. - } - - base_node* n = root; - node_version64_body v = n->get_stable_version(); - if (!v.get_root()) { - special_status = status::WARN_RETRY_FROM_ROOT_OF_ALL; - return std::make_tuple(nullptr, node_version64_body()); - } - if (v.get_deleted()) { - // root && deleted node. - if (n == nullptr) { - LOG(ERROR) << log_location_prefix << "find_border: root: " << root - << ", key_slice: " << key_slice - << ", key_slice_length: " << key_slice_length - << ", special_status: " << special_status - << ", version: " << v; - } - return std::make_tuple(dynamic_cast(n), v); - } - /** - * The caller checks whether it has been deleted. - */ - while (!v.get_border()) { - /** - * @a n points to a interior_node object. - */ - base_node* n_child = dynamic_cast(n)->get_child_of( - key_slice, key_slice_length, v); - if (n_child == nullptr) { - /** - * If the value of vsplit is different, the read location may be - * inappropriate. 
- * Split propagates upward. It have to start from root. - */ - special_status = status::WARN_CONCURRENT_OPERATIONS; - goto retry; // NOLINT - } - n = n_child; - } - if (n == nullptr) { - LOG(ERROR) << log_location_prefix << "find_border: root: " << root - << ", key_slice: " << key_slice - << ", key_slice_length: " << key_slice_length - << ", special_status: " << special_status; - } - return std::make_tuple(dynamic_cast(n), v); -} - +/** + * @file common_helper.h + */ + +#pragma once + +#include "border_node.h" +#include "interior_node.h" +#include "log.h" +#include "version.h" + +#include "glog/logging.h" + +namespace yakushima { + +/** + * + * @details It finds border node by using arguments @a root, @a key_slice. + * If the @a root is not the root of some layer, this function finds root nodes of the + * layer, then finds border node by using retry label. + * @param[in] root + * @param[in] key_slice + * @param[in] key_slice_length + * @param[out] special_status + * @return std::tuple + * node_version64_body is stable version of base_node*. + */ +static std::tuple +find_border(base_node* const root, const key_slice_type key_slice, + const key_length_type key_slice_length, status& special_status) { + special_status = status::OK; +retry: + if (root == nullptr) { + LOG(ERROR) << log_location_prefix << "find_border: root: " << root + << ", key_slice: " << key_slice + << ", key_slice_length: " << key_slice_length + << ", special_status: " << special_status; + // if special status is ok, it is just after called. + // if special status is warn, it is just after retry one. + } + + base_node* n = root; + node_version64_body v = n->get_stable_version(); + if (!v.get_root()) { + special_status = status::WARN_RETRY_FROM_ROOT_OF_ALL; + return std::make_tuple(nullptr, node_version64_body()); + } + if (v.get_deleted()) { + // root && deleted node. + if (n == nullptr) { + LOG(ERROR) << log_location_prefix << "find_border: root: " << root + << ", key_slice: " << key_slice + << ", key_slice_length: " << key_slice_length + << ", special_status: " << special_status + << ", version: " << v; + } + return std::make_tuple(dynamic_cast(n), v); + } + /** + * The caller checks whether it has been deleted. + */ + while (!v.get_border()) { + /** + * @a n points to a interior_node object. + */ + base_node* n_child = dynamic_cast(n)->get_child_of( + key_slice, key_slice_length, v); + if (n_child == nullptr) { + /** + * If the value of vsplit is different, the read location may be + * inappropriate. + * Split propagates upward. It have to start from root. 
+ */ + special_status = status::WARN_CONCURRENT_OPERATIONS; + goto retry; // NOLINT + } + n = n_child; + } + if (n == nullptr) { + LOG(ERROR) << log_location_prefix << "find_border: root: " << root + << ", key_slice: " << key_slice + << ", key_slice_length: " << key_slice_length + << ", special_status: " << special_status; + } + return std::make_tuple(dynamic_cast(n), v); +} + } // namespace yakushima \ No newline at end of file diff --git a/include/cpu.h b/include/cpu.h index 41714b0..2d7b7a8 100644 --- a/include/cpu.h +++ b/include/cpu.h @@ -1,10 +1,10 @@ -/** - * @file cpu.h - */ - -#pragma once - -namespace yakushima { - -static constexpr std::size_t CACHE_LINE_SIZE{64}; -} +/** + * @file cpu.h + */ + +#pragma once + +namespace yakushima { + +static constexpr std::size_t CACHE_LINE_SIZE{64}; +} diff --git a/include/epoch.h b/include/epoch.h index d8271cf..e6bab3a 100644 --- a/include/epoch.h +++ b/include/epoch.h @@ -1,26 +1,26 @@ -/** - * @file epoch.h - */ - -#pragma once - -#include - -namespace yakushima { - -using Epoch = std::uint64_t; - -class epoch_management { -public: - static void epoch_inc() { epoch_.fetch_add(1); } - - static Epoch get_epoch() { return epoch_.load(std::memory_order_acquire); } - -private: - /** - * @todo consider wrap around. Wrap around after 23,397,696,694 days. - */ - static inline std::atomic epoch_{1}; // NOLINT -}; - +/** + * @file epoch.h + */ + +#pragma once + +#include + +namespace yakushima { + +using Epoch = std::uint64_t; + +class epoch_management { +public: + static void epoch_inc() { epoch_.fetch_add(1); } + + static Epoch get_epoch() { return epoch_.load(std::memory_order_acquire); } + +private: + /** + * @todo consider wrap around. Wrap around after 23,397,696,694 days. + */ + static inline std::atomic epoch_{1}; // NOLINT +}; + } // namespace yakushima \ No newline at end of file diff --git a/include/garbage_collection.h b/include/garbage_collection.h index 23e8e73..c8187a8 100644 --- a/include/garbage_collection.h +++ b/include/garbage_collection.h @@ -1,159 +1,159 @@ -/** - * @file garbage_collection.h - */ - -#pragma once - -#include -#include -#include -#include - -#include "base_node.h" -#include "concurrent_queue.h" -#include "cpu.h" -#include "epoch.h" - -namespace yakushima { - -class garbage_collection { -public: - /** - * @tparam interior_node - * @tparam border_node - * @attention Use a template class so that the dependency does not cycle. - */ - template - void fin() { - // for cache - if (std::get(cache_node_container_) != nullptr) { - delete std::get(cache_node_container_); // NOLINT - std::get(cache_node_container_) = nullptr; - } - - while (!node_container_.empty()) { - std::tuple elem; - if (!node_container_.try_pop(elem)) { continue; } - delete std::get(elem); // NOLINT - } - - // for cache - if (std::get(cache_value_container_) != nullptr) { - ::operator delete( - std::get(cache_value_container_), - std::get(cache_value_container_), - std::get(cache_value_container_)); - std::get(cache_value_container_) = nullptr; - } - - while (!value_container_.empty()) { - std::tuple elem; - if (!value_container_.try_pop(elem)) { continue; } - ::operator delete(std::get(elem), - std::get(elem), - std::get(elem)); - } - } - - /** - * @tparam interior_node - * @tparam border_node - * @attention Use a template class so that the dependency does not cycle. 
- */ - template - void gc() { - gc_node(); - gc_value(); - } - - /** - * @tparam interior_node - * @tparam border_node - * @attention Use a template class so that the dependency does not cycle. - */ - template - void gc_node() { - Epoch gc_epoch = get_gc_epoch(); - - // for cache - if (std::get(cache_node_container_) != nullptr) { - if (std::get(cache_node_container_) >= gc_epoch) { - return; - } - delete std::get(cache_node_container_); // NOLINT - std::get(cache_node_container_) = nullptr; - } - - // for container - while (!node_container_.empty()) { - std::tuple elem; - if (!node_container_.try_pop(elem)) { continue; } - if (std::get(elem) >= gc_epoch) { - cache_node_container_ = elem; - return; - } - delete std::get(elem); // NOLINT - } - } - - void gc_value() { - Epoch gc_epoch = get_gc_epoch(); - - if (std::get(cache_value_container_) != nullptr) { - if (std::get(cache_value_container_) >= gc_epoch) { - return; - } - ::operator delete( - std::get(cache_value_container_), - std::get(cache_value_container_), - std::get(cache_value_container_)); - std::get(cache_value_container_) = nullptr; - } - - while (!value_container_.empty()) { - std::tuple elem; - if (!value_container_.try_pop(elem)) { continue; } - if (std::get(elem) >= gc_epoch) { - cache_value_container_ = elem; - return; - } - ::operator delete(std::get(elem), - std::get(elem), - std::get(elem)); - } - } - - static Epoch get_gc_epoch() { - return gc_epoch_.load(std::memory_order_acquire); - } - - void push_node_container(std::tuple elem) { - node_container_.push(elem); - } - - void push_value_container( - std::tuple elem) { - value_container_.push(elem); - } - - static void set_gc_epoch(const Epoch epoch) { - gc_epoch_.store(epoch, std::memory_order_release); - } - -private: - static constexpr std::size_t gc_epoch_index = 0; - static constexpr std::size_t gc_target_index = 1; - static constexpr std::size_t gc_target_size_index = 2; - static constexpr std::size_t gc_target_align_index = 3; - alignas(CACHE_LINE_SIZE) static inline std::atomic // NOLINT - gc_epoch_{0}; // NOLINT - std::tuple cache_node_container_{0, nullptr}; // NOLINT - concurrent_queue> node_container_; // NOLINT - std::tuple - cache_value_container_{0, nullptr, 0, - static_cast(0)}; // NOLINT - concurrent_queue> - value_container_; // NOLINT -}; - -} // namespace yakushima +/** + * @file garbage_collection.h + */ + +#pragma once + +#include +#include +#include +#include + +#include "base_node.h" +#include "concurrent_queue.h" +#include "cpu.h" +#include "epoch.h" + +namespace yakushima { + +class garbage_collection { +public: + /** + * @tparam interior_node + * @tparam border_node + * @attention Use a template class so that the dependency does not cycle. 
+ */ + template + void fin() { + // for cache + if (std::get(cache_node_container_) != nullptr) { + delete std::get(cache_node_container_); // NOLINT + std::get(cache_node_container_) = nullptr; + } + + while (!node_container_.empty()) { + std::tuple elem; + if (!node_container_.try_pop(elem)) { continue; } + delete std::get(elem); // NOLINT + } + + // for cache + if (std::get(cache_value_container_) != nullptr) { + ::operator delete( + std::get(cache_value_container_), + std::get(cache_value_container_), + std::get(cache_value_container_)); + std::get(cache_value_container_) = nullptr; + } + + while (!value_container_.empty()) { + std::tuple elem; + if (!value_container_.try_pop(elem)) { continue; } + ::operator delete(std::get(elem), + std::get(elem), + std::get(elem)); + } + } + + /** + * @tparam interior_node + * @tparam border_node + * @attention Use a template class so that the dependency does not cycle. + */ + template + void gc() { + gc_node(); + gc_value(); + } + + /** + * @tparam interior_node + * @tparam border_node + * @attention Use a template class so that the dependency does not cycle. + */ + template + void gc_node() { + Epoch gc_epoch = get_gc_epoch(); + + // for cache + if (std::get(cache_node_container_) != nullptr) { + if (std::get(cache_node_container_) >= gc_epoch) { + return; + } + delete std::get(cache_node_container_); // NOLINT + std::get(cache_node_container_) = nullptr; + } + + // for container + while (!node_container_.empty()) { + std::tuple elem; + if (!node_container_.try_pop(elem)) { continue; } + if (std::get(elem) >= gc_epoch) { + cache_node_container_ = elem; + return; + } + delete std::get(elem); // NOLINT + } + } + + void gc_value() { + Epoch gc_epoch = get_gc_epoch(); + + if (std::get(cache_value_container_) != nullptr) { + if (std::get(cache_value_container_) >= gc_epoch) { + return; + } + ::operator delete( + std::get(cache_value_container_), + std::get(cache_value_container_), + std::get(cache_value_container_)); + std::get(cache_value_container_) = nullptr; + } + + while (!value_container_.empty()) { + std::tuple elem; + if (!value_container_.try_pop(elem)) { continue; } + if (std::get(elem) >= gc_epoch) { + cache_value_container_ = elem; + return; + } + ::operator delete(std::get(elem), + std::get(elem), + std::get(elem)); + } + } + + static Epoch get_gc_epoch() { + return gc_epoch_.load(std::memory_order_acquire); + } + + void push_node_container(std::tuple elem) { + node_container_.push(elem); + } + + void push_value_container( + std::tuple elem) { + value_container_.push(elem); + } + + static void set_gc_epoch(const Epoch epoch) { + gc_epoch_.store(epoch, std::memory_order_release); + } + +private: + static constexpr std::size_t gc_epoch_index = 0; + static constexpr std::size_t gc_target_index = 1; + static constexpr std::size_t gc_target_size_index = 2; + static constexpr std::size_t gc_target_align_index = 3; + alignas(CACHE_LINE_SIZE) static inline std::atomic // NOLINT + gc_epoch_{0}; // NOLINT + std::tuple cache_node_container_{0, nullptr}; // NOLINT + concurrent_queue> node_container_; // NOLINT + std::tuple + cache_value_container_{0, nullptr, 0, + static_cast(0)}; // NOLINT + concurrent_queue> + value_container_; // NOLINT +}; + +} // namespace yakushima diff --git a/include/interior_helper.h b/include/interior_helper.h index ebe2fcc..cb91c1c 100644 --- a/include/interior_helper.h +++ b/include/interior_helper.h @@ -1,185 +1,185 @@ -/** - * @file interior_helper.h - * @details Declare functions that could not be member functions of 
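gc_node and gc_value above free a retired node or value only when the epoch recorded at retirement is older than the current gc epoch, and they stop at the first still-visible entry because entries are queued in roughly increasing epoch order. The sketch below reduces that rule to a plain deque of retired pointers; the container, the int payload and the reclaim name are simplifications chosen for brevity, not yakushima's types.

#include <cstddef>
#include <cstdint>
#include <deque>

using Epoch = std::uint64_t;

struct retired {
    Epoch epoch; // epoch at which the object became unreachable
    int* ptr;    // stand-in for a retired node or value allocation
};

// Free everything retired strictly before gc_epoch; stop at the first entry
// that a concurrent reader might still be traversing.
std::size_t reclaim(std::deque<retired>& retire_list, Epoch gc_epoch) {
    std::size_t freed = 0;
    while (!retire_list.empty()) {
        if (retire_list.front().epoch >= gc_epoch) { break; }
        delete retire_list.front().ptr;
        retire_list.pop_front();
        ++freed;
    }
    return freed;
}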
the class for - * dependency resolution. - */ - -#pragma once - -#include "border_helper.h" -#include "log.h" -#include "tree_instance.h" - -#include "glog/logging.h" - -namespace yakushima { - -/** - * @details This may be called at split function. - * It creates new interior node as parents of this interior_node and @a right. - * @param[in] left - * @param[in] right - * @param[out] lock_list - * @param[out] new_parent This function tells new parent to the caller via this argument. - */ -template -static void create_interior_parent_of_interior( - interior_node* const left, interior_node* const right, - const std::pair pivot_key, - base_node** const new_parent) { - left->set_version_root(false); - right->set_version_root(false); - interior_node* ni = new interior_node(); // NOLINT - ni->init_interior(); - ni->set_version_root(true); - ni->set_version_inserting_deleting(true); - ni->lock(); - /** - * process base members - */ - ni->set_key(0, pivot_key.first, pivot_key.second); - /** - * process interior node members - */ - ni->n_keys_increment(); - ni->set_child_at(0, left); - ni->set_child_at(1, right); - /** - * release interior parent to global - */ - left->set_parent(ni); - right->set_parent(ni); - *new_parent = ni; -} - -/** - * @pre It already acquired lock of this node. - * @details split interior node. - * @param[in] interior - * @param[in] child_node After split, it inserts this @a child_node. - */ -template -static void -interior_split(tree_instance* ti, interior_node* const interior, - base_node* const child_node, - const std::pair inserting_key) { - interior->set_version_splitting(true); - interior_node* new_interior = new interior_node(); // NOLINT - new_interior->init_interior(); - - /** - * new interior is initially locked. - */ - new_interior->set_version(interior->get_version()); - /** - * split keys among n and n' - */ - key_slice_type pivot_key_pos = key_slice_length / 2; - std::size_t split_children_points = pivot_key_pos + 1; - interior->move_key_to_base_range(new_interior, split_children_points); - interior->set_n_keys(pivot_key_pos); - if (pivot_key_pos & 1) { // NOLINT - new_interior->set_n_keys(pivot_key_pos); - } else { - new_interior->set_n_keys(pivot_key_pos - 1); - } - interior->move_children_to_interior_range(new_interior, - split_children_points); - key_slice_type pivot_key = interior->get_key_slice_at(pivot_key_pos); - key_length_type pivot_length = interior->get_key_length_at(pivot_key_pos); - interior->set_key(pivot_key_pos, 0, 0); - - /** - * It inserts child_node. - */ - - key_slice_type key_slice{inserting_key.first}; - key_length_type key_length{inserting_key.second}; -#ifndef NDEBUG - if (key_length == 0 || pivot_length == 0) { - LOG(ERROR) << log_location_prefix; - } -#endif - std::size_t comp_length{0}; - if (key_length > sizeof(key_slice_type) && - pivot_length > sizeof(key_slice_type)) { - comp_length = 8; - } else { - comp_length = key_length < pivot_length ? 
key_length : pivot_length; - } - int ret_memcmp = memcmp(&key_slice, &pivot_key, comp_length); - if (ret_memcmp < 0 || (ret_memcmp == 0 && key_length < pivot_length)) { - child_node->set_parent(interior); - interior->template insert(child_node, inserting_key); - } else { - child_node->set_parent(new_interior); - new_interior->template insert(child_node, inserting_key); - } - - base_node* p = interior->lock_parent(); - if (p == nullptr) { -#ifndef NDEBUG - if (ti->load_root_ptr() != interior) { - LOG(ERROR) << log_location_prefix; - } -#endif - /** - * The disappearance of the parent node may have made this node the root node in - * parallel. It cares in below function. - */ - create_interior_parent_of_interior( - interior, new_interior, std::make_pair(pivot_key, pivot_length), - &p); - interior->version_unlock(); - new_interior->version_unlock(); - /** - * p became new root. - */ - ti->store_root_ptr(p); - p->version_unlock(); - return; - } - /** - * p exists. - */ -#ifndef NDEBUG - if (p->get_version_deleted() || p != interior->get_parent()) { - LOG(ERROR) << log_location_prefix; - } -#endif - if (p->get_version_border()) { - auto* pb = dynamic_cast(p); - base_node* new_p{}; - create_interior_parent_of_interior( - interior, new_interior, std::make_pair(pivot_key, pivot_length), - &new_p); - interior->version_unlock(); - new_interior->version_unlock(); - link_or_value* lv = pb->get_lv(interior); - lv->set_next_layer(new_p); - new_p->set_parent(pb); - new_p->version_unlock(); - p->version_unlock(); - return; - } - auto* pi = dynamic_cast(p); - interior->version_unlock(); - new_interior->set_parent(pi); - new_interior->version_unlock(); - if (pi->get_n_keys() == key_slice_length) { - /** - * parent interior full case. - */ - interior_split( - ti, pi, new_interior, std::make_pair(pivot_key, pivot_length)); - return; - } - /** - * parent interior not-full case - */ - pi->template insert(new_interior, - std::make_pair(pivot_key, pivot_length)); - pi->version_unlock(); -} - -} // namespace yakushima +/** + * @file interior_helper.h + * @details Declare functions that could not be member functions of the class for + * dependency resolution. + */ + +#pragma once + +#include "border_helper.h" +#include "log.h" +#include "tree_instance.h" + +#include "glog/logging.h" + +namespace yakushima { + +/** + * @details This may be called at split function. + * It creates new interior node as parents of this interior_node and @a right. + * @param[in] left + * @param[in] right + * @param[out] lock_list + * @param[out] new_parent This function tells new parent to the caller via this argument. + */ +template +static void create_interior_parent_of_interior( + interior_node* const left, interior_node* const right, + const std::pair pivot_key, + base_node** const new_parent) { + left->set_version_root(false); + right->set_version_root(false); + interior_node* ni = new interior_node(); // NOLINT + ni->init_interior(); + ni->set_version_root(true); + ni->set_version_inserting_deleting(true); + ni->lock(); + /** + * process base members + */ + ni->set_key(0, pivot_key.first, pivot_key.second); + /** + * process interior node members + */ + ni->n_keys_increment(); + ni->set_child_at(0, left); + ni->set_child_at(1, right); + /** + * release interior parent to global + */ + left->set_parent(ni); + right->set_parent(ni); + *new_parent = ni; +} + +/** + * @pre It already acquired lock of this node. + * @details split interior node. 
+ * @param[in] interior + * @param[in] child_node After split, it inserts this @a child_node. + */ +template +static void +interior_split(tree_instance* ti, interior_node* const interior, + base_node* const child_node, + const std::pair inserting_key) { + interior->set_version_splitting(true); + interior_node* new_interior = new interior_node(); // NOLINT + new_interior->init_interior(); + + /** + * new interior is initially locked. + */ + new_interior->set_version(interior->get_version()); + /** + * split keys among n and n' + */ + key_slice_type pivot_key_pos = key_slice_length / 2; + std::size_t split_children_points = pivot_key_pos + 1; + interior->move_key_to_base_range(new_interior, split_children_points); + interior->set_n_keys(pivot_key_pos); + if (pivot_key_pos & 1) { // NOLINT + new_interior->set_n_keys(pivot_key_pos); + } else { + new_interior->set_n_keys(pivot_key_pos - 1); + } + interior->move_children_to_interior_range(new_interior, + split_children_points); + key_slice_type pivot_key = interior->get_key_slice_at(pivot_key_pos); + key_length_type pivot_length = interior->get_key_length_at(pivot_key_pos); + interior->set_key(pivot_key_pos, 0, 0); + + /** + * It inserts child_node. + */ + + key_slice_type key_slice{inserting_key.first}; + key_length_type key_length{inserting_key.second}; +#ifndef NDEBUG + if (key_length == 0 || pivot_length == 0) { + LOG(ERROR) << log_location_prefix; + } +#endif + std::size_t comp_length{0}; + if (key_length > sizeof(key_slice_type) && + pivot_length > sizeof(key_slice_type)) { + comp_length = 8; + } else { + comp_length = key_length < pivot_length ? key_length : pivot_length; + } + int ret_memcmp = memcmp(&key_slice, &pivot_key, comp_length); + if (ret_memcmp < 0 || (ret_memcmp == 0 && key_length < pivot_length)) { + child_node->set_parent(interior); + interior->template insert(child_node, inserting_key); + } else { + child_node->set_parent(new_interior); + new_interior->template insert(child_node, inserting_key); + } + + base_node* p = interior->lock_parent(); + if (p == nullptr) { +#ifndef NDEBUG + if (ti->load_root_ptr() != interior) { + LOG(ERROR) << log_location_prefix; + } +#endif + /** + * The disappearance of the parent node may have made this node the root node in + * parallel. It cares in below function. + */ + create_interior_parent_of_interior( + interior, new_interior, std::make_pair(pivot_key, pivot_length), + &p); + interior->version_unlock(); + new_interior->version_unlock(); + /** + * p became new root. + */ + ti->store_root_ptr(p); + p->version_unlock(); + return; + } + /** + * p exists. + */ +#ifndef NDEBUG + if (p->get_version_deleted() || p != interior->get_parent()) { + LOG(ERROR) << log_location_prefix; + } +#endif + if (p->get_version_border()) { + auto* pb = dynamic_cast(p); + base_node* new_p{}; + create_interior_parent_of_interior( + interior, new_interior, std::make_pair(pivot_key, pivot_length), + &new_p); + interior->version_unlock(); + new_interior->version_unlock(); + link_or_value* lv = pb->get_lv(interior); + lv->set_next_layer(new_p); + new_p->set_parent(pb); + new_p->version_unlock(); + p->version_unlock(); + return; + } + auto* pi = dynamic_cast(p); + interior->version_unlock(); + new_interior->set_parent(pi); + new_interior->version_unlock(); + if (pi->get_n_keys() == key_slice_length) { + /** + * parent interior full case. 
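interior_split above keeps the keys below the pivot position in the original node, moves the keys above it to the new sibling, and removes the pivot itself so it can be pushed up into the parent (an existing border or interior parent, or a newly created root). The snippet below shows only that key redistribution on a plain sorted vector, leaving out the children, the odd/even n_keys adjustment and the locking; the struct and function names are illustrative.

#include <cstddef>
#include <cstdint>
#include <vector>

struct interior_split_result {
    std::vector<std::uint64_t> left;   // keys kept by the original node
    std::vector<std::uint64_t> right;  // keys moved to the new sibling
    std::uint64_t pivot;               // key pushed up into the parent
};

// Split a sorted key array around its middle element, as interior_split does
// with its fixed-size key slice array.
interior_split_result split_keys(const std::vector<std::uint64_t>& keys) {
    const std::size_t pivot_pos = keys.size() / 2;
    interior_split_result r;
    r.left.assign(keys.begin(),
                  keys.begin() + static_cast<std::ptrdiff_t>(pivot_pos));
    r.pivot = keys.at(pivot_pos);
    r.right.assign(keys.begin() + static_cast<std::ptrdiff_t>(pivot_pos) + 1,
                   keys.end());
    return r;
}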
+ */ + interior_split( + ti, pi, new_interior, std::make_pair(pivot_key, pivot_length)); + return; + } + /** + * parent interior not-full case + */ + pi->template insert(new_interior, + std::make_pair(pivot_key, pivot_length)); + pi->version_unlock(); +} + +} // namespace yakushima diff --git a/include/interior_node.h b/include/interior_node.h index 382a32d..79e8ba3 100644 --- a/include/interior_node.h +++ b/include/interior_node.h @@ -1,372 +1,372 @@ -/** - * @file interior_node.h - */ - -#pragma once - -#include -#include -#include - -#include "atomic_wrapper.h" -#include "base_node.h" -#include "destroy_manager.h" -#include "garbage_collection.h" -#include "interior_helper.h" -#include "log.h" -#include "thread_info.h" -#include "tree_instance.h" - -#include "glog/logging.h" - -namespace yakushima { - -class alignas(CACHE_LINE_SIZE) interior_node final // NOLINT - : public base_node { // NOLINT -public: - /** - * @details The structure is "ptr, key, ptr, key, ..., ptr". - * So the child_length is key_slice_length plus 1. - */ - static constexpr std::size_t child_length = key_slice_length + 1; - using n_keys_body_type = std::uint8_t; - using n_keys_type = std::atomic; - - ~interior_node() override{}; // NOLINT - - /** - * @pre There is a child which is the same to @a child. - * @post If the number of children is 1, It asks caller to make the child to root and - * delete this node. Therefore, it place the-only-one child to position 0. - * @details Delete operation on the element matching @a child. - * @param[in] token - * @param[in] child - * @param[in] lock_list - */ - template - void delete_of(Token token, tree_instance* ti, base_node* const child) { - set_version_inserting_deleting(true); - std::size_t n_key = get_n_keys(); -#ifndef NDEBUG - if (n_key == 0) { LOG(ERROR) << log_location_prefix; } -#endif - for (std::size_t i = 0; i <= n_key; ++i) { - if (get_child_at(i) == child) { - if (n_key == 1) { - set_version_deleted(true); - set_version_root(false); - n_keys_decrement(); - base_node* pn = lock_parent(); - if (pn == nullptr) { - get_child_at(!i)->atomic_set_version_root(true); - ti->store_root_ptr(get_child_at(!i)); // i == 0 or 1 - get_child_at(!i)->set_parent(nullptr); - } else { - //pn->set_version_inserting_deleting(true); - if (pn->get_version_border()) { - link_or_value* lv = - dynamic_cast(pn)->get_lv( - this); - base_node* sibling = get_child_at(!i); - lv->set_next_layer(sibling); - sibling->atomic_set_version_root(true); - } else { - dynamic_cast(pn)->swap_child( - this, get_child_at(!i)); - } - get_child_at(!i)->set_parent(pn); - pn->version_unlock(); - } - version_unlock(); - auto* tinfo = - reinterpret_cast(token); // NOLINT - tinfo->get_gc_info().push_node_container( - std::tuple{tinfo->get_begin_epoch(), this}); - } else { // n_key > 1 - if (i == 0) { // leftmost points - shift_left_base_member(1, 1); - shift_left_children(1, 1); - set_child_at(n_key, nullptr); - } else if (i == n_key) { // rightmost points - // no unique process - set_child_at(i, nullptr); - } else { // middle points - shift_left_base_member(i, 1); - shift_left_children(i + 1, 1); - set_child_at(n_key, nullptr); - } - set_key(n_key - 1, 0, 0); - n_keys_decrement(); - version_unlock(); - } - return; - } - } - -#ifndef NDEBUG - LOG(ERROR) << log_location_prefix << "precondition error"; -#endif - } - - /** - * @brief release all heap objects and clean up. - * @pre This function is called by single thread. 
- */ - status destroy() override { - std::vector th_vc; - th_vc.reserve(n_keys_ + 1); - for (auto i = 0; i < n_keys_ + 1; ++i) { - auto process = [this, i] { - get_child_at(i)->destroy(); - delete get_child_at(i); // NOLINT - }; - if (destroy_manager::check_room()) { - th_vc.emplace_back(process); - } else { - process(); - } - } - for (auto&& th : th_vc) { - th.join(); - destroy_manager::return_room(); - } - - return status::OK_DESTROY_INTERIOR; - } - - /** - * @details display function for analysis and debug. - */ - void display() override { - display_base(); - - std::cout << "interior_node::display" << std::endl; - std::cout << "nkeys_ : " << std::to_string(get_n_keys()) << std::endl; - for (std::size_t i = 0; i <= get_n_keys(); ++i) { - std::cout << "child : " << i << " : " << get_child_at(i) - << std::endl; - } - } - - /** - * @brief Collect the memory usage of this partial tree. - * - * @param level the level of this node in the tree. - * @param mem_stat the stack of memory usage for each level. - */ - void mem_usage(std::size_t level, - memory_usage_stack& mem_stat) const override { - if (mem_stat.size() <= level) { mem_stat.emplace_back(0, 0, 0); } - auto& [node_num, used, reserved] = mem_stat.at(level); - - const auto n_keys = n_keys_ + 1UL; - ++node_num; - reserved += sizeof(interior_node); - used += sizeof(interior_node) - - ((child_length - n_keys) * sizeof(uintptr_t)); - - const auto next_level = level + 1; - for (std::size_t i = 0; i < n_keys; ++i) { - get_child_at(i)->mem_usage(next_level, mem_stat); - } - } - - [[nodiscard]] n_keys_body_type get_n_keys() { - return n_keys_.load(std::memory_order_acquire); - } - - [[nodiscard]] base_node* get_child_at(std::size_t index) const { - return loadAcquireN(children.at(index)); - } - - base_node* get_child_of(const key_slice_type key_slice, - const key_length_type key_length, - node_version64_body& v) { - base_node* ret_child{}; - for (;;) { - n_keys_body_type n_key = get_n_keys(); - ret_child = nullptr; - for (auto i = 0; i < n_key; ++i) { - std::size_t comp_length = key_length < get_key_length_at(i) - ? key_length - : get_key_length_at(i); - int ret_memcmp = memcmp(&key_slice, &get_key_slice_ref().at(i), - comp_length > sizeof(key_slice_type) - ? sizeof(key_slice_type) - : comp_length); - if (ret_memcmp < 0 || - (ret_memcmp == 0 && key_length < get_key_length_at(i))) { - /** - * The key_slice must be left direction of the index. - */ - ret_child = children.at(i); - break; - } - } - if (ret_child == nullptr) { - /** - * The key_slice must be right direction of the index. - */ - ret_child = children.at(n_key); - if (ret_child == nullptr) { - // SMOs have found, so retry from a root node - break; - } - } - - // get child's status before rechecking version - node_version64_body child_v = ret_child->get_stable_version(); - node_version64_body check_v = get_stable_version(); - if (v == check_v && !child_v.get_deleted()) { - v = child_v; // return child's version - break; - } - if (v.get_vsplit() != check_v.get_vsplit() || - check_v.get_deleted()) { - // SMOs have found, so retry from a root node - ret_child = nullptr; - break; - } - v = check_v; - } - return ret_child; - } - - void init_interior() { - init_base(); - set_version_border(false); - children.fill(nullptr); - set_n_keys(0); - } - - /** - * @pre It already acquired lock of this node. - * @pre This interior node is not full. - * @details insert @a child and fix @a children. - * @param child new inserted child. 
- */ - template - void insert(base_node* const child, - const std::pair pivot_key) { - set_version_inserting_deleting(true); - // std::tuple visitor = - // std::make_tuple(pivot_key.first, pivot_key.second); - key_slice_type key_slice{pivot_key.first}; - key_length_type key_length{pivot_key.second}; - n_keys_body_type n_key = get_n_keys(); - for (auto i = 0; i < n_key; ++i) { - std::size_t comp_length{0}; - if (key_length > sizeof(key_slice_type) && - get_key_length_at(i) > sizeof(key_slice_type)) { - comp_length = 8; - } else { - comp_length = key_length < get_key_length_at(i) - ? key_length - : get_key_length_at(i); - } - int ret_memcmp = - memcmp(&key_slice, &get_key_slice_ref().at(i), comp_length); - if (ret_memcmp < 0 || - (ret_memcmp == 0 && key_length < get_key_length_at(i))) { - if (i == 0) { // insert to child[0] or child[1]. - shift_right_base_member(i, 1); - set_key(i, key_slice, key_length); - shift_right_children(i + 1); - set_child_at(i + 1, child); - n_keys_increment(); - return; - } - // insert to middle points - shift_right_base_member(i, 1); - set_key(i, key_slice, key_length); - shift_right_children(i + 1); - set_child_at(i + 1, child); - n_keys_increment(); - return; - } - } - // insert to rightmost points - set_key(n_key, key_slice, key_length); - set_child_at(n_key + 1, child); - n_keys_increment(); - } - - [[maybe_unused]] void - move_children_to_interior_range(interior_node* const right_interior, - const std::size_t start) { - for (auto i = start; i < child_length; ++i) { - right_interior->set_child_at(i - start, get_child_at(i)); - /** - * right interiror is new parent of get_child_at(i). // NOLINT - */ - get_child_at(i)->set_parent(right_interior); - set_child_at(i, nullptr); - } - } - - void set_child_at(const std::size_t index, base_node* const new_child) { - storeReleaseN(children.at(index), new_child); - } - - void set_n_keys(const n_keys_body_type new_n_key) { - n_keys_.store(new_n_key, std::memory_order_release); - } - - /** - * @pre It already acquired lock of this node. - * @param start_pos - * @param shift_size - */ - void shift_left_children(const std::size_t start_pos, - const std::size_t shift_size) { - for (std::size_t i = start_pos; i < child_length; ++i) { - set_child_at(i - shift_size, get_child_at(i)); - } - } - - /** - * @pre It already acquired lock of this node. - * It is not full-interior node. - * @param start_pos - * @param shift_size - */ - void shift_right_children(const std::size_t start_pos) { - std::size_t n_key = get_n_keys(); - for (std::size_t i = n_key + 1; i > start_pos; --i) { - set_child_at(i, get_child_at(i - 1)); - } - } - - void n_keys_decrement() { n_keys_.fetch_sub(1); } - - void n_keys_increment() { n_keys_.fetch_add(1); } - - void swap_child(base_node* const old_child, base_node* const new_child) { - for (std::size_t i = 0; i < child_length; ++i) { - if (get_child_at(i) == old_child) { - set_child_at(i, new_child); - return; - } - } - /** - * unreachable point. - */ - LOG(ERROR) << log_location_prefix << "unreachable path"; - } - -private: - /** - * first member of base_node is aligned along with cache line size. - */ - - /** - * @attention This variable is read/written concurrently. - */ - n_keys_type n_keys_{}; - /** - * @attention This variable is read/written concurrently. 
- */ - std::array children{}; -}; - -} // namespace yakushima +/** + * @file interior_node.h + */ + +#pragma once + +#include +#include +#include + +#include "atomic_wrapper.h" +#include "base_node.h" +#include "destroy_manager.h" +#include "garbage_collection.h" +#include "interior_helper.h" +#include "log.h" +#include "thread_info.h" +#include "tree_instance.h" + +#include "glog/logging.h" + +namespace yakushima { + +class alignas(CACHE_LINE_SIZE) interior_node final // NOLINT + : public base_node { // NOLINT +public: + /** + * @details The structure is "ptr, key, ptr, key, ..., ptr". + * So the child_length is key_slice_length plus 1. + */ + static constexpr std::size_t child_length = key_slice_length + 1; + using n_keys_body_type = std::uint8_t; + using n_keys_type = std::atomic; + + ~interior_node() override{}; // NOLINT + + /** + * @pre There is a child which is the same to @a child. + * @post If the number of children is 1, It asks caller to make the child to root and + * delete this node. Therefore, it place the-only-one child to position 0. + * @details Delete operation on the element matching @a child. + * @param[in] token + * @param[in] child + * @param[in] lock_list + */ + template + void delete_of(Token token, tree_instance* ti, base_node* const child) { + set_version_inserting_deleting(true); + std::size_t n_key = get_n_keys(); +#ifndef NDEBUG + if (n_key == 0) { LOG(ERROR) << log_location_prefix; } +#endif + for (std::size_t i = 0; i <= n_key; ++i) { + if (get_child_at(i) == child) { + if (n_key == 1) { + set_version_deleted(true); + set_version_root(false); + n_keys_decrement(); + base_node* pn = lock_parent(); + if (pn == nullptr) { + get_child_at(!i)->atomic_set_version_root(true); + ti->store_root_ptr(get_child_at(!i)); // i == 0 or 1 + get_child_at(!i)->set_parent(nullptr); + } else { + //pn->set_version_inserting_deleting(true); + if (pn->get_version_border()) { + link_or_value* lv = + dynamic_cast(pn)->get_lv( + this); + base_node* sibling = get_child_at(!i); + lv->set_next_layer(sibling); + sibling->atomic_set_version_root(true); + } else { + dynamic_cast(pn)->swap_child( + this, get_child_at(!i)); + } + get_child_at(!i)->set_parent(pn); + pn->version_unlock(); + } + version_unlock(); + auto* tinfo = + reinterpret_cast(token); // NOLINT + tinfo->get_gc_info().push_node_container( + std::tuple{tinfo->get_begin_epoch(), this}); + } else { // n_key > 1 + if (i == 0) { // leftmost points + shift_left_base_member(1, 1); + shift_left_children(1, 1); + set_child_at(n_key, nullptr); + } else if (i == n_key) { // rightmost points + // no unique process + set_child_at(i, nullptr); + } else { // middle points + shift_left_base_member(i, 1); + shift_left_children(i + 1, 1); + set_child_at(n_key, nullptr); + } + set_key(n_key - 1, 0, 0); + n_keys_decrement(); + version_unlock(); + } + return; + } + } + +#ifndef NDEBUG + LOG(ERROR) << log_location_prefix << "precondition error"; +#endif + } + + /** + * @brief release all heap objects and clean up. + * @pre This function is called by single thread. 
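     * @details A caller-side sketch of the intended use (the root pointer name
     * is hypothetical; the fan-out below is bounded by destroy_manager's room):
     *
     *   root->destroy();   // recursively releases every child subtree
     *   delete root;       // then release this node itself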
+ */ + status destroy() override { + std::vector th_vc; + th_vc.reserve(n_keys_ + 1); + for (auto i = 0; i < n_keys_ + 1; ++i) { + auto process = [this, i] { + get_child_at(i)->destroy(); + delete get_child_at(i); // NOLINT + }; + if (destroy_manager::check_room()) { + th_vc.emplace_back(process); + } else { + process(); + } + } + for (auto&& th : th_vc) { + th.join(); + destroy_manager::return_room(); + } + + return status::OK_DESTROY_INTERIOR; + } + + /** + * @details display function for analysis and debug. + */ + void display() override { + display_base(); + + std::cout << "interior_node::display" << std::endl; + std::cout << "nkeys_ : " << std::to_string(get_n_keys()) << std::endl; + for (std::size_t i = 0; i <= get_n_keys(); ++i) { + std::cout << "child : " << i << " : " << get_child_at(i) + << std::endl; + } + } + + /** + * @brief Collect the memory usage of this partial tree. + * + * @param level the level of this node in the tree. + * @param mem_stat the stack of memory usage for each level. + */ + void mem_usage(std::size_t level, + memory_usage_stack& mem_stat) const override { + if (mem_stat.size() <= level) { mem_stat.emplace_back(0, 0, 0); } + auto& [node_num, used, reserved] = mem_stat.at(level); + + const auto n_keys = n_keys_ + 1UL; + ++node_num; + reserved += sizeof(interior_node); + used += sizeof(interior_node) - + ((child_length - n_keys) * sizeof(uintptr_t)); + + const auto next_level = level + 1; + for (std::size_t i = 0; i < n_keys; ++i) { + get_child_at(i)->mem_usage(next_level, mem_stat); + } + } + + [[nodiscard]] n_keys_body_type get_n_keys() { + return n_keys_.load(std::memory_order_acquire); + } + + [[nodiscard]] base_node* get_child_at(std::size_t index) const { + return loadAcquireN(children.at(index)); + } + + base_node* get_child_of(const key_slice_type key_slice, + const key_length_type key_length, + node_version64_body& v) { + base_node* ret_child{}; + for (;;) { + n_keys_body_type n_key = get_n_keys(); + ret_child = nullptr; + for (auto i = 0; i < n_key; ++i) { + std::size_t comp_length = key_length < get_key_length_at(i) + ? key_length + : get_key_length_at(i); + int ret_memcmp = memcmp(&key_slice, &get_key_slice_ref().at(i), + comp_length > sizeof(key_slice_type) + ? sizeof(key_slice_type) + : comp_length); + if (ret_memcmp < 0 || + (ret_memcmp == 0 && key_length < get_key_length_at(i))) { + /** + * The key_slice must be left direction of the index. + */ + ret_child = children.at(i); + break; + } + } + if (ret_child == nullptr) { + /** + * The key_slice must be right direction of the index. + */ + ret_child = children.at(n_key); + if (ret_child == nullptr) { + // SMOs have found, so retry from a root node + break; + } + } + + // get child's status before rechecking version + node_version64_body child_v = ret_child->get_stable_version(); + node_version64_body check_v = get_stable_version(); + if (v == check_v && !child_v.get_deleted()) { + v = child_v; // return child's version + break; + } + if (v.get_vsplit() != check_v.get_vsplit() || + check_v.get_deleted()) { + // SMOs have found, so retry from a root node + ret_child = nullptr; + break; + } + v = check_v; + } + return ret_child; + } + + void init_interior() { + init_base(); + set_version_border(false); + children.fill(nullptr); + set_n_keys(0); + } + + /** + * @pre It already acquired lock of this node. + * @pre This interior node is not full. + * @details insert @a child and fix @a children. + * @param child new inserted child. 
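     * A caller-side sketch, mirroring the "parent interior not-full" case in
     * interior_helper.h (names and the held lock are as in that function; the
     * explicit template argument here is an assumption):
     *
     *   pi->template insert<interior_node>(new_interior,
     *                                      std::make_pair(pivot_key, pivot_length));
     *   pi->version_unlock();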
+ */ + template + void insert(base_node* const child, + const std::pair pivot_key) { + set_version_inserting_deleting(true); + // std::tuple visitor = + // std::make_tuple(pivot_key.first, pivot_key.second); + key_slice_type key_slice{pivot_key.first}; + key_length_type key_length{pivot_key.second}; + n_keys_body_type n_key = get_n_keys(); + for (auto i = 0; i < n_key; ++i) { + std::size_t comp_length{0}; + if (key_length > sizeof(key_slice_type) && + get_key_length_at(i) > sizeof(key_slice_type)) { + comp_length = 8; + } else { + comp_length = key_length < get_key_length_at(i) + ? key_length + : get_key_length_at(i); + } + int ret_memcmp = + memcmp(&key_slice, &get_key_slice_ref().at(i), comp_length); + if (ret_memcmp < 0 || + (ret_memcmp == 0 && key_length < get_key_length_at(i))) { + if (i == 0) { // insert to child[0] or child[1]. + shift_right_base_member(i, 1); + set_key(i, key_slice, key_length); + shift_right_children(i + 1); + set_child_at(i + 1, child); + n_keys_increment(); + return; + } + // insert to middle points + shift_right_base_member(i, 1); + set_key(i, key_slice, key_length); + shift_right_children(i + 1); + set_child_at(i + 1, child); + n_keys_increment(); + return; + } + } + // insert to rightmost points + set_key(n_key, key_slice, key_length); + set_child_at(n_key + 1, child); + n_keys_increment(); + } + + [[maybe_unused]] void + move_children_to_interior_range(interior_node* const right_interior, + const std::size_t start) { + for (auto i = start; i < child_length; ++i) { + right_interior->set_child_at(i - start, get_child_at(i)); + /** + * right interiror is new parent of get_child_at(i). // NOLINT + */ + get_child_at(i)->set_parent(right_interior); + set_child_at(i, nullptr); + } + } + + void set_child_at(const std::size_t index, base_node* const new_child) { + storeReleaseN(children.at(index), new_child); + } + + void set_n_keys(const n_keys_body_type new_n_key) { + n_keys_.store(new_n_key, std::memory_order_release); + } + + /** + * @pre It already acquired lock of this node. + * @param start_pos + * @param shift_size + */ + void shift_left_children(const std::size_t start_pos, + const std::size_t shift_size) { + for (std::size_t i = start_pos; i < child_length; ++i) { + set_child_at(i - shift_size, get_child_at(i)); + } + } + + /** + * @pre It already acquired lock of this node. + * It is not full-interior node. + * @param start_pos + * @param shift_size + */ + void shift_right_children(const std::size_t start_pos) { + std::size_t n_key = get_n_keys(); + for (std::size_t i = n_key + 1; i > start_pos; --i) { + set_child_at(i, get_child_at(i - 1)); + } + } + + void n_keys_decrement() { n_keys_.fetch_sub(1); } + + void n_keys_increment() { n_keys_.fetch_add(1); } + + void swap_child(base_node* const old_child, base_node* const new_child) { + for (std::size_t i = 0; i < child_length; ++i) { + if (get_child_at(i) == old_child) { + set_child_at(i, new_child); + return; + } + } + /** + * unreachable point. + */ + LOG(ERROR) << log_location_prefix << "unreachable path"; + } + +private: + /** + * first member of base_node is aligned along with cache line size. + */ + + /** + * @attention This variable is read/written concurrently. + */ + n_keys_type n_keys_{}; + /** + * @attention This variable is read/written concurrently. 
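     * Layout reminder (see the class comment above): children[i] roots the
     * subtree whose keys compare lower than key i in the order used by
     * get_child_of(), and children[n_keys_] takes everything else, which is why
     * child_length is key_slice_length + 1:
     *
     *   child[0]  key[0]  child[1]  key[1]  ...  key[n-1]  child[n]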
+ */ + std::array children{}; +}; + +} // namespace yakushima diff --git a/include/kvs.h b/include/kvs.h index c09f38b..316ac4c 100644 --- a/include/kvs.h +++ b/include/kvs.h @@ -1,268 +1,268 @@ -/** - * @file kvs.h - * @brief This is the interface used by outside. - */ - -#pragma once - -#include - -#include "base_node.h" -#include "interface_destroy.h" -#include "interface_display.h" -#include "interface_get.h" -#include "interface_helper.h" -#include "interface_put.h" -#include "interface_remove.h" -#include "interface_scan.h" -#include "storage.h" -#include "storage_impl.h" - -namespace yakushima { - -/** - * @brief Initialize kThreadInfoTable which is a table that holds thread execution - * information about garbage collection and invoke epoch thread. - */ -[[maybe_unused]] static void init(); // NOLINT - -/** - * @brief Delete all tree from the root, release all heap objects, and join epoch thread. - */ -[[maybe_unused]] static void fin(); // NOLINT - -/** - * @brief Display tree information. This is not thread safe. - */ -[[maybe_unused]] static void display(); // NOLINT - -/** - * @brief release all heap objects and clean up. - * @pre This function is called by single thread. - * @return status::OK_DESTROY_ALL destroyed all tree. - * @return status::OK_ROOT_IS_NULL tree was nothing. - */ -[[maybe_unused]] static status destroy(); // NOLINT - -/** - * @param [in] storage_name - * @return The memory usage data of the given storage. - */ -[[maybe_unused]] static memory_usage_stack -mem_usage(std::string_view storage_name); // NOLINT - -/** - * @brief Create storage - * @param [in] storage_name - * @attention Do not treat DDL operations in parallel with DML operations. - * create_storage / delete_storage can be processed in parallel. - * At least one of these and find_storage / list_storage cannot work in parallel. - * @return Same to put function. - */ -[[maybe_unused]] static status create_storage(std::string_view storage_name) { - return storage::create_storage(storage_name); -} - -/** - * @brief Delete existing storage and values under the storage - * @param [in] storage_name - * @attention Do not treat DDL operations in parallel with DML operations. - * create_storage / delete_storage can be processed in parallel. - * At least one of these and find_storage / list_storage cannot work in parallel. - * @return status::OK if successful. - * @return status::WARN_CONCURRENT_OPERATIONS if it can find the storage, - * but This function failed because it was preceded by concurrent delete_storage. - * After this, if create_storage is executed with the same storage name, the storage - * exists, and if it is not executed, the storage should not exist. - * @return status::WARN_NOT_EXIST if the storage was not found. - */ -[[maybe_unused]] static status delete_storage(std::string_view storage_name) { - return storage::delete_storage(storage_name); -} - -/** - * @brief Find existing storage - * @param [in] storage_name - * @param [out] found_storage output parameter to pass tree_instance information. If this - * is nullptr (by default argument), this function simply note the existence of target. - * @attention Do not treat DDL operations in parallel with DML operations. - * create_storage / delete_storage can be processed in parallel. - * At least one of these and find_storage / list_storage cannot work in parallel. - * @return status::OK if existence. - * @return status::WARN_NOT_EXIST if not existence. 
- */ -[[maybe_unused]] static status -find_storage(std::string_view storage_name, - tree_instance** found_storage = nullptr) { - return storage::find_storage(storage_name, found_storage); -} - -/** - * @brief List existing storage - * @param [out] out output parameter to pass list of existing storage. - * @attention Do not treat DDL operations in parallel with DML opeartions. - * create_storage / delete_storage can be processed in parallel. - * At least one of these and find_storage / list_storage cannot work in parallel. - * @return status::OK if it found. - * @return status::WARN_NOT_EXIST if it found no storage. - */ -[[maybe_unused]] static status -list_storages(std::vector>& out) { - return storage::list_storages(out); -} - -/** - * @details It declares that the session starts. In a session defined as between enter and - * leave, it is guaranteed that the heap memory object object read by get function will - * not be released in session. An occupied GC container is assigned. - * @param[out] token If the return value of the function is status::OK, then the token is - * the acquired session. - * @return status::OK success. - * @return status::WARN_MAX_SESSIONS The maximum number of sessions is already up and - * running. - */ -[[maybe_unused]] static status enter(Token& token); // NOLINT - -/** - * @details It declares that the session ends. Values read during the session may be - * invalidated from now on. It will clean up the contents of GC containers that have been - * occupied by this session as much as possible. - * @param[in] token - * @return status::OK success - * @return status::WARN_INVALID_TOKEN @a token of argument is invalid. - */ -[[maybe_unused]] static status leave(Token token); // NOLINT - -/** - * @brief Get value which is corresponding to given @a key_view. - * @tparam ValueType The returned pointer is cast to the given type information before it - * is returned. - * @param[in] storage_name The key_view of storage name. - * @param[in] key_view The key_view of key-value. - * @param[out] out The result about pointer to value and value size. - * @param[out] checked_version The version information at Status::WARN_NOT_EXIST. - * If you set non-nullptr, yakushima write there. If not, yakushima write nothing. - * This is for phantom avoidance. If transaction engine did point read and dind't find entry, - * it can't read verify but if there is a masstree node, it can do node verify. - * @return std::status::OK success - * @return status::WARN_NOT_EXIST The target storage of this operation exists, - * but the target entry of the storage does not exist. - * @return status::WARN_STORAGE_NOT_EXIST The target storage of this operation - * does not exist. - */ -template -[[maybe_unused]] static status -get(std::string_view storage_name, // NOLINT - std::string_view key_view, std::pair& out, - std::pair* checked_version); - -/** - * @biref Put the value with given @a key_view. - * @pre @a token of arguments is valid. - * @tparam ValueType If a single object is inserted, the value size and value alignment - * information can be omitted from this type information. In this case, sizeof and alignof - * are executed on the type information. In the cases where this is likely to cause - * problems and when inserting_deleting an array object, the value size and value - * alignment information should be specified explicitly. This is because sizeof for a type - * represents a single object size. 
- * @param[in] token todo write - * @param[in] storage_name todo write - * @param[in] key_view The key_view of key-value. - * @param[in] value_ptr The pointer to given value. - * @param[out] created_value_ptr The pointer to created value in yakushima. Default is @a - * nullptr. - * @param[in] arg_value_length The length of value object. Default is @a - * sizeof(ValueType). - * @param[in] value_align The alignment information of value object. Default is @a - * static_cast(alignof(ValueType)). - * @param[in] unique_restriction If this is true, you can't put same key. If you - * update key, you should execute remove and put. - * @param[out] inserted_node_version_ptr The pointer to version of the inserted - * node. It may be used to find out difference of the version between some - * operations. Default is @a nullptr. If split occurs due to this insert, this - * point to old border node. - * @return status::OK success. - * @return status::WARN_UNIQUE_RESTRICTION The key-value whose key is same to given key - * already exists. - * @return status::WARN_STORAGE_NOT_EXIST The target storage of this operation - * does not exist. - */ -template -[[maybe_unused]] static status -put(Token token, std::string_view storage_name, // NOLINT - std::string_view key_view, ValueType* value_ptr, - std::size_t arg_value_length, - ValueType** created_value_ptr, // NOLINT - value_align_type value_align, bool unique_restriction, - node_version64** inserted_node_version_ptr); - -/** - * @pre @a token of arguments is valid. - * @param[in] token - * @param[in] key_view The key_view of key-value. - * @return status::OK success - * @return status::OK_ROOT_IS_NULL No existing tree. - * @return status::OK_NOT_FOUND The target storage exists, but the target - * entry does not exist. - * @return status::WARN_STORAGE_NOT_EXIST The target storage of this operation - * does not exist. - */ -[[maybe_unused]] static status remove(Token token, // NOLINT - std::string_view storage_name, - std::string_view key_view); - -/** - * TODO : add new 3 modes : try-mode : 1 trial : wait-mode : try until success : mid-mode - * : middle between try and wait. - */ -/** - * @brief scan range between @a l_key and @a r_key. - * @tparam ValueType The returned pointer is cast to the given type information - * before it is returned. - * @param[in] l_key An argument that specifies the left endpoint. - * @param[in] l_end If this argument is scan_endpoint::EXCLUSIVE, the interval - * does not include the endpoint. If this argument is scan_endpoint::INCLUSIVE, - * the interval contains the endpoint. If this is scan_endpoint::INF, there is - * no limit on the interval in left direction. And ignore @a l_key. - * @param[in] r_key An argument that specifies the right endpoint. - * @note If r_key -[[maybe_unused]] static status -scan(std::string_view storage_name, std::string_view l_key, // NOLINT - scan_endpoint l_end, std::string_view r_key, scan_endpoint r_end, - std::vector>& tuple_list, - std::vector>* - node_version_vec, - std::size_t max_size); - -} // namespace yakushima +/** + * @file kvs.h + * @brief This is the interface used by outside. 
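 * @details A minimal end-to-end sketch of this interface (error handling is
 * omitted, the storage/key names are arbitrary, and the put call assumes the
 * trailing parameters keep the defaults described in its documentation below):
 *
 *   yakushima::init();
 *   yakushima::create_storage("S");
 *   yakushima::Token token{};
 *   yakushima::enter(token);
 *   std::string v{"value"};
 *   yakushima::put(token, "S", "key", v.data(), v.size());
 *   std::pair<char*, std::size_t> out{};
 *   yakushima::get<char>("S", "key", out, nullptr);
 *   yakushima::remove(token, "S", "key");
 *   yakushima::leave(token);
 *   yakushima::fin();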
+ */ + +#pragma once + +#include + +#include "base_node.h" +#include "interface_destroy.h" +#include "interface_display.h" +#include "interface_get.h" +#include "interface_helper.h" +#include "interface_put.h" +#include "interface_remove.h" +#include "interface_scan.h" +#include "storage.h" +#include "storage_impl.h" + +namespace yakushima { + +/** + * @brief Initialize kThreadInfoTable which is a table that holds thread execution + * information about garbage collection and invoke epoch thread. + */ +[[maybe_unused]] static void init(); // NOLINT + +/** + * @brief Delete all tree from the root, release all heap objects, and join epoch thread. + */ +[[maybe_unused]] static void fin(); // NOLINT + +/** + * @brief Display tree information. This is not thread safe. + */ +[[maybe_unused]] static void display(); // NOLINT + +/** + * @brief release all heap objects and clean up. + * @pre This function is called by single thread. + * @return status::OK_DESTROY_ALL destroyed all tree. + * @return status::OK_ROOT_IS_NULL tree was nothing. + */ +[[maybe_unused]] static status destroy(); // NOLINT + +/** + * @param [in] storage_name + * @return The memory usage data of the given storage. + */ +[[maybe_unused]] static memory_usage_stack +mem_usage(std::string_view storage_name); // NOLINT + +/** + * @brief Create storage + * @param [in] storage_name + * @attention Do not treat DDL operations in parallel with DML operations. + * create_storage / delete_storage can be processed in parallel. + * At least one of these and find_storage / list_storage cannot work in parallel. + * @return Same to put function. + */ +[[maybe_unused]] static status create_storage(std::string_view storage_name) { + return storage::create_storage(storage_name); +} + +/** + * @brief Delete existing storage and values under the storage + * @param [in] storage_name + * @attention Do not treat DDL operations in parallel with DML operations. + * create_storage / delete_storage can be processed in parallel. + * At least one of these and find_storage / list_storage cannot work in parallel. + * @return status::OK if successful. + * @return status::WARN_CONCURRENT_OPERATIONS if it can find the storage, + * but This function failed because it was preceded by concurrent delete_storage. + * After this, if create_storage is executed with the same storage name, the storage + * exists, and if it is not executed, the storage should not exist. + * @return status::WARN_NOT_EXIST if the storage was not found. + */ +[[maybe_unused]] static status delete_storage(std::string_view storage_name) { + return storage::delete_storage(storage_name); +} + +/** + * @brief Find existing storage + * @param [in] storage_name + * @param [out] found_storage output parameter to pass tree_instance information. If this + * is nullptr (by default argument), this function simply note the existence of target. + * @attention Do not treat DDL operations in parallel with DML operations. + * create_storage / delete_storage can be processed in parallel. + * At least one of these and find_storage / list_storage cannot work in parallel. + * @return status::OK if existence. + * @return status::WARN_NOT_EXIST if not existence. + */ +[[maybe_unused]] static status +find_storage(std::string_view storage_name, + tree_instance** found_storage = nullptr) { + return storage::find_storage(storage_name, found_storage); +} + +/** + * @brief List existing storage + * @param [out] out output parameter to pass list of existing storage. 
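 * A combined DDL sketch using the three functions above (the storage name is
 * arbitrary and return codes are ignored for brevity):
 *
 *   yakushima::create_storage("test_storage");
 *   yakushima::find_storage("test_storage");   // status::OK while it exists
 *   yakushima::delete_storage("test_storage"); // releases the values under it too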
+ * @attention Do not treat DDL operations in parallel with DML opeartions. + * create_storage / delete_storage can be processed in parallel. + * At least one of these and find_storage / list_storage cannot work in parallel. + * @return status::OK if it found. + * @return status::WARN_NOT_EXIST if it found no storage. + */ +[[maybe_unused]] static status +list_storages(std::vector>& out) { + return storage::list_storages(out); +} + +/** + * @details It declares that the session starts. In a session defined as between enter and + * leave, it is guaranteed that the heap memory object object read by get function will + * not be released in session. An occupied GC container is assigned. + * @param[out] token If the return value of the function is status::OK, then the token is + * the acquired session. + * @return status::OK success. + * @return status::WARN_MAX_SESSIONS The maximum number of sessions is already up and + * running. + */ +[[maybe_unused]] static status enter(Token& token); // NOLINT + +/** + * @details It declares that the session ends. Values read during the session may be + * invalidated from now on. It will clean up the contents of GC containers that have been + * occupied by this session as much as possible. + * @param[in] token + * @return status::OK success + * @return status::WARN_INVALID_TOKEN @a token of argument is invalid. + */ +[[maybe_unused]] static status leave(Token token); // NOLINT + +/** + * @brief Get value which is corresponding to given @a key_view. + * @tparam ValueType The returned pointer is cast to the given type information before it + * is returned. + * @param[in] storage_name The key_view of storage name. + * @param[in] key_view The key_view of key-value. + * @param[out] out The result about pointer to value and value size. + * @param[out] checked_version The version information at Status::WARN_NOT_EXIST. + * If you set non-nullptr, yakushima write there. If not, yakushima write nothing. + * This is for phantom avoidance. If transaction engine did point read and dind't find entry, + * it can't read verify but if there is a masstree node, it can do node verify. + * @return std::status::OK success + * @return status::WARN_NOT_EXIST The target storage of this operation exists, + * but the target entry of the storage does not exist. + * @return status::WARN_STORAGE_NOT_EXIST The target storage of this operation + * does not exist. + */ +template +[[maybe_unused]] static status +get(std::string_view storage_name, // NOLINT + std::string_view key_view, std::pair& out, + std::pair* checked_version); + +/** + * @biref Put the value with given @a key_view. + * @pre @a token of arguments is valid. + * @tparam ValueType If a single object is inserted, the value size and value alignment + * information can be omitted from this type information. In this case, sizeof and alignof + * are executed on the type information. In the cases where this is likely to cause + * problems and when inserting_deleting an array object, the value size and value + * alignment information should be specified explicitly. This is because sizeof for a type + * represents a single object size. + * @param[in] token todo write + * @param[in] storage_name todo write + * @param[in] key_view The key_view of key-value. + * @param[in] value_ptr The pointer to given value. + * @param[out] created_value_ptr The pointer to created value in yakushima. Default is @a + * nullptr. + * @param[in] arg_value_length The length of value object. Default is @a + * sizeof(ValueType). 
+ * @param[in] value_align The alignment information of value object. Default is @a + * static_cast(alignof(ValueType)). + * @param[in] unique_restriction If this is true, you can't put same key. If you + * update key, you should execute remove and put. + * @param[out] inserted_node_version_ptr The pointer to version of the inserted + * node. It may be used to find out difference of the version between some + * operations. Default is @a nullptr. If split occurs due to this insert, this + * point to old border node. + * @return status::OK success. + * @return status::WARN_UNIQUE_RESTRICTION The key-value whose key is same to given key + * already exists. + * @return status::WARN_STORAGE_NOT_EXIST The target storage of this operation + * does not exist. + */ +template +[[maybe_unused]] static status +put(Token token, std::string_view storage_name, // NOLINT + std::string_view key_view, ValueType* value_ptr, + std::size_t arg_value_length, + ValueType** created_value_ptr, // NOLINT + value_align_type value_align, bool unique_restriction, + node_version64** inserted_node_version_ptr); + +/** + * @pre @a token of arguments is valid. + * @param[in] token + * @param[in] key_view The key_view of key-value. + * @return status::OK success + * @return status::OK_ROOT_IS_NULL No existing tree. + * @return status::OK_NOT_FOUND The target storage exists, but the target + * entry does not exist. + * @return status::WARN_STORAGE_NOT_EXIST The target storage of this operation + * does not exist. + */ +[[maybe_unused]] static status remove(Token token, // NOLINT + std::string_view storage_name, + std::string_view key_view); + +/** + * TODO : add new 3 modes : try-mode : 1 trial : wait-mode : try until success : mid-mode + * : middle between try and wait. + */ +/** + * @brief scan range between @a l_key and @a r_key. + * @tparam ValueType The returned pointer is cast to the given type information + * before it is returned. + * @param[in] l_key An argument that specifies the left endpoint. + * @param[in] l_end If this argument is scan_endpoint::EXCLUSIVE, the interval + * does not include the endpoint. If this argument is scan_endpoint::INCLUSIVE, + * the interval contains the endpoint. If this is scan_endpoint::INF, there is + * no limit on the interval in left direction. And ignore @a l_key. + * @param[in] r_key An argument that specifies the right endpoint. + * @note If r_key +[[maybe_unused]] static status +scan(std::string_view storage_name, std::string_view l_key, // NOLINT + scan_endpoint l_end, std::string_view r_key, scan_endpoint r_end, + std::vector>& tuple_list, + std::vector>* + node_version_vec, + std::size_t max_size); + +} // namespace yakushima diff --git a/include/link_or_value.h b/include/link_or_value.h index f422618..bc3172c 100644 --- a/include/link_or_value.h +++ b/include/link_or_value.h @@ -1,197 +1,197 @@ -/** - * @file link_or_value.h - */ - -#pragma once - -#include "atomic_wrapper.h" -#include "base_node.h" -#include "cpu.h" -#include "log.h" -#include "value.h" - -#include -#include - -#include "glog/logging.h" - -namespace yakushima { - -class link_or_value { -public: - link_or_value() = default; - - link_or_value(const link_or_value&) = default; - - link_or_value(link_or_value&&) = default; - - link_or_value& operator=(const link_or_value&) = default; - - link_or_value& operator=(link_or_value&&) = default; - - ~link_or_value() = default; - - /** - * @details release heap objects. 
- */ - void destroy() { - if (auto* child = get_next_layer(); child != nullptr) { - child->destroy(); - delete child; // NOLINT - } else if (auto* v = get_value(); v != nullptr) { - if (value::need_delete(v)) { value::delete_value(v); } - } - init_lv(); - } - - /** - * @details display function for analysis and debug. - */ - void display() const { - if (auto* child = get_next_layer(); child != nullptr) { - std::cout << "need_delete_value_ : " << false << std::endl; - std::cout << "next_layer_ : " << child << std::endl; - std::cout << "v_or_vp_ : " << nullptr << std::endl; - std::cout << "value_length_ : " << 0 << std::endl; - std::cout << "value_align_ : " << 0 << std::endl; - } else if (auto* v = get_value(); v != nullptr) { - const auto del_flag = value::need_delete(v); - const auto v_align = static_cast( - std::get<2>(value::get_gc_info(v))); - std::cout << "need_delete_value_ : " << del_flag << std::endl; - std::cout << "next_layer_ : " << nullptr << std::endl; - std::cout << "v_or_vp_ : " << value::get_body(v) << std::endl; - std::cout << "value_length_ : " << value::get_len(v) << std::endl; - std::cout << "value_align_ : " << v_align << std::endl; - } - } - - /** - * @brief Collect the memory usage of this record. - * - * @param[in] level The level of this node in the tree. - * @param[in,out] mem_stat The stack of memory usage for each level. - */ - void mem_usage(std::size_t level, memory_usage_stack& mem_stat) const { - if (auto* child = get_next_layer(); child != nullptr) { - child->mem_usage(level + 1, mem_stat); - } else if (auto* v = get_value(); v != nullptr) { - const auto v_len = std::get<1>(value::get_gc_info(v)); - auto& [node_num, used, reserved] = mem_stat.at(level); - used += v_len; - reserved += v_len; - } - } - - [[maybe_unused]] [[nodiscard]] const std::type_info* get_lv_type() const { - if (get_next_layer() != nullptr) { return &typeid(base_node*); } - if (get_value() != nullptr) { return &typeid(value*); } - return &typeid(nullptr); - } - - /** - * @brief Get the root node of the next layer. - * - * Note that this function uses the atomic operation (i.e., load) for dealing with - * concurrent modifications. - * - * @retval The root node of the next layer if exists. - * @retval nullptr otherwise. - */ - [[nodiscard]] base_node* get_next_layer() const { - const auto ptr = loadAcquireN(child_or_v_); - if ((ptr & kChildFlag) == 0) { return nullptr; } - return reinterpret_cast(ptr & ~kChildFlag); // NOLINT - } - - /** - * @brief Get the value pointer. - * - * Note that this function uses the atomic operation (i.e., load) for dealing with - * concurrent modifications. - * - * @retval The pointer of the contained value if exists. - * @retval nullptr otherwise. - */ - [[nodiscard]] value* get_value() const { - const auto ptr = loadAcquireN(child_or_v_); - if ((ptr & kChildFlag) > 0 || ptr == kValPtrFlag) { return nullptr; } - return reinterpret_cast(ptr); // NOLINT - } - - /** - * @brief Initialize the payload to zero. - * - */ - void init_lv() { child_or_v_ = kValPtrFlag; } - - /** - * @details This is move process. - * @param nlv - */ - void set(link_or_value* const nlv) { - /** - * This object in this function is not accessed concurrently, so it can copy assign. - */ - *this = *nlv; - } - - /** - * @brief todo : write documents much. 
- * @param[in] new_value todo write - * @param[out] created_value_ptr todo write - * @param[out] old_value todo write - */ - void set_value(value* new_value, void** const created_value_ptr, - value** old_value = nullptr) { - auto* cur_v = get_value(); - if (cur_v != nullptr && value::need_delete(cur_v)) { - if (old_value == nullptr) { - value::delete_value(cur_v); - } else { - *old_value = cur_v; - } - } - - // store the given value - const auto ptr = reinterpret_cast(new_value); // NOLINT - storeReleaseN(child_or_v_, ptr); - if (created_value_ptr != nullptr) { - auto* v_ptr = reinterpret_cast(child_or_v_); // NOLINT - *created_value_ptr = value::get_body(v_ptr); - } - } - - /** - * @pre This function called at initialization. - * @param[in] new_next_layer - */ - void set_next_layer(base_node* const new_next_layer) { - auto ptr = reinterpret_cast(new_next_layer); // NOLINT - storeReleaseN(child_or_v_, ptr | kChildFlag); - } - -private: - /** - * @brief A flag for indicating that the next layer exists. - * - */ - static constexpr uintptr_t kChildFlag = 0b10UL << 62UL; - - /** - * @brief A flag for indicating that the next layer exists. - * - */ - static constexpr uintptr_t kValPtrFlag = 0b01UL << 62UL; - - /** - * @attention - * This variable is read/write concurrently. - * If all the bits are zeros, this does not have any data. - * If the most significant bit is one, this contains the next layer. - * Otherwise, this contains the pointer of a value. - */ - uintptr_t child_or_v_{kValPtrFlag}; -}; - +/** + * @file link_or_value.h + */ + +#pragma once + +#include "atomic_wrapper.h" +#include "base_node.h" +#include "cpu.h" +#include "log.h" +#include "value.h" + +#include +#include + +#include "glog/logging.h" + +namespace yakushima { + +class link_or_value { +public: + link_or_value() = default; + + link_or_value(const link_or_value&) = default; + + link_or_value(link_or_value&&) = default; + + link_or_value& operator=(const link_or_value&) = default; + + link_or_value& operator=(link_or_value&&) = default; + + ~link_or_value() = default; + + /** + * @details release heap objects. + */ + void destroy() { + if (auto* child = get_next_layer(); child != nullptr) { + child->destroy(); + delete child; // NOLINT + } else if (auto* v = get_value(); v != nullptr) { + if (value::need_delete(v)) { value::delete_value(v); } + } + init_lv(); + } + + /** + * @details display function for analysis and debug. + */ + void display() const { + if (auto* child = get_next_layer(); child != nullptr) { + std::cout << "need_delete_value_ : " << false << std::endl; + std::cout << "next_layer_ : " << child << std::endl; + std::cout << "v_or_vp_ : " << nullptr << std::endl; + std::cout << "value_length_ : " << 0 << std::endl; + std::cout << "value_align_ : " << 0 << std::endl; + } else if (auto* v = get_value(); v != nullptr) { + const auto del_flag = value::need_delete(v); + const auto v_align = static_cast( + std::get<2>(value::get_gc_info(v))); + std::cout << "need_delete_value_ : " << del_flag << std::endl; + std::cout << "next_layer_ : " << nullptr << std::endl; + std::cout << "v_or_vp_ : " << value::get_body(v) << std::endl; + std::cout << "value_length_ : " << value::get_len(v) << std::endl; + std::cout << "value_align_ : " << v_align << std::endl; + } + } + + /** + * @brief Collect the memory usage of this record. + * + * @param[in] level The level of this node in the tree. + * @param[in,out] mem_stat The stack of memory usage for each level. 
+ */ + void mem_usage(std::size_t level, memory_usage_stack& mem_stat) const { + if (auto* child = get_next_layer(); child != nullptr) { + child->mem_usage(level + 1, mem_stat); + } else if (auto* v = get_value(); v != nullptr) { + const auto v_len = std::get<1>(value::get_gc_info(v)); + auto& [node_num, used, reserved] = mem_stat.at(level); + used += v_len; + reserved += v_len; + } + } + + [[maybe_unused]] [[nodiscard]] const std::type_info* get_lv_type() const { + if (get_next_layer() != nullptr) { return &typeid(base_node*); } + if (get_value() != nullptr) { return &typeid(value*); } + return &typeid(nullptr); + } + + /** + * @brief Get the root node of the next layer. + * + * Note that this function uses the atomic operation (i.e., load) for dealing with + * concurrent modifications. + * + * @retval The root node of the next layer if exists. + * @retval nullptr otherwise. + */ + [[nodiscard]] base_node* get_next_layer() const { + const auto ptr = loadAcquireN(child_or_v_); + if ((ptr & kChildFlag) == 0) { return nullptr; } + return reinterpret_cast(ptr & ~kChildFlag); // NOLINT + } + + /** + * @brief Get the value pointer. + * + * Note that this function uses the atomic operation (i.e., load) for dealing with + * concurrent modifications. + * + * @retval The pointer of the contained value if exists. + * @retval nullptr otherwise. + */ + [[nodiscard]] value* get_value() const { + const auto ptr = loadAcquireN(child_or_v_); + if ((ptr & kChildFlag) > 0 || ptr == kValPtrFlag) { return nullptr; } + return reinterpret_cast(ptr); // NOLINT + } + + /** + * @brief Initialize the payload to zero. + * + */ + void init_lv() { child_or_v_ = kValPtrFlag; } + + /** + * @details This is move process. + * @param nlv + */ + void set(link_or_value* const nlv) { + /** + * This object in this function is not accessed concurrently, so it can copy assign. + */ + *this = *nlv; + } + + /** + * @brief todo : write documents much. + * @param[in] new_value todo write + * @param[out] created_value_ptr todo write + * @param[out] old_value todo write + */ + void set_value(value* new_value, void** const created_value_ptr, + value** old_value = nullptr) { + auto* cur_v = get_value(); + if (cur_v != nullptr && value::need_delete(cur_v)) { + if (old_value == nullptr) { + value::delete_value(cur_v); + } else { + *old_value = cur_v; + } + } + + // store the given value + const auto ptr = reinterpret_cast(new_value); // NOLINT + storeReleaseN(child_or_v_, ptr); + if (created_value_ptr != nullptr) { + auto* v_ptr = reinterpret_cast(child_or_v_); // NOLINT + *created_value_ptr = value::get_body(v_ptr); + } + } + + /** + * @pre This function called at initialization. + * @param[in] new_next_layer + */ + void set_next_layer(base_node* const new_next_layer) { + auto ptr = reinterpret_cast(new_next_layer); // NOLINT + storeReleaseN(child_or_v_, ptr | kChildFlag); + } + +private: + /** + * @brief A flag for indicating that the next layer exists. + * + */ + static constexpr uintptr_t kChildFlag = 0b10UL << 62UL; + + /** + * @brief A flag for indicating that the next layer exists. + * + */ + static constexpr uintptr_t kValPtrFlag = 0b01UL << 62UL; + + /** + * @attention + * This variable is read/write concurrently. + * If all the bits are zeros, this does not have any data. + * If the most significant bit is one, this contains the next layer. + * Otherwise, this contains the pointer of a value. 
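     * Encoding sketch, matching set_next_layer() / get_next_layer() / get_value()
     * above (kValPtrFlag alone is the "empty" sentinel written by init_lv()):
     *
     *   store child : child_or_v_ = reinterpret_cast<uintptr_t>(child) | kChildFlag;
     *   store value : child_or_v_ = reinterpret_cast<uintptr_t>(v);
     *   read        : (child_or_v_ & kChildFlag) != 0
     *                   ? reinterpret_cast<base_node*>(child_or_v_ & ~kChildFlag)
     *                   : reinterpret_cast<value*>(child_or_v_);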
+ */ + uintptr_t child_or_v_{kValPtrFlag}; +}; + } // namespace yakushima \ No newline at end of file diff --git a/include/manager_thread.h b/include/manager_thread.h index ca87769..955cebf 100644 --- a/include/manager_thread.h +++ b/include/manager_thread.h @@ -1,101 +1,101 @@ -/** - * @file manager_thread.h - */ - -#pragma once - -#include "border_node.h" -#include "config.h" -#include "garbage_collection.h" -#include "interior_node.h" -#include "thread_info_table.h" -#include -#include - -namespace yakushima { - -class epoch_manager { -public: - static void epoch_thread() { - for (;;) { - sleepMs(YAKUSHIMA_EPOCH_TIME); - for (;;) { - Epoch cur_epoch = epoch_management::get_epoch(); - bool verify{true}; - for (auto&& elem : thread_info_table::get_thread_info_table()) { - Epoch check_epoch = elem.get_begin_epoch(); - if (check_epoch != 0 && check_epoch != cur_epoch) { - verify = false; - break; - } - } - if (verify) break; - sleepMs(1); - /** - * Suppose the user misuses and calls fin () without leave (token). - * When the calculation process in this loop is executed, - * there is no way to escape from the loop, so the following line is required. - */ - if (kEpochThreadEnd.load(std::memory_order_acquire)) break; - } - epoch_management::epoch_inc(); - - /** - * attention : type of epoch is uint64_t - */ - Epoch min_epoch(UINT64_MAX); - for (auto&& elem : thread_info_table::get_thread_info_table()) { - Epoch itr_epoch = elem.get_begin_epoch(); - if (itr_epoch != 0) { - /** - * itr_epoch is valid. - */ - min_epoch = std::min(min_epoch, itr_epoch); - } - } - if (min_epoch != UINT64_MAX) { - garbage_collection::set_gc_epoch(min_epoch - 1); - } else { - garbage_collection::set_gc_epoch(epoch_management::get_epoch() - - 1); - } - if (kEpochThreadEnd.load(std::memory_order_acquire)) { break; } - } - } - - static void gc_thread() { - for (;;) { - sleepMs(YAKUSHIMA_EPOCH_TIME); - thread_info_table::gc(); - if (kGCThreadEnd.load(std::memory_order_acquire)) { break; } - } - } - - static void invoke_epoch_thread() { - kEpochThread = std::thread(epoch_thread); - } - - static void invoke_gc_thread() { kGCThread = std::thread(gc_thread); } - - static void join_epoch_thread() { kEpochThread.join(); } - - static void join_gc_thread() { kGCThread.join(); } - - static void set_epoch_thread_end() { - kEpochThreadEnd.store(true, std::memory_order_release); - } - - static void set_gc_thread_end() { - kGCThreadEnd.store(true, std::memory_order_release); - } - -private: - alignas(CACHE_LINE_SIZE) static inline std::atomic // NOLINT - kEpochThreadEnd{false}; // NOLINT : can't become constexpr - static inline std::thread kEpochThread; // NOLINT : can't become constexpr - alignas(CACHE_LINE_SIZE) static inline std::atomic // NOLINT - kGCThreadEnd{false}; // NOLINT : can't become constexpr - static inline std::thread kGCThread; // NOLINT : can't become constexpr -}; - +/** + * @file manager_thread.h + */ + +#pragma once + +#include "border_node.h" +#include "config.h" +#include "garbage_collection.h" +#include "interior_node.h" +#include "thread_info_table.h" +#include +#include + +namespace yakushima { + +class epoch_manager { +public: + static void epoch_thread() { + for (;;) { + sleepMs(YAKUSHIMA_EPOCH_TIME); + for (;;) { + Epoch cur_epoch = epoch_management::get_epoch(); + bool verify{true}; + for (auto&& elem : thread_info_table::get_thread_info_table()) { + Epoch check_epoch = elem.get_begin_epoch(); + if (check_epoch != 0 && check_epoch != cur_epoch) { + verify = false; + break; + } + } + if (verify) break; + 
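                // Worked example of the check above (epoch values are
                // illustrative): with cur_epoch == 5 and per-thread begin
                // epochs {5, 0, 5} (0 means "not in an epoch"), verify stays
                // true and the loop exits so the global epoch can advance to 6;
                // a thread still at 4 keeps verify false until it catches up.
                // After the advance, gc_epoch becomes min(active epochs) - 1
                // (see below), so nothing an active reader may still hold is
                // reclaimed.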
sleepMs(1); + /** + * Suppose the user misuses and calls fin () without leave (token). + * When the calculation process in this loop is executed, + * there is no way to escape from the loop, so the following line is required. + */ + if (kEpochThreadEnd.load(std::memory_order_acquire)) break; + } + epoch_management::epoch_inc(); + + /** + * attention : type of epoch is uint64_t + */ + Epoch min_epoch(UINT64_MAX); + for (auto&& elem : thread_info_table::get_thread_info_table()) { + Epoch itr_epoch = elem.get_begin_epoch(); + if (itr_epoch != 0) { + /** + * itr_epoch is valid. + */ + min_epoch = std::min(min_epoch, itr_epoch); + } + } + if (min_epoch != UINT64_MAX) { + garbage_collection::set_gc_epoch(min_epoch - 1); + } else { + garbage_collection::set_gc_epoch(epoch_management::get_epoch() - + 1); + } + if (kEpochThreadEnd.load(std::memory_order_acquire)) { break; } + } + } + + static void gc_thread() { + for (;;) { + sleepMs(YAKUSHIMA_EPOCH_TIME); + thread_info_table::gc(); + if (kGCThreadEnd.load(std::memory_order_acquire)) { break; } + } + } + + static void invoke_epoch_thread() { + kEpochThread = std::thread(epoch_thread); + } + + static void invoke_gc_thread() { kGCThread = std::thread(gc_thread); } + + static void join_epoch_thread() { kEpochThread.join(); } + + static void join_gc_thread() { kGCThread.join(); } + + static void set_epoch_thread_end() { + kEpochThreadEnd.store(true, std::memory_order_release); + } + + static void set_gc_thread_end() { + kGCThreadEnd.store(true, std::memory_order_release); + } + +private: + alignas(CACHE_LINE_SIZE) static inline std::atomic // NOLINT + kEpochThreadEnd{false}; // NOLINT : can't become constexpr + static inline std::thread kEpochThread; // NOLINT : can't become constexpr + alignas(CACHE_LINE_SIZE) static inline std::atomic // NOLINT + kGCThreadEnd{false}; // NOLINT : can't become constexpr + static inline std::thread kGCThread; // NOLINT : can't become constexpr +}; + } // namespace yakushima \ No newline at end of file diff --git a/include/permutation.h b/include/permutation.h index 72320d1..e8db065 100644 --- a/include/permutation.h +++ b/include/permutation.h @@ -1,240 +1,240 @@ -/** - * @file permutation.h - * @brief permutation which expresses the key order by index inside of the border node. - */ - -#pragma once - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "base_node.h" -#include "log.h" -#include "scheme.h" - -#include "glog/logging.h" - -namespace yakushima { - -class permutation { -public: - /** - * cnk ... current number of keys. - */ - static constexpr std::size_t cnk_mask = 0b1111; - static constexpr std::size_t cnk_bit_size = 4; // bits - static constexpr std::size_t pkey_bit_size = - 4; // bits, permutation key size. - - permutation() : body_{} {} - - explicit permutation(const std::uint64_t body) : body_{body} {} - - /** - * @brief decrement key number. 
- */ - void dec_key_num() { - std::uint64_t per_body(body_.load(std::memory_order_acquire)); - // decrement key number - std::size_t cnk = per_body & cnk_mask; - --cnk; - per_body &= ~cnk_mask; - per_body |= cnk; - body_.store(per_body, std::memory_order_release); - } - - void delete_rank(std::size_t rank) { - // layout : left delete_target right cnk - std::uint64_t left{}; - std::uint64_t cnk = get_cnk(); - if (rank == cnk - 1 || rank == key_slice_length - 1) { - left = 0; - } else { - left = get_body(); - left = (left >> (pkey_bit_size * (rank + 2))) - << (pkey_bit_size * (rank + 1)); - } - std::uint64_t right = get_body(); - if (rank == 0) { - right = 0; - } else { - right = (right << (pkey_bit_size * (key_slice_length - rank))) >> - (pkey_bit_size * (key_slice_length - rank)); - } - std::uint64_t final = left | right; - final &= ~cnk_mask; - final |= cnk; - set_body(final); - } - - void display() const { - std::bitset<64> bs{get_body()}; - std::cout << " perm : " << bs << std::endl; - } - - [[nodiscard]] std::size_t get_empty_slot() const { - std::uint64_t per_body(body_.load(std::memory_order_acquire)); - std::size_t cnk = per_body & cnk_mask; - if (cnk == 0) { return 0; } - std::bitset<15> bs{}; - bs.reset(); - for (std::size_t i = 0; i < cnk; ++i) { - per_body = per_body >> cnk_bit_size; - bs.set(per_body & cnk_mask); - } - for (std::size_t i = 0; i < 15; ++i) { - if (!bs.test(i)) { return i; } - } - LOG(ERROR) << log_location_prefix << "programming error"; - return 0; - } - - [[nodiscard]] std::uint64_t get_body() const { - return body_.load(std::memory_order_acquire); - } - - [[nodiscard]] std::uint8_t get_cnk() const { - std::uint64_t per_body(body_.load(std::memory_order_acquire)); - return static_cast(per_body & cnk_mask); - } - - [[nodiscard]] std::size_t get_lowest_key_pos() const { - std::uint64_t per = get_body(); - per = per >> cnk_bit_size; - return per & cnk_mask; - } - - [[nodiscard]] std::size_t get_index_of_rank(const std::size_t rank) const { - std::uint64_t per = get_body(); - per = per >> cnk_bit_size; - if (rank != 0) { per = per >> (pkey_bit_size * rank); } - return per & cnk_mask; - } - - /** - * @brief increment key number. 
- */ - void inc_key_num() { - std::uint64_t per_body(body_.load(std::memory_order_acquire)); - // increment key number - std::size_t cnk = per_body & cnk_mask; -#ifndef NDEBUG - if (cnk >= pow(2, cnk_bit_size) - 1) { // NOLINT - LOG(ERROR) << log_location_prefix; - } -#endif - ++cnk; - per_body &= ~cnk_mask; - per_body |= cnk; - body_.store(per_body, std::memory_order_release); - } - - void init() { body_.store(0, std::memory_order_release); } - - void insert(std::size_t rank, std::size_t pos) { - // bit layout : left target right cnk - std::uint64_t cnk = get_cnk(); - std::uint64_t target = pos << (pkey_bit_size * (rank + 1)); - std::uint64_t left{}; - if (rank == cnk - 1) { - left = 0; - } else { - left = get_body(); - left = (left >> (pkey_bit_size * (rank + 1))) - << (pkey_bit_size * (rank + 2)); - } - std::uint64_t right{}; - if (rank == 0) { - right = 0; - } else { - right = get_body(); - right = (right << (pkey_bit_size * (key_slice_length - rank))) >> - (pkey_bit_size * (key_slice_length - rank)); - } - std::uint64_t final = left | target | right; - final &= ~cnk_mask; - final |= cnk; - set_body(final); - } - - /** - * @brief for split - */ - void split_dest(std::size_t num) { - std::uint64_t body{0}; - for (std::size_t i = 1; i < num; ++i) { - body |= i << (pkey_bit_size * (i + 1)); - } - body |= num; - set_body(body); - } - - /** - * @pre @a key_slice and @a key_length is array whose size is equal or less than cnk of - * permutation. If it ignores, it may occur seg-v error. - * @param key_slice - * @param key_length - */ - void - rearrange(const std::array& key_slice, - const std::array& key_length) { - std::uint64_t per_body(body_.load(std::memory_order_acquire)); - // get current number of keys - auto cnk = static_cast(per_body & cnk_mask); - - // tuple - constexpr std::size_t key_pos = 1; - std::array, - key_slice_length> - ar; - for (std::uint8_t i = 0; i < cnk; ++i) { - ar.at(i) = {{key_slice.at(i), key_length.at(i)}, i}; - } - /** - * sort based on key_slice and key_length for dictionary order. - * So - */ - std::sort(ar.begin(), ar.begin() + cnk); - - // rearrange - std::uint64_t new_body(0); - for (auto itr = ar.rbegin() + (key_slice_length - cnk); // NOLINT - itr != ar.rend(); ++itr) { // NOLINT : order to use "auto *itr" - new_body |= std::get(*itr); - new_body <<= pkey_bit_size; - } - new_body |= cnk; - body_.store(new_body, std::memory_order_release); - } - - void set_body(const std::uint64_t nb) { - body_.store(nb, std::memory_order_release); - } - - status set_cnk(const std::uint8_t cnk) { -#ifndef NDEBUG - if (powl(2, cnk_bit_size) <= cnk) { - LOG(ERROR) << log_location_prefix << "unreachable path"; - } -#endif - std::uint64_t body = body_.load(std::memory_order_acquire); - body &= ~cnk_mask; - body |= cnk; - body_.store(body, std::memory_order_release); - return status::OK; - } - -private: - std::atomic body_; -}; - +/** + * @file permutation.h + * @brief permutation which expresses the key order by index inside of the border node. + */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "base_node.h" +#include "log.h" +#include "scheme.h" + +#include "glog/logging.h" + +namespace yakushima { + +class permutation { +public: + /** + * cnk ... current number of keys. + */ + static constexpr std::size_t cnk_mask = 0b1111; + static constexpr std::size_t cnk_bit_size = 4; // bits + static constexpr std::size_t pkey_bit_size = + 4; // bits, permutation key size. 
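+
+    /**
+     * Illustrative layout sketch (inferred from the accessors below, not a
+     * normative statement): the low 4 bits of body_ hold cnk, and each
+     * following 4-bit nibble holds the physical slot index of the key at
+     * rank 0, 1, ... (ascending key order), up to key_slice_length (15)
+     * entries. For example, three keys whose rank->slot mapping is
+     * {0->2, 1->0, 2->1} give body_ = 0b0001'0000'0010'0011 (0x1023):
+     * nibble0 = 3 (cnk), nibble1 = 2, nibble2 = 0, nibble3 = 1.
+     */
+    static_assert(cnk_mask == (1U << cnk_bit_size) - 1,
+                  "cnk occupies the low cnk_bit_size bits of body_");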
+ + permutation() : body_{} {} + + explicit permutation(const std::uint64_t body) : body_{body} {} + + /** + * @brief decrement key number. + */ + void dec_key_num() { + std::uint64_t per_body(body_.load(std::memory_order_acquire)); + // decrement key number + std::size_t cnk = per_body & cnk_mask; + --cnk; + per_body &= ~cnk_mask; + per_body |= cnk; + body_.store(per_body, std::memory_order_release); + } + + void delete_rank(std::size_t rank) { + // layout : left delete_target right cnk + std::uint64_t left{}; + std::uint64_t cnk = get_cnk(); + if (rank == cnk - 1 || rank == key_slice_length - 1) { + left = 0; + } else { + left = get_body(); + left = (left >> (pkey_bit_size * (rank + 2))) + << (pkey_bit_size * (rank + 1)); + } + std::uint64_t right = get_body(); + if (rank == 0) { + right = 0; + } else { + right = (right << (pkey_bit_size * (key_slice_length - rank))) >> + (pkey_bit_size * (key_slice_length - rank)); + } + std::uint64_t final = left | right; + final &= ~cnk_mask; + final |= cnk; + set_body(final); + } + + void display() const { + std::bitset<64> bs{get_body()}; + std::cout << " perm : " << bs << std::endl; + } + + [[nodiscard]] std::size_t get_empty_slot() const { + std::uint64_t per_body(body_.load(std::memory_order_acquire)); + std::size_t cnk = per_body & cnk_mask; + if (cnk == 0) { return 0; } + std::bitset<15> bs{}; + bs.reset(); + for (std::size_t i = 0; i < cnk; ++i) { + per_body = per_body >> cnk_bit_size; + bs.set(per_body & cnk_mask); + } + for (std::size_t i = 0; i < 15; ++i) { + if (!bs.test(i)) { return i; } + } + LOG(ERROR) << log_location_prefix << "programming error"; + return 0; + } + + [[nodiscard]] std::uint64_t get_body() const { + return body_.load(std::memory_order_acquire); + } + + [[nodiscard]] std::uint8_t get_cnk() const { + std::uint64_t per_body(body_.load(std::memory_order_acquire)); + return static_cast(per_body & cnk_mask); + } + + [[nodiscard]] std::size_t get_lowest_key_pos() const { + std::uint64_t per = get_body(); + per = per >> cnk_bit_size; + return per & cnk_mask; + } + + [[nodiscard]] std::size_t get_index_of_rank(const std::size_t rank) const { + std::uint64_t per = get_body(); + per = per >> cnk_bit_size; + if (rank != 0) { per = per >> (pkey_bit_size * rank); } + return per & cnk_mask; + } + + /** + * @brief increment key number. 
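+     * @details Loads body_, increments the 4-bit cnk field in the low bits,
+     * and stores the result back with release semantics. The NDEBUG check
+     * below reports an error if the node already holds 15 keys
+     * (2^cnk_bit_size - 1), the capacity implied by key_slice_length.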
+ */ + void inc_key_num() { + std::uint64_t per_body(body_.load(std::memory_order_acquire)); + // increment key number + std::size_t cnk = per_body & cnk_mask; +#ifndef NDEBUG + if (cnk >= pow(2, cnk_bit_size) - 1) { // NOLINT + LOG(ERROR) << log_location_prefix; + } +#endif + ++cnk; + per_body &= ~cnk_mask; + per_body |= cnk; + body_.store(per_body, std::memory_order_release); + } + + void init() { body_.store(0, std::memory_order_release); } + + void insert(std::size_t rank, std::size_t pos) { + // bit layout : left target right cnk + std::uint64_t cnk = get_cnk(); + std::uint64_t target = pos << (pkey_bit_size * (rank + 1)); + std::uint64_t left{}; + if (rank == cnk - 1) { + left = 0; + } else { + left = get_body(); + left = (left >> (pkey_bit_size * (rank + 1))) + << (pkey_bit_size * (rank + 2)); + } + std::uint64_t right{}; + if (rank == 0) { + right = 0; + } else { + right = get_body(); + right = (right << (pkey_bit_size * (key_slice_length - rank))) >> + (pkey_bit_size * (key_slice_length - rank)); + } + std::uint64_t final = left | target | right; + final &= ~cnk_mask; + final |= cnk; + set_body(final); + } + + /** + * @brief for split + */ + void split_dest(std::size_t num) { + std::uint64_t body{0}; + for (std::size_t i = 1; i < num; ++i) { + body |= i << (pkey_bit_size * (i + 1)); + } + body |= num; + set_body(body); + } + + /** + * @pre @a key_slice and @a key_length is array whose size is equal or less than cnk of + * permutation. If it ignores, it may occur seg-v error. + * @param key_slice + * @param key_length + */ + void + rearrange(const std::array& key_slice, + const std::array& key_length) { + std::uint64_t per_body(body_.load(std::memory_order_acquire)); + // get current number of keys + auto cnk = static_cast(per_body & cnk_mask); + + // tuple + constexpr std::size_t key_pos = 1; + std::array, + key_slice_length> + ar; + for (std::uint8_t i = 0; i < cnk; ++i) { + ar.at(i) = {{key_slice.at(i), key_length.at(i)}, i}; + } + /** + * sort based on key_slice and key_length for dictionary order. 
+ * So + */ + std::sort(ar.begin(), ar.begin() + cnk); + + // rearrange + std::uint64_t new_body(0); + for (auto itr = ar.rbegin() + (key_slice_length - cnk); // NOLINT + itr != ar.rend(); ++itr) { // NOLINT : order to use "auto *itr" + new_body |= std::get(*itr); + new_body <<= pkey_bit_size; + } + new_body |= cnk; + body_.store(new_body, std::memory_order_release); + } + + void set_body(const std::uint64_t nb) { + body_.store(nb, std::memory_order_release); + } + + status set_cnk(const std::uint8_t cnk) { +#ifndef NDEBUG + if (powl(2, cnk_bit_size) <= cnk) { + LOG(ERROR) << log_location_prefix << "unreachable path"; + } +#endif + std::uint64_t body = body_.load(std::memory_order_acquire); + body &= ~cnk_mask; + body |= cnk; + body_.store(body, std::memory_order_release); + return status::OK; + } + +private: + std::atomic body_; +}; + } // namespace yakushima \ No newline at end of file diff --git a/include/scan_helper.h b/include/scan_helper.h index 8ce8242..943988f 100644 --- a/include/scan_helper.h +++ b/include/scan_helper.h @@ -1,440 +1,440 @@ -/** - * @file scan_helper.h - */ - -#pragma once - -#include "base_node.h" -#include "border_node.h" -#include "common_helper.h" -#include "interior_node.h" -#include "scheme.h" - -namespace yakushima { - -// forward declaration -template -static status -scan_border(border_node** target, std::string_view l_key, scan_endpoint l_end, - std::string_view r_key, scan_endpoint r_end, - std::vector>& - tuple_list, - node_version64_body& v_at_fb, - std::vector>* node_version_vec, - const std::string& key_prefix, std::size_t max_size); - -inline status scan_check_retry(border_node* const bn, - node_version64_body& v_at_fb) { - node_version64_body check = bn->get_stable_version(); - if (check != v_at_fb) { - // fail optimistic verify - if (check.get_vsplit() != v_at_fb.get_vsplit() || check.get_deleted()) { - /** - * The node at find border was changed by split or deleted. - */ - return status::OK_RETRY_FROM_ROOT; - } - /** - * The structure of the border node was not changed. - * So reading border node can retry from that. - */ - v_at_fb = check; - return status::OK_RETRY_AFTER_FB; - } - return status::OK; -} - -/** - * scan for some try nodes which is not root. -*/ -template -static status -scan(base_node* const root, const std::string_view l_key, - const scan_endpoint l_end, const std::string_view r_key, - const scan_endpoint r_end, - std::vector>& tuple_list, - std::vector>* const - node_version_vec, - const std::string& key_prefix, const std::size_t max_size) { - /** - * Log size before scanning this node. - * This must be before retry label for retry at find border. - */ - std::size_t initial_size_of_tuple_list{tuple_list.size()}; - std::size_t initial_size_of_node_version_vec{}; - if (node_version_vec != nullptr) { - initial_size_of_node_version_vec = node_version_vec->size(); - } - - /** - * For retry of failing optimistic verify, it must erase parts of - * tuple_list and node vec. clear between initial_size... and current size. - * about tuple_list. 
- */ - auto clean_up_tuple_list_nvc = [&tuple_list, - &node_version_vec](std::size_t isoftl, - std::size_t isonvv) { - if (tuple_list.size() != isoftl) { - std::size_t erase_num = tuple_list.size() - isoftl; - tuple_list.erase(tuple_list.end() - erase_num, tuple_list.end()); - } - // about node_version_vec - if (node_version_vec != nullptr) { - if (node_version_vec->size() != isonvv) { - std::size_t erase_num = node_version_vec->size() - isonvv; - node_version_vec->erase(node_version_vec->end() - // NOLINT - erase_num, // NOLINT - node_version_vec->end()); - } - } - }; - -retry: - if (root->get_version_deleted() || !root->get_version_root()) { - return status::OK_RETRY_FROM_ROOT; - } - - std::tuple node_and_v; - constexpr std::size_t tuple_node_index = 0; - constexpr std::size_t tuple_v_index = 1; - status check_status{}; - key_slice_type ks{0}; - key_length_type kl = l_key.size(); // NOLINT - if (l_key.size() > sizeof(key_slice_type)) { - memcpy(&ks, l_key.data(), sizeof(key_slice_type)); - } else { - if (!l_key.empty()) { memcpy(&ks, l_key.data(), l_key.size()); } - } - node_and_v = find_border(root, ks, kl, check_status); - if (check_status == status::WARN_RETRY_FROM_ROOT_OF_ALL) { - return status::OK_RETRY_AFTER_FB; - } - border_node* bn(std::get(node_and_v)); - node_version64_body check_v = std::get(node_and_v); - - for (;;) { - // log size before scan_border - std::size_t initial_size_of_tuple_list_at_fb{tuple_list.size()}; - std::size_t initial_size_of_node_version_vec_at_fb{}; - if (node_version_vec != nullptr) { - initial_size_of_node_version_vec_at_fb = node_version_vec->size(); - } - - // scan the border node - check_status = scan_border( - &bn, l_key, l_end, r_key, r_end, tuple_list, check_v, - node_version_vec, key_prefix, max_size); - - // check rc, success - if (check_status == status::OK_SCAN_END) { return status::OK; } - if (check_status == status::OK_SCAN_CONTINUE) { continue; } - - /** - * fail. it doesn't need to clear tuple and node information because - * caller of this will do. - */ - if (check_status == status::OK_RETRY_AFTER_FB) { - node_version64_body re_check_v = bn->get_stable_version(); - if (check_v.get_vsplit() != re_check_v.get_vsplit() || - // retry from this b+ tree - re_check_v.get_deleted()) { - return status::OK_RETRY_AFTER_FB; - } - if (check_v.get_vinsert_delete() != - re_check_v.get_vinsert_delete()) { - // retry from this border node - check_v = re_check_v; - clean_up_tuple_list_nvc(initial_size_of_tuple_list_at_fb, - initial_size_of_node_version_vec_at_fb); - continue; - } - } else if (check_status == status::OK_RETRY_FROM_ROOT) { - clean_up_tuple_list_nvc(initial_size_of_tuple_list, - initial_size_of_node_version_vec); - goto retry; // NOLINT - } - } -} - -/** - * scan for some leafnode of b+tree. -*/ -template -static status -scan_border(border_node** const target, const std::string_view l_key, - const scan_endpoint l_end, const std::string_view r_key, - const scan_endpoint r_end, - std::vector>& - tuple_list, - node_version64_body& v_at_fb, - std::vector>* const - node_version_vec, - const std::string& key_prefix, const std::size_t max_size) { - /** - * Log size before scanning this node. - * This must be before retry label for retry at find border. 
- */ - std::size_t initial_size_of_tuple_list{tuple_list.size()}; - std::size_t initial_size_of_node_version_vec{}; - if (node_version_vec != nullptr) { - initial_size_of_node_version_vec = node_version_vec->size(); - } - /** - * For retry of failing optimistic verify, it must erase parts of - * tuple_list and node vec. clear between initial_size... and current size. - * about tuple_list. - */ - auto clean_up_tuple_list_nvc = [&tuple_list, &node_version_vec, - initial_size_of_tuple_list, - initial_size_of_node_version_vec]() { - if (tuple_list.size() != initial_size_of_tuple_list) { - std::size_t erase_num = - tuple_list.size() - initial_size_of_tuple_list; - tuple_list.erase(tuple_list.end() - erase_num, tuple_list.end()); - } - // about node_version_vec - if (node_version_vec != nullptr) { - if (node_version_vec->size() != initial_size_of_node_version_vec) { - std::size_t erase_num = node_version_vec->size() - - initial_size_of_node_version_vec; - node_version_vec->erase(node_version_vec->end() - // NOLINT - erase_num, // NOLINT - node_version_vec->end()); - } - } - }; -retry: - - /** - * This is used below loop for logging whether this scan catches some - * elements in this node. - */ - bool tuple_pushed_num{false}; - - border_node* bn = *target; - /** - * next node pointer must be logged before optimistic verify. - */ - border_node* next = bn->get_next(); - /** - * get permutation at once. - * After scan border, optimistic verify support this is atomic. - */ - permutation perm(bn->get_permutation().get_body()); - // check all elements in border node. - for (std::size_t i = 0; i < perm.get_cnk(); ++i) { - std::size_t index = perm.get_index_of_rank(i); - key_slice_type ks = bn->get_key_slice_at(index); - key_length_type kl = bn->get_key_length_at(index); - std::string full_key{key_prefix}; - if (kl > 0) { - // gen full key from log and this key slice - full_key.append( - reinterpret_cast(&ks), // NOLINT - kl < sizeof(key_slice_type) ? kl : sizeof(key_slice_type)); - /** - * If the key is complete (kl < sizeof(key_slice_type)), the key - * slice must be copied by the size of key length. - * Otherwise, sizeof key_slice_type. - */ - } - link_or_value* lv = bn->get_lv_at(index); - value* vp = lv->get_value(); - base_node* next_layer = lv->get_next_layer(); - node_version64* node_version_ptr = bn->get_version_ptr(); - /** - * This verification may seem verbose, but it can also be considered - * an early abort. - */ - status check_status = scan_check_retry(bn, v_at_fb); - if (check_status != status::OK) { - // failed. clean up tuple list and node vesion vec. - clean_up_tuple_list_nvc(); - } - if (check_status == status::OK_RETRY_FROM_ROOT) { - return status::OK_RETRY_FROM_ROOT; - } - if (check_status == status::OK_RETRY_AFTER_FB) { - goto retry; // NOLINT - } - if (kl > sizeof(key_slice_type)) { - std::string_view arg_l_key; - scan_endpoint arg_l_end{}; - if (l_end == scan_endpoint::INF) { - arg_l_key = ""; - arg_l_end = scan_endpoint::INF; - } else { - key_slice_type l_key_slice{0}; - memcpy(&l_key_slice, l_key.data(), - l_key.size() < sizeof(key_slice_type) - ? 
l_key.size() - : sizeof(key_slice_type)); - // check left point - int ret_cmp = memcmp(&l_key_slice, &ks, sizeof(key_slice_type)); - if (ret_cmp < 0) { - arg_l_key = ""; - arg_l_end = scan_endpoint::INF; - } else if (ret_cmp == 0) { - arg_l_key = l_key; - if (arg_l_key.size() > sizeof(key_slice_type)) { - arg_l_key.remove_prefix(sizeof(key_slice_type)); - } else { - arg_l_key = ""; - } - arg_l_end = l_end; - } else { - continue; - /** - * Ignore it because it is smaller than the left end point. - */ - } - } - std::string_view arg_r_key; - scan_endpoint arg_r_end{}; - if (r_end == scan_endpoint::INF) { - arg_r_key = ""; - arg_r_end = scan_endpoint::INF; - } else { - int ret_cmp = memcmp(r_key.data(), full_key.data(), - r_key.size() < full_key.size() - ? r_key.size() - : full_key.size()); - if (ret_cmp < 0) { return status::OK_SCAN_END; } - if (ret_cmp == 0) { - if (r_key.size() <= full_key.size()) { - return status::OK_SCAN_END; - } - arg_r_key = r_key; - arg_r_end = r_end; - } else { - arg_r_key = ""; - arg_r_end = scan_endpoint::INF; - } - } - check_status = - scan(next_layer, arg_l_key, arg_l_end, arg_r_key, arg_r_end, - tuple_list, node_version_vec, full_key, max_size); - if (check_status != status::OK) { - // failed. clean up tuple list and node vesion vec. - clean_up_tuple_list_nvc(); - goto retry; // NOLINT - } - } else { - auto in_range = [&full_key, &tuple_list, &vp, &node_version_vec, - &v_at_fb, &node_version_ptr, &tuple_pushed_num, - max_size]() { - tuple_list.emplace_back(std::make_tuple( - full_key, static_cast(value::get_body(vp)), - value::get_len(vp))); - if (node_version_vec != nullptr) { - /** - * note: - * std::get<1>(node_version_vec.back()) != node_version_ptr - * Adding this can reduce redundant emplace_back. However, - * the correspondence between the value of the scan result - * and the pointer to the node version becomes unknown, - * making it impossible to perform node verify according - * to the actual situation read by the transaction - * execution engine. - */ - node_version_vec->emplace_back( - std::make_pair(v_at_fb, node_version_ptr)); - } - tuple_pushed_num = true; - if (max_size != 0 && tuple_list.size() >= max_size) { - return status::OK_SCAN_END; - } - return status::OK; - }; - if (l_end == scan_endpoint::INF && r_end == scan_endpoint::INF) { - // all range - if (in_range() != status::OK) return status::OK_SCAN_END; - continue; - } - // not all range - if (l_end != scan_endpoint::INF) { - key_slice_type l_key_slice{0}; - if (!l_key.empty()) { - memcpy(&l_key_slice, l_key.data(), - l_key.size() < sizeof(key_slice_type) - ? l_key.size() - : sizeof(key_slice_type)); - } - int l_cmp = memcmp(&l_key_slice, &ks, sizeof(key_slice_type)); - if (l_cmp > 0 || - (l_cmp == 0 && (l_key.size() > kl || - (l_key.size() == kl && - l_end == scan_endpoint::EXCLUSIVE)))) { - continue; - } - } - // pass left endpoint. - if (r_end == scan_endpoint::INF) { - if (in_range() != status::OK) return status::OK_SCAN_END; - continue; - } - int r_cmp = - memcmp(r_key.data(), full_key.data(), - r_key.size() < full_key.size() ? r_key.size() - : full_key.size()); - if (r_cmp > 0 || - (r_cmp == 0 && (r_key.size() > full_key.size() || - (r_key.size() == full_key.size() && - r_end == scan_endpoint::INCLUSIVE)))) { - if (in_range() != status::OK) { return status::OK_SCAN_END; } - continue; - } - // pass right endpoint. - if (!tuple_pushed_num && node_version_vec != nullptr) { - /** - * Since it is a rightmost node included in the range, it is - * included in the phantom verification. 
However, there were - * no elements included in the range. - */ - node_version_vec->emplace_back( - std::make_pair(v_at_fb, bn->get_version_ptr())); - } - - return status::OK_SCAN_END; - } - } - // done about checking for all elements of border node. - - if (!tuple_pushed_num && node_version_vec != nullptr) { - /** - * Since it is a leftmost node included in the range, it is included - * in the phantom verification. However, there were no elements - * included in the range. - */ - node_version_vec->emplace_back( - std::make_pair(v_at_fb, bn->get_version_ptr())); - } - - // log before verify for atomicity - node_version64_body next_version{}; - if (next != nullptr) { next_version = next->get_stable_version(); } - - // final check for atomicity - status check_status = scan_check_retry(bn, v_at_fb); - if (check_status != status::OK) { - // failed. clean up tuple list and node vesion vec. - clean_up_tuple_list_nvc(); - } - if (check_status == status::OK_RETRY_FROM_ROOT) { - return status::OK_RETRY_FROM_ROOT; - } - if (check_status == status::OK_RETRY_AFTER_FB) { - goto retry; // NOLINT - } - - // it reaches right endpoint of entire tree. - if (next == nullptr) { return status::OK_SCAN_END; } - - // it is in scan range and fin scaning this border node. - *target = next; - v_at_fb = next_version; - return status::OK_SCAN_CONTINUE; -} - +/** + * @file scan_helper.h + */ + +#pragma once + +#include "base_node.h" +#include "border_node.h" +#include "common_helper.h" +#include "interior_node.h" +#include "scheme.h" + +namespace yakushima { + +// forward declaration +template +static status +scan_border(border_node** target, std::string_view l_key, scan_endpoint l_end, + std::string_view r_key, scan_endpoint r_end, + std::vector>& + tuple_list, + node_version64_body& v_at_fb, + std::vector>* node_version_vec, + const std::string& key_prefix, std::size_t max_size); + +inline status scan_check_retry(border_node* const bn, + node_version64_body& v_at_fb) { + node_version64_body check = bn->get_stable_version(); + if (check != v_at_fb) { + // fail optimistic verify + if (check.get_vsplit() != v_at_fb.get_vsplit() || check.get_deleted()) { + /** + * The node at find border was changed by split or deleted. + */ + return status::OK_RETRY_FROM_ROOT; + } + /** + * The structure of the border node was not changed. + * So reading border node can retry from that. + */ + v_at_fb = check; + return status::OK_RETRY_AFTER_FB; + } + return status::OK; +} + +/** + * scan for some try nodes which is not root. +*/ +template +static status +scan(base_node* const root, const std::string_view l_key, + const scan_endpoint l_end, const std::string_view r_key, + const scan_endpoint r_end, + std::vector>& tuple_list, + std::vector>* const + node_version_vec, + const std::string& key_prefix, const std::size_t max_size) { + /** + * Log size before scanning this node. + * This must be before retry label for retry at find border. + */ + std::size_t initial_size_of_tuple_list{tuple_list.size()}; + std::size_t initial_size_of_node_version_vec{}; + if (node_version_vec != nullptr) { + initial_size_of_node_version_vec = node_version_vec->size(); + } + + /** + * For retry of failing optimistic verify, it must erase parts of + * tuple_list and node vec. clear between initial_size... and current size. + * about tuple_list. 
+ */ + auto clean_up_tuple_list_nvc = [&tuple_list, + &node_version_vec](std::size_t isoftl, + std::size_t isonvv) { + if (tuple_list.size() != isoftl) { + std::size_t erase_num = tuple_list.size() - isoftl; + tuple_list.erase(tuple_list.end() - erase_num, tuple_list.end()); + } + // about node_version_vec + if (node_version_vec != nullptr) { + if (node_version_vec->size() != isonvv) { + std::size_t erase_num = node_version_vec->size() - isonvv; + node_version_vec->erase(node_version_vec->end() - // NOLINT + erase_num, // NOLINT + node_version_vec->end()); + } + } + }; + +retry: + if (root->get_version_deleted() || !root->get_version_root()) { + return status::OK_RETRY_FROM_ROOT; + } + + std::tuple node_and_v; + constexpr std::size_t tuple_node_index = 0; + constexpr std::size_t tuple_v_index = 1; + status check_status{}; + key_slice_type ks{0}; + key_length_type kl = l_key.size(); // NOLINT + if (l_key.size() > sizeof(key_slice_type)) { + memcpy(&ks, l_key.data(), sizeof(key_slice_type)); + } else { + if (!l_key.empty()) { memcpy(&ks, l_key.data(), l_key.size()); } + } + node_and_v = find_border(root, ks, kl, check_status); + if (check_status == status::WARN_RETRY_FROM_ROOT_OF_ALL) { + return status::OK_RETRY_AFTER_FB; + } + border_node* bn(std::get(node_and_v)); + node_version64_body check_v = std::get(node_and_v); + + for (;;) { + // log size before scan_border + std::size_t initial_size_of_tuple_list_at_fb{tuple_list.size()}; + std::size_t initial_size_of_node_version_vec_at_fb{}; + if (node_version_vec != nullptr) { + initial_size_of_node_version_vec_at_fb = node_version_vec->size(); + } + + // scan the border node + check_status = scan_border( + &bn, l_key, l_end, r_key, r_end, tuple_list, check_v, + node_version_vec, key_prefix, max_size); + + // check rc, success + if (check_status == status::OK_SCAN_END) { return status::OK; } + if (check_status == status::OK_SCAN_CONTINUE) { continue; } + + /** + * fail. it doesn't need to clear tuple and node information because + * caller of this will do. + */ + if (check_status == status::OK_RETRY_AFTER_FB) { + node_version64_body re_check_v = bn->get_stable_version(); + if (check_v.get_vsplit() != re_check_v.get_vsplit() || + // retry from this b+ tree + re_check_v.get_deleted()) { + return status::OK_RETRY_AFTER_FB; + } + if (check_v.get_vinsert_delete() != + re_check_v.get_vinsert_delete()) { + // retry from this border node + check_v = re_check_v; + clean_up_tuple_list_nvc(initial_size_of_tuple_list_at_fb, + initial_size_of_node_version_vec_at_fb); + continue; + } + } else if (check_status == status::OK_RETRY_FROM_ROOT) { + clean_up_tuple_list_nvc(initial_size_of_tuple_list, + initial_size_of_node_version_vec); + goto retry; // NOLINT + } + } +} + +/** + * scan for some leafnode of b+tree. +*/ +template +static status +scan_border(border_node** const target, const std::string_view l_key, + const scan_endpoint l_end, const std::string_view r_key, + const scan_endpoint r_end, + std::vector>& + tuple_list, + node_version64_body& v_at_fb, + std::vector>* const + node_version_vec, + const std::string& key_prefix, const std::size_t max_size) { + /** + * Log size before scanning this node. + * This must be before retry label for retry at find border. 
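+     * If the optimistic verification below fails, every tuple and node
+     * version appended after these sizes were recorded is erased by
+     * clean_up_tuple_list_nvc: for OK_RETRY_AFTER_FB the scan then restarts
+     * from the retry label, and for OK_RETRY_FROM_ROOT the failure is
+     * returned to the caller.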
+ */ + std::size_t initial_size_of_tuple_list{tuple_list.size()}; + std::size_t initial_size_of_node_version_vec{}; + if (node_version_vec != nullptr) { + initial_size_of_node_version_vec = node_version_vec->size(); + } + /** + * For retry of failing optimistic verify, it must erase parts of + * tuple_list and node vec. clear between initial_size... and current size. + * about tuple_list. + */ + auto clean_up_tuple_list_nvc = [&tuple_list, &node_version_vec, + initial_size_of_tuple_list, + initial_size_of_node_version_vec]() { + if (tuple_list.size() != initial_size_of_tuple_list) { + std::size_t erase_num = + tuple_list.size() - initial_size_of_tuple_list; + tuple_list.erase(tuple_list.end() - erase_num, tuple_list.end()); + } + // about node_version_vec + if (node_version_vec != nullptr) { + if (node_version_vec->size() != initial_size_of_node_version_vec) { + std::size_t erase_num = node_version_vec->size() - + initial_size_of_node_version_vec; + node_version_vec->erase(node_version_vec->end() - // NOLINT + erase_num, // NOLINT + node_version_vec->end()); + } + } + }; +retry: + + /** + * This is used below loop for logging whether this scan catches some + * elements in this node. + */ + bool tuple_pushed_num{false}; + + border_node* bn = *target; + /** + * next node pointer must be logged before optimistic verify. + */ + border_node* next = bn->get_next(); + /** + * get permutation at once. + * After scan border, optimistic verify support this is atomic. + */ + permutation perm(bn->get_permutation().get_body()); + // check all elements in border node. + for (std::size_t i = 0; i < perm.get_cnk(); ++i) { + std::size_t index = perm.get_index_of_rank(i); + key_slice_type ks = bn->get_key_slice_at(index); + key_length_type kl = bn->get_key_length_at(index); + std::string full_key{key_prefix}; + if (kl > 0) { + // gen full key from log and this key slice + full_key.append( + reinterpret_cast(&ks), // NOLINT + kl < sizeof(key_slice_type) ? kl : sizeof(key_slice_type)); + /** + * If the key is complete (kl < sizeof(key_slice_type)), the key + * slice must be copied by the size of key length. + * Otherwise, sizeof key_slice_type. + */ + } + link_or_value* lv = bn->get_lv_at(index); + value* vp = lv->get_value(); + base_node* next_layer = lv->get_next_layer(); + node_version64* node_version_ptr = bn->get_version_ptr(); + /** + * This verification may seem verbose, but it can also be considered + * an early abort. + */ + status check_status = scan_check_retry(bn, v_at_fb); + if (check_status != status::OK) { + // failed. clean up tuple list and node vesion vec. + clean_up_tuple_list_nvc(); + } + if (check_status == status::OK_RETRY_FROM_ROOT) { + return status::OK_RETRY_FROM_ROOT; + } + if (check_status == status::OK_RETRY_AFTER_FB) { + goto retry; // NOLINT + } + if (kl > sizeof(key_slice_type)) { + std::string_view arg_l_key; + scan_endpoint arg_l_end{}; + if (l_end == scan_endpoint::INF) { + arg_l_key = ""; + arg_l_end = scan_endpoint::INF; + } else { + key_slice_type l_key_slice{0}; + memcpy(&l_key_slice, l_key.data(), + l_key.size() < sizeof(key_slice_type) + ? 
l_key.size() + : sizeof(key_slice_type)); + // check left point + int ret_cmp = memcmp(&l_key_slice, &ks, sizeof(key_slice_type)); + if (ret_cmp < 0) { + arg_l_key = ""; + arg_l_end = scan_endpoint::INF; + } else if (ret_cmp == 0) { + arg_l_key = l_key; + if (arg_l_key.size() > sizeof(key_slice_type)) { + arg_l_key.remove_prefix(sizeof(key_slice_type)); + } else { + arg_l_key = ""; + } + arg_l_end = l_end; + } else { + continue; + /** + * Ignore it because it is smaller than the left end point. + */ + } + } + std::string_view arg_r_key; + scan_endpoint arg_r_end{}; + if (r_end == scan_endpoint::INF) { + arg_r_key = ""; + arg_r_end = scan_endpoint::INF; + } else { + int ret_cmp = memcmp(r_key.data(), full_key.data(), + r_key.size() < full_key.size() + ? r_key.size() + : full_key.size()); + if (ret_cmp < 0) { return status::OK_SCAN_END; } + if (ret_cmp == 0) { + if (r_key.size() <= full_key.size()) { + return status::OK_SCAN_END; + } + arg_r_key = r_key; + arg_r_end = r_end; + } else { + arg_r_key = ""; + arg_r_end = scan_endpoint::INF; + } + } + check_status = + scan(next_layer, arg_l_key, arg_l_end, arg_r_key, arg_r_end, + tuple_list, node_version_vec, full_key, max_size); + if (check_status != status::OK) { + // failed. clean up tuple list and node vesion vec. + clean_up_tuple_list_nvc(); + goto retry; // NOLINT + } + } else { + auto in_range = [&full_key, &tuple_list, &vp, &node_version_vec, + &v_at_fb, &node_version_ptr, &tuple_pushed_num, + max_size]() { + tuple_list.emplace_back(std::make_tuple( + full_key, static_cast(value::get_body(vp)), + value::get_len(vp))); + if (node_version_vec != nullptr) { + /** + * note: + * std::get<1>(node_version_vec.back()) != node_version_ptr + * Adding this can reduce redundant emplace_back. However, + * the correspondence between the value of the scan result + * and the pointer to the node version becomes unknown, + * making it impossible to perform node verify according + * to the actual situation read by the transaction + * execution engine. + */ + node_version_vec->emplace_back( + std::make_pair(v_at_fb, node_version_ptr)); + } + tuple_pushed_num = true; + if (max_size != 0 && tuple_list.size() >= max_size) { + return status::OK_SCAN_END; + } + return status::OK; + }; + if (l_end == scan_endpoint::INF && r_end == scan_endpoint::INF) { + // all range + if (in_range() != status::OK) return status::OK_SCAN_END; + continue; + } + // not all range + if (l_end != scan_endpoint::INF) { + key_slice_type l_key_slice{0}; + if (!l_key.empty()) { + memcpy(&l_key_slice, l_key.data(), + l_key.size() < sizeof(key_slice_type) + ? l_key.size() + : sizeof(key_slice_type)); + } + int l_cmp = memcmp(&l_key_slice, &ks, sizeof(key_slice_type)); + if (l_cmp > 0 || + (l_cmp == 0 && (l_key.size() > kl || + (l_key.size() == kl && + l_end == scan_endpoint::EXCLUSIVE)))) { + continue; + } + } + // pass left endpoint. + if (r_end == scan_endpoint::INF) { + if (in_range() != status::OK) return status::OK_SCAN_END; + continue; + } + int r_cmp = + memcmp(r_key.data(), full_key.data(), + r_key.size() < full_key.size() ? r_key.size() + : full_key.size()); + if (r_cmp > 0 || + (r_cmp == 0 && (r_key.size() > full_key.size() || + (r_key.size() == full_key.size() && + r_end == scan_endpoint::INCLUSIVE)))) { + if (in_range() != status::OK) { return status::OK_SCAN_END; } + continue; + } + // pass right endpoint. + if (!tuple_pushed_num && node_version_vec != nullptr) { + /** + * Since it is a rightmost node included in the range, it is + * included in the phantom verification. 
However, there were + * no elements included in the range. + */ + node_version_vec->emplace_back( + std::make_pair(v_at_fb, bn->get_version_ptr())); + } + + return status::OK_SCAN_END; + } + } + // done about checking for all elements of border node. + + if (!tuple_pushed_num && node_version_vec != nullptr) { + /** + * Since it is a leftmost node included in the range, it is included + * in the phantom verification. However, there were no elements + * included in the range. + */ + node_version_vec->emplace_back( + std::make_pair(v_at_fb, bn->get_version_ptr())); + } + + // log before verify for atomicity + node_version64_body next_version{}; + if (next != nullptr) { next_version = next->get_stable_version(); } + + // final check for atomicity + status check_status = scan_check_retry(bn, v_at_fb); + if (check_status != status::OK) { + // failed. clean up tuple list and node vesion vec. + clean_up_tuple_list_nvc(); + } + if (check_status == status::OK_RETRY_FROM_ROOT) { + return status::OK_RETRY_FROM_ROOT; + } + if (check_status == status::OK_RETRY_AFTER_FB) { + goto retry; // NOLINT + } + + // it reaches right endpoint of entire tree. + if (next == nullptr) { return status::OK_SCAN_END; } + + // it is in scan range and fin scaning this border node. + *target = next; + v_at_fb = next_version; + return status::OK_SCAN_CONTINUE; +} + } // namespace yakushima \ No newline at end of file diff --git a/include/scheme.h b/include/scheme.h index daa966c..ed8f456 100644 --- a/include/scheme.h +++ b/include/scheme.h @@ -1,212 +1,212 @@ -/** - * @file scheme.h - */ - -#pragma once - -#include -#include -#include -#include -#include - -#include "log.h" - -#include "glog/logging.h" - -namespace yakushima { - -/** - * @brief Session token - */ -using Token = void*; - -using key_slice_type = std::uint64_t; -static constexpr std::size_t key_slice_length = 15; -/** - * key_length_type is used at permutation.h, border_node.h. - * To avoid circular reference at there, declare here. - */ -using key_length_type = std::uint8_t; -using value_length_type = std::size_t; -using value_align_type = std::align_val_t; - -/** - * @brief The stack of (# of nodes, used memory, reserved memory) tuples. - */ -using memory_usage_stack = - std::vector>; - -enum class status : std::int32_t { - /** - * @brief Warning of mistaking usage. - */ - WARN_BAD_USAGE, - WARN_CONCURRENT_OPERATIONS, - WARN_EXIST, - WARN_NOT_EXIST, - /** - * @brief warning - */ - WARN_INVALID_TOKEN, - /** - * @brief Warning - * @details The find_border function tells that it must retry from root of all tree. - */ - WARN_RETRY_FROM_ROOT_OF_ALL, - /** - * @brief warning - * @details The target storage of operations is not found. - */ - WARN_STORAGE_NOT_EXIST, - /** - * @brief Warning - * @details (assign_gc_info) The maximum number of sessions is already up and running. - */ - WARN_MAX_SESSIONS, - /** - * @brief Warning - * @details Masstree originally has a unique key constraint. - * todo (optional): This constraint is removed. - */ - WARN_UNIQUE_RESTRICTION, - /** - * @brief success status - */ - OK, - /** - * @brief (destroy) It destroys existing all trees. - */ - OK_DESTROY_ALL, - /** - * @brief (destroy) It destroys xxx. - */ - OK_DESTROY_BORDER, - /** - * @brief (destroy) It destroys xxx. - */ - OK_DESTROY_INTERIOR, - /** - * @brief Warning - * @details (get/delete) No corresponding value in this storage engine. 
- */ - OK_NOT_FOUND, - OK_RETRY_FETCH_LV, - OK_RETRY_AFTER_FB, - OK_RETRY_FROM_ROOT, - OK_ROOT_IS_DELETED, - /** - * @brief - * (destroy) Root is nullptr and it could not destroy. - * (remove) No existing tree. - */ - OK_ROOT_IS_NULL, - OK_SCAN_CONTINUE, - OK_SCAN_END, - ERR_ARGUMENT, - ERR_BAD_USAGE, - ERR_BOUNDARY, - /** - * @brief fatal error - * - */ - ERR_FATAL, - /** - * @brief root is not both interior and border. - */ - ERR_UNKNOWN_ROOT, -}; - -inline constexpr std::string_view to_string_view(const status value) noexcept { - using namespace std::string_view_literals; - switch (value) { - case status::WARN_BAD_USAGE: - return "WARN_BAD_USAGE"sv; - case status::WARN_CONCURRENT_OPERATIONS: - return "WARN_CONCURRENT_OPERATIONS"sv; - case status::WARN_EXIST: - return "WARN_EXIST"sv; - case status::WARN_NOT_EXIST: - return "WARN_NOT_EXIST"sv; - case status::WARN_INVALID_TOKEN: - return "WARN_INVALID_TOKEN"sv; - case status::WARN_MAX_SESSIONS: - return "WARN_MAX_SESSIONS"sv; - case status::WARN_RETRY_FROM_ROOT_OF_ALL: - return "WARN_RETRY_FROM_ROOT_OF_ALL"sv; - case status::WARN_STORAGE_NOT_EXIST: - return "WARN_STORAGE_NOT_EXIST"sv; - case status::WARN_UNIQUE_RESTRICTION: - return "WARN_UNIQUE_RESTRICTION"sv; - case status::OK: - return "OK"sv; - case status::OK_DESTROY_ALL: - return "OK_DESTROY_ALL"sv; - case status::OK_DESTROY_BORDER: - return "OK_DESTROY_BORDER"sv; - case status::OK_DESTROY_INTERIOR: - return "OK_DESTROY_INTERIOR"sv; - case status::OK_NOT_FOUND: - return "OK_NOT_FOUND"sv; - case status::OK_ROOT_IS_NULL: - return "OK_ROOT_IS_NULL"sv; - case status::OK_RETRY_AFTER_FB: - return "OK_RETRY_AFTER_FB"sv; - case status::OK_RETRY_FETCH_LV: - return "OK_RETRY_FETCH_LV"sv; - case status::OK_RETRY_FROM_ROOT: - return "OK_RETRY_FROM_ROOT"sv; - case status::OK_ROOT_IS_DELETED: - return "OK_ROOT_IS_DELETED"sv; - case status::OK_SCAN_CONTINUE: - return "OK_SCAN_CONTINUE"sv; - case status::OK_SCAN_END: - return "OK_SCAN_END"sv; - case status::ERR_ARGUMENT: - return "ERR_ARGUMENT"sv; - case status::ERR_BAD_USAGE: - return "ERR_BAD_USAGE"sv; - case status::ERR_BOUNDARY: - return "ERR_BOUNDARY"sv; - case status::ERR_FATAL: - return "ERR_FATAL"sv; - case status::ERR_UNKNOWN_ROOT: - return "ERR_UNKNOWN_ROOT"sv; - } - LOG(ERROR) << log_location_prefix; - return ""sv; -} - -inline std::ostream& operator<<(std::ostream& out, const status value) { - return out << to_string_view(value); -} - -/** - * @brief Information about scan's endpoints. - */ -enum class scan_endpoint : char { - /** - * @details Excludes those that match the key specified for the endpoint. - */ - EXCLUSIVE, - /** - * @details Includes those that match the key specified for the endpoint. - */ - INCLUSIVE, - /** - * @details Do not limit the endpoints. - */ - INF, -}; - -template -constexpr bool is_inlinable() { - // pointer type or uintptr_t, it is inlinable - if constexpr (std::is_pointer_v || - std::is_same_v) { - return true; // NOLINT - } - return false; -} - +/** + * @file scheme.h + */ + +#pragma once + +#include +#include +#include +#include +#include + +#include "log.h" + +#include "glog/logging.h" + +namespace yakushima { + +/** + * @brief Session token + */ +using Token = void*; + +using key_slice_type = std::uint64_t; +static constexpr std::size_t key_slice_length = 15; +/** + * key_length_type is used at permutation.h, border_node.h. + * To avoid circular reference at there, declare here. 
+ */ +using key_length_type = std::uint8_t; +using value_length_type = std::size_t; +using value_align_type = std::align_val_t; + +/** + * @brief The stack of (# of nodes, used memory, reserved memory) tuples. + */ +using memory_usage_stack = + std::vector>; + +enum class status : std::int32_t { + /** + * @brief Warning of mistaking usage. + */ + WARN_BAD_USAGE, + WARN_CONCURRENT_OPERATIONS, + WARN_EXIST, + WARN_NOT_EXIST, + /** + * @brief warning + */ + WARN_INVALID_TOKEN, + /** + * @brief Warning + * @details The find_border function tells that it must retry from root of all tree. + */ + WARN_RETRY_FROM_ROOT_OF_ALL, + /** + * @brief warning + * @details The target storage of operations is not found. + */ + WARN_STORAGE_NOT_EXIST, + /** + * @brief Warning + * @details (assign_gc_info) The maximum number of sessions is already up and running. + */ + WARN_MAX_SESSIONS, + /** + * @brief Warning + * @details Masstree originally has a unique key constraint. + * todo (optional): This constraint is removed. + */ + WARN_UNIQUE_RESTRICTION, + /** + * @brief success status + */ + OK, + /** + * @brief (destroy) It destroys existing all trees. + */ + OK_DESTROY_ALL, + /** + * @brief (destroy) It destroys xxx. + */ + OK_DESTROY_BORDER, + /** + * @brief (destroy) It destroys xxx. + */ + OK_DESTROY_INTERIOR, + /** + * @brief Warning + * @details (get/delete) No corresponding value in this storage engine. + */ + OK_NOT_FOUND, + OK_RETRY_FETCH_LV, + OK_RETRY_AFTER_FB, + OK_RETRY_FROM_ROOT, + OK_ROOT_IS_DELETED, + /** + * @brief + * (destroy) Root is nullptr and it could not destroy. + * (remove) No existing tree. + */ + OK_ROOT_IS_NULL, + OK_SCAN_CONTINUE, + OK_SCAN_END, + ERR_ARGUMENT, + ERR_BAD_USAGE, + ERR_BOUNDARY, + /** + * @brief fatal error + * + */ + ERR_FATAL, + /** + * @brief root is not both interior and border. 
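+     * In other words, the root node could be identified as neither an
+     * interior node nor a border node.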
+ */ + ERR_UNKNOWN_ROOT, +}; + +inline constexpr std::string_view to_string_view(const status value) noexcept { + using namespace std::string_view_literals; + switch (value) { + case status::WARN_BAD_USAGE: + return "WARN_BAD_USAGE"sv; + case status::WARN_CONCURRENT_OPERATIONS: + return "WARN_CONCURRENT_OPERATIONS"sv; + case status::WARN_EXIST: + return "WARN_EXIST"sv; + case status::WARN_NOT_EXIST: + return "WARN_NOT_EXIST"sv; + case status::WARN_INVALID_TOKEN: + return "WARN_INVALID_TOKEN"sv; + case status::WARN_MAX_SESSIONS: + return "WARN_MAX_SESSIONS"sv; + case status::WARN_RETRY_FROM_ROOT_OF_ALL: + return "WARN_RETRY_FROM_ROOT_OF_ALL"sv; + case status::WARN_STORAGE_NOT_EXIST: + return "WARN_STORAGE_NOT_EXIST"sv; + case status::WARN_UNIQUE_RESTRICTION: + return "WARN_UNIQUE_RESTRICTION"sv; + case status::OK: + return "OK"sv; + case status::OK_DESTROY_ALL: + return "OK_DESTROY_ALL"sv; + case status::OK_DESTROY_BORDER: + return "OK_DESTROY_BORDER"sv; + case status::OK_DESTROY_INTERIOR: + return "OK_DESTROY_INTERIOR"sv; + case status::OK_NOT_FOUND: + return "OK_NOT_FOUND"sv; + case status::OK_ROOT_IS_NULL: + return "OK_ROOT_IS_NULL"sv; + case status::OK_RETRY_AFTER_FB: + return "OK_RETRY_AFTER_FB"sv; + case status::OK_RETRY_FETCH_LV: + return "OK_RETRY_FETCH_LV"sv; + case status::OK_RETRY_FROM_ROOT: + return "OK_RETRY_FROM_ROOT"sv; + case status::OK_ROOT_IS_DELETED: + return "OK_ROOT_IS_DELETED"sv; + case status::OK_SCAN_CONTINUE: + return "OK_SCAN_CONTINUE"sv; + case status::OK_SCAN_END: + return "OK_SCAN_END"sv; + case status::ERR_ARGUMENT: + return "ERR_ARGUMENT"sv; + case status::ERR_BAD_USAGE: + return "ERR_BAD_USAGE"sv; + case status::ERR_BOUNDARY: + return "ERR_BOUNDARY"sv; + case status::ERR_FATAL: + return "ERR_FATAL"sv; + case status::ERR_UNKNOWN_ROOT: + return "ERR_UNKNOWN_ROOT"sv; + } + LOG(ERROR) << log_location_prefix; + return ""sv; +} + +inline std::ostream& operator<<(std::ostream& out, const status value) { + return out << to_string_view(value); +} + +/** + * @brief Information about scan's endpoints. + */ +enum class scan_endpoint : char { + /** + * @details Excludes those that match the key specified for the endpoint. + */ + EXCLUSIVE, + /** + * @details Includes those that match the key specified for the endpoint. + */ + INCLUSIVE, + /** + * @details Do not limit the endpoints. + */ + INF, +}; + +template +constexpr bool is_inlinable() { + // pointer type or uintptr_t, it is inlinable + if constexpr (std::is_pointer_v || + std::is_same_v) { + return true; // NOLINT + } + return false; +} + } // namespace yakushima \ No newline at end of file diff --git a/include/thread_info.h b/include/thread_info.h index 86f4aab..1e23c37 100644 --- a/include/thread_info.h +++ b/include/thread_info.h @@ -1,63 +1,63 @@ -/** - * @file thread_info.h - */ - -#pragma once - -#include - -#include "clock.h" -#include "cpu.h" -#include "epoch.h" -#include "garbage_collection.h" - -namespace yakushima { - -class alignas(CACHE_LINE_SIZE) thread_info { -public: - /** - * @details Take the right to assign this gc_info. - * @return true success. - * @return false fail. 
- */ - bool gain_the_right() { - bool expected(running_.load(std::memory_order_acquire)); - for (;;) { - if (expected) { return false; } - if (running_.compare_exchange_weak(expected, true, - std::memory_order_acq_rel, - std::memory_order_acquire)) { - return true; - } - } - } - - [[nodiscard]] Epoch get_begin_epoch() const { - return begin_epoch_.load(std::memory_order_acquire); - } - - [[nodiscard]] garbage_collection& get_gc_info() { return gc_info_; } - - [[nodiscard]] bool get_running() const { - return running_.load(std::memory_order_acquire); - } - - void set_begin_epoch(const Epoch epoch) { - begin_epoch_.store(epoch, std::memory_order_relaxed); - } - - void set_running(const bool tf) { - running_.store(tf, std::memory_order_relaxed); - } - -private: - /** - * @details This is updated by worker and is read by leader. If the value is 0, - * it is invalid. - */ - std::atomic begin_epoch_{0}; - std::atomic running_{false}; - garbage_collection gc_info_; -}; - +/** + * @file thread_info.h + */ + +#pragma once + +#include + +#include "clock.h" +#include "cpu.h" +#include "epoch.h" +#include "garbage_collection.h" + +namespace yakushima { + +class alignas(CACHE_LINE_SIZE) thread_info { +public: + /** + * @details Take the right to assign this gc_info. + * @return true success. + * @return false fail. + */ + bool gain_the_right() { + bool expected(running_.load(std::memory_order_acquire)); + for (;;) { + if (expected) { return false; } + if (running_.compare_exchange_weak(expected, true, + std::memory_order_acq_rel, + std::memory_order_acquire)) { + return true; + } + } + } + + [[nodiscard]] Epoch get_begin_epoch() const { + return begin_epoch_.load(std::memory_order_acquire); + } + + [[nodiscard]] garbage_collection& get_gc_info() { return gc_info_; } + + [[nodiscard]] bool get_running() const { + return running_.load(std::memory_order_acquire); + } + + void set_begin_epoch(const Epoch epoch) { + begin_epoch_.store(epoch, std::memory_order_relaxed); + } + + void set_running(const bool tf) { + running_.store(tf, std::memory_order_relaxed); + } + +private: + /** + * @details This is updated by worker and is read by leader. If the value is 0, + * it is invalid. + */ + std::atomic begin_epoch_{0}; + std::atomic running_{false}; + garbage_collection gc_info_; +}; + } // namespace yakushima \ No newline at end of file diff --git a/include/thread_info_table.h b/include/thread_info_table.h index b82559b..23725fd 100644 --- a/include/thread_info_table.h +++ b/include/thread_info_table.h @@ -1,110 +1,110 @@ -/** - * @file thread_info_table.h - */ - -#pragma once - -#include "border_node.h" -#include "config.h" -#include "interior_node.h" -#include "thread_info.h" - -namespace yakushima { - -class thread_info_table { -public: - /** - * @brief Allocates a free session. - * @param[out] token If the return value of the function is status::OK, - * then the token is the acquired session. - * @return status::OK success. - * @return status::WARN_MAX_SESSIONS The maximum number of sessions is already up - * and running. 
- */ - static status assign_thread_info(Token& token) { - for (auto&& elem : thread_info_table_) { - if (elem.gain_the_right()) { - elem.set_begin_epoch(epoch_management::get_epoch()); - token = &(elem); - return status::OK; - } - } - return status::WARN_MAX_SESSIONS; - } - - static void fin() { - std::vector th_vc; - th_vc.reserve(thread_info_table_.size()); - for (auto&& elem : thread_info_table_) { - auto process = [&elem](bool do_rr) { - elem.get_gc_info().fin(); - if (do_rr) { destroy_manager::return_room(); } - }; - if (destroy_manager::check_room()) { - th_vc.emplace_back(process, true); - } else { - process(false); - } - } - for (auto&& th : th_vc) { th.join(); } - } - - static void gc() { - for (auto&& elem : thread_info_table_) { - elem.get_gc_info().gc(); - } - } - - /** - * @brief Get reference of thread_info_table_. - * @return the reference of thread_info_table_. - */ - static std::array& - get_thread_info_table() { - return thread_info_table_; - } - - /** - * @brief initialize thread_info_table_. - * @pre global epoch is not yet functional because it assigns 0 to begin_epoch as - * the initial value. - * @return void - */ - static void init() { - for (auto&& elem : thread_info_table_) { - elem.set_begin_epoch(0); - elem.set_running(false); - } - } - - /** - * @details When @a token points to an invalid memory location, an error occurs - * if @a token is referenced. - * To avoid this, it scans the table. - * So if @token is invalid one, return status::WARN_INVALID_TOKEN. - * @tparam interior_node Class information is given at compile time to eliminate - * the dependency between header files. - * @tparam border_node Class information is given at compile time to eliminate the - * dependency between header files. - * @param[in] token Session information. - * @return status::OK success. - * @return status::WARN_INVALID_TOKEN The @a token of the argument was invalid. - */ - template - static status leave_thread_info(Token token) { - auto* target = static_cast(token); - target->set_begin_epoch(0); - target->set_running(false); - return status::OK; - } - -private: - /** - * @brief Session information used by garbage collection. - */ - static inline std::array // NOLINT - thread_info_table_; // NOLINT -}; - +/** + * @file thread_info_table.h + */ + +#pragma once + +#include "border_node.h" +#include "config.h" +#include "interior_node.h" +#include "thread_info.h" + +namespace yakushima { + +class thread_info_table { +public: + /** + * @brief Allocates a free session. + * @param[out] token If the return value of the function is status::OK, + * then the token is the acquired session. + * @return status::OK success. + * @return status::WARN_MAX_SESSIONS The maximum number of sessions is already up + * and running. 
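+     *
+     * A minimal usage sketch (illustrative only; in a typical deployment
+     * these calls would presumably be made through the library's public
+     * session API rather than invoked directly):
+     * @code
+     * Token token{};
+     * if (thread_info_table::assign_thread_info(token) == status::OK) {
+     *     // ... perform operations tied to this session ...
+     *     thread_info_table::leave_thread_info<interior_node, border_node>(token);
+     * }
+     * @endcode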
+ */ + static status assign_thread_info(Token& token) { + for (auto&& elem : thread_info_table_) { + if (elem.gain_the_right()) { + elem.set_begin_epoch(epoch_management::get_epoch()); + token = &(elem); + return status::OK; + } + } + return status::WARN_MAX_SESSIONS; + } + + static void fin() { + std::vector th_vc; + th_vc.reserve(thread_info_table_.size()); + for (auto&& elem : thread_info_table_) { + auto process = [&elem](bool do_rr) { + elem.get_gc_info().fin(); + if (do_rr) { destroy_manager::return_room(); } + }; + if (destroy_manager::check_room()) { + th_vc.emplace_back(process, true); + } else { + process(false); + } + } + for (auto&& th : th_vc) { th.join(); } + } + + static void gc() { + for (auto&& elem : thread_info_table_) { + elem.get_gc_info().gc(); + } + } + + /** + * @brief Get reference of thread_info_table_. + * @return the reference of thread_info_table_. + */ + static std::array& + get_thread_info_table() { + return thread_info_table_; + } + + /** + * @brief initialize thread_info_table_. + * @pre global epoch is not yet functional because it assigns 0 to begin_epoch as + * the initial value. + * @return void + */ + static void init() { + for (auto&& elem : thread_info_table_) { + elem.set_begin_epoch(0); + elem.set_running(false); + } + } + + /** + * @details When @a token points to an invalid memory location, an error occurs + * if @a token is referenced. + * To avoid this, it scans the table. + * So if @token is invalid one, return status::WARN_INVALID_TOKEN. + * @tparam interior_node Class information is given at compile time to eliminate + * the dependency between header files. + * @tparam border_node Class information is given at compile time to eliminate the + * dependency between header files. + * @param[in] token Session information. + * @return status::OK success. + * @return status::WARN_INVALID_TOKEN The @a token of the argument was invalid. + */ + template + static status leave_thread_info(Token token) { + auto* target = static_cast(token); + target->set_begin_epoch(0); + target->set_running(false); + return status::OK; + } + +private: + /** + * @brief Session information used by garbage collection. + */ + static inline std::array // NOLINT + thread_info_table_; // NOLINT +}; + } // namespace yakushima \ No newline at end of file diff --git a/include/version.h b/include/version.h index cf2bd20..fcf274a 100644 --- a/include/version.h +++ b/include/version.h @@ -1,395 +1,395 @@ -/** - * @file version.h - * @brief version number layout - */ - -#pragma once - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "atomic_wrapper.h" - -namespace yakushima { - -/** - * @brief Teh body of node_version64. - * @details This class is designed to be able to be wrapped by std::atomic, - * so it can't declare default constructor. Therefore, it should use init function to - * initialize before using this class object. 
- */ -class alignas(sizeof(std::uint64_t)) node_version64_body { -public: - using vinsert_delete_type = std::uint32_t; - using vsplit_type = std::uint32_t; - - node_version64_body() = default; - - node_version64_body(const node_version64_body&) = default; - - node_version64_body(node_version64_body&&) = default; - - node_version64_body& operator=(const node_version64_body&) = default; - - node_version64_body& operator=(node_version64_body&&) = default; - - ~node_version64_body() = default; - - bool operator==(const node_version64_body& rhs) const { - return memcmp(this, &rhs, sizeof(node_version64_body)) == 0; - } - - /** - * @details display function for analysis and debug. - */ - void display() const { - std::cout << "node_version64_body::display" << std::endl; - std::cout << "locked : " << get_locked() << std::endl; - std::cout << "inserting_deleting : " << get_inserting_deleting() - << std::endl; - std::cout << "splitting : " << get_splitting() << std::endl; - std::cout << "deleted : " << get_deleted() << std::endl; - std::cout << "root : " << get_root() << std::endl; - std::cout << "border : " << get_border() << std::endl; - std::cout << "vinsert_delete : " << get_vinsert_delete() << std::endl; - std::cout << "vsplit: " << get_vsplit() << std::endl; - } - - bool operator!=(const node_version64_body& rhs) const { - return !(*this == rhs); - } - - [[nodiscard]] bool get_border() const { return border; } - - [[nodiscard]] bool get_deleted() const { return deleted; } - - [[nodiscard]] bool get_inserting_deleting() const { - return inserting_deleting; - } - - [[nodiscard]] bool get_locked() const { return locked; } - - [[nodiscard]] bool get_root() const { return root; } - - [[nodiscard]] bool get_splitting() const { return splitting; } - - [[nodiscard]] vinsert_delete_type get_vinsert_delete() const { - return vinsert_delete; - } - - [[nodiscard]] vsplit_type get_vsplit() const { return vsplit; } - - void inc_vinsert_delete() { ++vinsert_delete; } - - void inc_vsplit() { ++vsplit; } - - void init() { - locked = false; - inserting_deleting = false; - splitting = false; - deleted = false; - root = false; - border = false; - vinsert_delete = 0; - vsplit = 0; - } - - void set_border(const bool new_border) { border = new_border; } - - void set_deleted(const bool new_deleted) { deleted = new_deleted; } - - void set_inserting_deleting(const bool new_inserting_deleting) { - inserting_deleting = new_inserting_deleting; - } - - void set_locked(const bool new_locked) { locked = new_locked; } - - void set_root(const bool new_root) { root = new_root; } - - void set_splitting(const bool new_splitting) { splitting = new_splitting; } - -private: - /** - * These details is based on original paper Fig. 3. - * Declaration order is because class size does not exceed 8 bytes. - */ - /** - * @attention tanabe : In the original paper, the interior node does not have a delete - * count field. On the other hand, the border node has this (nremoved) but no details in - * original paper. Since there is a @a deleted field in the version, you can check - * whether the node you are checking has been deleted. However, you do not know that the - * position has been moved. Maybe you took the node from the wrong position. The - * original paper cannot detect it. Therefore, add notion of delete field. - * @details It is a counter incremented after inserting_deleting/deleting. - */ - vinsert_delete_type vinsert_delete : 29; - /** - * @details It is claimed by update or insert. 
- */ - vinsert_delete_type locked : 1; - /** - * @details It is a dirty bit set during inserting_deleting. - */ - vinsert_delete_type inserting_deleting : 1; - /** - * @details It is a dirty bit set during splitting. - * If this flag is set, vsplit is incremented when the lock is unlocked. - * The function find_lowest_key takes the value from the node when this flag is up. - * Read. When we raise this flag, we guarantee that no problems will occur with it. - */ - vinsert_delete_type splitting : 1; - /** - * @details It is a counter incremented after splitting. - */ - vsplit_type vsplit : 29; - /** - * @details It is a delete bit set after delete. - */ - vsplit_type deleted : 1; - /** - * @details It tells whether the node is the root of some B+-tree. - */ - vsplit_type root : 1; - /** - * @details It tells whether the node is interior or border. - */ - vsplit_type border : 1; -}; - -inline std::ostream& operator<<(std::ostream& out, // NOLINT - node_version64_body body) { - out << "vinsert_delete:" << body.get_vinsert_delete() - << ", locked:" << body.get_locked() - << ", inserting_deleting:" << body.get_inserting_deleting() - << ", splitting:" << body.get_splitting() - << ", vsplit:" << body.get_vsplit() - << ", deleted:" << body.get_deleted() << ", root:" << body.get_root() - << ", border:" << body.get_border(); - return out; -} - -// check the size of a version body -static_assert(sizeof(node_version64_body) == 8); - -/** - * @brief The class which has atomic - */ -class node_version64 { -public: - /** - * @details Basically, it should use this default constructor to use init func. - * Of course, it can use this class without default constructor if it use init - * function(). - */ - node_version64() : body_{} {} - - /** - * @details This is atomic increment. - * If you use "setter(getter + 1)", that is not atomic increment. 
- */ - void atomic_inc_vinsert() { - node_version64_body expected(get_body()); - node_version64_body desired{}; - for (;;) { - desired = expected; - desired.inc_vinsert_delete(); - if (body_.compare_exchange_weak(expected, desired, - std::memory_order_acq_rel, - std::memory_order_acquire)) { - break; - } - } - } - - void atomic_set_border(const bool tf) { - node_version64_body expected(get_body()); - node_version64_body desired{}; - for (;;) { - desired = expected; - desired.set_border(tf); - if (body_.compare_exchange_weak(expected, desired, - std::memory_order_acq_rel, - std::memory_order_acquire)) { - break; - } - } - } - - void atomic_set_deleted(const bool tf) { - node_version64_body expected(get_body()); - node_version64_body desired{}; - for (;;) { - desired = expected; - desired.set_deleted(tf); - if (body_.compare_exchange_weak(expected, desired, - std::memory_order_acq_rel, - std::memory_order_acquire)) { - break; - } - } - } - - void atomic_set_inserting_deleting(const bool tf) { - node_version64_body expected(get_body()); - node_version64_body desired{}; - for (;;) { - desired = expected; - desired.set_inserting_deleting(tf); - if (body_.compare_exchange_weak(expected, desired, - std::memory_order_acq_rel, - std::memory_order_acquire)) { - break; - } - } - } - - void atomic_set_root(const bool tf) { - node_version64_body expected(get_body()); - node_version64_body desired{}; - for (;;) { - desired = expected; - desired.set_root(tf); - if (body_.compare_exchange_weak(expected, desired, - std::memory_order_acq_rel, - std::memory_order_acquire)) { - break; - } - } - } - - void atomic_set_splitting(const bool tf) { - node_version64_body expected(get_body()); - node_version64_body desired{}; - for (;;) { - desired = expected; - desired.set_splitting(tf); - if (body_.compare_exchange_weak(expected, desired, - std::memory_order_acq_rel, - std::memory_order_acquire)) { - break; - } - } - } - - /** - * @details display function for analysis and debug. - */ - void display() const { get_body().display(); } - - /** - * @details This function locks atomically. - * @return void - */ - void lock() { - node_version64_body expected{}; - node_version64_body desired{}; - for (;;) { - for (size_t i = 1;; ++i) { - expected = get_body(); - if (expected.get_locked()) { - if (i >= 10) { break; } - _mm_pause(); - continue; - } - desired = expected; - desired.set_locked(true); - if (body_.compare_exchange_weak(expected, desired, - std::memory_order_acq_rel, - std::memory_order_acquire)) { - return; - } - } - std::this_thread::sleep_for(std::chrono::microseconds(1)); - } - } - - [[nodiscard]] node_version64_body get_body() const { - return body_.load(std::memory_order_acquire); - } - - [[nodiscard]] bool get_border() const { return get_body().get_border(); } - - [[nodiscard]] bool get_deleted() const { return get_body().get_deleted(); } - - [[nodiscard]] bool get_locked() const { return get_body().get_locked(); } - - [[nodiscard]] bool get_root() const { return get_body().get_root(); } - - [[nodiscard]] node_version64_body get_stable_version() const { - for (;;) { - node_version64_body sv = get_body(); - /** - * In the original paper, lock is not checked. - * However, if the lock is acquired, the member of that node can be changed. - * Even if the locked version is immutable, the members read at that time may be - * broken. Therefore, you have to check the lock. 
- */ - if (!sv.get_inserting_deleting() && !sv.get_locked() && - !sv.get_splitting()) { - return sv; - } - _mm_pause(); - } - } - - [[nodiscard]] node_version64_body::vinsert_delete_type - get_vinsert_delete() const { - return get_body().get_vinsert_delete(); - } - - [[nodiscard]] node_version64_body::vsplit_type get_vsplit() const { - return get_body().get_vsplit(); - } - - /** - * @pre This function is called by only single thread. - */ - void init() { set_body(node_version64_body()); } - - void set_body(const node_version64_body newv) { - body_.store(newv, std::memory_order_release); - } - - /** - * @details This function unlocks @a atomically. - * @pre The caller already succeeded acquiring lock. - */ - void unlock() { - node_version64_body expected(get_body()); - node_version64_body desired{}; - for (;;) { - desired = expected; - if (desired.get_inserting_deleting()) { - desired.inc_vinsert_delete(); - desired.set_inserting_deleting(false); - } - if (desired.get_splitting()) { - desired.inc_vsplit(); - desired.set_splitting(false); - } - desired.set_locked(false); - if (body_.compare_exchange_weak(expected, desired, - std::memory_order_acq_rel, - std::memory_order_acquire)) { - break; - } - } - } - - static void unlock(std::vector& lock_list) { - for (auto&& l : lock_list) { l->unlock(); } - } - -private: - std::atomic body_; -}; - +/** + * @file version.h + * @brief version number layout + */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "atomic_wrapper.h" + +namespace yakushima { + +/** + * @brief Teh body of node_version64. + * @details This class is designed to be able to be wrapped by std::atomic, + * so it can't declare default constructor. Therefore, it should use init function to + * initialize before using this class object. + */ +class alignas(sizeof(std::uint64_t)) node_version64_body { +public: + using vinsert_delete_type = std::uint32_t; + using vsplit_type = std::uint32_t; + + node_version64_body() = default; + + node_version64_body(const node_version64_body&) = default; + + node_version64_body(node_version64_body&&) = default; + + node_version64_body& operator=(const node_version64_body&) = default; + + node_version64_body& operator=(node_version64_body&&) = default; + + ~node_version64_body() = default; + + bool operator==(const node_version64_body& rhs) const { + return memcmp(this, &rhs, sizeof(node_version64_body)) == 0; + } + + /** + * @details display function for analysis and debug. 
+ */ + void display() const { + std::cout << "node_version64_body::display" << std::endl; + std::cout << "locked : " << get_locked() << std::endl; + std::cout << "inserting_deleting : " << get_inserting_deleting() + << std::endl; + std::cout << "splitting : " << get_splitting() << std::endl; + std::cout << "deleted : " << get_deleted() << std::endl; + std::cout << "root : " << get_root() << std::endl; + std::cout << "border : " << get_border() << std::endl; + std::cout << "vinsert_delete : " << get_vinsert_delete() << std::endl; + std::cout << "vsplit: " << get_vsplit() << std::endl; + } + + bool operator!=(const node_version64_body& rhs) const { + return !(*this == rhs); + } + + [[nodiscard]] bool get_border() const { return border; } + + [[nodiscard]] bool get_deleted() const { return deleted; } + + [[nodiscard]] bool get_inserting_deleting() const { + return inserting_deleting; + } + + [[nodiscard]] bool get_locked() const { return locked; } + + [[nodiscard]] bool get_root() const { return root; } + + [[nodiscard]] bool get_splitting() const { return splitting; } + + [[nodiscard]] vinsert_delete_type get_vinsert_delete() const { + return vinsert_delete; + } + + [[nodiscard]] vsplit_type get_vsplit() const { return vsplit; } + + void inc_vinsert_delete() { ++vinsert_delete; } + + void inc_vsplit() { ++vsplit; } + + void init() { + locked = false; + inserting_deleting = false; + splitting = false; + deleted = false; + root = false; + border = false; + vinsert_delete = 0; + vsplit = 0; + } + + void set_border(const bool new_border) { border = new_border; } + + void set_deleted(const bool new_deleted) { deleted = new_deleted; } + + void set_inserting_deleting(const bool new_inserting_deleting) { + inserting_deleting = new_inserting_deleting; + } + + void set_locked(const bool new_locked) { locked = new_locked; } + + void set_root(const bool new_root) { root = new_root; } + + void set_splitting(const bool new_splitting) { splitting = new_splitting; } + +private: + /** + * These details is based on original paper Fig. 3. + * Declaration order is because class size does not exceed 8 bytes. + */ + /** + * @attention tanabe : In the original paper, the interior node does not have a delete + * count field. On the other hand, the border node has this (nremoved) but no details in + * original paper. Since there is a @a deleted field in the version, you can check + * whether the node you are checking has been deleted. However, you do not know that the + * position has been moved. Maybe you took the node from the wrong position. The + * original paper cannot detect it. Therefore, add notion of delete field. + * @details It is a counter incremented after inserting_deleting/deleting. + */ + vinsert_delete_type vinsert_delete : 29; + /** + * @details It is claimed by update or insert. + */ + vinsert_delete_type locked : 1; + /** + * @details It is a dirty bit set during inserting_deleting. + */ + vinsert_delete_type inserting_deleting : 1; + /** + * @details It is a dirty bit set during splitting. + * If this flag is set, vsplit is incremented when the lock is unlocked. + * The function find_lowest_key takes the value from the node when this flag is up. + * Read. When we raise this flag, we guarantee that no problems will occur with it. + */ + vinsert_delete_type splitting : 1; + /** + * @details It is a counter incremented after splitting. + */ + vsplit_type vsplit : 29; + /** + * @details It is a delete bit set after delete. 
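+ * Together with @a root and @a border it shares the upper 32 bits of the word
+ * with the 29-bit @a vsplit counter, so the whole version body still fits in
+ * the single 8-byte word checked by the static_assert below.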
+ */ + vsplit_type deleted : 1; + /** + * @details It tells whether the node is the root of some B+-tree. + */ + vsplit_type root : 1; + /** + * @details It tells whether the node is interior or border. + */ + vsplit_type border : 1; +}; + +inline std::ostream& operator<<(std::ostream& out, // NOLINT + node_version64_body body) { + out << "vinsert_delete:" << body.get_vinsert_delete() + << ", locked:" << body.get_locked() + << ", inserting_deleting:" << body.get_inserting_deleting() + << ", splitting:" << body.get_splitting() + << ", vsplit:" << body.get_vsplit() + << ", deleted:" << body.get_deleted() << ", root:" << body.get_root() + << ", border:" << body.get_border(); + return out; +} + +// check the size of a version body +static_assert(sizeof(node_version64_body) == 8); + +/** + * @brief The class which has atomic + */ +class node_version64 { +public: + /** + * @details Basically, it should use this default constructor to use init func. + * Of course, it can use this class without default constructor if it use init + * function(). + */ + node_version64() : body_{} {} + + /** + * @details This is atomic increment. + * If you use "setter(getter + 1)", that is not atomic increment. + */ + void atomic_inc_vinsert() { + node_version64_body expected(get_body()); + node_version64_body desired{}; + for (;;) { + desired = expected; + desired.inc_vinsert_delete(); + if (body_.compare_exchange_weak(expected, desired, + std::memory_order_acq_rel, + std::memory_order_acquire)) { + break; + } + } + } + + void atomic_set_border(const bool tf) { + node_version64_body expected(get_body()); + node_version64_body desired{}; + for (;;) { + desired = expected; + desired.set_border(tf); + if (body_.compare_exchange_weak(expected, desired, + std::memory_order_acq_rel, + std::memory_order_acquire)) { + break; + } + } + } + + void atomic_set_deleted(const bool tf) { + node_version64_body expected(get_body()); + node_version64_body desired{}; + for (;;) { + desired = expected; + desired.set_deleted(tf); + if (body_.compare_exchange_weak(expected, desired, + std::memory_order_acq_rel, + std::memory_order_acquire)) { + break; + } + } + } + + void atomic_set_inserting_deleting(const bool tf) { + node_version64_body expected(get_body()); + node_version64_body desired{}; + for (;;) { + desired = expected; + desired.set_inserting_deleting(tf); + if (body_.compare_exchange_weak(expected, desired, + std::memory_order_acq_rel, + std::memory_order_acquire)) { + break; + } + } + } + + void atomic_set_root(const bool tf) { + node_version64_body expected(get_body()); + node_version64_body desired{}; + for (;;) { + desired = expected; + desired.set_root(tf); + if (body_.compare_exchange_weak(expected, desired, + std::memory_order_acq_rel, + std::memory_order_acquire)) { + break; + } + } + } + + void atomic_set_splitting(const bool tf) { + node_version64_body expected(get_body()); + node_version64_body desired{}; + for (;;) { + desired = expected; + desired.set_splitting(tf); + if (body_.compare_exchange_weak(expected, desired, + std::memory_order_acq_rel, + std::memory_order_acquire)) { + break; + } + } + } + + /** + * @details display function for analysis and debug. + */ + void display() const { get_body().display(); } + + /** + * @details This function locks atomically. 
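+ * It retries with compare_exchange_weak; after roughly ten consecutive
+ * observations of a locked version it backs off for one microsecond before
+ * spinning again, so a writer does not burn a core on a long-held lock.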
+ * @return void + */ + void lock() { + node_version64_body expected{}; + node_version64_body desired{}; + for (;;) { + for (size_t i = 1;; ++i) { + expected = get_body(); + if (expected.get_locked()) { + if (i >= 10) { break; } + _mm_pause(); + continue; + } + desired = expected; + desired.set_locked(true); + if (body_.compare_exchange_weak(expected, desired, + std::memory_order_acq_rel, + std::memory_order_acquire)) { + return; + } + } + std::this_thread::sleep_for(std::chrono::microseconds(1)); + } + } + + [[nodiscard]] node_version64_body get_body() const { + return body_.load(std::memory_order_acquire); + } + + [[nodiscard]] bool get_border() const { return get_body().get_border(); } + + [[nodiscard]] bool get_deleted() const { return get_body().get_deleted(); } + + [[nodiscard]] bool get_locked() const { return get_body().get_locked(); } + + [[nodiscard]] bool get_root() const { return get_body().get_root(); } + + [[nodiscard]] node_version64_body get_stable_version() const { + for (;;) { + node_version64_body sv = get_body(); + /** + * In the original paper, lock is not checked. + * However, if the lock is acquired, the member of that node can be changed. + * Even if the locked version is immutable, the members read at that time may be + * broken. Therefore, you have to check the lock. + */ + if (!sv.get_inserting_deleting() && !sv.get_locked() && + !sv.get_splitting()) { + return sv; + } + _mm_pause(); + } + } + + [[nodiscard]] node_version64_body::vinsert_delete_type + get_vinsert_delete() const { + return get_body().get_vinsert_delete(); + } + + [[nodiscard]] node_version64_body::vsplit_type get_vsplit() const { + return get_body().get_vsplit(); + } + + /** + * @pre This function is called by only single thread. + */ + void init() { set_body(node_version64_body()); } + + void set_body(const node_version64_body newv) { + body_.store(newv, std::memory_order_release); + } + + /** + * @details This function unlocks @a atomically. + * @pre The caller already succeeded acquiring lock. 
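+ * Besides releasing the lock bit, unlock publishes the pending changes: if the
+ * inserting_deleting or splitting dirty bit is set, it increments the matching
+ * counter (vinsert_delete or vsplit) and clears the bit in the same atomic
+ * compare-and-swap.
+ * A minimal sketch of the intended pairing (illustrative only):
+ * @code
+ * node_version64 v{};
+ * v.init();                               // documented initialization step
+ * v.lock();
+ * v.atomic_set_inserting_deleting(true);  // mark the in-flight modification
+ * // ... modify the guarded node ...
+ * v.unlock();                             // clears the bit and bumps vinsert_delete
+ * @endcode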
+ */ + void unlock() { + node_version64_body expected(get_body()); + node_version64_body desired{}; + for (;;) { + desired = expected; + if (desired.get_inserting_deleting()) { + desired.inc_vinsert_delete(); + desired.set_inserting_deleting(false); + } + if (desired.get_splitting()) { + desired.inc_vsplit(); + desired.set_splitting(false); + } + desired.set_locked(false); + if (body_.compare_exchange_weak(expected, desired, + std::memory_order_acq_rel, + std::memory_order_acquire)) { + break; + } + } + } + + static void unlock(std::vector& lock_list) { + for (auto&& l : lock_list) { l->unlock(); } + } + +private: + std::atomic body_; +}; + } // namespace yakushima \ No newline at end of file diff --git a/test/multi_thread/delete/multi_thread_delete_100_key_test.cpp b/test/multi_thread/delete/multi_thread_delete_100_key_test.cpp index a7276e0..024ce13 100644 --- a/test/multi_thread/delete/multi_thread_delete_100_key_test.cpp +++ b/test/multi_thread/delete/multi_thread_delete_100_key_test.cpp @@ -1,270 +1,270 @@ -/** - * @file multi_thread_delete_test.cpp - */ - -#include -#include -#include -#include -#include - -#include "kvs.h" - -#include "gtest/gtest.h" - -#include "glog/logging.h" -using namespace yakushima; - -namespace yakushima::testing { - -class multi_thread_delete_100_key_test : public ::testing::Test { -public: - static void call_once_f() { - google::InitGoogleLogging("yakushima-test-multi_thread-delete-multi_" - "thread_delete_100_key_test"); - FLAGS_stderrthreshold = 0; - } - void SetUp() override { - std::call_once(init_, call_once_f); - init(); - } - - void TearDown() override { fin(); } - -private: - static inline std::once_flag init_; // NOLINT -}; - -std::string test_storage_name{"1"}; // NOLINT - -TEST_F(multi_thread_delete_100_key_test, ordered_100_key) { // NOLINT - /** - * Concurrent remove against 100 key. 
- */ - - constexpr std::size_t ary_size = 100; - std::vector> kv1{}; // NOLINT - std::vector> kv2{}; // NOLINT - std::string k{8, 0}; - for (std::size_t i = 0; i < ary_size / 2; ++i) { - memcpy(k.data(), &i, sizeof(i)); - kv1.emplace_back(k, std::to_string(i)); - } - for (std::size_t i = ary_size / 2; i < ary_size; ++i) { - memcpy(k.data(), &i, sizeof(i)); - kv2.emplace_back(k, std::to_string(i)); - } - -#ifndef NDEBUG - for (size_t h = 0; h < 1; ++h) { -#else - for (size_t h = 0; h < 20; ++h) { -#endif - create_storage(test_storage_name); - std::array token{}; - ASSERT_EQ(enter(token.at(0)), status::OK); - ASSERT_EQ(enter(token.at(1)), status::OK); - - std::reverse(kv1.begin(), kv1.end()); - std::reverse(kv2.begin(), kv2.end()); - - struct S { - static void - put_work(Token& token, - std::vector>& kv) { - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - } - - static void - remove_work(Token& token, - std::vector>& kv) { - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = remove(token, test_storage_name, k); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - } - }; - - std::thread t(S::put_work, std::ref(token.at(0)), std::ref(kv1)); - S::put_work(std::ref(token.at(1)), std::ref(kv2)); - t.join(); - - t = std::thread(S::remove_work, std::ref(token.at(0)), std::ref(kv1)); - S::remove_work(std::ref(token.at(1)), std::ref(kv2)); - t.join(); - - ASSERT_EQ(leave(token.at(0)), status::OK); - ASSERT_EQ(leave(token.at(1)), status::OK); - destroy(); - } -} - -TEST_F(multi_thread_delete_100_key_test, reverse_100_key) { // NOLINT - /** - * Concurrent remove against 100 key. 
- */ - - constexpr std::size_t ary_size = 100; - std::vector> kv1{}; // NOLINT - std::vector> kv2{}; // NOLINT - std::string k{8, 0}; - for (std::size_t i = 0; i < ary_size / 2; ++i) { - memcpy(k.data(), &i, sizeof(i)); - kv1.emplace_back(k, std::to_string(i)); - } - for (std::size_t i = ary_size / 2; i < ary_size; ++i) { - memcpy(k.data(), &i, sizeof(i)); - kv2.emplace_back(k, std::to_string(i)); - } - -#ifndef NDEBUG - for (size_t h = 0; h < 1; ++h) { -#else - for (size_t h = 0; h < 20; ++h) { -#endif - create_storage(test_storage_name); - std::array token{}; - ASSERT_EQ(enter(token.at(0)), status::OK); - ASSERT_EQ(enter(token.at(1)), status::OK); - - std::reverse(kv1.begin(), kv1.end()); - std::reverse(kv2.begin(), kv2.end()); - - struct S { - static void - put_work(Token& token, - std::vector>& kv) { - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - } - - static void - remove_work(Token& token, - std::vector>& kv) { - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = remove(token, test_storage_name, k); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - } - }; - - std::thread t(S::put_work, std::ref(token.at(0)), std::ref(kv1)); - S::put_work(std::ref(token.at(1)), std::ref(kv2)); - t.join(); - - t = std::thread(S::remove_work, std::ref(token.at(0)), std::ref(kv1)); - S::remove_work(std::ref(token.at(1)), std::ref(kv2)); - t.join(); - - ASSERT_EQ(leave(token.at(0)), status::OK); - ASSERT_EQ(leave(token.at(1)), status::OK); - destroy(); - } -} - -TEST_F(multi_thread_delete_100_key_test, shuffled_100_key) { // NOLINT - /** - * Concurrent remove against 100 key. 
- */ - constexpr std::size_t ary_size = 100; - std::vector> kv1{}; // NOLINT - std::vector> kv2{}; // NOLINT - std::string k{8, 0}; - for (std::size_t i = 0; i < ary_size / 2; ++i) { - memcpy(k.data(), &i, sizeof(i)); - kv1.emplace_back(k, std::to_string(i)); - } - for (std::size_t i = ary_size / 2; i < ary_size; ++i) { - memcpy(k.data(), &i, sizeof(i)); - kv2.emplace_back(k, std::to_string(i)); - } - - std::random_device seed_gen; - std::mt19937 engine(seed_gen()); - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 30; ++h) { -#endif - create_storage(test_storage_name); - std::array token{}; - ASSERT_EQ(enter(token.at(0)), status::OK); - ASSERT_EQ(enter(token.at(1)), status::OK); - - std::shuffle(kv1.begin(), kv1.end(), engine); - std::shuffle(kv2.begin(), kv2.end(), engine); - - struct S { - static void - put_work(Token& token, - std::vector>& kv) { - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - } - - static void - remove_work(Token& token, - std::vector>& kv) { - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = remove(token, test_storage_name, k); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - } - }; - - std::thread t(S::put_work, std::ref(token.at(0)), std::ref(kv1)); - S::put_work(std::ref(token.at(1)), std::ref(kv2)); - t.join(); - - t = std::thread(S::remove_work, std::ref(token.at(0)), std::ref(kv1)); - S::remove_work(std::ref(token.at(1)), std::ref(kv2)); - t.join(); - - ASSERT_EQ(leave(token.at(0)), status::OK); - ASSERT_EQ(leave(token.at(1)), status::OK); - destroy(); - } -} - +/** + * @file multi_thread_delete_test.cpp + */ + +#include +#include +#include +#include +#include + +#include "kvs.h" + +#include "gtest/gtest.h" + +#include "glog/logging.h" +using namespace yakushima; + +namespace yakushima::testing { + +class multi_thread_delete_100_key_test : public ::testing::Test { +public: + static void call_once_f() { + google::InitGoogleLogging("yakushima-test-multi_thread-delete-multi_" + "thread_delete_100_key_test"); + FLAGS_stderrthreshold = 0; + } + void SetUp() override { + std::call_once(init_, call_once_f); + init(); + } + + void TearDown() override { fin(); } + +private: + static inline std::once_flag init_; // NOLINT +}; + +std::string test_storage_name{"1"}; // NOLINT + +TEST_F(multi_thread_delete_100_key_test, ordered_100_key) { // NOLINT + /** + * Concurrent remove against 100 key. 
+ */ + + constexpr std::size_t ary_size = 100; + std::vector> kv1{}; // NOLINT + std::vector> kv2{}; // NOLINT + std::string k{8, 0}; + for (std::size_t i = 0; i < ary_size / 2; ++i) { + memcpy(k.data(), &i, sizeof(i)); + kv1.emplace_back(k, std::to_string(i)); + } + for (std::size_t i = ary_size / 2; i < ary_size; ++i) { + memcpy(k.data(), &i, sizeof(i)); + kv2.emplace_back(k, std::to_string(i)); + } + +#ifndef NDEBUG + for (size_t h = 0; h < 1; ++h) { +#else + for (size_t h = 0; h < 20; ++h) { +#endif + create_storage(test_storage_name); + std::array token{}; + ASSERT_EQ(enter(token.at(0)), status::OK); + ASSERT_EQ(enter(token.at(1)), status::OK); + + std::reverse(kv1.begin(), kv1.end()); + std::reverse(kv2.begin(), kv2.end()); + + struct S { + static void + put_work(Token& token, + std::vector>& kv) { + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + } + + static void + remove_work(Token& token, + std::vector>& kv) { + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = remove(token, test_storage_name, k); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + } + }; + + std::thread t(S::put_work, std::ref(token.at(0)), std::ref(kv1)); + S::put_work(std::ref(token.at(1)), std::ref(kv2)); + t.join(); + + t = std::thread(S::remove_work, std::ref(token.at(0)), std::ref(kv1)); + S::remove_work(std::ref(token.at(1)), std::ref(kv2)); + t.join(); + + ASSERT_EQ(leave(token.at(0)), status::OK); + ASSERT_EQ(leave(token.at(1)), status::OK); + destroy(); + } +} + +TEST_F(multi_thread_delete_100_key_test, reverse_100_key) { // NOLINT + /** + * Concurrent remove against 100 key. 
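+ * Two sessions insert disjoint halves of the key range in descending order and
+ * then remove them concurrently; the whole cycle runs against a freshly created
+ * storage on every iteration (a single iteration in debug builds, 20 in
+ * release builds).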
+ */ + + constexpr std::size_t ary_size = 100; + std::vector> kv1{}; // NOLINT + std::vector> kv2{}; // NOLINT + std::string k{8, 0}; + for (std::size_t i = 0; i < ary_size / 2; ++i) { + memcpy(k.data(), &i, sizeof(i)); + kv1.emplace_back(k, std::to_string(i)); + } + for (std::size_t i = ary_size / 2; i < ary_size; ++i) { + memcpy(k.data(), &i, sizeof(i)); + kv2.emplace_back(k, std::to_string(i)); + } + +#ifndef NDEBUG + for (size_t h = 0; h < 1; ++h) { +#else + for (size_t h = 0; h < 20; ++h) { +#endif + create_storage(test_storage_name); + std::array token{}; + ASSERT_EQ(enter(token.at(0)), status::OK); + ASSERT_EQ(enter(token.at(1)), status::OK); + + std::reverse(kv1.begin(), kv1.end()); + std::reverse(kv2.begin(), kv2.end()); + + struct S { + static void + put_work(Token& token, + std::vector>& kv) { + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + } + + static void + remove_work(Token& token, + std::vector>& kv) { + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = remove(token, test_storage_name, k); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + } + }; + + std::thread t(S::put_work, std::ref(token.at(0)), std::ref(kv1)); + S::put_work(std::ref(token.at(1)), std::ref(kv2)); + t.join(); + + t = std::thread(S::remove_work, std::ref(token.at(0)), std::ref(kv1)); + S::remove_work(std::ref(token.at(1)), std::ref(kv2)); + t.join(); + + ASSERT_EQ(leave(token.at(0)), status::OK); + ASSERT_EQ(leave(token.at(1)), status::OK); + destroy(); + } +} + +TEST_F(multi_thread_delete_100_key_test, shuffled_100_key) { // NOLINT + /** + * Concurrent remove against 100 key. 
+ */ + constexpr std::size_t ary_size = 100; + std::vector> kv1{}; // NOLINT + std::vector> kv2{}; // NOLINT + std::string k{8, 0}; + for (std::size_t i = 0; i < ary_size / 2; ++i) { + memcpy(k.data(), &i, sizeof(i)); + kv1.emplace_back(k, std::to_string(i)); + } + for (std::size_t i = ary_size / 2; i < ary_size; ++i) { + memcpy(k.data(), &i, sizeof(i)); + kv2.emplace_back(k, std::to_string(i)); + } + + std::random_device seed_gen; + std::mt19937 engine(seed_gen()); + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 30; ++h) { +#endif + create_storage(test_storage_name); + std::array token{}; + ASSERT_EQ(enter(token.at(0)), status::OK); + ASSERT_EQ(enter(token.at(1)), status::OK); + + std::shuffle(kv1.begin(), kv1.end(), engine); + std::shuffle(kv2.begin(), kv2.end(), engine); + + struct S { + static void + put_work(Token& token, + std::vector>& kv) { + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + } + + static void + remove_work(Token& token, + std::vector>& kv) { + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = remove(token, test_storage_name, k); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + } + }; + + std::thread t(S::put_work, std::ref(token.at(0)), std::ref(kv1)); + S::put_work(std::ref(token.at(1)), std::ref(kv2)); + t.join(); + + t = std::thread(S::remove_work, std::ref(token.at(0)), std::ref(kv1)); + S::remove_work(std::ref(token.at(1)), std::ref(kv2)); + t.join(); + + ASSERT_EQ(leave(token.at(0)), status::OK); + ASSERT_EQ(leave(token.at(1)), status::OK); + destroy(); + } +} + } // namespace yakushima::testing \ No newline at end of file diff --git a/test/multi_thread/delete/multi_thread_delete_100k_key_test.cpp b/test/multi_thread/delete/multi_thread_delete_100k_key_test.cpp index ae65658..8e61e6c 100644 --- a/test/multi_thread/delete/multi_thread_delete_100k_key_test.cpp +++ b/test/multi_thread/delete/multi_thread_delete_100k_key_test.cpp @@ -1,198 +1,198 @@ -/** - * @file multi_thread_delete_100k_key_test.cpp - */ - -#include -#include -#include -#include -#include - -#include "gtest/gtest.h" - -#include "glog/logging.h" - -#include "kvs.h" - -using namespace yakushima; - -namespace yakushima::testing { - -class multi_thread_delete_100k_key_test : public ::testing::Test { -public: - static void call_once_f() { - google::InitGoogleLogging("yakushima-test-multi_thread-delete_multi_" - "thread_delete_100k_key_test"); - FLAGS_stderrthreshold = 0; - } - - void SetUp() override { - init(); - std::call_once(init_, call_once_f); - } - - void TearDown() override { fin(); } - -private: - static inline std::once_flag init_; // NOLINT -}; - -std::string st{"1"}; // NOLINT - -TEST_F(multi_thread_delete_100k_key_test, 100k_key) { // NOLINT - /** - * Concurrent put 100k key. - * Concurrent remove 100k key. 
- */ - constexpr std::size_t ary_size = 100000; - std::size_t th_nm{10}; - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 10; ++h) { -#endif - create_storage(st); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread, - std::atomic* meet) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? (ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - std::string k{"12345678"}; - memcpy(k.data(), &i, sizeof(i)); - kv.emplace_back(k, "v"); - } - - Token token{nullptr}; - while (status::OK != enter(token)) { _mm_pause(); } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, st, k, v.data(), v.size()); - if (ret != status::OK) { - LOG(FATAL) << ret; // output log - std::abort(); - } - } - - meet->fetch_add(1); - while (meet->load(std::memory_order_acquire) != max_thread) { - _mm_pause(); - } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = remove(token, st, k); - if (ret != status::OK) { - LOG(FATAL) << "thid: " << th_id << ", " - << ret; // output log - std::abort(); - } - } - - ASSERT_EQ(status::OK, leave(token)); - } - }; - - std::vector thv{}; - thv.reserve(th_nm); - std::atomic meet{0}; - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm, &meet); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - destroy(); - } -} - -TEST_F(multi_thread_delete_100k_key_test, 100k_key_shuffle) { // NOLINT - /** - * Concurrent put 100k key. - * Concurrent remove 100k key. - * Shuffle data. - */ - constexpr std::size_t ary_size = 100000; - std::size_t th_nm{2}; - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 10; ++h) { -#endif - create_storage(st); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread, - std::atomic* meet) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? 
(ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - std::string k{"12345678"}; - memcpy(k.data(), &i, sizeof(i)); - kv.emplace_back(k, "v"); - } - - std::random_device seed_gen; - std::mt19937 engine(seed_gen()); - Token token{}; - ASSERT_EQ(status::OK, enter(token)); - - std::shuffle(kv.begin(), kv.end(), engine); - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, st, k, v.data(), v.size()); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - - meet->fetch_add(1); - while (meet->load(std::memory_order_acquire) != max_thread) { - _mm_pause(); - } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = remove(token, st, k); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - ASSERT_EQ(status::OK, leave(token)); - } - }; - - std::vector thv{}; - thv.reserve(th_nm); - std::atomic meet{0}; - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm, &meet); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - destroy(); - } -} - +/** + * @file multi_thread_delete_100k_key_test.cpp + */ + +#include +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "glog/logging.h" + +#include "kvs.h" + +using namespace yakushima; + +namespace yakushima::testing { + +class multi_thread_delete_100k_key_test : public ::testing::Test { +public: + static void call_once_f() { + google::InitGoogleLogging("yakushima-test-multi_thread-delete_multi_" + "thread_delete_100k_key_test"); + FLAGS_stderrthreshold = 0; + } + + void SetUp() override { + init(); + std::call_once(init_, call_once_f); + } + + void TearDown() override { fin(); } + +private: + static inline std::once_flag init_; // NOLINT +}; + +std::string st{"1"}; // NOLINT + +TEST_F(multi_thread_delete_100k_key_test, 100k_key) { // NOLINT + /** + * Concurrent put 100k key. + * Concurrent remove 100k key. + */ + constexpr std::size_t ary_size = 100000; + std::size_t th_nm{10}; + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 10; ++h) { +#endif + create_storage(st); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread, + std::atomic* meet) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + std::string k{"12345678"}; + memcpy(k.data(), &i, sizeof(i)); + kv.emplace_back(k, "v"); + } + + Token token{nullptr}; + while (status::OK != enter(token)) { _mm_pause(); } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, st, k, v.data(), v.size()); + if (ret != status::OK) { + LOG(FATAL) << ret; // output log + std::abort(); + } + } + + meet->fetch_add(1); + while (meet->load(std::memory_order_acquire) != max_thread) { + _mm_pause(); + } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = remove(token, st, k); + if (ret != status::OK) { + LOG(FATAL) << "thid: " << th_id << ", " + << ret; // output log + std::abort(); + } + } + + ASSERT_EQ(status::OK, leave(token)); + } + }; + + std::vector thv{}; + thv.reserve(th_nm); + std::atomic meet{0}; + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm, &meet); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + destroy(); + } +} + +TEST_F(multi_thread_delete_100k_key_test, 100k_key_shuffle) { // NOLINT + /** + * Concurrent put 100k key. + * Concurrent remove 100k key. + * Shuffle data. + */ + constexpr std::size_t ary_size = 100000; + std::size_t th_nm{2}; + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 10; ++h) { +#endif + create_storage(st); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread, + std::atomic* meet) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? (ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + std::string k{"12345678"}; + memcpy(k.data(), &i, sizeof(i)); + kv.emplace_back(k, "v"); + } + + std::random_device seed_gen; + std::mt19937 engine(seed_gen()); + Token token{}; + ASSERT_EQ(status::OK, enter(token)); + + std::shuffle(kv.begin(), kv.end(), engine); + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, st, k, v.data(), v.size()); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + + meet->fetch_add(1); + while (meet->load(std::memory_order_acquire) != max_thread) { + _mm_pause(); + } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = remove(token, st, k); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + ASSERT_EQ(status::OK, leave(token)); + } + }; + + std::vector thv{}; + thv.reserve(th_nm); + std::atomic meet{0}; + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm, &meet); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + destroy(); + } +} + } // namespace yakushima::testing \ No newline at end of file diff --git a/test/multi_thread/delete/multi_thread_delete_10_key_test.cpp b/test/multi_thread/delete/multi_thread_delete_10_key_test.cpp index 7d4d4df..cf1794d 100644 --- a/test/multi_thread/delete/multi_thread_delete_10_key_test.cpp +++ b/test/multi_thread/delete/multi_thread_delete_10_key_test.cpp @@ -1,267 +1,267 @@ -/** - * @file multi_thread_delete_test.cpp - */ - -#include -#include -#include -#include -#include - -#include "kvs.h" - -#include "gtest/gtest.h" - -#include "glog/logging.h" -using namespace yakushima; - -namespace yakushima::testing { - 
-class multi_thread_delete_10_key_test : public ::testing::Test { -public: - static void call_once_f() { - google::InitGoogleLogging("yakushima-test-multi_thread-delete-multi_" - "thread_delete_10_key_test"); - FLAGS_stderrthreshold = 0; - } - void SetUp() override { - std::call_once(init_, call_once_f); - init(); - } - - void TearDown() override { fin(); } - -private: - static inline std::once_flag init_; // NOLINT -}; - -std::string st{"1"}; // NOLINT - -TEST_F(multi_thread_delete_10_key_test, ordered_10_key) { // NOLINT - /** - * Concurrent remove against 10 key. - */ - - constexpr std::size_t ary_size = 10; - std::vector> kv1{}; // NOLINT - std::vector> kv2{}; // NOLINT - std::string k{8, 0}; - for (std::size_t i = 0; i < ary_size / 2; ++i) { - memcpy(k.data(), &i, sizeof(i)); - kv1.emplace_back(k, "v"); - } - for (std::size_t i = ary_size / 2; i < ary_size; ++i) { - memcpy(k.data(), &i, sizeof(i)); - kv2.emplace_back(k, "v"); - } - -#ifndef NDEBUG - for (size_t h = 0; h < 1; ++h) { -#else - for (size_t h = 0; h < 20; ++h) { -#endif - create_storage(st); - std::array token{}; - ASSERT_EQ(enter(token.at(0)), status::OK); - ASSERT_EQ(enter(token.at(1)), status::OK); - - std::reverse(kv1.begin(), kv1.end()); - std::reverse(kv2.begin(), kv2.end()); - - struct S { - static void - put_work(Token& token, - std::vector>& kv) { - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, st, k, v.data(), v.size()); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - } - - static void - remove_work(Token& token, - std::vector>& kv) { - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = remove(token, st, k); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - } - }; - - std::thread t(S::put_work, std::ref(token.at(0)), std::ref(kv1)); - S::put_work(std::ref(token.at(1)), std::ref(kv2)); - t.join(); - - t = std::thread(S::remove_work, std::ref(token.at(0)), std::ref(kv1)); - S::remove_work(std::ref(token.at(1)), std::ref(kv2)); - t.join(); - - ASSERT_EQ(leave(token.at(0)), status::OK); - ASSERT_EQ(leave(token.at(1)), status::OK); - destroy(); - } -} - -TEST_F(multi_thread_delete_10_key_test, reverse_10_key) { // NOLINT - /** - * Concurrent remove against 100 key. 
- */ - - constexpr std::size_t ary_size = 10; - std::vector> kv1{}; // NOLINT - std::vector> kv2{}; // NOLINT - std::string k{8, 0}; - for (std::size_t i = 0; i < ary_size / 2; ++i) { - memcpy(k.data(), &i, sizeof(i)); - kv1.emplace_back(k, "v"); - } - for (std::size_t i = ary_size / 2; i < ary_size; ++i) { - memcpy(k.data(), &i, sizeof(i)); - kv2.emplace_back(k, "v"); - } - -#ifndef NDEBUG - for (size_t h = 0; h < 1; ++h) { -#else - for (size_t h = 0; h < 20; ++h) { -#endif - create_storage(st); - std::array token{}; - ASSERT_EQ(enter(token.at(0)), status::OK); - ASSERT_EQ(enter(token.at(1)), status::OK); - - std::reverse(kv1.begin(), kv1.end()); - std::reverse(kv2.begin(), kv2.end()); - - struct S { - static void - put_work(Token& token, - std::vector>& kv) { - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, st, k, v.data(), v.size()); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - } - - static void - remove_work(Token& token, - std::vector>& kv) { - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = remove(token, st, k); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - } - }; - - std::thread t(S::put_work, std::ref(token.at(0)), std::ref(kv1)); - S::put_work(std::ref(token.at(1)), std::ref(kv2)); - t.join(); - - t = std::thread(S::remove_work, std::ref(token.at(0)), std::ref(kv1)); - S::remove_work(std::ref(token.at(1)), std::ref(kv2)); - t.join(); - - ASSERT_EQ(leave(token.at(0)), status::OK); - ASSERT_EQ(leave(token.at(1)), status::OK); - destroy(); - } -} - -TEST_F(multi_thread_delete_10_key_test, shuffled_10_key) { // NOLINT - /** - * Concurrent remove against 100 key. 
- */ - constexpr std::size_t ary_size = 10; - std::vector> kv1{}; // NOLINT - std::vector> kv2{}; // NOLINT - std::string k{8, 0}; - for (std::size_t i = 0; i < ary_size / 2; ++i) { - memcpy(k.data(), &i, sizeof(i)); - kv1.emplace_back(k, "v"); - } - for (std::size_t i = ary_size / 2; i < ary_size; ++i) { - memcpy(k.data(), &i, sizeof(i)); - kv2.emplace_back(k, "v"); - } - - std::random_device seed_gen; - std::mt19937 engine(seed_gen()); - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 30; ++h) { -#endif - create_storage(st); - std::array token{}; - ASSERT_EQ(enter(token.at(0)), status::OK); - ASSERT_EQ(enter(token.at(1)), status::OK); - - std::shuffle(kv1.begin(), kv1.end(), engine); - std::shuffle(kv2.begin(), kv2.end(), engine); - - struct S { - static void - put_work(Token& token, - std::vector>& kv) { - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, st, k, v.data(), v.size()); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - } - - static void - remove_work(Token& token, - std::vector>& kv) { - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = remove(token, st, k); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - } - }; - - std::thread t(S::put_work, std::ref(token.at(0)), std::ref(kv1)); - S::put_work(std::ref(token.at(1)), std::ref(kv2)); - t.join(); - - t = std::thread(S::remove_work, std::ref(token.at(0)), std::ref(kv1)); - S::remove_work(std::ref(token.at(1)), std::ref(kv2)); - t.join(); - - ASSERT_EQ(leave(token.at(0)), status::OK); - ASSERT_EQ(leave(token.at(1)), status::OK); - destroy(); - } -} - +/** + * @file multi_thread_delete_test.cpp + */ + +#include +#include +#include +#include +#include + +#include "kvs.h" + +#include "gtest/gtest.h" + +#include "glog/logging.h" +using namespace yakushima; + +namespace yakushima::testing { + +class multi_thread_delete_10_key_test : public ::testing::Test { +public: + static void call_once_f() { + google::InitGoogleLogging("yakushima-test-multi_thread-delete-multi_" + "thread_delete_10_key_test"); + FLAGS_stderrthreshold = 0; + } + void SetUp() override { + std::call_once(init_, call_once_f); + init(); + } + + void TearDown() override { fin(); } + +private: + static inline std::once_flag init_; // NOLINT +}; + +std::string st{"1"}; // NOLINT + +TEST_F(multi_thread_delete_10_key_test, ordered_10_key) { // NOLINT + /** + * Concurrent remove against 10 key. 
+ */ + + constexpr std::size_t ary_size = 10; + std::vector> kv1{}; // NOLINT + std::vector> kv2{}; // NOLINT + std::string k{8, 0}; + for (std::size_t i = 0; i < ary_size / 2; ++i) { + memcpy(k.data(), &i, sizeof(i)); + kv1.emplace_back(k, "v"); + } + for (std::size_t i = ary_size / 2; i < ary_size; ++i) { + memcpy(k.data(), &i, sizeof(i)); + kv2.emplace_back(k, "v"); + } + +#ifndef NDEBUG + for (size_t h = 0; h < 1; ++h) { +#else + for (size_t h = 0; h < 20; ++h) { +#endif + create_storage(st); + std::array token{}; + ASSERT_EQ(enter(token.at(0)), status::OK); + ASSERT_EQ(enter(token.at(1)), status::OK); + + std::reverse(kv1.begin(), kv1.end()); + std::reverse(kv2.begin(), kv2.end()); + + struct S { + static void + put_work(Token& token, + std::vector>& kv) { + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, st, k, v.data(), v.size()); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + } + + static void + remove_work(Token& token, + std::vector>& kv) { + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = remove(token, st, k); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + } + }; + + std::thread t(S::put_work, std::ref(token.at(0)), std::ref(kv1)); + S::put_work(std::ref(token.at(1)), std::ref(kv2)); + t.join(); + + t = std::thread(S::remove_work, std::ref(token.at(0)), std::ref(kv1)); + S::remove_work(std::ref(token.at(1)), std::ref(kv2)); + t.join(); + + ASSERT_EQ(leave(token.at(0)), status::OK); + ASSERT_EQ(leave(token.at(1)), status::OK); + destroy(); + } +} + +TEST_F(multi_thread_delete_10_key_test, reverse_10_key) { // NOLINT + /** + * Concurrent remove against 100 key. 
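+ * (ary_size is 10 in this test; each of the two sessions inserts and then
+ * removes its 5-key half in descending order.)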
+ */ + + constexpr std::size_t ary_size = 10; + std::vector> kv1{}; // NOLINT + std::vector> kv2{}; // NOLINT + std::string k{8, 0}; + for (std::size_t i = 0; i < ary_size / 2; ++i) { + memcpy(k.data(), &i, sizeof(i)); + kv1.emplace_back(k, "v"); + } + for (std::size_t i = ary_size / 2; i < ary_size; ++i) { + memcpy(k.data(), &i, sizeof(i)); + kv2.emplace_back(k, "v"); + } + +#ifndef NDEBUG + for (size_t h = 0; h < 1; ++h) { +#else + for (size_t h = 0; h < 20; ++h) { +#endif + create_storage(st); + std::array token{}; + ASSERT_EQ(enter(token.at(0)), status::OK); + ASSERT_EQ(enter(token.at(1)), status::OK); + + std::reverse(kv1.begin(), kv1.end()); + std::reverse(kv2.begin(), kv2.end()); + + struct S { + static void + put_work(Token& token, + std::vector>& kv) { + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, st, k, v.data(), v.size()); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + } + + static void + remove_work(Token& token, + std::vector>& kv) { + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = remove(token, st, k); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + } + }; + + std::thread t(S::put_work, std::ref(token.at(0)), std::ref(kv1)); + S::put_work(std::ref(token.at(1)), std::ref(kv2)); + t.join(); + + t = std::thread(S::remove_work, std::ref(token.at(0)), std::ref(kv1)); + S::remove_work(std::ref(token.at(1)), std::ref(kv2)); + t.join(); + + ASSERT_EQ(leave(token.at(0)), status::OK); + ASSERT_EQ(leave(token.at(1)), status::OK); + destroy(); + } +} + +TEST_F(multi_thread_delete_10_key_test, shuffled_10_key) { // NOLINT + /** + * Concurrent remove against 100 key. 
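+ * (ary_size is 10 in this test; the two 5-key halves are shuffled with a
+ * std::mt19937 engine before each put/remove round.)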
+ */ + constexpr std::size_t ary_size = 10; + std::vector> kv1{}; // NOLINT + std::vector> kv2{}; // NOLINT + std::string k{8, 0}; + for (std::size_t i = 0; i < ary_size / 2; ++i) { + memcpy(k.data(), &i, sizeof(i)); + kv1.emplace_back(k, "v"); + } + for (std::size_t i = ary_size / 2; i < ary_size; ++i) { + memcpy(k.data(), &i, sizeof(i)); + kv2.emplace_back(k, "v"); + } + + std::random_device seed_gen; + std::mt19937 engine(seed_gen()); + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 30; ++h) { +#endif + create_storage(st); + std::array token{}; + ASSERT_EQ(enter(token.at(0)), status::OK); + ASSERT_EQ(enter(token.at(1)), status::OK); + + std::shuffle(kv1.begin(), kv1.end(), engine); + std::shuffle(kv2.begin(), kv2.end(), engine); + + struct S { + static void + put_work(Token& token, + std::vector>& kv) { + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, st, k, v.data(), v.size()); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + } + + static void + remove_work(Token& token, + std::vector>& kv) { + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = remove(token, st, k); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + } + }; + + std::thread t(S::put_work, std::ref(token.at(0)), std::ref(kv1)); + S::put_work(std::ref(token.at(1)), std::ref(kv2)); + t.join(); + + t = std::thread(S::remove_work, std::ref(token.at(0)), std::ref(kv1)); + S::remove_work(std::ref(token.at(1)), std::ref(kv2)); + t.join(); + + ASSERT_EQ(leave(token.at(0)), status::OK); + ASSERT_EQ(leave(token.at(1)), status::OK); + destroy(); + } +} + } // namespace yakushima::testing \ No newline at end of file diff --git a/test/multi_thread/delete/multi_thread_delete_1_key_test.cpp b/test/multi_thread/delete/multi_thread_delete_1_key_test.cpp index 772e313..6351c5a 100644 --- a/test/multi_thread/delete/multi_thread_delete_1_key_test.cpp +++ b/test/multi_thread/delete/multi_thread_delete_1_key_test.cpp @@ -1,82 +1,82 @@ -/** - * @file multi_thread_delete_1_key_test.cpp - */ - -#include -#include -#include -#include -#include - -#include "gtest/gtest.h" - -#include "glog/logging.h" - -#include "kvs.h" - -using namespace yakushima; - -namespace yakushima::testing { - -class multi_thread_delete_1_key_test : public ::testing::Test { -public: - static void call_once_f() { - google::InitGoogleLogging("yakushima-test-multi_thread-delete_multi_" - "thread_delete_1_key_test"); - FLAGS_stderrthreshold = 0; - } - - void SetUp() override { - init(); - std::call_once(init_, call_once_f); - } - - void TearDown() override { fin(); } - -private: - static inline std::once_flag init_; // NOLINT -}; - -std::string st{"1"}; // NOLINT - -TEST_F(multi_thread_delete_1_key_test, 1_key) { // NOLINT - /** - * Concurrent put 1 key. - * Concurrent remove 1 key. 
- */ - static constexpr std::size_t th_nm{10}; - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 10; ++h) { -#endif - create_storage(st); - - struct S { - static void work() { - Token token{nullptr}; - while (status::OK != enter(token)) { _mm_pause(); } - - std::string k{"k"}; - std::string v{"v"}; - for (std::size_t i = 0; i < 100; ++i) { - status ret = put(token, st, k, v.data(), v.size()); - ASSERT_EQ(ret, status::OK); - ret = remove(token, st, k); - } - ASSERT_EQ(status::OK, leave(token)); - } - }; - - std::vector thv{}; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { thv.emplace_back(S::work); } - for (auto&& th : thv) { th.join(); } - thv.clear(); - } - - destroy(); -} - +/** + * @file multi_thread_delete_1_key_test.cpp + */ + +#include +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "glog/logging.h" + +#include "kvs.h" + +using namespace yakushima; + +namespace yakushima::testing { + +class multi_thread_delete_1_key_test : public ::testing::Test { +public: + static void call_once_f() { + google::InitGoogleLogging("yakushima-test-multi_thread-delete_multi_" + "thread_delete_1_key_test"); + FLAGS_stderrthreshold = 0; + } + + void SetUp() override { + init(); + std::call_once(init_, call_once_f); + } + + void TearDown() override { fin(); } + +private: + static inline std::once_flag init_; // NOLINT +}; + +std::string st{"1"}; // NOLINT + +TEST_F(multi_thread_delete_1_key_test, 1_key) { // NOLINT + /** + * Concurrent put 1 key. + * Concurrent remove 1 key. + */ + static constexpr std::size_t th_nm{10}; + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 10; ++h) { +#endif + create_storage(st); + + struct S { + static void work() { + Token token{nullptr}; + while (status::OK != enter(token)) { _mm_pause(); } + + std::string k{"k"}; + std::string v{"v"}; + for (std::size_t i = 0; i < 100; ++i) { + status ret = put(token, st, k, v.data(), v.size()); + ASSERT_EQ(ret, status::OK); + ret = remove(token, st, k); + } + ASSERT_EQ(status::OK, leave(token)); + } + }; + + std::vector thv{}; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { thv.emplace_back(S::work); } + for (auto&& th : thv) { th.join(); } + thv.clear(); + } + + destroy(); +} + } // namespace yakushima::testing \ No newline at end of file diff --git a/test/multi_thread/delete/multi_thread_delete_1m_key_test.cpp b/test/multi_thread/delete/multi_thread_delete_1m_key_test.cpp index da93b48..a400491 100644 --- a/test/multi_thread/delete/multi_thread_delete_1m_key_test.cpp +++ b/test/multi_thread/delete/multi_thread_delete_1m_key_test.cpp @@ -1,198 +1,198 @@ -/** - * @file multi_thread_delete_1m_key_test.cpp - */ - -#include -#include -#include -#include -#include - -#include "gtest/gtest.h" - -#include "glog/logging.h" - -#include "kvs.h" - -using namespace yakushima; - -namespace yakushima::testing { - -class multi_thread_delete_1m_key_test : public ::testing::Test { -public: - static void call_once_f() { - google::InitGoogleLogging("yakushima-test-multi_thread-delete_multi_" - "thread_delete_1m_key_test"); - FLAGS_stderrthreshold = 0; - } - - void SetUp() override { - init(); - std::call_once(init_, call_once_f); - } - - void TearDown() override { fin(); } - -private: - static inline std::once_flag init_; // NOLINT -}; - -std::string st{"1"}; // NOLINT - -#ifndef NDEBUG -TEST_F(multi_thread_delete_1m_key_test, DISABLED_1m_key) { // NOLINT -#else 
-TEST_F(multi_thread_delete_1m_key_test, 1m_key) { // NOLINT -#endif - /** - * Concurrent put 1m key. - * Concurrent remove 1m key. - */ - constexpr std::size_t ary_size = 1000000; - std::size_t th_nm{std::thread::hardware_concurrency()}; - - for (std::size_t h = 0; h < 1; ++h) { - create_storage(st); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread, - std::atomic* meet) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? (ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - std::string k{"12345678"}; - memcpy(k.data(), &i, sizeof(i)); - kv.emplace_back(k, "v"); - } - - Token token{nullptr}; - while (status::OK != enter(token)) { _mm_pause(); } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, st, k, v.data(), v.size()); - if (ret != status::OK) { - LOG(FATAL) << ret; // output log - std::abort(); - } - } - - meet->fetch_add(1); - while (meet->load(std::memory_order_acquire) != max_thread) { - _mm_pause(); - } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = remove(token, st, k); - if (ret != status::OK) { - LOG(FATAL) << "thid: " << th_id << ", " - << ret; // output log - std::abort(); - } - } - - ASSERT_EQ(status::OK, leave(token)); - } - }; - - std::vector thv{}; - thv.reserve(th_nm); - std::atomic meet{0}; - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm, &meet); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - destroy(); - } -} - -#ifndef NDEBUG -TEST_F(multi_thread_delete_1m_key_test, DISABLED_1m_key_shuffle) { // NOLINT -#else -TEST_F(multi_thread_delete_1m_key_test, 1m_key_shuffle) { // NOLINT -#endif - /** - * Concurrent put 1m key. - * Concurrent remove 1m key. - * Shuffle data. - */ - constexpr std::size_t ary_size = 1000000; - std::size_t th_nm{std::thread::hardware_concurrency()}; - - for (std::size_t h = 0; h < 1; ++h) { - create_storage(st); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread, - std::atomic* meet) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? 
(ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - std::string k{"12345678"}; - memcpy(k.data(), &i, sizeof(i)); - kv.emplace_back(k, "v"); - } - - std::random_device seed_gen; - std::mt19937 engine(seed_gen()); - Token token{}; - ASSERT_EQ(status::OK, enter(token)); - - std::shuffle(kv.begin(), kv.end(), engine); - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, st, k, v.data(), v.size()); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - - meet->fetch_add(1); - while (meet->load(std::memory_order_acquire) != max_thread) { - _mm_pause(); - } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = remove(token, st, k); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - ASSERT_EQ(status::OK, leave(token)); - } - }; - - std::vector thv{}; - thv.reserve(th_nm); - std::atomic meet{0}; - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm, &meet); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - destroy(); - } -} - +/** + * @file multi_thread_delete_1m_key_test.cpp + */ + +#include +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "glog/logging.h" + +#include "kvs.h" + +using namespace yakushima; + +namespace yakushima::testing { + +class multi_thread_delete_1m_key_test : public ::testing::Test { +public: + static void call_once_f() { + google::InitGoogleLogging("yakushima-test-multi_thread-delete_multi_" + "thread_delete_1m_key_test"); + FLAGS_stderrthreshold = 0; + } + + void SetUp() override { + init(); + std::call_once(init_, call_once_f); + } + + void TearDown() override { fin(); } + +private: + static inline std::once_flag init_; // NOLINT +}; + +std::string st{"1"}; // NOLINT + +#ifndef NDEBUG +TEST_F(multi_thread_delete_1m_key_test, DISABLED_1m_key) { // NOLINT +#else +TEST_F(multi_thread_delete_1m_key_test, 1m_key) { // NOLINT +#endif + /** + * Concurrent put 1m key. + * Concurrent remove 1m key. + */ + constexpr std::size_t ary_size = 1000000; + std::size_t th_nm{std::thread::hardware_concurrency()}; + + for (std::size_t h = 0; h < 1; ++h) { + create_storage(st); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread, + std::atomic* meet) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + std::string k{"12345678"}; + memcpy(k.data(), &i, sizeof(i)); + kv.emplace_back(k, "v"); + } + + Token token{nullptr}; + while (status::OK != enter(token)) { _mm_pause(); } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, st, k, v.data(), v.size()); + if (ret != status::OK) { + LOG(FATAL) << ret; // output log + std::abort(); + } + } + + meet->fetch_add(1); + while (meet->load(std::memory_order_acquire) != max_thread) { + _mm_pause(); + } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = remove(token, st, k); + if (ret != status::OK) { + LOG(FATAL) << "thid: " << th_id << ", " + << ret; // output log + std::abort(); + } + } + + ASSERT_EQ(status::OK, leave(token)); + } + }; + + std::vector thv{}; + thv.reserve(th_nm); + std::atomic meet{0}; + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm, &meet); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + destroy(); + } +} + +#ifndef NDEBUG +TEST_F(multi_thread_delete_1m_key_test, DISABLED_1m_key_shuffle) { // NOLINT +#else +TEST_F(multi_thread_delete_1m_key_test, 1m_key_shuffle) { // NOLINT +#endif + /** + * Concurrent put 1m key. + * Concurrent remove 1m key. + * Shuffle data. + */ + constexpr std::size_t ary_size = 1000000; + std::size_t th_nm{std::thread::hardware_concurrency()}; + + for (std::size_t h = 0; h < 1; ++h) { + create_storage(st); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread, + std::atomic* meet) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + std::string k{"12345678"}; + memcpy(k.data(), &i, sizeof(i)); + kv.emplace_back(k, "v"); + } + + std::random_device seed_gen; + std::mt19937 engine(seed_gen()); + Token token{}; + ASSERT_EQ(status::OK, enter(token)); + + std::shuffle(kv.begin(), kv.end(), engine); + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, st, k, v.data(), v.size()); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + + meet->fetch_add(1); + while (meet->load(std::memory_order_acquire) != max_thread) { + _mm_pause(); + } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = remove(token, st, k); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + ASSERT_EQ(status::OK, leave(token)); + } + }; + + std::vector thv{}; + thv.reserve(th_nm); + std::atomic meet{0}; + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm, &meet); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + destroy(); + } +} + } // namespace yakushima::testing \ No newline at end of file diff --git a/test/multi_thread/delete/multi_thread_delete_200_key_test.cpp b/test/multi_thread/delete/multi_thread_delete_200_key_test.cpp index 2994ddf..2efebb2 100644 --- a/test/multi_thread/delete/multi_thread_delete_200_key_test.cpp +++ b/test/multi_thread/delete/multi_thread_delete_200_key_test.cpp @@ -1,217 +1,217 @@ -/** - * @file multi_thread_delete_200_key_test.cpp - */ - -#include -#include -#include -#include -#include -#include - -#include "gtest/gtest.h" - -#include "glog/logging.h" - -#include "kvs.h" - -using namespace yakushima; - -namespace yakushima::testing { - -class multi_thread_delete_200_key_test : public ::testing::Test { -public: - static void call_once_f() { - google::InitGoogleLogging("yakushima-test-multi_thread-delete_multi_" - "thread_delete_200_key_test"); - FLAGS_stderrthreshold = 0; - } - - void SetUp() override { - init(); - std::call_once(init_, call_once_f); - } - - void TearDown() override { fin(); } - -private: - static inline std::once_flag init_; // NOLINT -}; - -std::string test_storage_name{"1"}; // NOLINT - -TEST_F(multi_thread_delete_200_key_test, 200_key) { // NOLINT - /** - * Concurrent put 200 key. - * Concurrent remove 200 key. - */ - constexpr std::size_t ary_size = 200; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 10; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread, - std::atomic* meet) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? 
(ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - char d_1 = 0; - char d_0 = 0; - if (i > static_cast( - std::numeric_limits::max())) { - d_1 = i / std::numeric_limits::max(); // NOLINT - d_0 = i % std::numeric_limits::max(); // NOLINT - } else { - d_0 = i; // NOLINT - } - std::string key = std::string(1, d_1) + std::string(1, d_0); - kv.emplace_back(key, std::to_string(i)); - } - - Token token{nullptr}; - while (status::OK != enter(token)) { _mm_pause(); } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - - meet->fetch_add(1); - while (meet->load(std::memory_order_acquire) != max_thread) { - _mm_pause(); - } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = remove(token, test_storage_name, k); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - - ASSERT_EQ(status::OK, leave(token)); - } - }; - - std::vector thv; - thv.reserve(th_nm); - std::atomic meet{0}; - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm, &meet); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - destroy(); - } -} - -TEST_F(multi_thread_delete_200_key_test, 200_key_shuffle) { // NOLINT - /** - * Concurrent put 200 key. - * Concurrent remove 200 key. - * Shuffle data. - */ - constexpr std::size_t ary_size = 200; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 10; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread, - std::atomic* meet) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? 
(ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back(std::string(1, i), // NOLINT - std::to_string(i)); - } - - std::random_device seed_gen; - std::mt19937 engine(seed_gen()); - Token token{}; - ASSERT_EQ(status::OK, enter(token)); - - std::shuffle(kv.begin(), kv.end(), engine); - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - - meet->fetch_add(1); - while (meet->load(std::memory_order_acquire) != max_thread) { - _mm_pause(); - } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = remove(token, test_storage_name, k); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - ASSERT_EQ(status::OK, leave(token)); - } - }; - - std::vector thv; - thv.reserve(th_nm); - std::atomic meet{0}; - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm, &meet); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - destroy(); - } -} - +/** + * @file multi_thread_delete_200_key_test.cpp + */ + +#include +#include +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "glog/logging.h" + +#include "kvs.h" + +using namespace yakushima; + +namespace yakushima::testing { + +class multi_thread_delete_200_key_test : public ::testing::Test { +public: + static void call_once_f() { + google::InitGoogleLogging("yakushima-test-multi_thread-delete_multi_" + "thread_delete_200_key_test"); + FLAGS_stderrthreshold = 0; + } + + void SetUp() override { + init(); + std::call_once(init_, call_once_f); + } + + void TearDown() override { fin(); } + +private: + static inline std::once_flag init_; // NOLINT +}; + +std::string test_storage_name{"1"}; // NOLINT + +TEST_F(multi_thread_delete_200_key_test, 200_key) { // NOLINT + /** + * Concurrent put 200 key. + * Concurrent remove 200 key. + */ + constexpr std::size_t ary_size = 200; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 10; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread, + std::atomic* meet) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + char d_1 = 0; + char d_0 = 0; + if (i > static_cast( + std::numeric_limits::max())) { + d_1 = i / std::numeric_limits::max(); // NOLINT + d_0 = i % std::numeric_limits::max(); // NOLINT + } else { + d_0 = i; // NOLINT + } + std::string key = std::string(1, d_1) + std::string(1, d_0); + kv.emplace_back(key, std::to_string(i)); + } + + Token token{nullptr}; + while (status::OK != enter(token)) { _mm_pause(); } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + + meet->fetch_add(1); + while (meet->load(std::memory_order_acquire) != max_thread) { + _mm_pause(); + } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = remove(token, test_storage_name, k); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + + ASSERT_EQ(status::OK, leave(token)); + } + }; + + std::vector thv; + thv.reserve(th_nm); + std::atomic meet{0}; + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm, &meet); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + destroy(); + } +} + +TEST_F(multi_thread_delete_200_key_test, 200_key_shuffle) { // NOLINT + /** + * Concurrent put 200 key. + * Concurrent remove 200 key. + * Shuffle data. + */ + constexpr std::size_t ary_size = 200; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 10; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread, + std::atomic* meet) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back(std::string(1, i), // NOLINT + std::to_string(i)); + } + + std::random_device seed_gen; + std::mt19937 engine(seed_gen()); + Token token{}; + ASSERT_EQ(status::OK, enter(token)); + + std::shuffle(kv.begin(), kv.end(), engine); + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + + meet->fetch_add(1); + while (meet->load(std::memory_order_acquire) != max_thread) { + _mm_pause(); + } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = remove(token, test_storage_name, k); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + ASSERT_EQ(status::OK, leave(token)); + } + }; + + std::vector thv; + thv.reserve(th_nm); + std::atomic meet{0}; + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm, &meet); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + destroy(); + } +} + } // namespace yakushima::testing \ No newline at end of file diff --git a/test/multi_thread/delete/multi_thread_delete_20_key_test.cpp b/test/multi_thread/delete/multi_thread_delete_20_key_test.cpp index 6bd58da..f45547c 100644 --- a/test/multi_thread/delete/multi_thread_delete_20_key_test.cpp +++ b/test/multi_thread/delete/multi_thread_delete_20_key_test.cpp @@ -1,269 +1,269 @@ -/** - * @file multi_thread_delete_test.cpp - */ - -#include -#include -#include -#include -#include - -#include "kvs.h" - -#include "gtest/gtest.h" - -#include "glog/logging.h" -using namespace yakushima; - -namespace yakushima::testing { - -class multi_thread_delete_20_key_test : public ::testing::Test { -public: - static void call_once_f() { - google::InitGoogleLogging("yakushima-test-multi_thread-delete-multi_" - "thread_delete_20_key_test"); - FLAGS_stderrthreshold = 0; - } - void SetUp() override { - std::call_once(init_, call_once_f); - init(); - } - - void TearDown() override { fin(); } - -private: - static inline std::once_flag init_; // NOLINT -}; - -std::string st{"1"}; // NOLINT - -TEST_F(multi_thread_delete_20_key_test, ordered_20_key) { // NOLINT - /** - * Concurrent remove against 10 key. 
- */
-
-    constexpr std::size_t ary_size = 20;
-    std::vector<std::tuple<std::string, std::string>> kv1{}; // NOLINT
-    std::vector<std::tuple<std::string, std::string>> kv2{}; // NOLINT
-    std::string k{8, 0};
-    for (std::size_t i = 0; i < ary_size / 2; ++i) {
-        memcpy(k.data(), &i, sizeof(i));
-        kv1.emplace_back(k, "v");
-    }
-    for (std::size_t i = ary_size / 2; i < ary_size; ++i) {
-        memcpy(k.data(), &i, sizeof(i));
-        kv2.emplace_back(k, "v");
-    }
-
-#ifndef NDEBUG
-    for (size_t h = 0; h < 1; ++h) {
-#else
-    for (size_t h = 0; h < 20; ++h) {
-#endif
-        create_storage(st);
-        std::array<Token, 2> token{};
-        ASSERT_EQ(enter(token.at(0)), status::OK);
-        ASSERT_EQ(enter(token.at(1)), status::OK);
-
-        std::reverse(kv1.begin(), kv1.end());
-        std::reverse(kv2.begin(), kv2.end());
-
-        struct S {
-            static void
-            put_work(Token& token,
-                     std::vector<std::tuple<std::string, std::string>>& kv) {
-                for (auto& i : kv) {
-                    std::string k(std::get<0>(i));
-                    std::string v(std::get<1>(i));
-                    status ret = put(token, st, k, v.data(), v.size());
-                    if (ret != status::OK) {
-                        EXPECT_EQ(status::OK, ret); // output log
-                        std::abort();
-                    }
-                }
-            }
-
-            static void
-            remove_work(Token& token,
-                        std::vector<std::tuple<std::string, std::string>>& kv) {
-                for (auto& i : kv) {
-                    std::string k(std::get<0>(i));
-                    std::string v(std::get<1>(i));
-                    status ret =
-                            remove(token, st, std::string_view(k)); // NOLINT
-                    if (ret != status::OK) {
-                        EXPECT_EQ(status::OK, ret); // output log
-                        std::abort();
-                    }
-                }
-            }
-        };
-
-        std::thread t(S::put_work, std::ref(token.at(0)), std::ref(kv1));
-        S::put_work(std::ref(token.at(1)), std::ref(kv2));
-        t.join();
-
-        t = std::thread(S::remove_work, std::ref(token.at(0)), std::ref(kv1));
-        S::remove_work(std::ref(token.at(1)), std::ref(kv2));
-        t.join();
-
-        ASSERT_EQ(leave(token.at(0)), status::OK);
-        ASSERT_EQ(leave(token.at(1)), status::OK);
-        destroy();
-    }
-}
-
-TEST_F(multi_thread_delete_20_key_test, reverse_20_key) { // NOLINT
-    /**
-     * Concurrent remove against 20 key.
- */ - - constexpr std::size_t ary_size = 20; - std::vector> kv1{}; // NOLINT - std::vector> kv2{}; // NOLINT - std::string k{8, 0}; - for (std::size_t i = 0; i < ary_size / 2; ++i) { - memcpy(k.data(), &i, sizeof(i)); - kv1.emplace_back(k, "v"); - } - for (std::size_t i = ary_size / 2; i < ary_size; ++i) { - memcpy(k.data(), &i, sizeof(i)); - kv2.emplace_back(k, "v"); - } - -#ifndef NDEBUG - for (size_t h = 0; h < 1; ++h) { -#else - for (size_t h = 0; h < 20; ++h) { -#endif - create_storage(st); - std::array token{}; - ASSERT_EQ(enter(token.at(0)), status::OK); - ASSERT_EQ(enter(token.at(1)), status::OK); - - std::reverse(kv1.begin(), kv1.end()); - std::reverse(kv2.begin(), kv2.end()); - - struct S { - static void - put_work(Token& token, - std::vector>& kv) { - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, st, k, v.data(), v.size()); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - } - - static void - remove_work(Token& token, - std::vector>& kv) { - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = - remove(token, st, std::string_view(k)); // NOLINT - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - } - }; - - std::thread t(S::put_work, std::ref(token.at(0)), std::ref(kv1)); - S::put_work(std::ref(token.at(1)), std::ref(kv2)); - t.join(); - - t = std::thread(S::remove_work, std::ref(token.at(0)), std::ref(kv1)); - S::remove_work(std::ref(token.at(1)), std::ref(kv2)); - t.join(); - - ASSERT_EQ(leave(token.at(0)), status::OK); - ASSERT_EQ(leave(token.at(1)), status::OK); - destroy(); - } -} - -TEST_F(multi_thread_delete_20_key_test, shuffled_20_key) { // NOLINT - /** - * Concurrent remove against 100 key. 
- */ - constexpr std::size_t ary_size = 20; - std::vector> kv1{}; // NOLINT - std::vector> kv2{}; // NOLINT - std::string k{8, 0}; - for (std::size_t i = 0; i < ary_size / 2; ++i) { - memcpy(k.data(), &i, sizeof(i)); - kv1.emplace_back(k, "v"); - } - for (std::size_t i = ary_size / 2; i < ary_size; ++i) { - memcpy(k.data(), &i, sizeof(i)); - kv2.emplace_back(k, "v"); - } - - std::random_device seed_gen; - std::mt19937 engine(seed_gen()); - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 30; ++h) { -#endif - create_storage(st); - std::array token{}; - ASSERT_EQ(enter(token.at(0)), status::OK); - ASSERT_EQ(enter(token.at(1)), status::OK); - - std::shuffle(kv1.begin(), kv1.end(), engine); - std::shuffle(kv2.begin(), kv2.end(), engine); - - struct S { - static void - put_work(Token& token, - std::vector>& kv) { - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, st, k, v.data(), v.size()); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - } - - static void - remove_work(Token& token, - std::vector>& kv) { - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = remove(token, st, k); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - } - }; - - std::thread t(S::put_work, std::ref(token.at(0)), std::ref(kv1)); - S::put_work(std::ref(token.at(1)), std::ref(kv2)); - t.join(); - - t = std::thread(S::remove_work, std::ref(token.at(0)), std::ref(kv1)); - S::remove_work(std::ref(token.at(1)), std::ref(kv2)); - t.join(); - - ASSERT_EQ(leave(token.at(0)), status::OK); - ASSERT_EQ(leave(token.at(1)), status::OK); - destroy(); - } -} - +/** + * @file multi_thread_delete_test.cpp + */ + +#include +#include +#include +#include +#include + +#include "kvs.h" + +#include "gtest/gtest.h" + +#include "glog/logging.h" +using namespace yakushima; + +namespace yakushima::testing { + +class multi_thread_delete_20_key_test : public ::testing::Test { +public: + static void call_once_f() { + google::InitGoogleLogging("yakushima-test-multi_thread-delete-multi_" + "thread_delete_20_key_test"); + FLAGS_stderrthreshold = 0; + } + void SetUp() override { + std::call_once(init_, call_once_f); + init(); + } + + void TearDown() override { fin(); } + +private: + static inline std::once_flag init_; // NOLINT +}; + +std::string st{"1"}; // NOLINT + +TEST_F(multi_thread_delete_20_key_test, ordered_20_key) { // NOLINT + /** + * Concurrent remove against 10 key. 
+ */
+
+    constexpr std::size_t ary_size = 20;
+    std::vector<std::tuple<std::string, std::string>> kv1{}; // NOLINT
+    std::vector<std::tuple<std::string, std::string>> kv2{}; // NOLINT
+    std::string k{8, 0};
+    for (std::size_t i = 0; i < ary_size / 2; ++i) {
+        memcpy(k.data(), &i, sizeof(i));
+        kv1.emplace_back(k, "v");
+    }
+    for (std::size_t i = ary_size / 2; i < ary_size; ++i) {
+        memcpy(k.data(), &i, sizeof(i));
+        kv2.emplace_back(k, "v");
+    }
+
+#ifndef NDEBUG
+    for (size_t h = 0; h < 1; ++h) {
+#else
+    for (size_t h = 0; h < 20; ++h) {
+#endif
+        create_storage(st);
+        std::array<Token, 2> token{};
+        ASSERT_EQ(enter(token.at(0)), status::OK);
+        ASSERT_EQ(enter(token.at(1)), status::OK);
+
+        std::reverse(kv1.begin(), kv1.end());
+        std::reverse(kv2.begin(), kv2.end());
+
+        struct S {
+            static void
+            put_work(Token& token,
+                     std::vector<std::tuple<std::string, std::string>>& kv) {
+                for (auto& i : kv) {
+                    std::string k(std::get<0>(i));
+                    std::string v(std::get<1>(i));
+                    status ret = put(token, st, k, v.data(), v.size());
+                    if (ret != status::OK) {
+                        EXPECT_EQ(status::OK, ret); // output log
+                        std::abort();
+                    }
+                }
+            }
+
+            static void
+            remove_work(Token& token,
+                        std::vector<std::tuple<std::string, std::string>>& kv) {
+                for (auto& i : kv) {
+                    std::string k(std::get<0>(i));
+                    std::string v(std::get<1>(i));
+                    status ret =
+                            remove(token, st, std::string_view(k)); // NOLINT
+                    if (ret != status::OK) {
+                        EXPECT_EQ(status::OK, ret); // output log
+                        std::abort();
+                    }
+                }
+            }
+        };
+
+        std::thread t(S::put_work, std::ref(token.at(0)), std::ref(kv1));
+        S::put_work(std::ref(token.at(1)), std::ref(kv2));
+        t.join();
+
+        t = std::thread(S::remove_work, std::ref(token.at(0)), std::ref(kv1));
+        S::remove_work(std::ref(token.at(1)), std::ref(kv2));
+        t.join();
+
+        ASSERT_EQ(leave(token.at(0)), status::OK);
+        ASSERT_EQ(leave(token.at(1)), status::OK);
+        destroy();
+    }
+}
+
+TEST_F(multi_thread_delete_20_key_test, reverse_20_key) { // NOLINT
+    /**
+     * Concurrent remove against 20 key.
+ */ + + constexpr std::size_t ary_size = 20; + std::vector> kv1{}; // NOLINT + std::vector> kv2{}; // NOLINT + std::string k{8, 0}; + for (std::size_t i = 0; i < ary_size / 2; ++i) { + memcpy(k.data(), &i, sizeof(i)); + kv1.emplace_back(k, "v"); + } + for (std::size_t i = ary_size / 2; i < ary_size; ++i) { + memcpy(k.data(), &i, sizeof(i)); + kv2.emplace_back(k, "v"); + } + +#ifndef NDEBUG + for (size_t h = 0; h < 1; ++h) { +#else + for (size_t h = 0; h < 20; ++h) { +#endif + create_storage(st); + std::array token{}; + ASSERT_EQ(enter(token.at(0)), status::OK); + ASSERT_EQ(enter(token.at(1)), status::OK); + + std::reverse(kv1.begin(), kv1.end()); + std::reverse(kv2.begin(), kv2.end()); + + struct S { + static void + put_work(Token& token, + std::vector>& kv) { + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, st, k, v.data(), v.size()); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + } + + static void + remove_work(Token& token, + std::vector>& kv) { + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = + remove(token, st, std::string_view(k)); // NOLINT + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + } + }; + + std::thread t(S::put_work, std::ref(token.at(0)), std::ref(kv1)); + S::put_work(std::ref(token.at(1)), std::ref(kv2)); + t.join(); + + t = std::thread(S::remove_work, std::ref(token.at(0)), std::ref(kv1)); + S::remove_work(std::ref(token.at(1)), std::ref(kv2)); + t.join(); + + ASSERT_EQ(leave(token.at(0)), status::OK); + ASSERT_EQ(leave(token.at(1)), status::OK); + destroy(); + } +} + +TEST_F(multi_thread_delete_20_key_test, shuffled_20_key) { // NOLINT + /** + * Concurrent remove against 100 key. 
+ */ + constexpr std::size_t ary_size = 20; + std::vector> kv1{}; // NOLINT + std::vector> kv2{}; // NOLINT + std::string k{8, 0}; + for (std::size_t i = 0; i < ary_size / 2; ++i) { + memcpy(k.data(), &i, sizeof(i)); + kv1.emplace_back(k, "v"); + } + for (std::size_t i = ary_size / 2; i < ary_size; ++i) { + memcpy(k.data(), &i, sizeof(i)); + kv2.emplace_back(k, "v"); + } + + std::random_device seed_gen; + std::mt19937 engine(seed_gen()); + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 30; ++h) { +#endif + create_storage(st); + std::array token{}; + ASSERT_EQ(enter(token.at(0)), status::OK); + ASSERT_EQ(enter(token.at(1)), status::OK); + + std::shuffle(kv1.begin(), kv1.end(), engine); + std::shuffle(kv2.begin(), kv2.end(), engine); + + struct S { + static void + put_work(Token& token, + std::vector>& kv) { + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, st, k, v.data(), v.size()); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + } + + static void + remove_work(Token& token, + std::vector>& kv) { + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = remove(token, st, k); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + } + }; + + std::thread t(S::put_work, std::ref(token.at(0)), std::ref(kv1)); + S::put_work(std::ref(token.at(1)), std::ref(kv2)); + t.join(); + + t = std::thread(S::remove_work, std::ref(token.at(0)), std::ref(kv1)); + S::remove_work(std::ref(token.at(1)), std::ref(kv2)); + t.join(); + + ASSERT_EQ(leave(token.at(0)), status::OK); + ASSERT_EQ(leave(token.at(1)), status::OK); + destroy(); + } +} + } // namespace yakushima::testing \ No newline at end of file diff --git a/test/multi_thread/delete/multi_thread_delete_one_border_test.cpp b/test/multi_thread/delete/multi_thread_delete_one_border_test.cpp index a6a6771..a3e0789 100644 --- a/test/multi_thread/delete/multi_thread_delete_one_border_test.cpp +++ b/test/multi_thread/delete/multi_thread_delete_one_border_test.cpp @@ -1,356 +1,356 @@ -/** - * @file multi_thread_delete_test.cpp - */ - -#include -#include -#include -#include -#include - -#include "kvs.h" - -#include "gtest/gtest.h" - -#include "glog/logging.h" -using namespace yakushima; - -namespace yakushima::testing { - -class multi_thread_delete_one_border_test : public ::testing::Test { -public: - static void call_once_f() { - google::InitGoogleLogging("yakushima-test-multi_thread-delete-multi_" - "thread_delete_one_border_test"); - FLAGS_stderrthreshold = 0; - } - void SetUp() override { - std::call_once(init_, call_once_f); - init(); - } - - void TearDown() override { fin(); } - -private: - static inline std::once_flag init_; // NOLINT -}; - -std::string test_storage_name{"1"}; // NOLINT - -TEST_F(multi_thread_delete_one_border_test, one_border) { // NOLINT - /** - * Initial state : multi threads put same null char key slices and - * different key length to multiple border. Concurrent remove against - * initial state. 
- */ - constexpr std::size_t ary_size = 9; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 100; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread, - std::atomic* meet) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? (ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back(std::string(i, '\0'), std::to_string(i)); - } - - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - EXPECT_EQ(ret, status::OK); // output log - std::abort(); - } - } - - meet->fetch_add(1); - while (meet->load(std::memory_order_acquire) != max_thread) { - _mm_pause(); - } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = remove(token, test_storage_name, k); - if (ret != status::OK) { - EXPECT_EQ(ret, status::OK); // output log - std::abort(); - } - } - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - std::atomic meet{0}; - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm, &meet); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - destroy(); - } -} - -TEST_F(multi_thread_delete_one_border_test, one_border_shuffle) { // NOLINT - /** - * Initial state : multi threads put same null char key slices and - * different key length to multiple border, which is using shuffled data. - * Concurrent remove against initial state. - */ - constexpr std::size_t ary_size = 9; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 100; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread, - std::atomic* meet) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? 
(ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back(std::string(i, '\0'), std::to_string(i)); - } - - std::random_device seed_gen{}; - std::mt19937 engine(seed_gen()); - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - - std::shuffle(kv.begin(), kv.end(), engine); - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - EXPECT_EQ(ret, status::OK); // output log - std::abort(); - } - } - - meet->fetch_add(1); - while (meet->load(std::memory_order_acquire) != max_thread) { - _mm_pause(); - } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = remove(token, test_storage_name, k); - if (ret != status::OK) { - EXPECT_EQ(ret, status::OK); // output log - std::abort(); - } - } - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - std::atomic meet{0}; - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm, &meet); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - destroy(); - } -} - -TEST_F(multi_thread_delete_one_border_test, test3) { // NOLINT - /** - * Initial state : multi threads put same null char key slices and - * different key length to single border. Concurrent remove against - * initial state. - */ - - constexpr std::size_t ary_size = 15; - std::vector> kv1{}; // NOLINT - std::vector> kv2{}; // NOLINT - for (std::size_t i = 0; i < 7; ++i) { - kv1.emplace_back(std::string(i, '\0'), std::to_string(i)); - } - for (std::size_t i = 7; i < ary_size; ++i) { - kv2.emplace_back(std::string(i, '\0'), std::to_string(i)); - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 200; ++h) { -#endif - create_storage(test_storage_name); - std::array token{}; - ASSERT_EQ(enter(token.at(0)), status::OK); - ASSERT_EQ(enter(token.at(1)), status::OK); - - std::reverse(kv1.begin(), kv1.end()); - std::reverse(kv2.begin(), kv2.end()); - - struct S { - static void - put_work(Token& token, - std::vector>& kv) { - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - } - - static void - remove_work(Token& token, - std::vector>& kv) { - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = remove(token, test_storage_name, k); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - } - }; - - std::thread t(S::put_work, std::ref(token.at(0)), std::ref(kv2)); - S::put_work(std::ref(token.at(1)), std::ref(kv1)); - t.join(); - - t = std::thread(S::remove_work, std::ref(token.at(0)), std::ref(kv1)); - S::remove_work(std::ref(token.at(1)), std::ref(kv2)); - t.join(); - - ASSERT_EQ(leave(token.at(0)), status::OK); - ASSERT_EQ(leave(token.at(1)), status::OK); - destroy(); - } -} - -TEST_F(multi_thread_delete_one_border_test, test4) { // NOLINT - /** - * Initial state : multi threads put same null char key slices and - * different key length to single border, which is using shuffled data. - * Concurrent remove against initial state. 
- */ - - constexpr std::size_t ary_size = 15; - std::vector> kv1{}; // NOLINT - std::vector> kv2{}; // NOLINT - for (std::size_t i = 0; i < ary_size / 2; ++i) { - kv1.emplace_back(std::string(i, '\0'), std::to_string(i)); - } - for (std::size_t i = ary_size / 2; i < ary_size; ++i) { - kv2.emplace_back(std::string(i, '\0'), std::to_string(i)); - } - std::random_device seed_gen; - std::mt19937 engine(seed_gen()); - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 100; ++h) { -#endif - create_storage(test_storage_name); - std::array token{}; - ASSERT_EQ(enter(token.at(0)), status::OK); - ASSERT_EQ(enter(token.at(1)), status::OK); - - std::shuffle(kv1.begin(), kv1.end(), engine); - std::shuffle(kv2.begin(), kv2.end(), engine); - - struct S { - static void - put_work(Token& token, - std::vector>& kv) { - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - } - - static void - remove_work(Token& token, - std::vector>& kv) { - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = remove(token, test_storage_name, k); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - } - }; - - std::thread t(S::put_work, std::ref(token.at(0)), std::ref(kv1)); - S::put_work(std::ref(token.at(1)), std::ref(kv2)); - t.join(); - - t = std::thread(S::remove_work, std::ref(token.at(0)), std::ref(kv1)); - S::remove_work(std::ref(token.at(1)), std::ref(kv2)); - t.join(); - - ASSERT_EQ(leave(token.at(0)), status::OK); - ASSERT_EQ(leave(token.at(1)), status::OK); - destroy(); - } -} - +/** + * @file multi_thread_delete_test.cpp + */ + +#include +#include +#include +#include +#include + +#include "kvs.h" + +#include "gtest/gtest.h" + +#include "glog/logging.h" +using namespace yakushima; + +namespace yakushima::testing { + +class multi_thread_delete_one_border_test : public ::testing::Test { +public: + static void call_once_f() { + google::InitGoogleLogging("yakushima-test-multi_thread-delete-multi_" + "thread_delete_one_border_test"); + FLAGS_stderrthreshold = 0; + } + void SetUp() override { + std::call_once(init_, call_once_f); + init(); + } + + void TearDown() override { fin(); } + +private: + static inline std::once_flag init_; // NOLINT +}; + +std::string test_storage_name{"1"}; // NOLINT + +TEST_F(multi_thread_delete_one_border_test, one_border) { // NOLINT + /** + * Initial state : multi threads put same null char key slices and + * different key length to multiple border. Concurrent remove against + * initial state. + */ + constexpr std::size_t ary_size = 9; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 100; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread, + std::atomic* meet) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back(std::string(i, '\0'), std::to_string(i)); + } + + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + EXPECT_EQ(ret, status::OK); // output log + std::abort(); + } + } + + meet->fetch_add(1); + while (meet->load(std::memory_order_acquire) != max_thread) { + _mm_pause(); + } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = remove(token, test_storage_name, k); + if (ret != status::OK) { + EXPECT_EQ(ret, status::OK); // output log + std::abort(); + } + } + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + std::atomic meet{0}; + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm, &meet); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + destroy(); + } +} + +TEST_F(multi_thread_delete_one_border_test, one_border_shuffle) { // NOLINT + /** + * Initial state : multi threads put same null char key slices and + * different key length to multiple border, which is using shuffled data. + * Concurrent remove against initial state. + */ + constexpr std::size_t ary_size = 9; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 100; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread, + std::atomic* meet) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? (ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back(std::string(i, '\0'), std::to_string(i)); + } + + std::random_device seed_gen{}; + std::mt19937 engine(seed_gen()); + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + + std::shuffle(kv.begin(), kv.end(), engine); + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + EXPECT_EQ(ret, status::OK); // output log + std::abort(); + } + } + + meet->fetch_add(1); + while (meet->load(std::memory_order_acquire) != max_thread) { + _mm_pause(); + } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = remove(token, test_storage_name, k); + if (ret != status::OK) { + EXPECT_EQ(ret, status::OK); // output log + std::abort(); + } + } + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + std::atomic meet{0}; + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm, &meet); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + destroy(); + } +} + +TEST_F(multi_thread_delete_one_border_test, test3) { // NOLINT + /** + * Initial state : multi threads put same null char key slices and + * different key length to single border. Concurrent remove against + * initial state. 
+ */ + + constexpr std::size_t ary_size = 15; + std::vector> kv1{}; // NOLINT + std::vector> kv2{}; // NOLINT + for (std::size_t i = 0; i < 7; ++i) { + kv1.emplace_back(std::string(i, '\0'), std::to_string(i)); + } + for (std::size_t i = 7; i < ary_size; ++i) { + kv2.emplace_back(std::string(i, '\0'), std::to_string(i)); + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 200; ++h) { +#endif + create_storage(test_storage_name); + std::array token{}; + ASSERT_EQ(enter(token.at(0)), status::OK); + ASSERT_EQ(enter(token.at(1)), status::OK); + + std::reverse(kv1.begin(), kv1.end()); + std::reverse(kv2.begin(), kv2.end()); + + struct S { + static void + put_work(Token& token, + std::vector>& kv) { + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + } + + static void + remove_work(Token& token, + std::vector>& kv) { + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = remove(token, test_storage_name, k); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + } + }; + + std::thread t(S::put_work, std::ref(token.at(0)), std::ref(kv2)); + S::put_work(std::ref(token.at(1)), std::ref(kv1)); + t.join(); + + t = std::thread(S::remove_work, std::ref(token.at(0)), std::ref(kv1)); + S::remove_work(std::ref(token.at(1)), std::ref(kv2)); + t.join(); + + ASSERT_EQ(leave(token.at(0)), status::OK); + ASSERT_EQ(leave(token.at(1)), status::OK); + destroy(); + } +} + +TEST_F(multi_thread_delete_one_border_test, test4) { // NOLINT + /** + * Initial state : multi threads put same null char key slices and + * different key length to single border, which is using shuffled data. + * Concurrent remove against initial state. 
+ */ + + constexpr std::size_t ary_size = 15; + std::vector> kv1{}; // NOLINT + std::vector> kv2{}; // NOLINT + for (std::size_t i = 0; i < ary_size / 2; ++i) { + kv1.emplace_back(std::string(i, '\0'), std::to_string(i)); + } + for (std::size_t i = ary_size / 2; i < ary_size; ++i) { + kv2.emplace_back(std::string(i, '\0'), std::to_string(i)); + } + std::random_device seed_gen; + std::mt19937 engine(seed_gen()); + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 100; ++h) { +#endif + create_storage(test_storage_name); + std::array token{}; + ASSERT_EQ(enter(token.at(0)), status::OK); + ASSERT_EQ(enter(token.at(1)), status::OK); + + std::shuffle(kv1.begin(), kv1.end(), engine); + std::shuffle(kv2.begin(), kv2.end(), engine); + + struct S { + static void + put_work(Token& token, + std::vector>& kv) { + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + } + + static void + remove_work(Token& token, + std::vector>& kv) { + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = remove(token, test_storage_name, k); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + } + }; + + std::thread t(S::put_work, std::ref(token.at(0)), std::ref(kv1)); + S::put_work(std::ref(token.at(1)), std::ref(kv2)); + t.join(); + + t = std::thread(S::remove_work, std::ref(token.at(0)), std::ref(kv1)); + S::remove_work(std::ref(token.at(1)), std::ref(kv2)); + t.join(); + + ASSERT_EQ(leave(token.at(0)), status::OK); + ASSERT_EQ(leave(token.at(1)), status::OK); + destroy(); + } +} + } // namespace yakushima::testing \ No newline at end of file diff --git a/test/multi_thread/delete/multi_thread_delete_two_border_test.cpp b/test/multi_thread/delete/multi_thread_delete_two_border_test.cpp index 09e0cf0..88210de 100644 --- a/test/multi_thread/delete/multi_thread_delete_two_border_test.cpp +++ b/test/multi_thread/delete/multi_thread_delete_two_border_test.cpp @@ -1,341 +1,341 @@ -/** - * @file multi_thread_delete_test.cpp - */ - -#include -#include -#include -#include -#include - -#include "kvs.h" - -#include "gtest/gtest.h" - -#include "glog/logging.h" -using namespace yakushima; - -namespace yakushima::testing { - -class multi_thread_delete_two_border_test : public ::testing::Test { -public: - static void call_once_f() { - google::InitGoogleLogging("yakushima-test-multi_thread-delete-multi_" - "thread_delete_two_border_test"); - FLAGS_stderrthreshold = 0; - } - void SetUp() override { - std::call_once(init_, call_once_f); - init(); - } - - void TearDown() override { fin(); } - -private: - static inline std::once_flag init_; // NOLINT -}; - -std::string test_storage_name{"1"}; // NOLINT - -TEST_F(multi_thread_delete_two_border_test, test5) { // NOLINT - /** - * Initial state : multi threads put until first split of border. - * Concurrent remove against initial state. 
- */ - - constexpr std::size_t ary_size = key_slice_length + 1; - std::vector> kv1{}; // NOLINT - std::vector> kv2{}; // NOLINT - for (std::size_t i = 0; i < ary_size / 2; ++i) { - kv1.emplace_back(std::string(1, i), std::to_string(i)); // NOLINT - } - for (std::size_t i = ary_size / 2; i < ary_size; ++i) { - kv2.emplace_back(std::string(1, i), std::to_string(i)); // NOLINT - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 100; ++h) { -#endif - create_storage(test_storage_name); - std::array token{}; - ASSERT_EQ(enter(token.at(0)), status::OK); - ASSERT_EQ(enter(token.at(1)), status::OK); - - std::reverse(kv1.begin(), kv1.end()); - std::reverse(kv2.begin(), kv2.end()); - - struct S { - static void - put_work(Token& token, - std::vector>& kv) { - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - } - - static void - remove_work(Token& token, - std::vector>& kv) { - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = remove(token, test_storage_name, k); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - } - }; - - std::thread t(S::put_work, std::ref(token.at(0)), std::ref(kv1)); - S::put_work(std::ref(token.at(1)), std::ref(kv2)); - t.join(); - - t = std::thread(S::remove_work, std::ref(token.at(0)), std::ref(kv1)); - S::remove_work(std::ref(token.at(1)), std::ref(kv2)); - t.join(); - - ASSERT_EQ(leave(token.at(0)), status::OK); - ASSERT_EQ(leave(token.at(1)), status::OK); - destroy(); - } -} - -TEST_F(multi_thread_delete_two_border_test, test6) { // NOLINT - /** - * Initial state : multi threads put until first split of border, which is - * using shuffled data. Concurrent remove against initial state. 
- */ - - constexpr std::size_t ary_size = key_slice_length + 1; - std::vector> kv1{}; // NOLINT - std::vector> kv2{}; // NOLINT - for (std::size_t i = 0; i < ary_size / 2; ++i) { - kv1.emplace_back(std::string(1, i), std::to_string(i)); // NOLINT - } - for (std::size_t i = ary_size / 2; i < ary_size; ++i) { - kv2.emplace_back(std::string(1, i), std::to_string(i)); // NOLINT - } - std::random_device seed_gen; - std::mt19937 engine(seed_gen()); - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 100; ++h) { -#endif - create_storage(test_storage_name); - std::array token{}; - ASSERT_EQ(enter(token.at(0)), status::OK); - ASSERT_EQ(enter(token.at(1)), status::OK); - - std::shuffle(kv1.begin(), kv1.end(), engine); - std::shuffle(kv2.begin(), kv2.end(), engine); - - struct S { - static void - put_work(Token& token, - std::vector>& kv) { - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - } - - static void - remove_work(Token& token, - std::vector>& kv) { - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = remove(token, test_storage_name, k); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - } - }; - - std::thread t(S::put_work, std::ref(token.at(0)), std::ref(kv1)); - S::put_work(std::ref(token.at(1)), std::ref(kv2)); - t.join(); - - t = std::thread(S::remove_work, std::ref(token.at(0)), std::ref(kv1)); - S::remove_work(std::ref(token.at(1)), std::ref(kv2)); - t.join(); - - ASSERT_EQ(leave(token.at(0)), status::OK); - ASSERT_EQ(leave(token.at(1)), status::OK); - destroy(); - } -} - -TEST_F(multi_thread_delete_two_border_test, test7) { // NOLINT - /** - * Initial state : multi threads put between first split of border and - * first split of interior. Concurrent remove against initial state. 
- */ - - constexpr std::size_t ary_size = 100; - std::vector> kv1{}; // NOLINT - std::vector> kv2{}; // NOLINT - for (std::size_t i = 0; i < ary_size / 2; ++i) { - kv1.emplace_back(std::string(1, i), std::to_string(i)); // NOLINT - } - for (std::size_t i = ary_size / 2; i < ary_size; ++i) { - kv2.emplace_back(std::string(1, i), std::to_string(i)); // NOLINT - } - -#ifndef NDEBUG - for (size_t h = 0; h < 1; ++h) { -#else - for (size_t h = 0; h < 20; ++h) { -#endif - create_storage(test_storage_name); - std::array token{}; - ASSERT_EQ(enter(token.at(0)), status::OK); - ASSERT_EQ(enter(token.at(1)), status::OK); - - std::reverse(kv1.begin(), kv1.end()); - std::reverse(kv2.begin(), kv2.end()); - - struct S { - static void - put_work(Token& token, - std::vector>& kv) { - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - } - - static void - remove_work(Token& token, - std::vector>& kv) { - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = remove(token, test_storage_name, k); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - } - }; - - std::thread t(S::put_work, std::ref(token.at(0)), std::ref(kv1)); - S::put_work(std::ref(token.at(1)), std::ref(kv2)); - t.join(); - - t = std::thread(S::remove_work, std::ref(token.at(0)), std::ref(kv1)); - S::remove_work(std::ref(token.at(1)), std::ref(kv2)); - t.join(); - - ASSERT_EQ(leave(token.at(0)), status::OK); - ASSERT_EQ(leave(token.at(1)), status::OK); - destroy(); - } -} - -TEST_F(multi_thread_delete_two_border_test, test8) { // NOLINT - /** - * Initial state : multi threads put between first split of border and - * first split of interior, which is using shuffled data. Concurrent - * remove against initial state. 
- */ - constexpr std::size_t ary_size = 100; - std::vector> kv1{}; // NOLINT - std::vector> kv2{}; // NOLINT - for (std::size_t i = 0; i < ary_size / 2; ++i) { - kv1.emplace_back(std::string(1, i), std::to_string(i)); // NOLINT - } - for (std::size_t i = ary_size / 2; i < ary_size; ++i) { - kv2.emplace_back(std::string(1, i), std::to_string(i)); // NOLINT - } - - std::random_device seed_gen; - std::mt19937 engine(seed_gen()); - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 30; ++h) { -#endif - create_storage(test_storage_name); - std::array token{}; - ASSERT_EQ(enter(token.at(0)), status::OK); - ASSERT_EQ(enter(token.at(1)), status::OK); - - std::shuffle(kv1.begin(), kv1.end(), engine); - std::shuffle(kv2.begin(), kv2.end(), engine); - - struct S { - static void - put_work(Token& token, - std::vector>& kv) { - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - } - - static void - remove_work(Token& token, - std::vector>& kv) { - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = remove(token, test_storage_name, k); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - } - }; - - std::thread t(S::put_work, std::ref(token.at(0)), std::ref(kv1)); - S::put_work(std::ref(token.at(1)), std::ref(kv2)); - t.join(); - - t = std::thread(S::remove_work, std::ref(token.at(0)), std::ref(kv1)); - S::remove_work(std::ref(token.at(1)), std::ref(kv2)); - t.join(); - - ASSERT_EQ(leave(token.at(0)), status::OK); - ASSERT_EQ(leave(token.at(1)), status::OK); - destroy(); - } -} - +/** + * @file multi_thread_delete_test.cpp + */ + +#include +#include +#include +#include +#include + +#include "kvs.h" + +#include "gtest/gtest.h" + +#include "glog/logging.h" +using namespace yakushima; + +namespace yakushima::testing { + +class multi_thread_delete_two_border_test : public ::testing::Test { +public: + static void call_once_f() { + google::InitGoogleLogging("yakushima-test-multi_thread-delete-multi_" + "thread_delete_two_border_test"); + FLAGS_stderrthreshold = 0; + } + void SetUp() override { + std::call_once(init_, call_once_f); + init(); + } + + void TearDown() override { fin(); } + +private: + static inline std::once_flag init_; // NOLINT +}; + +std::string test_storage_name{"1"}; // NOLINT + +TEST_F(multi_thread_delete_two_border_test, test5) { // NOLINT + /** + * Initial state : multi threads put until first split of border. + * Concurrent remove against initial state. 
+ */ + + constexpr std::size_t ary_size = key_slice_length + 1; + std::vector> kv1{}; // NOLINT + std::vector> kv2{}; // NOLINT + for (std::size_t i = 0; i < ary_size / 2; ++i) { + kv1.emplace_back(std::string(1, i), std::to_string(i)); // NOLINT + } + for (std::size_t i = ary_size / 2; i < ary_size; ++i) { + kv2.emplace_back(std::string(1, i), std::to_string(i)); // NOLINT + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 100; ++h) { +#endif + create_storage(test_storage_name); + std::array token{}; + ASSERT_EQ(enter(token.at(0)), status::OK); + ASSERT_EQ(enter(token.at(1)), status::OK); + + std::reverse(kv1.begin(), kv1.end()); + std::reverse(kv2.begin(), kv2.end()); + + struct S { + static void + put_work(Token& token, + std::vector>& kv) { + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + } + + static void + remove_work(Token& token, + std::vector>& kv) { + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = remove(token, test_storage_name, k); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + } + }; + + std::thread t(S::put_work, std::ref(token.at(0)), std::ref(kv1)); + S::put_work(std::ref(token.at(1)), std::ref(kv2)); + t.join(); + + t = std::thread(S::remove_work, std::ref(token.at(0)), std::ref(kv1)); + S::remove_work(std::ref(token.at(1)), std::ref(kv2)); + t.join(); + + ASSERT_EQ(leave(token.at(0)), status::OK); + ASSERT_EQ(leave(token.at(1)), status::OK); + destroy(); + } +} + +TEST_F(multi_thread_delete_two_border_test, test6) { // NOLINT + /** + * Initial state : multi threads put until first split of border, which is + * using shuffled data. Concurrent remove against initial state. 
+ */ + + constexpr std::size_t ary_size = key_slice_length + 1; + std::vector> kv1{}; // NOLINT + std::vector> kv2{}; // NOLINT + for (std::size_t i = 0; i < ary_size / 2; ++i) { + kv1.emplace_back(std::string(1, i), std::to_string(i)); // NOLINT + } + for (std::size_t i = ary_size / 2; i < ary_size; ++i) { + kv2.emplace_back(std::string(1, i), std::to_string(i)); // NOLINT + } + std::random_device seed_gen; + std::mt19937 engine(seed_gen()); + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 100; ++h) { +#endif + create_storage(test_storage_name); + std::array token{}; + ASSERT_EQ(enter(token.at(0)), status::OK); + ASSERT_EQ(enter(token.at(1)), status::OK); + + std::shuffle(kv1.begin(), kv1.end(), engine); + std::shuffle(kv2.begin(), kv2.end(), engine); + + struct S { + static void + put_work(Token& token, + std::vector>& kv) { + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + } + + static void + remove_work(Token& token, + std::vector>& kv) { + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = remove(token, test_storage_name, k); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + } + }; + + std::thread t(S::put_work, std::ref(token.at(0)), std::ref(kv1)); + S::put_work(std::ref(token.at(1)), std::ref(kv2)); + t.join(); + + t = std::thread(S::remove_work, std::ref(token.at(0)), std::ref(kv1)); + S::remove_work(std::ref(token.at(1)), std::ref(kv2)); + t.join(); + + ASSERT_EQ(leave(token.at(0)), status::OK); + ASSERT_EQ(leave(token.at(1)), status::OK); + destroy(); + } +} + +TEST_F(multi_thread_delete_two_border_test, test7) { // NOLINT + /** + * Initial state : multi threads put between first split of border and + * first split of interior. Concurrent remove against initial state. 
+ */ + + constexpr std::size_t ary_size = 100; + std::vector> kv1{}; // NOLINT + std::vector> kv2{}; // NOLINT + for (std::size_t i = 0; i < ary_size / 2; ++i) { + kv1.emplace_back(std::string(1, i), std::to_string(i)); // NOLINT + } + for (std::size_t i = ary_size / 2; i < ary_size; ++i) { + kv2.emplace_back(std::string(1, i), std::to_string(i)); // NOLINT + } + +#ifndef NDEBUG + for (size_t h = 0; h < 1; ++h) { +#else + for (size_t h = 0; h < 20; ++h) { +#endif + create_storage(test_storage_name); + std::array token{}; + ASSERT_EQ(enter(token.at(0)), status::OK); + ASSERT_EQ(enter(token.at(1)), status::OK); + + std::reverse(kv1.begin(), kv1.end()); + std::reverse(kv2.begin(), kv2.end()); + + struct S { + static void + put_work(Token& token, + std::vector>& kv) { + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + } + + static void + remove_work(Token& token, + std::vector>& kv) { + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = remove(token, test_storage_name, k); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + } + }; + + std::thread t(S::put_work, std::ref(token.at(0)), std::ref(kv1)); + S::put_work(std::ref(token.at(1)), std::ref(kv2)); + t.join(); + + t = std::thread(S::remove_work, std::ref(token.at(0)), std::ref(kv1)); + S::remove_work(std::ref(token.at(1)), std::ref(kv2)); + t.join(); + + ASSERT_EQ(leave(token.at(0)), status::OK); + ASSERT_EQ(leave(token.at(1)), status::OK); + destroy(); + } +} + +TEST_F(multi_thread_delete_two_border_test, test8) { // NOLINT + /** + * Initial state : multi threads put between first split of border and + * first split of interior, which is using shuffled data. Concurrent + * remove against initial state. 
+ */ + constexpr std::size_t ary_size = 100; + std::vector> kv1{}; // NOLINT + std::vector> kv2{}; // NOLINT + for (std::size_t i = 0; i < ary_size / 2; ++i) { + kv1.emplace_back(std::string(1, i), std::to_string(i)); // NOLINT + } + for (std::size_t i = ary_size / 2; i < ary_size; ++i) { + kv2.emplace_back(std::string(1, i), std::to_string(i)); // NOLINT + } + + std::random_device seed_gen; + std::mt19937 engine(seed_gen()); + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 30; ++h) { +#endif + create_storage(test_storage_name); + std::array token{}; + ASSERT_EQ(enter(token.at(0)), status::OK); + ASSERT_EQ(enter(token.at(1)), status::OK); + + std::shuffle(kv1.begin(), kv1.end(), engine); + std::shuffle(kv2.begin(), kv2.end(), engine); + + struct S { + static void + put_work(Token& token, + std::vector>& kv) { + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + } + + static void + remove_work(Token& token, + std::vector>& kv) { + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = remove(token, test_storage_name, k); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + } + }; + + std::thread t(S::put_work, std::ref(token.at(0)), std::ref(kv1)); + S::put_work(std::ref(token.at(1)), std::ref(kv2)); + t.join(); + + t = std::thread(S::remove_work, std::ref(token.at(0)), std::ref(kv1)); + S::remove_work(std::ref(token.at(1)), std::ref(kv2)); + t.join(); + + ASSERT_EQ(leave(token.at(0)), status::OK); + ASSERT_EQ(leave(token.at(1)), status::OK); + destroy(); + } +} + } // namespace yakushima::testing \ No newline at end of file diff --git a/test/multi_thread/put/multi_thread_put_100k_key_test.cpp b/test/multi_thread/put/multi_thread_put_100k_key_test.cpp index 9811be0..2130b35 100644 --- a/test/multi_thread/put/multi_thread_put_100k_key_test.cpp +++ b/test/multi_thread/put/multi_thread_put_100k_key_test.cpp @@ -1,162 +1,162 @@ -/** - * @file multi_thread_put_1K_key_test.cpp - */ - -#include -#include -#include -#include -#include - -#include "gtest/gtest.h" - -#include "glog/logging.h" - -#include "kvs.h" - -using namespace yakushima; - -namespace yakushima::testing { - -class multi_thread_put_100k_key_test : public ::testing::Test { -public: - static void call_once_f() { - google::InitGoogleLogging("yakushima-test-multi_thread-put-multi_" - "thread_put_100k_key_test"); - FLAGS_stderrthreshold = 0; - } - - void SetUp() override { - init(); - std::call_once(init_, call_once_f); - } - - void TearDown() override { fin(); } - -private: - static inline std::once_flag init_; // NOLINT -}; - -std::string st{"1"}; // NOLINT - -#ifndef NDEBUG -TEST_F(multi_thread_put_100k_key_test, DISABLED_100k_key) { // NOLINT -#else -TEST_F(multi_thread_put_100k_key_test, 100k_key) { // NOLINT -#endif - /** - * Concurrent put 100k key. - */ - constexpr std::size_t ary_size = 100000; - std::size_t th_nm{std::thread::hardware_concurrency()}; - - for (std::size_t h = 0; h < 1; ++h) { - create_storage(st); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? 
(ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - std::string k{"12345678"}; - memcpy(k.data(), &i, sizeof(i)); - kv.emplace_back(k, "v"); - } - - Token token{nullptr}; - while (status::OK != enter(token)) { _mm_pause(); } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, st, k, v.data(), v.size()); - if (ret != status::OK) { - LOG(FATAL) << ret; // output log - std::abort(); - } - } - - ASSERT_EQ(status::OK, leave(token)); - } - }; - - std::vector thv{}; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - destroy(); - } -} - -#ifndef NDEBUG -TEST_F(multi_thread_put_100k_key_test, DISABLED_100k_key_shuffle) { // NOLINT -#else -TEST_F(multi_thread_put_100k_key_test, 100k_key_shuffle) { // NOLINT -#endif - /** - * Concurrent put 100k key. - * Shuffle data. - */ - constexpr std::size_t ary_size = 1000000; - std::size_t th_nm{std::thread::hardware_concurrency()}; - - for (std::size_t h = 0; h < 1; ++h) { - create_storage(st); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? (ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - std::string k{"12345678"}; - memcpy(k.data(), &i, sizeof(i)); - kv.emplace_back(k, "v"); - } - - std::random_device seed_gen; - std::mt19937 engine(seed_gen()); - Token token{}; - ASSERT_EQ(status::OK, enter(token)); - - std::shuffle(kv.begin(), kv.end(), engine); - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, st, k, v.data(), v.size()); - if (ret != status::OK) { - EXPECT_EQ(status::OK, ret); // output log - std::abort(); - } - } - - ASSERT_EQ(status::OK, leave(token)); - } - }; - - std::vector thv{}; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - destroy(); - } -} - +/** + * @file multi_thread_put_1K_key_test.cpp + */ + +#include +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "glog/logging.h" + +#include "kvs.h" + +using namespace yakushima; + +namespace yakushima::testing { + +class multi_thread_put_100k_key_test : public ::testing::Test { +public: + static void call_once_f() { + google::InitGoogleLogging("yakushima-test-multi_thread-put-multi_" + "thread_put_100k_key_test"); + FLAGS_stderrthreshold = 0; + } + + void SetUp() override { + init(); + std::call_once(init_, call_once_f); + } + + void TearDown() override { fin(); } + +private: + static inline std::once_flag init_; // NOLINT +}; + +std::string st{"1"}; // NOLINT + +#ifndef NDEBUG +TEST_F(multi_thread_put_100k_key_test, DISABLED_100k_key) { // NOLINT +#else +TEST_F(multi_thread_put_100k_key_test, 100k_key) { // NOLINT +#endif + /** + * Concurrent put 100k key. + */ + constexpr std::size_t ary_size = 100000; + std::size_t th_nm{std::thread::hardware_concurrency()}; + + for (std::size_t h = 0; h < 1; ++h) { + create_storage(st); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + std::string k{"12345678"}; + memcpy(k.data(), &i, sizeof(i)); + kv.emplace_back(k, "v"); + } + + Token token{nullptr}; + while (status::OK != enter(token)) { _mm_pause(); } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, st, k, v.data(), v.size()); + if (ret != status::OK) { + LOG(FATAL) << ret; // output log + std::abort(); + } + } + + ASSERT_EQ(status::OK, leave(token)); + } + }; + + std::vector thv{}; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + destroy(); + } +} + +#ifndef NDEBUG +TEST_F(multi_thread_put_100k_key_test, DISABLED_100k_key_shuffle) { // NOLINT +#else +TEST_F(multi_thread_put_100k_key_test, 100k_key_shuffle) { // NOLINT +#endif + /** + * Concurrent put 100k key. + * Shuffle data. + */ + constexpr std::size_t ary_size = 1000000; + std::size_t th_nm{std::thread::hardware_concurrency()}; + + for (std::size_t h = 0; h < 1; ++h) { + create_storage(st); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? (ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + std::string k{"12345678"}; + memcpy(k.data(), &i, sizeof(i)); + kv.emplace_back(k, "v"); + } + + std::random_device seed_gen; + std::mt19937 engine(seed_gen()); + Token token{}; + ASSERT_EQ(status::OK, enter(token)); + + std::shuffle(kv.begin(), kv.end(), engine); + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, st, k, v.data(), v.size()); + if (ret != status::OK) { + EXPECT_EQ(status::OK, ret); // output log + std::abort(); + } + } + + ASSERT_EQ(status::OK, leave(token)); + } + }; + + std::vector thv{}; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + destroy(); + } +} + } // namespace yakushima::testing \ No newline at end of file diff --git a/test/multi_thread/put/multi_thread_put_many_interior_test.cpp b/test/multi_thread/put/multi_thread_put_many_interior_test.cpp index 9978a2f..450a5d7 100644 --- a/test/multi_thread/put/multi_thread_put_many_interior_test.cpp +++ b/test/multi_thread/put/multi_thread_put_many_interior_test.cpp @@ -1,172 +1,172 @@ -/** - * @file multi_thread_put_test.cpp - */ - -#include -#include -#include - -#include "gtest/gtest.h" - -#include "kvs.h" - -using namespace yakushima; - -namespace yakushima::testing { - -class mtpt : public ::testing::Test { - void SetUp() override { init(); } - - void TearDown() override { fin(); } -}; - -std::string test_storage_name{"1"}; // NOLINT - -TEST_F(mtpt, many_interior) { // NOLINT - constexpr std::size_t ary_size{241}; // value fanout 15 * node fanout 16 + 1 - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 10; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - for (std::size_t i = (ary_size 
/ max_thread) * th_id; - i < (th_id != max_thread - 1 - ? (ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back(std::make_pair(std::string(1, i), // NOLINT - std::to_string(i))); - } - - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::string k(1, ary_size - 1); // NOLINT - std::vector> - tuple_list{}; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, k, - scan_endpoint::INCLUSIVE, tuple_list); - ASSERT_EQ(tuple_list.size(), ary_size); - - for (std::size_t i = 0; i < ary_size; ++i) { - std::string v(std::to_string(i)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(i)), v.data(), - v.size()), - 0); - } - - destroy(); - } -} - -TEST_F(mtpt, many_interior_shuffle) { // NOLINT - constexpr std::size_t ary_size{241}; // value fanout 15 * node fanout 16 + 1 - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 10; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? 
(ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back(std::make_pair(std::string(1, i), // NOLINT - std::to_string(i))); - } - - std::random_device seed_gen{}; - std::mt19937 engine(seed_gen()); - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - - std::shuffle(kv.begin(), kv.end(), engine); - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::string k(1, ary_size - 1); // NOLINT - std::vector> - tuple_list{}; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, k, - scan_endpoint::INCLUSIVE, tuple_list); - ASSERT_EQ(tuple_list.size(), ary_size); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - - destroy(); - } -} - -} // namespace yakushima::testing +/** + * @file multi_thread_put_test.cpp + */ + +#include +#include +#include + +#include "gtest/gtest.h" + +#include "kvs.h" + +using namespace yakushima; + +namespace yakushima::testing { + +class mtpt : public ::testing::Test { + void SetUp() override { init(); } + + void TearDown() override { fin(); } +}; + +std::string test_storage_name{"1"}; // NOLINT + +TEST_F(mtpt, many_interior) { // NOLINT + constexpr std::size_t ary_size{241}; // value fanout 15 * node fanout 16 + 1 + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 10; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back(std::make_pair(std::string(1, i), // NOLINT + std::to_string(i))); + } + + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::string k(1, ary_size - 1); // NOLINT + std::vector> + tuple_list{}; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, k, + scan_endpoint::INCLUSIVE, tuple_list); + ASSERT_EQ(tuple_list.size(), ary_size); + + for (std::size_t i = 0; i < ary_size; ++i) { + std::string v(std::to_string(i)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(i)), v.data(), + v.size()), + 0); + } + + destroy(); + } +} + +TEST_F(mtpt, many_interior_shuffle) { // NOLINT + constexpr std::size_t ary_size{241}; // value fanout 15 * node fanout 16 + 1 + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 10; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back(std::make_pair(std::string(1, i), // NOLINT + std::to_string(i))); + } + + std::random_device seed_gen{}; + std::mt19937 engine(seed_gen()); + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + + std::shuffle(kv.begin(), kv.end(), engine); + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::string k(1, ary_size - 1); // NOLINT + std::vector> + tuple_list{}; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, k, + scan_endpoint::INCLUSIVE, tuple_list); + ASSERT_EQ(tuple_list.size(), ary_size); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + + destroy(); + } +} + +} // namespace yakushima::testing diff --git a/test/multi_thread/put/multi_thread_put_one_border_test.cpp b/test/multi_thread/put/multi_thread_put_one_border_test.cpp index c1cc64c..99d7b67 100644 --- a/test/multi_thread/put/multi_thread_put_one_border_test.cpp +++ b/test/multi_thread/put/multi_thread_put_one_border_test.cpp @@ -1,168 +1,168 @@ -/** - * @file multi_thread_put_test.cpp - */ - -#include -#include -#include - -#include "gtest/gtest.h" - -#include "kvs.h" - -using namespace yakushima; - -namespace yakushima::testing { - -class mtpt : public ::testing::Test { - void SetUp() override { init(); } - - void TearDown() override { fin(); } -}; - -std::string test_storage_name{"1"}; // NOLINT - -TEST_F(mtpt, one_border) { // NOLINT - constexpr std::size_t ary_size = 9; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 100; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? 
(ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back(std::make_pair(std::string(i, '\0'), - std::to_string(i))); - } - - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, std::string_view(k), - v.data(), v.size()), - status::OK); - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::string k(ary_size - 1, '\0'); - std::vector> - tuple_list{}; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, k, - scan_endpoint::INCLUSIVE, tuple_list); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - - destroy(); - } -} - -TEST_F(mtpt, one_border_shuffle) { // NOLINT - constexpr std::size_t ary_size = 9; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 100; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? (ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back(std::make_pair(std::string(i, '\0'), - std::to_string(i))); - } - - std::random_device seed_gen{}; - std::mt19937 engine(seed_gen()); - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - - std::shuffle(kv.begin(), kv.end(), engine); - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, std::string_view(k), - v.data(), v.size()), - status::OK); - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::string k(ary_size - 1, '\0'); - std::vector> - tuple_list{}; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, k, - scan_endpoint::INCLUSIVE, tuple_list); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - - destroy(); - } -} - -} // namespace yakushima::testing +/** + * @file multi_thread_put_test.cpp + */ + +#include +#include +#include + +#include "gtest/gtest.h" + +#include "kvs.h" + +using namespace yakushima; + +namespace yakushima::testing { + +class mtpt : public ::testing::Test { + void SetUp() override { init(); } + + void TearDown() override { fin(); } +}; + +std::string test_storage_name{"1"}; // NOLINT + +TEST_F(mtpt, one_border) { // NOLINT + constexpr std::size_t ary_size = 9; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 100; ++h) { +#endif + 
create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? (ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back(std::make_pair(std::string(i, '\0'), + std::to_string(i))); + } + + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, std::string_view(k), + v.data(), v.size()), + status::OK); + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::string k(ary_size - 1, '\0'); + std::vector> + tuple_list{}; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, k, + scan_endpoint::INCLUSIVE, tuple_list); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + + destroy(); + } +} + +TEST_F(mtpt, one_border_shuffle) { // NOLINT + constexpr std::size_t ary_size = 9; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 100; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back(std::make_pair(std::string(i, '\0'), + std::to_string(i))); + } + + std::random_device seed_gen{}; + std::mt19937 engine(seed_gen()); + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + + std::shuffle(kv.begin(), kv.end(), engine); + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, std::string_view(k), + v.data(), v.size()), + status::OK); + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::string k(ary_size - 1, '\0'); + std::vector> + tuple_list{}; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, k, + scan_endpoint::INCLUSIVE, tuple_list); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + + destroy(); + } +} + +} // namespace yakushima::testing diff --git a/test/multi_thread/put/multi_thread_put_one_interior_many_border_test.cpp b/test/multi_thread/put/multi_thread_put_one_interior_many_border_test.cpp index ff992ef..d555b8d 100644 --- a/test/multi_thread/put/multi_thread_put_one_interior_many_border_test.cpp +++ b/test/multi_thread/put/multi_thread_put_one_interior_many_border_test.cpp @@ -1,166 +1,166 @@ -/** - * @file multi_thread_put_test.cpp - */ - -#include -#include -#include - -#include "gtest/gtest.h" - -#include "kvs.h" - -using namespace yakushima; - -namespace yakushima::testing { - -class mtpt : public ::testing::Test { - void SetUp() override { init(); } - - void TearDown() override { fin(); } -}; - -std::string test_storage_name{"1"}; // NOLINT - -TEST_F(mtpt, one_interior) { // NOLINT - constexpr std::size_t ary_size = 100; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (size_t h = 0; h < 1; ++h) { -#else - for (size_t h = 0; h < 10; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? 
(ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back(std::make_pair(std::string(1, i), // NOLINT - std::to_string(i))); - } - - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, std::string_view(k), - v.data(), v.size()), - status::OK); - } - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::string k(1, ary_size); - std::vector> - tuple_list{}; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, k, - scan_endpoint::INCLUSIVE, tuple_list); - ASSERT_EQ(tuple_list.size(), ary_size); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - - destroy(); - } -} - -TEST_F(mtpt, one_interior_shuffle) { // NOLINT - constexpr std::size_t ary_size = 100; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (size_t h = 0; h < 1; ++h) { -#else - for (size_t h = 0; h < 10; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? (ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back(std::make_pair(std::string(1, i), // NOLINT - std::to_string(i))); - } - - std::random_device seed_gen{}; - std::mt19937 engine(seed_gen()); - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - std::shuffle(kv.begin(), kv.end(), engine); - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, std::string_view(k), - v.data(), v.size()), - status::OK); - } - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::string k(1, ary_size); - std::vector> - tuple_list{}; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, k, - scan_endpoint::INCLUSIVE, tuple_list); - ASSERT_EQ(tuple_list.size(), ary_size); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - - destroy(); - } -} - -} // namespace yakushima::testing +/** + * @file multi_thread_put_test.cpp + */ + +#include +#include +#include + +#include "gtest/gtest.h" + +#include "kvs.h" + +using namespace yakushima; + +namespace yakushima::testing { + +class mtpt : public ::testing::Test { + void SetUp() override { init(); } + + void TearDown() override { fin(); } +}; + +std::string test_storage_name{"1"}; // NOLINT + +TEST_F(mtpt, one_interior) { // NOLINT + constexpr std::size_t ary_size = 100; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (size_t h = 0; h < 1; 
++h) { +#else + for (size_t h = 0; h < 10; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? (ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back(std::make_pair(std::string(1, i), // NOLINT + std::to_string(i))); + } + + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, std::string_view(k), + v.data(), v.size()), + status::OK); + } + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::string k(1, ary_size); + std::vector> + tuple_list{}; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, k, + scan_endpoint::INCLUSIVE, tuple_list); + ASSERT_EQ(tuple_list.size(), ary_size); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + + destroy(); + } +} + +TEST_F(mtpt, one_interior_shuffle) { // NOLINT + constexpr std::size_t ary_size = 100; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (size_t h = 0; h < 1; ++h) { +#else + for (size_t h = 0; h < 10; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back(std::make_pair(std::string(1, i), // NOLINT + std::to_string(i))); + } + + std::random_device seed_gen{}; + std::mt19937 engine(seed_gen()); + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + std::shuffle(kv.begin(), kv.end(), engine); + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, std::string_view(k), + v.data(), v.size()), + status::OK); + } + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::string k(1, ary_size); + std::vector> + tuple_list{}; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, k, + scan_endpoint::INCLUSIVE, tuple_list); + ASSERT_EQ(tuple_list.size(), ary_size); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + + destroy(); + } +} + +} // namespace yakushima::testing diff --git a/test/multi_thread/put/multi_thread_put_one_interior_two_border_test.cpp b/test/multi_thread/put/multi_thread_put_one_interior_two_border_test.cpp index fc5a92e..e8804b7 100644 --- a/test/multi_thread/put/multi_thread_put_one_interior_two_border_test.cpp +++ b/test/multi_thread/put/multi_thread_put_one_interior_two_border_test.cpp @@ -1,168 +1,168 @@ -/** - * @file multi_thread_put_test.cpp - */ - -#include -#include -#include - -#include "gtest/gtest.h" - -#include "kvs.h" - -using namespace yakushima; - -namespace yakushima::testing { - -class mtpt : public ::testing::Test { - void SetUp() override { init(); } - - void TearDown() override { fin(); } -}; - -std::string test_storage_name{"1"}; // NOLINT - -TEST_F(mtpt, one_interior) { // NOLINT - - constexpr std::size_t ary_size = key_slice_length + 1; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 100; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? 
(ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back( - std::make_pair(std::string(1, 'a' + i), // NOLINT - std::to_string(i))); - } - - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, std::string_view(k), - v.data(), v.size()), - status::OK); - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> - tuple_list{}; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - - destroy(); - } -} - -TEST_F(mtpt, one_interior_shuffle) { // NOLINT - constexpr std::size_t ary_size = key_slice_length + 1; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 100; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? (ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back( - std::make_pair(std::string(1, 'a' + i), // NOLINT - std::to_string(i))); - } - - std::random_device seed_gen; - std::mt19937 engine(seed_gen()); - - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - std::shuffle(kv.begin(), kv.end(), engine); - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, std::string_view(k), - v.data(), v.size()), - status::OK); - } - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> - tuple_list{}; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - - destroy(); - } -} - -} // namespace yakushima::testing +/** + * @file multi_thread_put_test.cpp + */ + +#include +#include +#include + +#include "gtest/gtest.h" + +#include "kvs.h" + +using namespace yakushima; + +namespace yakushima::testing { + +class mtpt : public ::testing::Test { + void SetUp() override { init(); } + + void TearDown() override { fin(); } +}; + +std::string test_storage_name{"1"}; // NOLINT + +TEST_F(mtpt, one_interior) { // NOLINT + + constexpr std::size_t ary_size = key_slice_length + 1; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 100; ++h) { +#endif + 
create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? (ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back( + std::make_pair(std::string(1, 'a' + i), // NOLINT + std::to_string(i))); + } + + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, std::string_view(k), + v.data(), v.size()), + status::OK); + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> + tuple_list{}; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + + destroy(); + } +} + +TEST_F(mtpt, one_interior_shuffle) { // NOLINT + constexpr std::size_t ary_size = key_slice_length + 1; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 100; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back( + std::make_pair(std::string(1, 'a' + i), // NOLINT + std::to_string(i))); + } + + std::random_device seed_gen; + std::mt19937 engine(seed_gen()); + + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + std::shuffle(kv.begin(), kv.end(), engine); + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, std::string_view(k), + v.data(), v.size()), + status::OK); + } + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> + tuple_list{}; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + + destroy(); + } +} + +} // namespace yakushima::testing diff --git a/test/multi_thread/put/multi_thread_put_two_border_test.cpp b/test/multi_thread/put/multi_thread_put_two_border_test.cpp index 3577bd9..0106dc0 100644 --- a/test/multi_thread/put/multi_thread_put_two_border_test.cpp +++ b/test/multi_thread/put/multi_thread_put_two_border_test.cpp @@ -1,166 +1,166 @@ -/** - * @file multi_thread_put_test.cpp - */ - -#include -#include -#include - -#include "gtest/gtest.h" - -#include "kvs.h" - -using namespace yakushima; - -namespace yakushima::testing { - -class mtpt : public ::testing::Test { - void SetUp() override { init(); } - - void TearDown() override { fin(); } -}; - -std::string test_storage_name{"1"}; // NOLINT - -TEST_F(mtpt, multi_layer_two_border) { // NOLINT - - constexpr std::size_t ary_size = 15; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 100; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? 
(ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back(std::make_pair(std::string(i, '\0'), - std::to_string(i))); - } - - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, std::string_view(k), - v.data(), v.size()), - status::OK); - } - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> - tuple_list{}; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - - destroy(); - } -} - -TEST_F(mtpt, multi_layer_two_border_shuffle) { // NOLINT - - constexpr std::size_t ary_size = 15; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 30; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? (ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back(std::make_pair(std::string(i, '\0'), - std::to_string(i))); - } - - std::random_device seed_gen; - std::mt19937 engine(seed_gen()); - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - - std::shuffle(kv.begin(), kv.end(), engine); - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, std::string_view(k), - v.data(), v.size()), - status::OK); - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> - tuple_list{}; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - - destroy(); - } -} - -} // namespace yakushima::testing +/** + * @file multi_thread_put_test.cpp + */ + +#include +#include +#include + +#include "gtest/gtest.h" + +#include "kvs.h" + +using namespace yakushima; + +namespace yakushima::testing { + +class mtpt : public ::testing::Test { + void SetUp() override { init(); } + + void TearDown() override { fin(); } +}; + +std::string test_storage_name{"1"}; // NOLINT + +TEST_F(mtpt, multi_layer_two_border) { // NOLINT + + constexpr std::size_t ary_size = 15; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 100; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static 
void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? (ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back(std::make_pair(std::string(i, '\0'), + std::to_string(i))); + } + + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, std::string_view(k), + v.data(), v.size()), + status::OK); + } + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> + tuple_list{}; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + + destroy(); + } +} + +TEST_F(mtpt, multi_layer_two_border_shuffle) { // NOLINT + + constexpr std::size_t ary_size = 15; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 30; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? (ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back(std::make_pair(std::string(i, '\0'), + std::to_string(i))); + } + + std::random_device seed_gen; + std::mt19937 engine(seed_gen()); + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + + std::shuffle(kv.begin(), kv.end(), engine); + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, std::string_view(k), + v.data(), v.size()), + status::OK); + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> + tuple_list{}; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + + destroy(); + } +} + +} // namespace yakushima::testing diff --git a/test/multi_thread/put/readme.md b/test/multi_thread/put/readme.md index 408200e..1e39605 100644 --- a/test/multi_thread/put/readme.md +++ b/test/multi_thread/put/readme.md @@ -1,20 +1,20 @@ -# Test that put / delete operations work in parallel - -* multi_thread_put_one_border_test.cpp - * Test the operations on one border node. -* multi_thread_put_two_border_test.cpp - * Perform put / delete operations in parallel. The state of the tree can range from nothing to two border nodes. 
-* multi_thread_put_one_interior_two_border_test.cpp - * Perform put / delete operations in parallel. The state of the tree can range from nothing to one interior node and two border nodes. -* multi_thread_put_one_interior_many_border_test.cpp - * Perform put / delete operations in parallel. The state of the tree can range from nothing to one interior node and many border nodes. -* multi_thread_put_many_interior_test.cpp - * Perform put / delete operations in parallel. The state of the tree can range from nothing to many interior nodes and many border nodes. - -## Restriction - -Prefix the test file with multi_thread_put_ to avoid duplicate executable names. - -## Todo - -Separate files some file consumes a lot of time. Add kindly documents. +# Test that put / delete operations work in parallel + +* multi_thread_put_one_border_test.cpp + * Test the operations on one border node. +* multi_thread_put_two_border_test.cpp + * Perform put / delete operations in parallel. The state of the tree can range from nothing to two border nodes. +* multi_thread_put_one_interior_two_border_test.cpp + * Perform put / delete operations in parallel. The state of the tree can range from nothing to one interior node and two border nodes. +* multi_thread_put_one_interior_many_border_test.cpp + * Perform put / delete operations in parallel. The state of the tree can range from nothing to one interior node and many border nodes. +* multi_thread_put_many_interior_test.cpp + * Perform put / delete operations in parallel. The state of the tree can range from nothing to many interior nodes and many border nodes. + +## Restriction + +Prefix the test file with multi_thread_put_ to avoid duplicate executable names. + +## Todo + +Separate files some file consumes a lot of time. Add kindly documents. diff --git a/test/multi_thread/put_delete/multi_thread_put_delete_many_interior_test.cpp b/test/multi_thread/put_delete/multi_thread_put_delete_many_interior_test.cpp index c72e551..3e3fcb3 100644 --- a/test/multi_thread/put_delete/multi_thread_put_delete_many_interior_test.cpp +++ b/test/multi_thread/put_delete/multi_thread_put_delete_many_interior_test.cpp @@ -1,121 +1,121 @@ -/** - * @file multi_thread_put_delete_test.cpp - */ - -#include -#include -#include -#include - -#include "gtest/gtest.h" - -#include "kvs.h" - -using namespace yakushima; - -namespace yakushima::testing { - -class mtpdt : public ::testing::Test { - void SetUp() override { init(); } - - void TearDown() override { fin(); } -}; - -std::string test_storage_name{"1"}; // NOLINT - -TEST_F(mtpdt, many_interior) { // NOLINT - /** - * concurrent put/delete in the state between none to many split of interior. - */ - - constexpr std::size_t ary_size = - interior_node::child_length * key_slice_length * 1.4; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 15; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? 
(ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - if (i <= INT8_MAX) { - kv.emplace_back( - std::make_pair(std::string(1, i), // NOLINT - std::to_string(i))); - } else { - kv.emplace_back(std::make_pair( - std::string(i / INT8_MAX, INT8_MAX) + // NOLINT - std::string(1, i % INT8_MAX), // NOLINT - std::to_string(i))); - } - } - - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(remove(token, test_storage_name, k), status::OK); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> - tuple_list; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - ASSERT_EQ(tuple_list.size(), ary_size); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - - destroy(); - } -} - -} // namespace yakushima::testing +/** + * @file multi_thread_put_delete_test.cpp + */ + +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "kvs.h" + +using namespace yakushima; + +namespace yakushima::testing { + +class mtpdt : public ::testing::Test { + void SetUp() override { init(); } + + void TearDown() override { fin(); } +}; + +std::string test_storage_name{"1"}; // NOLINT + +TEST_F(mtpdt, many_interior) { // NOLINT + /** + * concurrent put/delete in the state between none to many split of interior. + */ + + constexpr std::size_t ary_size = + interior_node::child_length * key_slice_length * 1.4; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 15; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
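/* all threads except the last take a full (ary_size / max_thread) chunk of the
                                     index space; the last thread also absorbs the remainder of an uneven
                                     division, e.g. 10 items over 4 threads -> [0,2) [2,4) [4,6) [6,10) */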
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + if (i <= INT8_MAX) { + kv.emplace_back( + std::make_pair(std::string(1, i), // NOLINT + std::to_string(i))); + } else { + kv.emplace_back(std::make_pair( + std::string(i / INT8_MAX, INT8_MAX) + // NOLINT + std::string(1, i % INT8_MAX), // NOLINT + std::to_string(i))); + } + } + + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(remove(token, test_storage_name, k), status::OK); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> + tuple_list; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + ASSERT_EQ(tuple_list.size(), ary_size); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + + destroy(); + } +} + +} // namespace yakushima::testing diff --git a/test/multi_thread/put_delete/multi_thread_put_delete_one_border_test.cpp b/test/multi_thread/put_delete/multi_thread_put_delete_one_border_test.cpp index a3d0902..fa4f18e 100644 --- a/test/multi_thread/put_delete/multi_thread_put_delete_one_border_test.cpp +++ b/test/multi_thread/put_delete/multi_thread_put_delete_one_border_test.cpp @@ -1,254 +1,254 @@ -/** - * @file multi_thread_put_delete_test.cpp - */ - -#include -#include -#include -#include - -#include "gtest/gtest.h" - -#include "kvs.h" - -using namespace yakushima; - -namespace yakushima::testing { - -class mtpdt : public ::testing::Test { - void SetUp() override { init(); } - - void TearDown() override { fin(); } -}; - -std::string test_storage_name{"1"}; // NOLINT - -TEST_F(mtpdt, one_border) { // NOLINT - /** - * concurrent put/delete same null char key slices and different key length to single - * border by multi threads. - */ - constexpr std::size_t ary_size = 9; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 10; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? 
(ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back(std::make_pair(std::string(i, '\0'), - std::to_string(i))); - } - - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(remove(token, test_storage_name, k), status::OK); - } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> - tuple_list; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - - destroy(); - } -} - -TEST_F(mtpdt, one_border_shuffle) { // NOLINT - /** - * concurrent put/delete same null char key slices and different key - * length by shuffled order to single border by multi threads. - */ - constexpr std::size_t ary_size = 9; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 10; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? 
(ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back(std::make_pair(std::string(i, '\0'), - std::to_string(i))); - } - - std::random_device seed_gen; - std::mt19937 engine(seed_gen()); - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - - std::shuffle(kv.begin(), kv.end(), engine); - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(remove(token, test_storage_name, k), status::OK); - } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> - tuple_list; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - - destroy(); - } -} - -TEST_F(mtpdt, one_border_random) { // NOLINT - /** - * concurrent put/delete different char key slices to single border by - * multi threads. - */ - std::size_t th_nm{15}; - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 10; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id) { - // data generation - std::string kv = std::string(1, 'a' + th_id); // NOLINT - - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - - for (std::size_t i = 0; i < 100; ++i) { - ASSERT_EQ(put(token, test_storage_name, kv, kv.data(), - kv.size()), - status::OK); - ASSERT_EQ(remove(token, test_storage_name, kv), status::OK); - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> - tuple_list; // NOLINT - ASSERT_EQ(scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list), - status::OK); - ASSERT_EQ(tuple_list.size(), 0); - - destroy(); - } -} - +/** + * @file multi_thread_put_delete_test.cpp + */ + +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "kvs.h" + +using namespace yakushima; + +namespace yakushima::testing { + +class mtpdt : public ::testing::Test { + void SetUp() override { init(); } + + void TearDown() override { fin(); } +}; + +std::string test_storage_name{"1"}; // NOLINT + +TEST_F(mtpdt, one_border) { // NOLINT + /** + * concurrent put/delete same null char key slices and different key length to single + * border by multi threads. 
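+     * Keys are std::string(i, '\0') for i in [0, ary_size): every key consists
+     * only of NUL bytes, so all of them share one 8-byte key slice and differ
+     * only in length, which is what keeps them inside a single border node.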
+ */ + constexpr std::size_t ary_size = 9; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 10; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? (ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back(std::make_pair(std::string(i, '\0'), + std::to_string(i))); + } + + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(remove(token, test_storage_name, k), status::OK); + } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> + tuple_list; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + + destroy(); + } +} + +TEST_F(mtpdt, one_border_shuffle) { // NOLINT + /** + * concurrent put/delete same null char key slices and different key + * length by shuffled order to single border by multi threads. + */ + constexpr std::size_t ary_size = 9; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 10; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
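/* ary_size is 9, so with 9 or more hardware threads each worker owns exactly
                                     one key; with fewer threads the last worker absorbs the leftover keys */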
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back(std::make_pair(std::string(i, '\0'), + std::to_string(i))); + } + + std::random_device seed_gen; + std::mt19937 engine(seed_gen()); + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + + std::shuffle(kv.begin(), kv.end(), engine); + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(remove(token, test_storage_name, k), status::OK); + } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> + tuple_list; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + + destroy(); + } +} + +TEST_F(mtpdt, one_border_random) { // NOLINT + /** + * concurrent put/delete different char key slices to single border by + * multi threads. + */ + std::size_t th_nm{15}; + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 10; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id) { + // data generation + std::string kv = std::string(1, 'a' + th_id); // NOLINT + + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + + for (std::size_t i = 0; i < 100; ++i) { + ASSERT_EQ(put(token, test_storage_name, kv, kv.data(), + kv.size()), + status::OK); + ASSERT_EQ(remove(token, test_storage_name, kv), status::OK); + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> + tuple_list; // NOLINT + ASSERT_EQ(scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list), + status::OK); + ASSERT_EQ(tuple_list.size(), 0); + + destroy(); + } +} + } // namespace yakushima::testing \ No newline at end of file diff --git a/test/multi_thread/put_delete/multi_thread_put_delete_one_interior_many_border_test.cpp b/test/multi_thread/put_delete/multi_thread_put_delete_one_interior_many_border_test.cpp index 323fe3f..7acfa1a 100644 --- a/test/multi_thread/put_delete/multi_thread_put_delete_one_interior_many_border_test.cpp +++ b/test/multi_thread/put_delete/multi_thread_put_delete_one_interior_many_border_test.cpp @@ -1,212 +1,212 @@ -/** - * @file multi_thread_put_delete_test.cpp - */ - -#include -#include -#include -#include - -#include "gtest/gtest.h" - -#include "kvs.h" - -using namespace yakushima; - -namespace yakushima::testing { - -class mtpdt : public ::testing::Test { - void SetUp() override { init(); } - - void TearDown() override { fin(); } -}; - -std::string test_storage_name{"1"}; // NOLINT - -TEST_F(mtpdt, one_interior_many_border_shuffle) { // NOLINT - /** - * concurrent put/delete in the state between none to split of 
interior, which is using - * shuffled data. - */ - - constexpr std::size_t ary_size = - interior_node::child_length * key_slice_length / 2; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 10; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? (ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back(std::make_pair(std::string(1, i), // NOLINT - std::to_string(i))); - } - - std::random_device seed_gen; - std::mt19937 engine(seed_gen()); - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - - std::shuffle(kv.begin(), kv.end(), engine); - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(remove(token, test_storage_name, k), status::OK); - } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> - tuple_list; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - ASSERT_EQ(tuple_list.size(), ary_size); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - - destroy(); - } -} - -TEST_F(mtpdt, second_layer_one_interior_many_border_shuffle) { // NOLINT - /** - * concurrent put/delete in the state between none to split of interior, which is using - * shuffled data. - */ - constexpr std::size_t ary_size = - interior_node::child_length * key_slice_length / 2; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 10; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? 
(ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back( - std::make_pair(std::string(8, INT8_MAX) + - std::string(1, i), // NOLINT - std::to_string(i))); - } - - std::random_device seed_gen; - std::mt19937 engine(seed_gen()); - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - - std::shuffle(kv.begin(), kv.end(), engine); - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(remove(token, test_storage_name, k), status::OK); - } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> - tuple_list; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - ASSERT_EQ(tuple_list.size(), ary_size); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - - destroy(); - } -} - -} // namespace yakushima::testing +/** + * @file multi_thread_put_delete_test.cpp + */ + +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "kvs.h" + +using namespace yakushima; + +namespace yakushima::testing { + +class mtpdt : public ::testing::Test { + void SetUp() override { init(); } + + void TearDown() override { fin(); } +}; + +std::string test_storage_name{"1"}; // NOLINT + +TEST_F(mtpdt, one_interior_many_border_shuffle) { // NOLINT + /** + * concurrent put/delete in the state between none to split of interior, which is using + * shuffled data. + */ + + constexpr std::size_t ary_size = + interior_node::child_length * key_slice_length / 2; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 10; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
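/* ary_size = interior_node::child_length * key_slice_length / 2 single-byte keys:
                                     enough distinct one-byte slices to split border nodes repeatedly under a
                                     single interior node, matching the scenario described above */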
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back(std::make_pair(std::string(1, i), // NOLINT + std::to_string(i))); + } + + std::random_device seed_gen; + std::mt19937 engine(seed_gen()); + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + + std::shuffle(kv.begin(), kv.end(), engine); + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(remove(token, test_storage_name, k), status::OK); + } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> + tuple_list; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + ASSERT_EQ(tuple_list.size(), ary_size); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + + destroy(); + } +} + +TEST_F(mtpdt, second_layer_one_interior_many_border_shuffle) { // NOLINT + /** + * concurrent put/delete in the state between none to split of interior, which is using + * shuffled data. + */ + constexpr std::size_t ary_size = + interior_node::child_length * key_slice_length / 2; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 10; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
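/* every key is std::string(8, INT8_MAX) + one byte: the identical 8-byte prefix
                                     occupies the first layer, and the varying last byte builds the
                                     one-interior / many-border structure in the second layer */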
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back( + std::make_pair(std::string(8, INT8_MAX) + + std::string(1, i), // NOLINT + std::to_string(i))); + } + + std::random_device seed_gen; + std::mt19937 engine(seed_gen()); + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + + std::shuffle(kv.begin(), kv.end(), engine); + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(remove(token, test_storage_name, k), status::OK); + } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> + tuple_list; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + ASSERT_EQ(tuple_list.size(), ary_size); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + + destroy(); + } +} + +} // namespace yakushima::testing diff --git a/test/multi_thread/put_delete/multi_thread_put_delete_test.cpp b/test/multi_thread/put_delete/multi_thread_put_delete_test.cpp index 0566098..b71560f 100644 --- a/test/multi_thread/put_delete/multi_thread_put_delete_test.cpp +++ b/test/multi_thread/put_delete/multi_thread_put_delete_test.cpp @@ -1,167 +1,167 @@ -/** - * @file multi_thread_put_delete_test.cpp - */ - -#include -#include -#include -#include -#include - -#include "kvs.h" - -#include "gtest/gtest.h" - -#include "glog/logging.h" - -using namespace yakushima; - -namespace yakushima::testing { - -class mtpdt : public ::testing::Test { -public: - static void call_once_f() { - google::InitGoogleLogging("yakushima-test-multi_thread-put_delete-" - "multi_thread_put_delete_test"); - FLAGS_stderrthreshold = 0; - } - void SetUp() override { - std::call_once(init_, call_once_f); - init(); - } - - void TearDown() override { fin(); } - -private: - std::once_flag init_; -}; - -std::string test_storage_name{"1"}; // NOLINT - -TEST_F(mtpdt, multi_layer_many_interior_shuffle) { // NOLINT - /** - * multi-layer put-delete test. - */ - - constexpr std::size_t ary_size = - interior_node::child_length * key_slice_length * 10; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - - LOG(INFO) << "thread num is " << th_nm; - LOG(INFO) << "ary size is " << ary_size; - -#ifndef NDEBUG - for (size_t h = 0; h < 1; ++h) { -#else - for (size_t h = 0; h < 10; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void - work(std::size_t th_id, std::size_t max_thread, - [[maybe_unused]] std::atomic* join_num) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? 
(ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - if (i <= INT8_MAX) { - kv.emplace_back( - std::make_pair(std::string(1, i), // NOLINT - std::to_string(i))); - } else { - kv.emplace_back(std::make_pair( - std::string(i / INT8_MAX, INT8_MAX) + // NOLINT - std::string(1, i % INT8_MAX), // NOLINT - std::to_string(i))); - } - } - - std::random_device seed_gen{}; - std::mt19937 engine(seed_gen()); - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - - std::shuffle(kv.begin(), kv.end(), engine); - for (auto& i : kv) { - std::string v(std::get<1>(i)); - std::string k(std::get<0>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (status::OK != ret) { - ret = put(token, test_storage_name, k, v.data(), - v.size()); - ASSERT_EQ(status::OK, ret); - std::abort(); - } - } - //(*join_num)++; - //while (join_num->load(std::memory_order_acquire) < max_thread) { - // _mm_pause(); - //} - for (auto& i : kv) { - std::string v(std::get<1>(i)); - std::string k(std::get<0>(i)); - status ret = remove(token, test_storage_name, k); - if (status::OK != ret) { - ret = remove(token, test_storage_name, k); - ASSERT_EQ(status::OK, ret); - std::abort(); - } - } - //(*join_num)++; - //while (join_num->load(std::memory_order_acquire) < - // max_thread * 2) { - // _mm_pause(); - //} - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (status::OK != ret) { - ASSERT_EQ(status::OK, ret); - std::abort(); - } - } - - leave(token); - } - }; - - std::vector thv; - std::atomic join_num{0}; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm, &join_num); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> - tuple_list; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - ASSERT_EQ(tuple_list.size(), ary_size); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - - destroy(); - } -} - -} // namespace yakushima::testing +/** + * @file multi_thread_put_delete_test.cpp + */ + +#include +#include +#include +#include +#include + +#include "kvs.h" + +#include "gtest/gtest.h" + +#include "glog/logging.h" + +using namespace yakushima; + +namespace yakushima::testing { + +class mtpdt : public ::testing::Test { +public: + static void call_once_f() { + google::InitGoogleLogging("yakushima-test-multi_thread-put_delete-" + "multi_thread_put_delete_test"); + FLAGS_stderrthreshold = 0; + } + void SetUp() override { + std::call_once(init_, call_once_f); + init(); + } + + void TearDown() override { fin(); } + +private: + std::once_flag init_; +}; + +std::string test_storage_name{"1"}; // NOLINT + +TEST_F(mtpdt, multi_layer_many_interior_shuffle) { // NOLINT + /** + * multi-layer put-delete test. 
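+     * ary_size = interior_node::child_length * key_slice_length * 10. Keys for
+     * i <= INT8_MAX are a single byte; larger i yield a run of 0x7f bytes plus
+     * one trailing byte, so the longest keys exceed one 8-byte slice and reach
+     * lower layers. Shuffled put/remove/put cycles therefore drive splits across
+     * several layers and many interior nodes.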
+ */ + + constexpr std::size_t ary_size = + interior_node::child_length * key_slice_length * 10; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + + LOG(INFO) << "thread num is " << th_nm; + LOG(INFO) << "ary size is " << ary_size; + +#ifndef NDEBUG + for (size_t h = 0; h < 1; ++h) { +#else + for (size_t h = 0; h < 10; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void + work(std::size_t th_id, std::size_t max_thread, + [[maybe_unused]] std::atomic* join_num) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? (ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + if (i <= INT8_MAX) { + kv.emplace_back( + std::make_pair(std::string(1, i), // NOLINT + std::to_string(i))); + } else { + kv.emplace_back(std::make_pair( + std::string(i / INT8_MAX, INT8_MAX) + // NOLINT + std::string(1, i % INT8_MAX), // NOLINT + std::to_string(i))); + } + } + + std::random_device seed_gen{}; + std::mt19937 engine(seed_gen()); + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + + std::shuffle(kv.begin(), kv.end(), engine); + for (auto& i : kv) { + std::string v(std::get<1>(i)); + std::string k(std::get<0>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (status::OK != ret) { + ret = put(token, test_storage_name, k, v.data(), + v.size()); + ASSERT_EQ(status::OK, ret); + std::abort(); + } + } + //(*join_num)++; + //while (join_num->load(std::memory_order_acquire) < max_thread) { + // _mm_pause(); + //} + for (auto& i : kv) { + std::string v(std::get<1>(i)); + std::string k(std::get<0>(i)); + status ret = remove(token, test_storage_name, k); + if (status::OK != ret) { + ret = remove(token, test_storage_name, k); + ASSERT_EQ(status::OK, ret); + std::abort(); + } + } + //(*join_num)++; + //while (join_num->load(std::memory_order_acquire) < + // max_thread * 2) { + // _mm_pause(); + //} + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (status::OK != ret) { + ASSERT_EQ(status::OK, ret); + std::abort(); + } + } + + leave(token); + } + }; + + std::vector thv; + std::atomic join_num{0}; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm, &join_num); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> + tuple_list; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + ASSERT_EQ(tuple_list.size(), ary_size); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + + destroy(); + } +} + +} // namespace yakushima::testing diff --git a/test/multi_thread/put_delete/multi_thread_put_delete_two_border_test.cpp b/test/multi_thread/put_delete/multi_thread_put_delete_two_border_test.cpp index 6db12c5..074590c 100644 --- a/test/multi_thread/put_delete/multi_thread_put_delete_two_border_test.cpp +++ b/test/multi_thread/put_delete/multi_thread_put_delete_two_border_test.cpp @@ -1,579 +1,579 @@ -/** - * @file multi_thread_put_delete_test.cpp - */ - -#include -#include -#include -#include - -#include "gtest/gtest.h" - -#include "kvs.h" - -using 
namespace yakushima; - -namespace yakushima::testing { - -class mtpdt : public ::testing::Test { - static void call_once_f() { - google::InitGoogleLogging("yakushima-test-multi_thread-put_delete-" - "multi_thread_put_delete_two_border_test"); - } - - void SetUp() override { - std::call_once(init_google_, call_once_f); - init(); - } - - void TearDown() override { fin(); } - -private: - static inline std::once_flag init_google_; // NOLINT -}; - -std::string test_storage_name{"1"}; // NOLINT - -TEST_F(mtpdt, two_layer_two_border) { // NOLINT - /** - * multiple put same null char key whose length is different each other - * against multiple border, which is across some layer. - */ - constexpr std::size_t ary_size = 15; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 10; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? (ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back(std::make_pair(std::string(i, '\0'), - std::to_string(i))); - } - - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(remove(token, test_storage_name, k), status::OK); - } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> - tuple_list; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - - destroy(); - } -} - -TEST_F(mtpdt, two_layer_two_border_shuffle) { // NOLINT - /** - * multiple put same null char key whose length is different each other against multiple - * border, which is across some layer. use shuffle data. - */ - constexpr std::size_t ary_size = 15; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 10; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? 
(ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back(std::make_pair(std::string(i, '\0'), - std::to_string(i))); - } - - std::random_device seed_gen{}; - std::mt19937 engine(seed_gen()); - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - - std::shuffle(kv.begin(), kv.end(), engine); - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(remove(token, test_storage_name, k), status::OK); - } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> - tuple_list; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - - destroy(); - } -} - -TEST_F(mtpdt, concurrent_put_delete_between_none_and_interior) { // NOLINT - /** - * The number of puts that can be split border only once and the deletes are repeated in - * multiple threads. - */ - constexpr std::size_t ary_size = key_slice_length + 1; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 10; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? 
(ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back(std::make_pair(std::string(1, i), // NOLINT - std::to_string(i))); - } - - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(remove(token, test_storage_name, k), status::OK); - } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> - tuple_list; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - ASSERT_EQ(tuple_list.size(), ary_size); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - destroy(); - } -} - -TEST_F(mtpdt, // NOLINT - concurrent_put_delete_between_none_and_interior_in_second_layer) { // NOLINT - /** - * The number of puts that can be split only once and the deletes are repeated in - * multiple threads. This situations in second layer. - */ - - constexpr std::size_t ary_size = key_slice_length + 1; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 10; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? 
(ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back( - std::make_pair(std::string(8, INT8_MAX) + - std::string(1, i), // NOLINT - std::to_string(i))); - } - - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = remove(token, test_storage_name, k); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> - tuple_list; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - ASSERT_EQ(tuple_list.size(), ary_size); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - destroy(); - } -} - -TEST_F(mtpdt, // NOLINT - concurrent_put_delete_between_none_and_interior_in_first_layer) { // NOLINT - /** - * The number of puts that can be split only once and the deletes are - * repeated in multiple threads. Use shuffled data. - */ - constexpr std::size_t ary_size = key_slice_length + 1; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 10; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? 
(ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back(std::make_pair(std::string(1, i), // NOLINT - std::to_string(i))); - } - - std::random_device seed_gen{}; - std::mt19937 engine(seed_gen()); - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); }; - - std::shuffle(kv.begin(), kv.end(), engine); - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(remove(token, test_storage_name, k), status::OK); - } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> - tuple_list; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - ASSERT_EQ(tuple_list.size(), ary_size); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - destroy(); - } -} - -TEST_F( // NOLINT - mtpdt, - concurrent_put_delete_between_none_and_interior_in_second_layer_with_shuffle) { // NOLINT - /** - * The number of puts that can be split only once and the deletes are repeated in - * multiple threads. Use shuffled data. - */ - constexpr std::size_t ary_size = key_slice_length + 1; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 10; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? 
(ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back( - std::make_pair(std::string(8, INT8_MAX) + - std::string(1, i), // NOLINT - std::to_string(i))); - } - - std::random_device seed_gen{}; - std::mt19937 engine(seed_gen()); - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(remove(token, test_storage_name, k), status::OK); - } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> - tuple_list; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - ASSERT_EQ(tuple_list.size(), ary_size); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v{std::to_string(j)}; - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - destroy(); - } -} - +/** + * @file multi_thread_put_delete_test.cpp + */ + +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "kvs.h" + +using namespace yakushima; + +namespace yakushima::testing { + +class mtpdt : public ::testing::Test { + static void call_once_f() { + google::InitGoogleLogging("yakushima-test-multi_thread-put_delete-" + "multi_thread_put_delete_two_border_test"); + } + + void SetUp() override { + std::call_once(init_google_, call_once_f); + init(); + } + + void TearDown() override { fin(); } + +private: + static inline std::once_flag init_google_; // NOLINT +}; + +std::string test_storage_name{"1"}; // NOLINT + +TEST_F(mtpdt, two_layer_two_border) { // NOLINT + /** + * multiple put same null char key whose length is different each other + * against multiple border, which is across some layer. + */ + constexpr std::size_t ary_size = 15; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 10; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
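/* keys are std::string(i, '\0') with lengths 0..14; the ones longer than 8 bytes
                                     do not fit a single key slice, so the structure spans a second layer as
                                     described in the test comment above */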
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back(std::make_pair(std::string(i, '\0'), + std::to_string(i))); + } + + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(remove(token, test_storage_name, k), status::OK); + } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> + tuple_list; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + + destroy(); + } +} + +TEST_F(mtpdt, two_layer_two_border_shuffle) { // NOLINT + /** + * multiple put same null char key whose length is different each other against multiple + * border, which is across some layer. use shuffle data. + */ + constexpr std::size_t ary_size = 15; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 10; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
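                                 // A worked example of the per-thread block bound above, with
                                 // ary_size = 15 (as declared for this test) and assuming
                                 // max_thread = 4: ary_size / max_thread = 3, so the workers own
                                 // [0, 3), [3, 6), [6, 9) and, through the ": ary_size" branch,
                                 // [9, 15); the last worker always absorbs the remainder.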
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back(std::make_pair(std::string(i, '\0'), + std::to_string(i))); + } + + std::random_device seed_gen{}; + std::mt19937 engine(seed_gen()); + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + + std::shuffle(kv.begin(), kv.end(), engine); + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(remove(token, test_storage_name, k), status::OK); + } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> + tuple_list; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + + destroy(); + } +} + +TEST_F(mtpdt, concurrent_put_delete_between_none_and_interior) { // NOLINT + /** + * The number of puts that can be split border only once and the deletes are repeated in + * multiple threads. + */ + constexpr std::size_t ary_size = key_slice_length + 1; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 10; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back(std::make_pair(std::string(1, i), // NOLINT + std::to_string(i))); + } + + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(remove(token, test_storage_name, k), status::OK); + } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> + tuple_list; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + ASSERT_EQ(tuple_list.size(), ary_size); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + destroy(); + } +} + +TEST_F(mtpdt, // NOLINT + concurrent_put_delete_between_none_and_interior_in_second_layer) { // NOLINT + /** + * The number of puts that can be split only once and the deletes are repeated in + * multiple threads. This situations in second layer. + */ + + constexpr std::size_t ary_size = key_slice_length + 1; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 10; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
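                                    // With ary_size = key_slice_length + 1 the workers insert one more
                                    // key than a single border node can hold, so the tree is presumably
                                    // driven back and forth across the first split: a lone border node
                                    // growing into one interior node over two borders during the puts,
                                    // and emptying out again as the keys are removed.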
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back( + std::make_pair(std::string(8, INT8_MAX) + + std::string(1, i), // NOLINT + std::to_string(i))); + } + + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = remove(token, test_storage_name, k); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> + tuple_list; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + ASSERT_EQ(tuple_list.size(), ary_size); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + destroy(); + } +} + +TEST_F(mtpdt, // NOLINT + concurrent_put_delete_between_none_and_interior_in_first_layer) { // NOLINT + /** + * The number of puts that can be split only once and the deletes are + * repeated in multiple threads. Use shuffled data. + */ + constexpr std::size_t ary_size = key_slice_length + 1; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 10; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
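                                    // Note: the fixed std::string(8, INT8_MAX) prefix below presumably
                                    // fills one whole 8-byte key slice (Masstree-style trees consume
                                    // keys in 8-byte slices), so every key agrees on the first slice and
                                    // the none-to-interior transition is exercised in the second key
                                    // layer, as the test name states.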
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back(std::make_pair(std::string(1, i), // NOLINT + std::to_string(i))); + } + + std::random_device seed_gen{}; + std::mt19937 engine(seed_gen()); + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); }; + + std::shuffle(kv.begin(), kv.end(), engine); + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(remove(token, test_storage_name, k), status::OK); + } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> + tuple_list; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + ASSERT_EQ(tuple_list.size(), ary_size); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + destroy(); + } +} + +TEST_F( // NOLINT + mtpdt, + concurrent_put_delete_between_none_and_interior_in_second_layer_with_shuffle) { // NOLINT + /** + * The number of puts that can be split only once and the deletes are repeated in + * multiple threads. Use shuffled data. + */ + constexpr std::size_t ary_size = key_slice_length + 1; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 10; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back( + std::make_pair(std::string(8, INT8_MAX) + + std::string(1, i), // NOLINT + std::to_string(i))); + } + + std::random_device seed_gen{}; + std::mt19937 engine(seed_gen()); + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(remove(token, test_storage_name, k), status::OK); + } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> + tuple_list; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + ASSERT_EQ(tuple_list.size(), ary_size); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v{std::to_string(j)}; + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + destroy(); + } +} + } // namespace yakushima::testing \ No newline at end of file diff --git a/test/multi_thread/put_delete/readme.md b/test/multi_thread/put_delete/readme.md index 359cb16..e41bcd8 100644 --- a/test/multi_thread/put_delete/readme.md +++ b/test/multi_thread/put_delete/readme.md @@ -1,22 +1,22 @@ -# Test that put operations work in parallel - -* multi_thread_put_one_border_test.cpp - * Test the operations on one border node. -* multi_thread_put_two_border_test.cpp - * Perform put operations in parallel. The state of the tree can range from nothing to two border nodes. -* multi_thread_put_one_interior_two_border_test.cpp - * Perform put operations in parallel. The state of the tree can range from nothing to one interior node and two border nodes. -* multi_thread_put_one_interior_many_border_test.cpp - * Perform put operations in parallel. The state of the tree can range from nothing to one interior node and many border nodes. -* multi_thread_put_many_interior_test.cpp - * Perform put operations in parallel. The state of the tree can range from nothing to many interior nodes and many border nodes. -* multi_thread_put_test.cpp - * Others. - -## Restriction - -Prefix the test file with multi_thread_put_ to avoid duplicate executable names. - -## Todo - -Separate files some file consumes a lot of time. Add kindly documents. +# Test that put operations work in parallel + +* multi_thread_put_one_border_test.cpp + * Test the operations on one border node. +* multi_thread_put_two_border_test.cpp + * Perform put operations in parallel. The state of the tree can range from nothing to two border nodes. +* multi_thread_put_one_interior_two_border_test.cpp + * Perform put operations in parallel. The state of the tree can range from nothing to one interior node and two border nodes. +* multi_thread_put_one_interior_many_border_test.cpp + * Perform put operations in parallel. The state of the tree can range from nothing to one interior node and many border nodes. +* multi_thread_put_many_interior_test.cpp + * Perform put operations in parallel. 
The state of the tree can range from nothing to many interior nodes and many border nodes. +* multi_thread_put_test.cpp + * Others. + +## Restriction + +Prefix the test file with multi_thread_put_ to avoid duplicate executable names. + +## Todo + +Separate files some file consumes a lot of time. Add kindly documents. diff --git a/test/multi_thread/put_delete_get/multi_thread_put_delete_get_many_interior_test.cpp b/test/multi_thread/put_delete_get/multi_thread_put_delete_get_many_interior_test.cpp index 58172a9..bdb3958 100644 --- a/test/multi_thread/put_delete_get/multi_thread_put_delete_get_many_interior_test.cpp +++ b/test/multi_thread/put_delete_get/multi_thread_put_delete_get_many_interior_test.cpp @@ -1,237 +1,237 @@ -/** - * @file multi_thread_put_delete_get_many_interior_test.cpp - */ - -#include -#include -#include -#include - -#include "gtest/gtest.h" - -#include "kvs.h" - -using namespace yakushima; - -namespace yakushima::testing { - -std::string test_storage_name{"1"}; // NOLINT - -class mtpdgt : public ::testing::Test { -protected: - void SetUp() override { - init(); - create_storage(test_storage_name); - } - - void TearDown() override { fin(); } -}; - -TEST_F(mtpdgt, many_interior) { // NOLINT - /** - * concurrent put/delete/get in the state between none to many split of interior. - */ - - constexpr std::size_t ary_size = - interior_node::child_length * key_slice_length * 1.4; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 20; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? 
(ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - if (i <= INT8_MAX) { - kv.emplace_back( - std::make_pair(std::string(1, i), // NOLINT - std::to_string(i))); - } else { - kv.emplace_back(std::make_pair( - std::string(i / INT8_MAX, INT8_MAX) + // NOLINT - std::string(1, i % INT8_MAX), // NOLINT - std::to_string(i))); - } - } - - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - std::pair ret{}; - ASSERT_EQ(status::OK, get(test_storage_name, k, ret)); - ASSERT_EQ(memcmp(std::get<0>(ret), v.data(), v.size()), 0); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(remove(token, test_storage_name, k), status::OK); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> tuple_list; - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - ASSERT_EQ(tuple_list.size(), ary_size); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - - destroy(); - } -} - -TEST_F(mtpdgt, many_interior_shuffle) { // NOLINT - /** - * concurrent put/delete/get in the state between none to many split of interior with - * shuffle. - */ - - constexpr std::size_t ary_size = - interior_node::child_length * key_slice_length * 1.4; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (size_t h = 0; h < 1; ++h) { -#else - for (size_t h = 0; h < 20; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? 
(ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - if (i <= INT8_MAX) { - kv.emplace_back( - std::make_pair(std::string(1, i), // NOLINT - std::to_string(i))); - } else { - kv.emplace_back(std::make_pair( - std::string(i / INT8_MAX, INT8_MAX) + - std::string(1, i % INT8_MAX), // NOLINT - std::to_string(i))); - } - } - - std::random_device seed_gen; - std::mt19937 engine(seed_gen()); - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - - std::shuffle(kv.begin(), kv.end(), engine); - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - std::pair ret{}; - ASSERT_EQ(status::OK, get(test_storage_name, k, ret)); - ASSERT_EQ(memcmp(std::get<0>(ret), v.data(), v.size()), 0); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(remove(token, test_storage_name, k), status::OK); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, std::string_view(k), - v.data(), v.size()), - status::OK); - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> tuple_list; - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - ASSERT_EQ(tuple_list.size(), ary_size); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - - destroy(); - } -} - -} // namespace yakushima::testing +/** + * @file multi_thread_put_delete_get_many_interior_test.cpp + */ + +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "kvs.h" + +using namespace yakushima; + +namespace yakushima::testing { + +std::string test_storage_name{"1"}; // NOLINT + +class mtpdgt : public ::testing::Test { +protected: + void SetUp() override { + init(); + create_storage(test_storage_name); + } + + void TearDown() override { fin(); } +}; + +TEST_F(mtpdgt, many_interior) { // NOLINT + /** + * concurrent put/delete/get in the state between none to many split of interior. + */ + + constexpr std::size_t ary_size = + interior_node::child_length * key_slice_length * 1.4; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 20; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + if (i <= INT8_MAX) { + kv.emplace_back( + std::make_pair(std::string(1, i), // NOLINT + std::to_string(i))); + } else { + kv.emplace_back(std::make_pair( + std::string(i / INT8_MAX, INT8_MAX) + // NOLINT + std::string(1, i % INT8_MAX), // NOLINT + std::to_string(i))); + } + } + + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + std::pair ret{}; + ASSERT_EQ(status::OK, get(test_storage_name, k, ret)); + ASSERT_EQ(memcmp(std::get<0>(ret), v.data(), v.size()), 0); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(remove(token, test_storage_name, k), status::OK); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> tuple_list; + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + ASSERT_EQ(tuple_list.size(), ary_size); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + + destroy(); + } +} + +TEST_F(mtpdgt, many_interior_shuffle) { // NOLINT + /** + * concurrent put/delete/get in the state between none to many split of interior with + * shuffle. + */ + + constexpr std::size_t ary_size = + interior_node::child_length * key_slice_length * 1.4; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (size_t h = 0; h < 1; ++h) { +#else + for (size_t h = 0; h < 20; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
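                                    // The branch below encodes i <= INT8_MAX as a single byte and larger
                                    // i as i / INT8_MAX bytes of value INT8_MAX followed by i % INT8_MAX;
                                    // for the range used here this appears to keep the lexicographic key
                                    // order aligned with the numeric order of i, which is what lets the
                                    // final scan compare slot j of tuple_list against std::to_string(j).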
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + if (i <= INT8_MAX) { + kv.emplace_back( + std::make_pair(std::string(1, i), // NOLINT + std::to_string(i))); + } else { + kv.emplace_back(std::make_pair( + std::string(i / INT8_MAX, INT8_MAX) + + std::string(1, i % INT8_MAX), // NOLINT + std::to_string(i))); + } + } + + std::random_device seed_gen; + std::mt19937 engine(seed_gen()); + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + + std::shuffle(kv.begin(), kv.end(), engine); + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + std::pair ret{}; + ASSERT_EQ(status::OK, get(test_storage_name, k, ret)); + ASSERT_EQ(memcmp(std::get<0>(ret), v.data(), v.size()), 0); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(remove(token, test_storage_name, k), status::OK); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, std::string_view(k), + v.data(), v.size()), + status::OK); + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> tuple_list; + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + ASSERT_EQ(tuple_list.size(), ary_size); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + + destroy(); + } +} + +} // namespace yakushima::testing diff --git a/test/multi_thread/put_delete_get/multi_thread_put_delete_get_one_border_test.cpp b/test/multi_thread/put_delete_get/multi_thread_put_delete_get_one_border_test.cpp index 4e7f202..2054661 100644 --- a/test/multi_thread/put_delete_get/multi_thread_put_delete_get_one_border_test.cpp +++ b/test/multi_thread/put_delete_get/multi_thread_put_delete_get_one_border_test.cpp @@ -1,220 +1,220 @@ -/** - * @file multi_thread_put_delete_get_one_border_test.cpp - */ - -#include -#include -#include -#include - -#include "gtest/gtest.h" - -#include "kvs.h" - -using namespace yakushima; - -namespace yakushima::testing { - -class mtpdgt : public ::testing::Test { - void SetUp() override { init(); } - - void TearDown() override { fin(); } -}; - -std::string test_storage_name{"1"}; // NOLINT - -TEST_F(mtpdgt, one_border_null_key) { // NOLINT - /** - * concurrent put/delete/get same null char key slices and different key length to - * single border by multi threads. - */ - constexpr std::size_t ary_size = 9; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 50; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? 
(ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back(std::make_pair(std::string(i, '\0'), - std::to_string(i))); - } - - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - - for (std::size_t j = 0; j < 1; ++j) { - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - std::pair ret{}; - ASSERT_EQ(status::OK, - get(test_storage_name, k, ret)); - ASSERT_EQ(memcmp(std::get<0>(ret), v.data(), v.size()), - 0); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(remove(token, test_storage_name, k), - status::OK); - } - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> - tuple_list; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - destroy(); - } -} - -TEST_F(mtpdgt, one_border_null_key_shuffle) { // NOLINT - /** - * test1 variant which is the test using shuffle order data. - */ - - constexpr std::size_t ary_size = 9; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 50; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? 
(ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back(std::make_pair(std::string(i, '\0'), - std::to_string(i))); - } - - std::random_device seed_gen{}; - std::mt19937 engine(seed_gen()); - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - - for (std::size_t j = 0; j < 1; ++j) { - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - std::pair ret{}; - ASSERT_EQ(status::OK, - get(test_storage_name, k, ret)); - ASSERT_EQ(memcmp(std::get<0>(ret), v.data(), v.size()), - 0); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(remove(token, test_storage_name, k), - status::OK); - } - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> - tuple_list; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - destroy(); - } -} - -} // namespace yakushima::testing +/** + * @file multi_thread_put_delete_get_one_border_test.cpp + */ + +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "kvs.h" + +using namespace yakushima; + +namespace yakushima::testing { + +class mtpdgt : public ::testing::Test { + void SetUp() override { init(); } + + void TearDown() override { fin(); } +}; + +std::string test_storage_name{"1"}; // NOLINT + +TEST_F(mtpdgt, one_border_null_key) { // NOLINT + /** + * concurrent put/delete/get same null char key slices and different key length to + * single border by multi threads. + */ + constexpr std::size_t ary_size = 9; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 50; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
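                                    // Here every key is i repetitions of '\0': the slice contents are
                                    // identical and only the key length differs, so all entries should
                                    // stay in a single border node, matching the test name. With
                                    // ary_size = 9 and at least nine hardware threads, ary_size /
                                    // max_thread is 1 and each worker owns exactly one key.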
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back(std::make_pair(std::string(i, '\0'), + std::to_string(i))); + } + + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + + for (std::size_t j = 0; j < 1; ++j) { + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + std::pair ret{}; + ASSERT_EQ(status::OK, + get(test_storage_name, k, ret)); + ASSERT_EQ(memcmp(std::get<0>(ret), v.data(), v.size()), + 0); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(remove(token, test_storage_name, k), + status::OK); + } + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> + tuple_list; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + destroy(); + } +} + +TEST_F(mtpdgt, one_border_null_key_shuffle) { // NOLINT + /** + * test1 variant which is the test using shuffle order data. + */ + + constexpr std::size_t ary_size = 9; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 50; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back(std::make_pair(std::string(i, '\0'), + std::to_string(i))); + } + + std::random_device seed_gen{}; + std::mt19937 engine(seed_gen()); + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + + for (std::size_t j = 0; j < 1; ++j) { + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + std::pair ret{}; + ASSERT_EQ(status::OK, + get(test_storage_name, k, ret)); + ASSERT_EQ(memcmp(std::get<0>(ret), v.data(), v.size()), + 0); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(remove(token, test_storage_name, k), + status::OK); + } + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> + tuple_list; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + destroy(); + } +} + +} // namespace yakushima::testing diff --git a/test/multi_thread/put_delete_get/multi_thread_put_delete_get_one_interior_many_border_test.cpp b/test/multi_thread/put_delete_get/multi_thread_put_delete_get_one_interior_many_border_test.cpp index a8b9e79..394c256 100644 --- a/test/multi_thread/put_delete_get/multi_thread_put_delete_get_one_interior_many_border_test.cpp +++ b/test/multi_thread/put_delete_get/multi_thread_put_delete_get_one_interior_many_border_test.cpp @@ -1,136 +1,136 @@ -/** - * @file multi_thread_put_delete_get_one_interior_many_border_test.cpp - */ - -#include -#include -#include -#include - -#include "gtest/gtest.h" - -#include "kvs.h" - -using namespace yakushima; - -namespace yakushima::testing { - -std::string test_storage_name{"1"}; // NOLINT - -class mtpdgt : public ::testing::Test { -protected: - void SetUp() override { init(); } - - void TearDown() override { fin(); } -}; - -TEST_F(mtpdgt, many_interior_many_border_shuffle) { // NOLINT - /** - * concurrent put/delete/get in the state between none to split of interior, which is - * using shuffled data. - */ - constexpr std::size_t ary_size = - interior_node::child_length * key_slice_length / 2; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 10; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? 
(ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - if (i <= INT8_MAX) { - kv.emplace_back( - std::make_pair(std::string(1, i), // NOLINT - std::to_string(i))); - } else { - kv.emplace_back(std::make_pair( - std::string(i / INT8_MAX, INT8_MAX) + // NOLINT - std::string(1, i), // NOLINT - std::to_string(i))); - } - } - - std::random_device seed_gen{}; - std::mt19937 engine(seed_gen()); - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - - std::shuffle(kv.begin(), kv.end(), engine); - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - std::pair ret{}; - ASSERT_EQ(status::OK, get(test_storage_name, k, ret)); - ASSERT_EQ(memcmp(std::get<0>(ret), v.data(), v.size()), 0); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(remove(token, test_storage_name, k), status::OK); - } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> tuple_list; - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - ASSERT_EQ(tuple_list.size(), ary_size); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - - destroy(); - } -} - -} // namespace yakushima::testing +/** + * @file multi_thread_put_delete_get_one_interior_many_border_test.cpp + */ + +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "kvs.h" + +using namespace yakushima; + +namespace yakushima::testing { + +std::string test_storage_name{"1"}; // NOLINT + +class mtpdgt : public ::testing::Test { +protected: + void SetUp() override { init(); } + + void TearDown() override { fin(); } +}; + +TEST_F(mtpdgt, many_interior_many_border_shuffle) { // NOLINT + /** + * concurrent put/delete/get in the state between none to split of interior, which is + * using shuffled data. + */ + constexpr std::size_t ary_size = + interior_node::child_length * key_slice_length / 2; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 10; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
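                                    // Sizing note: interior_node::child_length * key_slice_length / 2
                                    // keys spread over borders of key_slice_length entries fill roughly
                                    // half of one interior node's fan-out, so the tree presumably grows
                                    // to a single interior node over many borders without splitting the
                                    // interior, as the enclosing file name suggests.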
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + if (i <= INT8_MAX) { + kv.emplace_back( + std::make_pair(std::string(1, i), // NOLINT + std::to_string(i))); + } else { + kv.emplace_back(std::make_pair( + std::string(i / INT8_MAX, INT8_MAX) + // NOLINT + std::string(1, i), // NOLINT + std::to_string(i))); + } + } + + std::random_device seed_gen{}; + std::mt19937 engine(seed_gen()); + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + + std::shuffle(kv.begin(), kv.end(), engine); + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + std::pair ret{}; + ASSERT_EQ(status::OK, get(test_storage_name, k, ret)); + ASSERT_EQ(memcmp(std::get<0>(ret), v.data(), v.size()), 0); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(remove(token, test_storage_name, k), status::OK); + } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> tuple_list; + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + ASSERT_EQ(tuple_list.size(), ary_size); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + + destroy(); + } +} + +} // namespace yakushima::testing diff --git a/test/multi_thread/put_delete_get/multi_thread_put_delete_get_one_interior_two_border_test.cpp b/test/multi_thread/put_delete_get/multi_thread_put_delete_get_one_interior_two_border_test.cpp index 784d3a9..857b2e2 100644 --- a/test/multi_thread/put_delete_get/multi_thread_put_delete_get_one_interior_two_border_test.cpp +++ b/test/multi_thread/put_delete_get/multi_thread_put_delete_get_one_interior_two_border_test.cpp @@ -1,232 +1,232 @@ -/** - * @file multi_thread_put_delete_get_one_interior_two_border_test.cpp - */ - -#include -#include -#include -#include - -#include "gtest/gtest.h" - -#include "kvs.h" - -using namespace yakushima; - -namespace yakushima::testing { - -class mtpdgt : public ::testing::Test { - void SetUp() override { init(); } - - void TearDown() override { fin(); } -}; - -std::string test_storage_name{"1"}; // NOLINT - -TEST_F(mtpdgt, one_interior_two_border) { // NOLINT - /** - * The number of puts that can be split only once and the deletes are repeated in - * multiple threads. 
- */ - - constexpr std::size_t ary_size = key_slice_length + 1; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 100; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? (ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back(std::make_pair(std::string(1, i), // NOLINT - std::to_string(i))); - } - - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - -#ifndef NDEBUG - for (std::size_t j = 0; j < 1; ++j) { -#else - for (std::size_t j = 0; j < 10; ++j) { -#endif - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - std::pair ret{}; - ASSERT_EQ(status::OK, - get(test_storage_name, k, ret)); - ASSERT_EQ(memcmp(std::get<0>(ret), v.data(), v.size()), - 0); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(remove(token, test_storage_name, k), - status::OK); - } - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> tuple_list; - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - ASSERT_EQ(tuple_list.size(), ary_size); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - - destroy(); - } -} - -TEST_F(mtpdgt, one_interior_two_border_shuffle) { // NOLINT - /** - * The number of puts that can be split only once and the deletes are repeated in - * multiple threads. Use shuffled data. - */ - - constexpr std::size_t ary_size = key_slice_length + 1; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 100; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? 
(ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back(std::make_pair(std::string(1, i), // NOLINT - std::to_string(i))); - } - - std::random_device seed_gen{}; - std::mt19937 engine(seed_gen()); - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - -#ifndef NDEBUG - for (std::size_t j = 0; j < 1; ++j) { -#else - for (std::size_t j = 0; j < 10; ++j) { -#endif - std::shuffle(kv.begin(), kv.end(), engine); - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - std::pair ret{}; - ASSERT_EQ(status::OK, - get(test_storage_name, k, ret)); - ASSERT_EQ(memcmp(std::get<0>(ret), v.data(), v.size()), - 0); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(remove(token, test_storage_name, k), - status::OK); - } - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> tuple_list; - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - ASSERT_EQ(tuple_list.size(), ary_size); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - destroy(); - } -} - -} // namespace yakushima::testing +/** + * @file multi_thread_put_delete_get_one_interior_two_border_test.cpp + */ + +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "kvs.h" + +using namespace yakushima; + +namespace yakushima::testing { + +class mtpdgt : public ::testing::Test { + void SetUp() override { init(); } + + void TearDown() override { fin(); } +}; + +std::string test_storage_name{"1"}; // NOLINT + +TEST_F(mtpdgt, one_interior_two_border) { // NOLINT + /** + * The number of puts that can be split only once and the deletes are repeated in + * multiple threads. + */ + + constexpr std::size_t ary_size = key_slice_length + 1; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 100; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back(std::make_pair(std::string(1, i), // NOLINT + std::to_string(i))); + } + + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + +#ifndef NDEBUG + for (std::size_t j = 0; j < 1; ++j) { +#else + for (std::size_t j = 0; j < 10; ++j) { +#endif + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + std::pair ret{}; + ASSERT_EQ(status::OK, + get(test_storage_name, k, ret)); + ASSERT_EQ(memcmp(std::get<0>(ret), v.data(), v.size()), + 0); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(remove(token, test_storage_name, k), + status::OK); + } + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> tuple_list; + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + ASSERT_EQ(tuple_list.size(), ary_size); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + + destroy(); + } +} + +TEST_F(mtpdgt, one_interior_two_border_shuffle) { // NOLINT + /** + * The number of puts that can be split only once and the deletes are repeated in + * multiple threads. Use shuffled data. + */ + + constexpr std::size_t ary_size = key_slice_length + 1; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 100; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
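                                    // Each worker below spins on enter(token) with _mm_pause() (the x86
                                    // spin-wait hint) until a Token is granted, runs its put/get/remove
                                    // rounds, and finishes with leave(token); enter/leave appear to
                                    // delimit the session tracked by yakushima's epoch-based garbage
                                    // collection, and every tree operation here happens inside it.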
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back(std::make_pair(std::string(1, i), // NOLINT + std::to_string(i))); + } + + std::random_device seed_gen{}; + std::mt19937 engine(seed_gen()); + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + +#ifndef NDEBUG + for (std::size_t j = 0; j < 1; ++j) { +#else + for (std::size_t j = 0; j < 10; ++j) { +#endif + std::shuffle(kv.begin(), kv.end(), engine); + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + std::pair ret{}; + ASSERT_EQ(status::OK, + get(test_storage_name, k, ret)); + ASSERT_EQ(memcmp(std::get<0>(ret), v.data(), v.size()), + 0); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(remove(token, test_storage_name, k), + status::OK); + } + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> tuple_list; + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + ASSERT_EQ(tuple_list.size(), ary_size); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + destroy(); + } +} + +} // namespace yakushima::testing diff --git a/test/multi_thread/put_delete_get/multi_thread_put_delete_get_test.cpp b/test/multi_thread/put_delete_get/multi_thread_put_delete_get_test.cpp index ec7e649..251ff8c 100644 --- a/test/multi_thread/put_delete_get/multi_thread_put_delete_get_test.cpp +++ b/test/multi_thread/put_delete_get/multi_thread_put_delete_get_test.cpp @@ -1,149 +1,149 @@ -/** - * @file multi_thread_put_delete_get_test.cpp - */ - -#include -#include -#include -#include - -#include "gtest/gtest.h" - -#include "glog/logging.h" - -#include "kvs.h" - -using namespace yakushima; - -namespace yakushima::testing { - -std::string test_storage_name{"1"}; // NOLINT - -class mtpdgt : public ::testing::Test { -public: - static void call_once_f() { - google::InitGoogleLogging("yakushima-test-multi_turead-put_delete_get-" - "multi_thread_put_delete_get_test"); - FLAGS_stderrthreshold = 0; - } - -protected: - void SetUp() override { - init(); - std::call_once(init_, call_once_f); - } - - void TearDown() override { fin(); } - -private: - std::once_flag init_; -}; - -TEST_F(mtpdgt, many_layer_many_interior_many_border) { // NOLINT - /** - * multi-layer put-delete-get test. 
- */ - constexpr std::size_t ary_size = - interior_node::child_length * key_slice_length * 10; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - LOG(INFO) << "ary_size is " << ary_size; - LOG(INFO) << "th_nm is " << th_nm; - LOG(INFO) << "ary_size / th_nm is " << ary_size / th_nm; - -#ifndef NDEBUG - for (size_t h = 0; h < 1; ++h) { - LOG(INFO) << "trial " << h; -#else - for (size_t h = 0; h < 20; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? (ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - if (i <= INT8_MAX) { - kv.emplace_back( - std::make_pair(std::string(1, i), // NOLINT - std::to_string(i))); - } else { - kv.emplace_back(std::make_pair( - std::string(i / INT8_MAX, INT8_MAX) + - std::string(1, i % INT8_MAX), // NOLINT - std::to_string(i))); - } - } - - std::random_device seed_gen{}; - std::mt19937 engine(seed_gen()); - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - - std::shuffle(kv.begin(), kv.end(), engine); - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - std::pair ret{}; - ASSERT_EQ(status::OK, get(test_storage_name, k, ret)); - ASSERT_EQ(memcmp(std::get<0>(ret), v.data(), v.size()), 0); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(remove(token, test_storage_name, k), status::OK); - } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> tuple_list; - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - ASSERT_EQ(tuple_list.size(), ary_size); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - - destroy(); - } -} - +/** + * @file multi_thread_put_delete_get_test.cpp + */ + +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "glog/logging.h" + +#include "kvs.h" + +using namespace yakushima; + +namespace yakushima::testing { + +std::string test_storage_name{"1"}; // NOLINT + +class mtpdgt : public ::testing::Test { +public: + static void call_once_f() { + google::InitGoogleLogging("yakushima-test-multi_turead-put_delete_get-" + "multi_thread_put_delete_get_test"); + FLAGS_stderrthreshold = 0; + } + +protected: + void SetUp() override { + init(); + std::call_once(init_, call_once_f); + } + + void TearDown() override { fin(); } + +private: + std::once_flag init_; +}; + +TEST_F(mtpdgt, many_layer_many_interior_many_border) { // NOLINT + /** + * multi-layer put-delete-get test. 
+ */ + constexpr std::size_t ary_size = + interior_node::child_length * key_slice_length * 10; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + LOG(INFO) << "ary_size is " << ary_size; + LOG(INFO) << "th_nm is " << th_nm; + LOG(INFO) << "ary_size / th_nm is " << ary_size / th_nm; + +#ifndef NDEBUG + for (size_t h = 0; h < 1; ++h) { + LOG(INFO) << "trial " << h; +#else + for (size_t h = 0; h < 20; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? (ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + if (i <= INT8_MAX) { + kv.emplace_back( + std::make_pair(std::string(1, i), // NOLINT + std::to_string(i))); + } else { + kv.emplace_back(std::make_pair( + std::string(i / INT8_MAX, INT8_MAX) + + std::string(1, i % INT8_MAX), // NOLINT + std::to_string(i))); + } + } + + std::random_device seed_gen{}; + std::mt19937 engine(seed_gen()); + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + + std::shuffle(kv.begin(), kv.end(), engine); + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + std::pair ret{}; + ASSERT_EQ(status::OK, get(test_storage_name, k, ret)); + ASSERT_EQ(memcmp(std::get<0>(ret), v.data(), v.size()), 0); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(remove(token, test_storage_name, k), status::OK); + } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> tuple_list; + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + ASSERT_EQ(tuple_list.size(), ary_size); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + + destroy(); + } +} + } // namespace yakushima::testing \ No newline at end of file diff --git a/test/multi_thread/put_delete_get/multi_thread_put_delete_get_two_border_test.cpp b/test/multi_thread/put_delete_get/multi_thread_put_delete_get_two_border_test.cpp index dd4762a..5fa3a35 100644 --- a/test/multi_thread/put_delete_get/multi_thread_put_delete_get_two_border_test.cpp +++ b/test/multi_thread/put_delete_get/multi_thread_put_delete_get_two_border_test.cpp @@ -1,235 +1,235 @@ -/** - * @file multi_thread_put_delete_get_two_border_test.cpp - */ - -#include -#include -#include -#include -#include - -#include "kvs.h" - -#include "glog/logging.h" -#include "gtest/gtest.h" - -using namespace yakushima; - -namespace yakushima::testing { - -class mtpdgt : public ::testing::Test { -public: - static void call_once_f() { - google::InitGoogleLogging( - "yakushima-test-multi_thread-put_delete_get-multi_thread_put_" - 
"delete_get_two_border_test"); - google::InstallFailureSignalHandler(); - FLAGS_stderrthreshold = 0; - } - - void SetUp() override { - init(); - std::call_once(init_, call_once_f); - } - - void TearDown() override { fin(); } - -private: - static inline std::once_flag init_; // NOLINT -}; - -std::string test_storage_name{"1"}; // NOLINT - -TEST_F(mtpdgt, two_border_null_key) { // NOLINT - /** - * multiple put/delete/get same null char key whose length is different - * each other against multiple border, which is across some layer. - */ - constexpr std::size_t ary_size = 15; - std::size_t th_nm{15}; - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 100; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? (ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back(std::make_pair(std::string(i, 'a'), - std::to_string(i))); - } - - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - -#ifndef NDEBUG - for (std::size_t j = 0; j < 1; ++j) { -#else - for (std::size_t j = 0; j < 10; ++j) { -#endif - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - std::pair ret{}; - ASSERT_EQ(status::OK, - get(test_storage_name, k, ret)); - ASSERT_EQ(memcmp(std::get<0>(ret), v.data(), v.size()), - 0); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(remove(token, test_storage_name, k), - status::OK); - } - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> - tuple_list; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - destroy(); - } -} - -TEST_F(mtpdgt, two_border_null_key_shuffle) { // NOLINT - /** - * test3 variant which is the test using shuffle order data. - */ - constexpr std::size_t ary_size = 15; - std::size_t th_nm{15}; - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 100; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? 
(ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back(std::make_pair(std::string(i, 'a'), - std::to_string(i))); - } - - std::random_device seed_gen{}; - std::mt19937 engine(seed_gen()); - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - -#ifndef NDEBUG - for (std::size_t j = 0; j < 1; ++j) { -#else - for (std::size_t j = 0; j < 10; ++j) { -#endif - std::shuffle(kv.begin(), kv.end(), engine); - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - std::pair ret{}; - ASSERT_EQ(status::OK, - get(test_storage_name, k, ret)); - ASSERT_EQ(memcmp(std::get<0>(ret), v.data(), v.size()), - 0); - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(remove(token, test_storage_name, k), - status::OK); - } - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - ASSERT_EQ(put(token, test_storage_name, k, v.data(), - v.size()), - status::OK); - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> - tuple_list; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - destroy(); - } -} - +/** + * @file multi_thread_put_delete_get_two_border_test.cpp + */ + +#include +#include +#include +#include +#include + +#include "kvs.h" + +#include "glog/logging.h" +#include "gtest/gtest.h" + +using namespace yakushima; + +namespace yakushima::testing { + +class mtpdgt : public ::testing::Test { +public: + static void call_once_f() { + google::InitGoogleLogging( + "yakushima-test-multi_thread-put_delete_get-multi_thread_put_" + "delete_get_two_border_test"); + google::InstallFailureSignalHandler(); + FLAGS_stderrthreshold = 0; + } + + void SetUp() override { + init(); + std::call_once(init_, call_once_f); + } + + void TearDown() override { fin(); } + +private: + static inline std::once_flag init_; // NOLINT +}; + +std::string test_storage_name{"1"}; // NOLINT + +TEST_F(mtpdgt, two_border_null_key) { // NOLINT + /** + * multiple put/delete/get same null char key whose length is different + * each other against multiple border, which is across some layer. + */ + constexpr std::size_t ary_size = 15; + std::size_t th_nm{15}; + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 100; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back(std::make_pair(std::string(i, 'a'), + std::to_string(i))); + } + + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + +#ifndef NDEBUG + for (std::size_t j = 0; j < 1; ++j) { +#else + for (std::size_t j = 0; j < 10; ++j) { +#endif + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + std::pair ret{}; + ASSERT_EQ(status::OK, + get(test_storage_name, k, ret)); + ASSERT_EQ(memcmp(std::get<0>(ret), v.data(), v.size()), + 0); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(remove(token, test_storage_name, k), + status::OK); + } + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> + tuple_list; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + destroy(); + } +} + +TEST_F(mtpdgt, two_border_null_key_shuffle) { // NOLINT + /** + * test3 variant which is the test using shuffle order data. + */ + constexpr std::size_t ary_size = 15; + std::size_t th_nm{15}; + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 100; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back(std::make_pair(std::string(i, 'a'), + std::to_string(i))); + } + + std::random_device seed_gen{}; + std::mt19937 engine(seed_gen()); + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + +#ifndef NDEBUG + for (std::size_t j = 0; j < 1; ++j) { +#else + for (std::size_t j = 0; j < 10; ++j) { +#endif + std::shuffle(kv.begin(), kv.end(), engine); + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + std::pair ret{}; + ASSERT_EQ(status::OK, + get(test_storage_name, k, ret)); + ASSERT_EQ(memcmp(std::get<0>(ret), v.data(), v.size()), + 0); + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(remove(token, test_storage_name, k), + status::OK); + } + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + ASSERT_EQ(put(token, test_storage_name, k, v.data(), + v.size()), + status::OK); + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> + tuple_list; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + destroy(); + } +} + } // namespace yakushima::testing \ No newline at end of file diff --git a/test/multi_thread/put_delete_get/readme.md b/test/multi_thread/put_delete_get/readme.md index 9ca88f9..d7acd5f 100644 --- a/test/multi_thread/put_delete_get/readme.md +++ b/test/multi_thread/put_delete_get/readme.md @@ -1,22 +1,22 @@ -# Test that put / delete / get operations work in parallel - -* multi_thread_put_delete_get_one_border_test.cpp - * Test the operations on one border node. -* multi_thread_put_delete_get_two_border_test.cpp - * Perform put / delete / get operations in parallel. The state of the tree can range from nothing to two border nodes. -* multi_thread_put_delete_get_one_interior_two_border_test.cpp - * Perform put / delete / get operations in parallel. The state of the tree can range from nothing to one interior node and two border nodes. -* multi_thread_put_delete_get_one_interior_many_border_test.cpp - * Perform put / delete / get operations in parallel. The state of the tree can range from nothing to one interior node and many border nodes. -* multi_thread_put_delete_get_many_interior_test.cpp - * Perform put / delete / get operations in parallel. The state of the tree can range from nothing to many interior nodes and many border nodes. -* multi_thread_put_delete_get_test.cpp - * Others. - -## Restriction - -Prefix the test file with multi_thread_put_delete_get_ to avoid duplicate executable names. - -## Todo - -Separate files some file consumes a lot of time. Add kindly documents. +# Test that put / delete / get operations work in parallel + +* multi_thread_put_delete_get_one_border_test.cpp + * Test the operations on one border node. +* multi_thread_put_delete_get_two_border_test.cpp + * Perform put / delete / get operations in parallel. 
The state of the tree can range from nothing to two border nodes. +* multi_thread_put_delete_get_one_interior_two_border_test.cpp + * Perform put / delete / get operations in parallel. The state of the tree can range from nothing to one interior node and two border nodes. +* multi_thread_put_delete_get_one_interior_many_border_test.cpp + * Perform put / delete / get operations in parallel. The state of the tree can range from nothing to one interior node and many border nodes. +* multi_thread_put_delete_get_many_interior_test.cpp + * Perform put / delete / get operations in parallel. The state of the tree can range from nothing to many interior nodes and many border nodes. +* multi_thread_put_delete_get_test.cpp + * Others. + +## Restriction + +Prefix the test file with multi_thread_put_delete_get_ to avoid duplicate executable names. + +## Todo + +Separate files some file consumes a lot of time. Add kindly documents. diff --git a/test/multi_thread/put_delete_scan/multi_thread_put_delete_scan_many_interior_test.cpp b/test/multi_thread/put_delete_scan/multi_thread_put_delete_scan_many_interior_test.cpp index 401d7e9..ca50d68 100644 --- a/test/multi_thread/put_delete_scan/multi_thread_put_delete_scan_many_interior_test.cpp +++ b/test/multi_thread/put_delete_scan/multi_thread_put_delete_scan_many_interior_test.cpp @@ -1,314 +1,314 @@ -/** - * @file multi_thread_put_delete_scan_test.cpp - */ - -#include -#include -#include -#include -#include - -#include "kvs.h" - -#include "glog/logging.h" -#include "gtest/gtest.h" - -using namespace yakushima; - -namespace yakushima::testing { - -class multi_thread_put_delete_scan_many_interior_test : public ::testing::Test { -public: - static void call_once_f() { - google::InitGoogleLogging( - "yakushima-test-multi_thread-put_delete_scan-multi_thread_put_" - "delete_scan_many_interior_test"); - FLAGS_stderrthreshold = 0; - } - - void SetUp() override { - init(); - std::call_once(init_, call_once_f); - } - - void TearDown() override { fin(); } - -private: - static inline std::once_flag init_; // NOLINT -}; - -std::string test_storage_name{"1"}; // NOLINT -std::mutex debug_mtx; // NOLINT - -TEST_F(multi_thread_put_delete_scan_many_interior_test, // NOLINT - many_interior) { // NOLINT - /** - * concurrent put/delete/scan in the state between none to many split of - * interior. - */ - - constexpr std::size_t ary_size = - interior_node::child_length * key_slice_length * 1.4; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - - for (std::size_t h = 0; h < 1; ++h) { - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? 
(ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - if (i <= INT8_MAX) { - kv.emplace_back( - std::make_pair(std::string(1, i), // NOLINT - std::to_string(i))); - } else { - kv.emplace_back(std::make_pair( - std::string(i / INT8_MAX, INT8_MAX) + // NOLINT - std::string(1, i % INT8_MAX), // NOLINT - std::to_string(i))); // NOLINT - } - } - - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - - for (std::size_t j = 0; j < 1; ++j) { - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - std::vector> - tuple_list; // NOLINT - ASSERT_EQ(status::OK, - scan(test_storage_name, "", - scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list)); - // this thread put kv entries, so at lest, it must find it. - ASSERT_EQ(tuple_list.size() >= kv.size(), true); - std::size_t check_ctr{0}; - for (auto&& elem : tuple_list) { - if (kv.size() == check_ctr) break; - for (auto&& elem2 : kv) { - if (std::get<1>(elem2).size() == - std::get<2>(elem) && - memcmp(std::get<1>(elem2).data(), - std::get<1>(elem), - std::get<2>(elem)) == 0) { - ++check_ctr; - break; - } - } - } - ASSERT_EQ(check_ctr, kv.size()); - // check success for own puts. check duplicate about key. - std::sort(tuple_list.begin(), tuple_list.end()); - std::string check_key = std::get<0>(*tuple_list.begin()); - for (auto itr = tuple_list.begin() + 1; - itr != tuple_list.end(); ++itr) { // NOLINT - if (check_key == std::get<0>(*itr)) { - std::unique_lock lk{debug_mtx}; - LOG(INFO) << "it found duplicate. thread " << th_id; - for (auto itr_2 = tuple_list.begin(); // NOLINT - itr_2 != tuple_list.end(); ++itr_2) { - LOG(INFO) << "th_id:" << th_id << ", size:" - << std::get<0>(*itr_2).size() - << ", key:" << std::get<0>(*itr_2); - } - LOG(FATAL); - } - check_key = std::get<0>(*itr); - } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = remove(token, test_storage_name, k); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> - tuple_list; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - ASSERT_EQ(tuple_list.size(), ary_size); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - - destroy(); - } -} - -TEST_F(multi_thread_put_delete_scan_many_interior_test, // NOLINT - many_interior_shuffle) { // NOLINT - /** - * concurrent put/delete/scan in the state between none to many split of - * interior with shuffle. 
- */ - - constexpr std::size_t ary_size = - interior_node::child_length * key_slice_length * 1.4; - constexpr std::size_t th_nm{ary_size / 2}; - - for (size_t h = 0; h < 1; ++h) { - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id) { - std::vector> kv; - kv.reserve(ary_size / th_nm); - // data generation - for (std::size_t i = (ary_size / th_nm) * th_id; - i < (th_id != th_nm - 1 ? (ary_size / th_nm) * (th_id + 1) - : ary_size); - ++i) { - if (i <= INT8_MAX) { - kv.emplace_back( - std::make_pair(std::string(1, i), // NOLINT - std::to_string(i))); - } else { - kv.emplace_back(std::make_pair( - std::string(i / INT8_MAX, INT8_MAX) + - std::string(1, i % INT8_MAX), // NOLINT - std::to_string(i))); - } - } - - std::random_device seed_gen; - std::mt19937 engine(seed_gen()); - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - - for (std::size_t j = 0; j < 1; ++j) { - std::shuffle(kv.begin(), kv.end(), engine); - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - std::vector> - tuple_list; // NOLINT - ASSERT_EQ(status::OK, - scan(test_storage_name, "", - scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list)); - ASSERT_EQ(tuple_list.size() >= kv.size(), true); - std::size_t check_ctr{0}; - for (auto&& elem : tuple_list) { - if (kv.size() == check_ctr) { break; } - for (auto&& elem2 : kv) { - if (std::get<1>(elem2).size() == - std::get<2>(elem) && - memcmp(std::get<1>(elem2).data(), - std::get<1>(elem), - std::get<2>(elem)) == 0) { - ++check_ctr; - break; - } - } - } - ASSERT_EQ(check_ctr, kv.size()); - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = remove(token, test_storage_name, k); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> - tuple_list; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - ASSERT_EQ(tuple_list.size(), ary_size); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - - destroy(); - } -} - +/** + * @file multi_thread_put_delete_scan_test.cpp + */ + +#include +#include +#include +#include +#include + +#include "kvs.h" + +#include "glog/logging.h" +#include "gtest/gtest.h" + +using namespace yakushima; + +namespace yakushima::testing { + +class multi_thread_put_delete_scan_many_interior_test : public ::testing::Test { +public: + static void call_once_f() { + google::InitGoogleLogging( + "yakushima-test-multi_thread-put_delete_scan-multi_thread_put_" + "delete_scan_many_interior_test"); + FLAGS_stderrthreshold = 0; + } + + void SetUp() override { + init(); + std::call_once(init_, call_once_f); + } + + void TearDown() override { fin(); } + +private: + static inline std::once_flag init_; // NOLINT 
+}; + +std::string test_storage_name{"1"}; // NOLINT +std::mutex debug_mtx; // NOLINT + +TEST_F(multi_thread_put_delete_scan_many_interior_test, // NOLINT + many_interior) { // NOLINT + /** + * concurrent put/delete/scan in the state between none to many split of + * interior. + */ + + constexpr std::size_t ary_size = + interior_node::child_length * key_slice_length * 1.4; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + + for (std::size_t h = 0; h < 1; ++h) { + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? (ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + if (i <= INT8_MAX) { + kv.emplace_back( + std::make_pair(std::string(1, i), // NOLINT + std::to_string(i))); + } else { + kv.emplace_back(std::make_pair( + std::string(i / INT8_MAX, INT8_MAX) + // NOLINT + std::string(1, i % INT8_MAX), // NOLINT + std::to_string(i))); // NOLINT + } + } + + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + + for (std::size_t j = 0; j < 1; ++j) { + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + std::vector> + tuple_list; // NOLINT + ASSERT_EQ(status::OK, + scan(test_storage_name, "", + scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list)); + // this thread put kv entries, so at lest, it must find it. + ASSERT_EQ(tuple_list.size() >= kv.size(), true); + std::size_t check_ctr{0}; + for (auto&& elem : tuple_list) { + if (kv.size() == check_ctr) break; + for (auto&& elem2 : kv) { + if (std::get<1>(elem2).size() == + std::get<2>(elem) && + memcmp(std::get<1>(elem2).data(), + std::get<1>(elem), + std::get<2>(elem)) == 0) { + ++check_ctr; + break; + } + } + } + ASSERT_EQ(check_ctr, kv.size()); + // check success for own puts. check duplicate about key. + std::sort(tuple_list.begin(), tuple_list.end()); + std::string check_key = std::get<0>(*tuple_list.begin()); + for (auto itr = tuple_list.begin() + 1; + itr != tuple_list.end(); ++itr) { // NOLINT + if (check_key == std::get<0>(*itr)) { + std::unique_lock lk{debug_mtx}; + LOG(INFO) << "it found duplicate. 
thread " << th_id; + for (auto itr_2 = tuple_list.begin(); // NOLINT + itr_2 != tuple_list.end(); ++itr_2) { + LOG(INFO) << "th_id:" << th_id << ", size:" + << std::get<0>(*itr_2).size() + << ", key:" << std::get<0>(*itr_2); + } + LOG(FATAL); + } + check_key = std::get<0>(*itr); + } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = remove(token, test_storage_name, k); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> + tuple_list; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + ASSERT_EQ(tuple_list.size(), ary_size); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + + destroy(); + } +} + +TEST_F(multi_thread_put_delete_scan_many_interior_test, // NOLINT + many_interior_shuffle) { // NOLINT + /** + * concurrent put/delete/scan in the state between none to many split of + * interior with shuffle. + */ + + constexpr std::size_t ary_size = + interior_node::child_length * key_slice_length * 1.4; + constexpr std::size_t th_nm{ary_size / 2}; + + for (size_t h = 0; h < 1; ++h) { + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id) { + std::vector> kv; + kv.reserve(ary_size / th_nm); + // data generation + for (std::size_t i = (ary_size / th_nm) * th_id; + i < (th_id != th_nm - 1 ? 
(ary_size / th_nm) * (th_id + 1) + : ary_size); + ++i) { + if (i <= INT8_MAX) { + kv.emplace_back( + std::make_pair(std::string(1, i), // NOLINT + std::to_string(i))); + } else { + kv.emplace_back(std::make_pair( + std::string(i / INT8_MAX, INT8_MAX) + + std::string(1, i % INT8_MAX), // NOLINT + std::to_string(i))); + } + } + + std::random_device seed_gen; + std::mt19937 engine(seed_gen()); + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + + for (std::size_t j = 0; j < 1; ++j) { + std::shuffle(kv.begin(), kv.end(), engine); + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + std::vector> + tuple_list; // NOLINT + ASSERT_EQ(status::OK, + scan(test_storage_name, "", + scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list)); + ASSERT_EQ(tuple_list.size() >= kv.size(), true); + std::size_t check_ctr{0}; + for (auto&& elem : tuple_list) { + if (kv.size() == check_ctr) { break; } + for (auto&& elem2 : kv) { + if (std::get<1>(elem2).size() == + std::get<2>(elem) && + memcmp(std::get<1>(elem2).data(), + std::get<1>(elem), + std::get<2>(elem)) == 0) { + ++check_ctr; + break; + } + } + } + ASSERT_EQ(check_ctr, kv.size()); + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = remove(token, test_storage_name, k); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> + tuple_list; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + ASSERT_EQ(tuple_list.size(), ary_size); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + + destroy(); + } +} + } // namespace yakushima::testing \ No newline at end of file diff --git a/test/multi_thread/put_delete_scan/multi_thread_put_delete_scan_one_border_test.cpp b/test/multi_thread/put_delete_scan/multi_thread_put_delete_scan_one_border_test.cpp index 3c741f7..ce0b8a0 100644 --- a/test/multi_thread/put_delete_scan/multi_thread_put_delete_scan_one_border_test.cpp +++ b/test/multi_thread/put_delete_scan/multi_thread_put_delete_scan_one_border_test.cpp @@ -1,281 +1,281 @@ -/** - * @file multi_thread_put_delete_scan_test.cpp - */ - -#include -#include -#include -#include - -#include "gtest/gtest.h" - -#include "kvs.h" - -using namespace yakushima; - -namespace yakushima::testing { - -class mtpdst : public ::testing::Test { - void SetUp() override { init(); } - void TearDown() override { fin(); } -}; - -std::string test_storage_name{"1"}; // NOLINT - -TEST_F(mtpdst, one_border) { // NOLINT - /** - * concurrent put/delete/scan same null char key slices and different key length to - * single border by multi threads. 
- */ - constexpr std::size_t ary_size = 9; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 50; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? (ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back(std::make_pair(std::string(i, '\0'), - std::to_string(i))); - } - - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - - for (std::size_t j = 0; j < 1; ++j) { - for (auto&& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - std::vector> - tuple_list; // NOLINT - std::string_view left{}; - std::string_view right{}; - if (std::get<0>(kv.front()).size() > - std::get<0>(kv.back()).size()) { - left = std::get<0>(kv.back()); - right = std::get<0>(kv.front()); - } else { - left = std::get<0>(kv.front()); - right = std::get<0>(kv.back()); - } - ASSERT_EQ(status::OK, - scan(test_storage_name, left, - scan_endpoint::INCLUSIVE, right, - scan_endpoint::INCLUSIVE, tuple_list)); - ASSERT_EQ(tuple_list.size() >= kv.size(), true); - std::size_t check_ctr{0}; - for (auto&& elem : tuple_list) { - if (kv.size() == check_ctr) break; - for (auto&& elem2 : kv) { - if (memcmp(std::get<1>(elem2).data(), - std::get<1>(elem), - std::get<2>(elem)) == 0) { - ++check_ctr; - break; - } - } - } - ASSERT_EQ(check_ctr, kv.size()); - for (auto&& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = remove(token, test_storage_name, k); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - } - - for (auto&& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> - tuple_list; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - destroy(); - } -} - -TEST_F(mtpdst, one_border_shuffle) { // NOLINT - /** - * test1 variant which is the test using shuffle order data. 
- */ - constexpr std::size_t ary_size = 9; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 50; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? (ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back(std::make_pair(std::string(i, '\0'), - std::to_string(i))); - } - - std::random_device seed_gen{}; - std::mt19937 engine(seed_gen()); - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - - for (std::size_t j = 0; j < 1; ++j) { - std::shuffle(kv.begin(), kv.end(), engine); - for (auto&& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - std::vector> - tuple_list; // NOLINT - std::string_view left{}; - std::string_view right{}; - if (std::get<0>(kv.front()).size() > - std::get<0>(kv.back()).size()) { - left = std::get<0>(kv.back()); - right = std::get<0>(kv.front()); - } else { - left = std::get<0>(kv.front()); - right = std::get<0>(kv.back()); - } - ASSERT_EQ(status::OK, - scan(test_storage_name, left, - scan_endpoint::INCLUSIVE, right, - scan_endpoint::INCLUSIVE, tuple_list)); - ASSERT_EQ(tuple_list.size() >= kv.size(), true); - std::size_t check_ctr{0}; - for (auto&& elem : tuple_list) { - if (kv.size() == check_ctr) { break; } - for (auto&& elem2 : kv) { - if (memcmp(std::get<1>(elem2).data(), - std::get<1>(elem), - std::get<2>(elem)) == 0) { - ++check_ctr; - break; - } - } - } - ASSERT_EQ(check_ctr, kv.size()); - for (auto&& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = remove(token, test_storage_name, k); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - } - - for (auto&& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> - tuple_list; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - destroy(); - } -} - +/** + * @file multi_thread_put_delete_scan_test.cpp + */ + +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "kvs.h" + +using namespace yakushima; + +namespace yakushima::testing { + +class mtpdst : public ::testing::Test { + void SetUp() override { init(); } + void TearDown() override { fin(); } +}; + +std::string test_storage_name{"1"}; // NOLINT + +TEST_F(mtpdst, one_border) { // NOLINT + /** + * concurrent put/delete/scan same null char key slices and different key 
length to + * single border by multi threads. + */ + constexpr std::size_t ary_size = 9; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 50; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? (ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back(std::make_pair(std::string(i, '\0'), + std::to_string(i))); + } + + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + + for (std::size_t j = 0; j < 1; ++j) { + for (auto&& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + std::vector> + tuple_list; // NOLINT + std::string_view left{}; + std::string_view right{}; + if (std::get<0>(kv.front()).size() > + std::get<0>(kv.back()).size()) { + left = std::get<0>(kv.back()); + right = std::get<0>(kv.front()); + } else { + left = std::get<0>(kv.front()); + right = std::get<0>(kv.back()); + } + ASSERT_EQ(status::OK, + scan(test_storage_name, left, + scan_endpoint::INCLUSIVE, right, + scan_endpoint::INCLUSIVE, tuple_list)); + ASSERT_EQ(tuple_list.size() >= kv.size(), true); + std::size_t check_ctr{0}; + for (auto&& elem : tuple_list) { + if (kv.size() == check_ctr) break; + for (auto&& elem2 : kv) { + if (memcmp(std::get<1>(elem2).data(), + std::get<1>(elem), + std::get<2>(elem)) == 0) { + ++check_ctr; + break; + } + } + } + ASSERT_EQ(check_ctr, kv.size()); + for (auto&& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = remove(token, test_storage_name, k); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + } + + for (auto&& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> + tuple_list; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + destroy(); + } +} + +TEST_F(mtpdst, one_border_shuffle) { // NOLINT + /** + * test1 variant which is the test using shuffle order data. 
+ */ + constexpr std::size_t ary_size = 9; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 50; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? (ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back(std::make_pair(std::string(i, '\0'), + std::to_string(i))); + } + + std::random_device seed_gen{}; + std::mt19937 engine(seed_gen()); + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + + for (std::size_t j = 0; j < 1; ++j) { + std::shuffle(kv.begin(), kv.end(), engine); + for (auto&& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + std::vector> + tuple_list; // NOLINT + std::string_view left{}; + std::string_view right{}; + if (std::get<0>(kv.front()).size() > + std::get<0>(kv.back()).size()) { + left = std::get<0>(kv.back()); + right = std::get<0>(kv.front()); + } else { + left = std::get<0>(kv.front()); + right = std::get<0>(kv.back()); + } + ASSERT_EQ(status::OK, + scan(test_storage_name, left, + scan_endpoint::INCLUSIVE, right, + scan_endpoint::INCLUSIVE, tuple_list)); + ASSERT_EQ(tuple_list.size() >= kv.size(), true); + std::size_t check_ctr{0}; + for (auto&& elem : tuple_list) { + if (kv.size() == check_ctr) { break; } + for (auto&& elem2 : kv) { + if (memcmp(std::get<1>(elem2).data(), + std::get<1>(elem), + std::get<2>(elem)) == 0) { + ++check_ctr; + break; + } + } + } + ASSERT_EQ(check_ctr, kv.size()); + for (auto&& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = remove(token, test_storage_name, k); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + } + + for (auto&& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> + tuple_list; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + destroy(); + } +} + } // namespace yakushima::testing \ No newline at end of file diff --git a/test/multi_thread/put_delete_scan/multi_thread_put_delete_scan_one_interior_test.cpp b/test/multi_thread/put_delete_scan/multi_thread_put_delete_scan_one_interior_test.cpp index 64cc32f..611505e 100644 --- a/test/multi_thread/put_delete_scan/multi_thread_put_delete_scan_one_interior_test.cpp +++ b/test/multi_thread/put_delete_scan/multi_thread_put_delete_scan_one_interior_test.cpp @@ -1,160 +1,160 @@ -/** - * @file 
multi_thread_put_delete_scan_one_interior_test.cpp - */ - -#include -#include -#include -#include - -#include "gtest/gtest.h" - -#include "kvs.h" - -using namespace yakushima; - -namespace yakushima::testing { - -class mtpdst : public ::testing::Test { - void SetUp() override { init(); } - - void TearDown() override { fin(); } -}; - -std::string test_storage_name{"1"}; // NOLINT - -TEST_F(mtpdst, one_interior) { // NOLINT - /** - * concurrent put/delete/scan in the state between none to split of interior, which is - * using shuffled data. - */ - constexpr std::size_t ary_size = - interior_node::child_length * key_slice_length / 2; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 20; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? (ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - if (i <= INT8_MAX) { - kv.emplace_back( - std::make_pair(std::string(1, i), // NOLINT - std::to_string(i))); - } else { - kv.emplace_back(std::make_pair( - std::string(i / INT8_MAX, - static_cast(INT8_MAX)) + - std::string(1, i - INT8_MAX), // NOLINT - std::to_string(i))); - } - } - - std::random_device seed_gen{}; - std::mt19937 engine(seed_gen()); - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - - for (std::size_t j = 0; j < 10; ++j) { - std::shuffle(kv.begin(), kv.end(), engine); - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - std::vector> - tuple_list; // NOLINT - ASSERT_EQ(status::OK, - scan(test_storage_name, "", - scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list)); - ASSERT_EQ(tuple_list.size() >= kv.size(), true); - std::size_t check_ctr{0}; - for (auto&& elem : tuple_list) { - if (kv.size() == check_ctr) { break; } - for (auto&& elem2 : kv) { - if (std::get<1>(elem2).size() == - std::get<2>(elem) && - memcmp(std::get<1>(elem2).data(), - std::get<1>(elem), - std::get<2>(elem)) == 0) { - ++check_ctr; - break; - } - } - } - ASSERT_EQ(check_ctr, kv.size()); - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = remove(token, test_storage_name, k); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> - tuple_list; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - ASSERT_EQ(tuple_list.size(), ary_size); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - 
ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - - destroy(); - } -} - +/** + * @file multi_thread_put_delete_scan_one_interior_test.cpp + */ + +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "kvs.h" + +using namespace yakushima; + +namespace yakushima::testing { + +class mtpdst : public ::testing::Test { + void SetUp() override { init(); } + + void TearDown() override { fin(); } +}; + +std::string test_storage_name{"1"}; // NOLINT + +TEST_F(mtpdst, one_interior) { // NOLINT + /** + * concurrent put/delete/scan in the state between none to split of interior, which is + * using shuffled data. + */ + constexpr std::size_t ary_size = + interior_node::child_length * key_slice_length / 2; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 20; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? (ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + if (i <= INT8_MAX) { + kv.emplace_back( + std::make_pair(std::string(1, i), // NOLINT + std::to_string(i))); + } else { + kv.emplace_back(std::make_pair( + std::string(i / INT8_MAX, + static_cast(INT8_MAX)) + + std::string(1, i - INT8_MAX), // NOLINT + std::to_string(i))); + } + } + + std::random_device seed_gen{}; + std::mt19937 engine(seed_gen()); + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + + for (std::size_t j = 0; j < 10; ++j) { + std::shuffle(kv.begin(), kv.end(), engine); + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + std::vector> + tuple_list; // NOLINT + ASSERT_EQ(status::OK, + scan(test_storage_name, "", + scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list)); + ASSERT_EQ(tuple_list.size() >= kv.size(), true); + std::size_t check_ctr{0}; + for (auto&& elem : tuple_list) { + if (kv.size() == check_ctr) { break; } + for (auto&& elem2 : kv) { + if (std::get<1>(elem2).size() == + std::get<2>(elem) && + memcmp(std::get<1>(elem2).data(), + std::get<1>(elem), + std::get<2>(elem)) == 0) { + ++check_ctr; + break; + } + } + } + ASSERT_EQ(check_ctr, kv.size()); + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = remove(token, test_storage_name, k); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> + tuple_list; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + ASSERT_EQ(tuple_list.size(), ary_size); + for 
(std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + + destroy(); + } +} + } // namespace yakushima::testing \ No newline at end of file diff --git a/test/multi_thread/put_delete_scan/multi_thread_put_delete_scan_one_interior_two_border_test.cpp b/test/multi_thread/put_delete_scan/multi_thread_put_delete_scan_one_interior_two_border_test.cpp index 5e21803..3be883d 100644 --- a/test/multi_thread/put_delete_scan/multi_thread_put_delete_scan_one_interior_two_border_test.cpp +++ b/test/multi_thread/put_delete_scan/multi_thread_put_delete_scan_one_interior_two_border_test.cpp @@ -1,275 +1,275 @@ -/** - * @file multi_thread_put_delete_scan_test.cpp - */ - -#include -#include -#include -#include - -#include "gtest/gtest.h" - -#include "kvs.h" - -using namespace yakushima; - -namespace yakushima::testing { - -class mtpdst : public ::testing::Test { - void SetUp() override { init(); } - void TearDown() override { fin(); } -}; - -std::string test_storage_name{"1"}; // NOLINT - -TEST_F(mtpdst, one_interior) { // NOLINT - /** - * The number of puts that can be split only once and the deletes are repeated in - * multiple threads. - */ - constexpr std::size_t ary_size = key_slice_length + 1; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 10; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? 
(ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back(std::make_pair(std::string(1, i), // NOLINT - std::to_string(i))); - } - - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - -#ifndef NDEBUG - for (std::size_t j = 0; j < 1; ++j) { -#else - for (std::size_t j = 0; j < 10; ++j) { -#endif - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - std::vector> - tuple_list; // NOLINT - ASSERT_EQ(status::OK, - scan(test_storage_name, "", - scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list)); - ASSERT_EQ(tuple_list.size() >= kv.size(), true); - std::size_t check_ctr{0}; - for (auto&& elem : tuple_list) { - if (kv.size() == check_ctr) { break; } - for (auto&& elem2 : kv) { - if (std::get<1>(elem2).size() == - std::get<2>(elem) && - memcmp(std::get<1>(elem2).data(), - std::get<1>(elem), - std::get<2>(elem)) == 0) { - ++check_ctr; - break; - } - } - } - ASSERT_EQ(check_ctr, kv.size()); - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = remove(token, test_storage_name, k); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> - tuple_list; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - ASSERT_EQ(tuple_list.size(), ary_size); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - destroy(); - } -} - -TEST_F(mtpdst, one_interior_shuffle) { // NOLINT - /** - * The number of puts that can be split only once and the deletes are repeated in - * multiple threads. Use shuffled data. - */ - constexpr std::size_t ary_size = key_slice_length + 1; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 10; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? 
(ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back(std::make_pair(std::string(1, i), // NOLINT - std::to_string(i))); - } - - std::random_device seed_gen{}; - std::mt19937 engine(seed_gen()); - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - -#ifndef NDEBUG - for (std::size_t j = 0; j < 1; ++j) { -#else - for (std::size_t j = 0; j < 10; ++j) { -#endif - std::shuffle(kv.begin(), kv.end(), engine); - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - std::vector> - tuple_list; // NOLINT - ASSERT_EQ(status::OK, - scan(test_storage_name, "", - scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list)); - ASSERT_EQ(tuple_list.size() >= kv.size(), true); - std::size_t check_ctr{0}; - for (auto&& elem : tuple_list) { - if (kv.size() == check_ctr) break; - for (auto&& elem2 : kv) { - if (std::get<1>(elem2).size() == - std::get<2>(elem) && - memcmp(std::get<1>(elem2).data(), - std::get<1>(elem), - std::get<2>(elem)) == 0) { - ++check_ctr; - break; - } - } - } - ASSERT_EQ(check_ctr, kv.size()); - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = remove(token, test_storage_name, k); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> - tuple_list; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - ASSERT_EQ(tuple_list.size(), ary_size); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - destroy(); - } -} +/** + * @file multi_thread_put_delete_scan_test.cpp + */ + +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "kvs.h" + +using namespace yakushima; + +namespace yakushima::testing { + +class mtpdst : public ::testing::Test { + void SetUp() override { init(); } + void TearDown() override { fin(); } +}; + +std::string test_storage_name{"1"}; // NOLINT + +TEST_F(mtpdst, one_interior) { // NOLINT + /** + * The number of puts that can be split only once and the deletes are repeated in + * multiple threads. + */ + constexpr std::size_t ary_size = key_slice_length + 1; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 10; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back(std::make_pair(std::string(1, i), // NOLINT + std::to_string(i))); + } + + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + +#ifndef NDEBUG + for (std::size_t j = 0; j < 1; ++j) { +#else + for (std::size_t j = 0; j < 10; ++j) { +#endif + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + std::vector> + tuple_list; // NOLINT + ASSERT_EQ(status::OK, + scan(test_storage_name, "", + scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list)); + ASSERT_EQ(tuple_list.size() >= kv.size(), true); + std::size_t check_ctr{0}; + for (auto&& elem : tuple_list) { + if (kv.size() == check_ctr) { break; } + for (auto&& elem2 : kv) { + if (std::get<1>(elem2).size() == + std::get<2>(elem) && + memcmp(std::get<1>(elem2).data(), + std::get<1>(elem), + std::get<2>(elem)) == 0) { + ++check_ctr; + break; + } + } + } + ASSERT_EQ(check_ctr, kv.size()); + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = remove(token, test_storage_name, k); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> + tuple_list; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + ASSERT_EQ(tuple_list.size(), ary_size); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + destroy(); + } +} + +TEST_F(mtpdst, one_interior_shuffle) { // NOLINT + /** + * The number of puts that can be split only once and the deletes are repeated in + * multiple threads. Use shuffled data. + */ + constexpr std::size_t ary_size = key_slice_length + 1; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 10; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back(std::make_pair(std::string(1, i), // NOLINT + std::to_string(i))); + } + + std::random_device seed_gen{}; + std::mt19937 engine(seed_gen()); + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + +#ifndef NDEBUG + for (std::size_t j = 0; j < 1; ++j) { +#else + for (std::size_t j = 0; j < 10; ++j) { +#endif + std::shuffle(kv.begin(), kv.end(), engine); + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + std::vector> + tuple_list; // NOLINT + ASSERT_EQ(status::OK, + scan(test_storage_name, "", + scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list)); + ASSERT_EQ(tuple_list.size() >= kv.size(), true); + std::size_t check_ctr{0}; + for (auto&& elem : tuple_list) { + if (kv.size() == check_ctr) break; + for (auto&& elem2 : kv) { + if (std::get<1>(elem2).size() == + std::get<2>(elem) && + memcmp(std::get<1>(elem2).data(), + std::get<1>(elem), + std::get<2>(elem)) == 0) { + ++check_ctr; + break; + } + } + } + ASSERT_EQ(check_ctr, kv.size()); + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = remove(token, test_storage_name, k); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> + tuple_list; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + ASSERT_EQ(tuple_list.size(), ary_size); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + destroy(); + } +} } // namespace yakushima::testing \ No newline at end of file diff --git a/test/multi_thread/put_delete_scan/multi_thread_put_delete_scan_test.cpp b/test/multi_thread/put_delete_scan/multi_thread_put_delete_scan_test.cpp index 768b38c..a153886 100644 --- a/test/multi_thread/put_delete_scan/multi_thread_put_delete_scan_test.cpp +++ b/test/multi_thread/put_delete_scan/multi_thread_put_delete_scan_test.cpp @@ -1,151 +1,151 @@ -/** - * @file multi_thread_put_delete_scan_test.cpp - */ - -#include -#include -#include -#include - -#include "gtest/gtest.h" - -#include "kvs.h" - -using namespace yakushima; - -namespace yakushima::testing { - -class mtpdst : public ::testing::Test { - void SetUp() override { init(); } - - void TearDown() override { fin(); } -}; - -std::string test_storage_name{"1"}; // NOLINT - -TEST_F(mtpdst, many_layer) { // NOLINT - /** - * multi-layer put-delete-scan test. 
- */ - constexpr std::size_t ary_size = - interior_node::child_length * key_slice_length * 10; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (size_t h = 0; h < 1; ++h) { -#else - for (size_t h = 0; h < 10; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? (ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - if (i <= INT8_MAX) { - kv.emplace_back( - std::make_pair(std::string(1, i), // NOLINT - std::to_string(i))); - } else { - kv.emplace_back(std::make_pair( - std::string(i / INT8_MAX, INT8_MAX) + // NOLINT - std::string(1, i % INT8_MAX), // NOLINT - std::to_string(i))); - } - } - - std::random_device seed_gen{}; - std::mt19937 engine(seed_gen()); - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - - std::shuffle(kv.begin(), kv.end(), engine); - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (status::OK != ret) { - ASSERT_EQ(status::OK, ret); - std::abort(); - } - } - std::vector> - tuple_list; // NOLINT - ASSERT_EQ(status::OK, - scan(test_storage_name, "", scan_endpoint::INF, - "", scan_endpoint::INF, tuple_list)); - ASSERT_EQ(tuple_list.size() >= kv.size(), true); - std::size_t check_ctr{0}; - for (auto&& elem : tuple_list) { - if (kv.size() == check_ctr) { break; } - for (auto&& elem2 : kv) { - if (std::get<1>(elem2).size() == std::get<2>(elem) && - memcmp(std::get<1>(elem2).data(), std::get<1>(elem), - std::get<2>(elem)) == 0) { - ++check_ctr; - break; - } - } - } - ASSERT_EQ(check_ctr, kv.size()); - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = remove(token, test_storage_name, k); - if (status::OK != ret) { - ASSERT_EQ(status::OK, ret); - std::abort(); - } - } - - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (status::OK != ret) { - ASSERT_EQ(status::OK, ret); - std::abort(); - } - } - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> - tuple_list; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - ASSERT_EQ(tuple_list.size(), ary_size); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - - destroy(); - } -} - -} // namespace yakushima::testing +/** + * @file multi_thread_put_delete_scan_test.cpp + */ + +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "kvs.h" + +using namespace yakushima; + +namespace yakushima::testing { + +class mtpdst : public ::testing::Test { + void SetUp() override { init(); } + + void TearDown() override { fin(); } +}; + +std::string test_storage_name{"1"}; // NOLINT + +TEST_F(mtpdst, many_layer) { // NOLINT + /** + * multi-layer put-delete-scan test. 
+ */ + constexpr std::size_t ary_size = + interior_node::child_length * key_slice_length * 10; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (size_t h = 0; h < 1; ++h) { +#else + for (size_t h = 0; h < 10; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? (ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + if (i <= INT8_MAX) { + kv.emplace_back( + std::make_pair(std::string(1, i), // NOLINT + std::to_string(i))); + } else { + kv.emplace_back(std::make_pair( + std::string(i / INT8_MAX, INT8_MAX) + // NOLINT + std::string(1, i % INT8_MAX), // NOLINT + std::to_string(i))); + } + } + + std::random_device seed_gen{}; + std::mt19937 engine(seed_gen()); + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + + std::shuffle(kv.begin(), kv.end(), engine); + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (status::OK != ret) { + ASSERT_EQ(status::OK, ret); + std::abort(); + } + } + std::vector> + tuple_list; // NOLINT + ASSERT_EQ(status::OK, + scan(test_storage_name, "", scan_endpoint::INF, + "", scan_endpoint::INF, tuple_list)); + ASSERT_EQ(tuple_list.size() >= kv.size(), true); + std::size_t check_ctr{0}; + for (auto&& elem : tuple_list) { + if (kv.size() == check_ctr) { break; } + for (auto&& elem2 : kv) { + if (std::get<1>(elem2).size() == std::get<2>(elem) && + memcmp(std::get<1>(elem2).data(), std::get<1>(elem), + std::get<2>(elem)) == 0) { + ++check_ctr; + break; + } + } + } + ASSERT_EQ(check_ctr, kv.size()); + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = remove(token, test_storage_name, k); + if (status::OK != ret) { + ASSERT_EQ(status::OK, ret); + std::abort(); + } + } + + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (status::OK != ret) { + ASSERT_EQ(status::OK, ret); + std::abort(); + } + } + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> + tuple_list; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + ASSERT_EQ(tuple_list.size(), ary_size); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + + destroy(); + } +} + +} // namespace yakushima::testing diff --git a/test/multi_thread/put_delete_scan/multi_thread_put_delete_scan_two_border_test.cpp b/test/multi_thread/put_delete_scan/multi_thread_put_delete_scan_two_border_test.cpp index 1d312c8..8bb6140 100644 --- a/test/multi_thread/put_delete_scan/multi_thread_put_delete_scan_two_border_test.cpp +++ b/test/multi_thread/put_delete_scan/multi_thread_put_delete_scan_two_border_test.cpp @@ -1,268 +1,268 @@ -/** - * @file multi_thread_put_delete_scan_test.cpp - */ - -#include -#include -#include -#include 
- -#include "gtest/gtest.h" - -#include "kvs.h" - -using namespace yakushima; - -namespace yakushima::testing { - -class mtpdst : public ::testing::Test { - void SetUp() override { init(); } - void TearDown() override { fin(); } -}; - -std::string test_storage_name{"1"}; // NOLINT - -TEST_F(mtpdst, two_border) { // NOLINT - /** - * multiple put/delete/scan same null char key whose length is different each other - * against multiple border, which is across some layer. - */ - constexpr std::size_t ary_size = 15; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for (std::size_t h = 0; h < 10; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? (ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back(std::make_pair(std::string(i, '\0'), - std::to_string(i))); - } - - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - -#ifndef NDEBUG - for (std::size_t j = 0; j < 1; ++j) { -#else - for (std::size_t j = 0; j < 10; ++j) { -#endif - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - std::vector> - tuple_list; // NOLINT - ASSERT_EQ(status::OK, - scan(test_storage_name, "", - scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list)); - ASSERT_EQ(tuple_list.size() >= kv.size(), true); - std::size_t check_ctr{0}; - for (auto&& elem : tuple_list) { - if (kv.size() == check_ctr) { break; } - for (auto&& elem2 : kv) { - if (std::get<1>(elem2).size() == - std::get<2>(elem) && - memcmp(std::get<1>(elem2).data(), - std::get<1>(elem), - std::get<2>(elem)) == 0) { - ++check_ctr; - break; - } - } - } - ASSERT_EQ(check_ctr, kv.size()); - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = remove(token, test_storage_name, k); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> - tuple_list; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - destroy(); - } -} - -TEST_F(mtpdst, two_border_shuffle) { // NOLINT - constexpr std::size_t ary_size = 15; - std::size_t th_nm{}; - if (ary_size > std::thread::hardware_concurrency()) { - th_nm = std::thread::hardware_concurrency(); - } else { - th_nm = ary_size; - } - -#ifndef NDEBUG - for (std::size_t h = 0; h < 1; ++h) { -#else - for 
(std::size_t h = 0; h < 10; ++h) { -#endif - create_storage(test_storage_name); - - struct S { - static void work(std::size_t th_id, std::size_t max_thread) { - std::vector> kv; - kv.reserve(ary_size / max_thread); - // data generation - for (std::size_t i = (ary_size / max_thread) * th_id; - i < (th_id != max_thread - 1 - ? (ary_size / max_thread) * (th_id + 1) - : ary_size); - ++i) { - kv.emplace_back(std::make_pair(std::string(i, '\0'), - std::to_string(i))); - } - - std::random_device seed_gen{}; - std::mt19937 engine(seed_gen()); - Token token{}; - while (enter(token) != status::OK) { _mm_pause(); } - -#ifndef NDEBUG - for (std::size_t j = 0; j < 1; ++j) { -#else - for (std::size_t j = 0; j < 10; ++j) { -#endif - std::shuffle(kv.begin(), kv.end(), engine); - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - std::vector> - tuple_list; // NOLINT - ASSERT_EQ(status::OK, - scan(test_storage_name, "", - scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list)); - ASSERT_EQ(tuple_list.size() >= kv.size(), true); - std::size_t check_ctr{0}; - for (auto&& elem : tuple_list) { - if (kv.size() == check_ctr) break; - for (auto&& elem2 : kv) { - if (std::get<1>(elem2).size() == - std::get<2>(elem) && - memcmp(std::get<1>(elem2).data(), - std::get<1>(elem), - std::get<2>(elem)) == 0) { - ++check_ctr; - break; - } - } - } - ASSERT_EQ(check_ctr, kv.size()); - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = remove(token, test_storage_name, k); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - } - for (auto& i : kv) { - std::string k(std::get<0>(i)); - std::string v(std::get<1>(i)); - status ret = put(token, test_storage_name, k, v.data(), - v.size()); - if (ret != status::OK) { - ASSERT_EQ(ret, status::OK); - std::abort(); - } - } - - leave(token); - } - }; - - std::vector thv; - thv.reserve(th_nm); - for (std::size_t i = 0; i < th_nm; ++i) { - thv.emplace_back(S::work, i, th_nm); - } - for (auto&& th : thv) { th.join(); } - thv.clear(); - - std::vector> - tuple_list; // NOLINT - scan(test_storage_name, "", scan_endpoint::INF, "", - scan_endpoint::INF, tuple_list); - for (std::size_t j = 0; j < ary_size; ++j) { - std::string v(std::to_string(j)); - constexpr std::size_t v_index = 1; - ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), - v.size()), - 0); - } - destroy(); - } -} - +/** + * @file multi_thread_put_delete_scan_test.cpp + */ + +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "kvs.h" + +using namespace yakushima; + +namespace yakushima::testing { + +class mtpdst : public ::testing::Test { + void SetUp() override { init(); } + void TearDown() override { fin(); } +}; + +std::string test_storage_name{"1"}; // NOLINT + +TEST_F(mtpdst, two_border) { // NOLINT + /** + * multiple put/delete/scan same null char key whose length is different each other + * against multiple border, which is across some layer. 
+ */ + constexpr std::size_t ary_size = 15; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 10; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? (ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back(std::make_pair(std::string(i, '\0'), + std::to_string(i))); + } + + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + +#ifndef NDEBUG + for (std::size_t j = 0; j < 1; ++j) { +#else + for (std::size_t j = 0; j < 10; ++j) { +#endif + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + std::vector> + tuple_list; // NOLINT + ASSERT_EQ(status::OK, + scan(test_storage_name, "", + scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list)); + ASSERT_EQ(tuple_list.size() >= kv.size(), true); + std::size_t check_ctr{0}; + for (auto&& elem : tuple_list) { + if (kv.size() == check_ctr) { break; } + for (auto&& elem2 : kv) { + if (std::get<1>(elem2).size() == + std::get<2>(elem) && + memcmp(std::get<1>(elem2).data(), + std::get<1>(elem), + std::get<2>(elem)) == 0) { + ++check_ctr; + break; + } + } + } + ASSERT_EQ(check_ctr, kv.size()); + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = remove(token, test_storage_name, k); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> + tuple_list; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + destroy(); + } +} + +TEST_F(mtpdst, two_border_shuffle) { // NOLINT + constexpr std::size_t ary_size = 15; + std::size_t th_nm{}; + if (ary_size > std::thread::hardware_concurrency()) { + th_nm = std::thread::hardware_concurrency(); + } else { + th_nm = ary_size; + } + +#ifndef NDEBUG + for (std::size_t h = 0; h < 1; ++h) { +#else + for (std::size_t h = 0; h < 10; ++h) { +#endif + create_storage(test_storage_name); + + struct S { + static void work(std::size_t th_id, std::size_t max_thread) { + std::vector> kv; + kv.reserve(ary_size / max_thread); + // data generation + for (std::size_t i = (ary_size / max_thread) * th_id; + i < (th_id != max_thread - 1 + ? 
(ary_size / max_thread) * (th_id + 1) + : ary_size); + ++i) { + kv.emplace_back(std::make_pair(std::string(i, '\0'), + std::to_string(i))); + } + + std::random_device seed_gen{}; + std::mt19937 engine(seed_gen()); + Token token{}; + while (enter(token) != status::OK) { _mm_pause(); } + +#ifndef NDEBUG + for (std::size_t j = 0; j < 1; ++j) { +#else + for (std::size_t j = 0; j < 10; ++j) { +#endif + std::shuffle(kv.begin(), kv.end(), engine); + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + std::vector> + tuple_list; // NOLINT + ASSERT_EQ(status::OK, + scan(test_storage_name, "", + scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list)); + ASSERT_EQ(tuple_list.size() >= kv.size(), true); + std::size_t check_ctr{0}; + for (auto&& elem : tuple_list) { + if (kv.size() == check_ctr) break; + for (auto&& elem2 : kv) { + if (std::get<1>(elem2).size() == + std::get<2>(elem) && + memcmp(std::get<1>(elem2).data(), + std::get<1>(elem), + std::get<2>(elem)) == 0) { + ++check_ctr; + break; + } + } + } + ASSERT_EQ(check_ctr, kv.size()); + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = remove(token, test_storage_name, k); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + } + for (auto& i : kv) { + std::string k(std::get<0>(i)); + std::string v(std::get<1>(i)); + status ret = put(token, test_storage_name, k, v.data(), + v.size()); + if (ret != status::OK) { + ASSERT_EQ(ret, status::OK); + std::abort(); + } + } + + leave(token); + } + }; + + std::vector thv; + thv.reserve(th_nm); + for (std::size_t i = 0; i < th_nm; ++i) { + thv.emplace_back(S::work, i, th_nm); + } + for (auto&& th : thv) { th.join(); } + thv.clear(); + + std::vector> + tuple_list; // NOLINT + scan(test_storage_name, "", scan_endpoint::INF, "", + scan_endpoint::INF, tuple_list); + for (std::size_t j = 0; j < ary_size; ++j) { + std::string v(std::to_string(j)); + constexpr std::size_t v_index = 1; + ASSERT_EQ(memcmp(std::get(tuple_list.at(j)), v.data(), + v.size()), + 0); + } + destroy(); + } +} + } // namespace yakushima::testing \ No newline at end of file diff --git a/test/multi_thread/put_delete_scan/readme.md b/test/multi_thread/put_delete_scan/readme.md index 760abd8..8c64e95 100644 --- a/test/multi_thread/put_delete_scan/readme.md +++ b/test/multi_thread/put_delete_scan/readme.md @@ -1,28 +1,28 @@ -# Test that put / delete / scan operations work in parallel - -* multi_thread_put_delete_scan_one_border_test.cpp - * Test the operations on one border node. -* multi_thread_put_delete_scan_two_border_test.cpp - * Perform put / delete / scan operations in parallel. The state of the tree can range from nothing to two border - - nodes. - -* multi_thread_put_delete_scan_one_interior_two_border_test.cpp - * Perform put / delete / scan operations in parallel. The state of the tree can range from nothing to one interior - - node and two border nodes. - -* multi_thread_put_delete_scan_one_interior_test.cpp - * Perform put / delete / scan operations in parallel. The state of the tree can range from nothing to one interior node and many border nodes. -* multi_thread_put_delete_scan_many_interior_test.cpp - * Perform put / delete / scan operations in parallel. The state of the tree can range from nothing to many interior nodes. 
-* multi_thread_put_delete_scan_test.cpp - * Others. - -## Restriction - -Prefix the test file with multi_thread_put_delete_scan_ to avoid duplicate executable names. - -## Todo - -Separate files some file consumes a lot of time. Add kindly documents. +# Test that put / delete / scan operations work in parallel + +* multi_thread_put_delete_scan_one_border_test.cpp + * Test the operations on one border node. +* multi_thread_put_delete_scan_two_border_test.cpp + * Perform put / delete / scan operations in parallel. The state of the tree can range from nothing to two border + + nodes. + +* multi_thread_put_delete_scan_one_interior_two_border_test.cpp + * Perform put / delete / scan operations in parallel. The state of the tree can range from nothing to one interior + + node and two border nodes. + +* multi_thread_put_delete_scan_one_interior_test.cpp + * Perform put / delete / scan operations in parallel. The state of the tree can range from nothing to one interior node and many border nodes. +* multi_thread_put_delete_scan_many_interior_test.cpp + * Perform put / delete / scan operations in parallel. The state of the tree can range from nothing to many interior nodes. +* multi_thread_put_delete_scan_test.cpp + * Others. + +## Restriction + +Prefix the test file with multi_thread_put_delete_scan_ to avoid duplicate executable names. + +## Todo + +Separate files some file consumes a lot of time. Add kindly documents. diff --git a/test/multi_thread/readme.md b/test/multi_thread/readme.md index e42cb0b..7c4f975 100644 --- a/test/multi_thread/readme.md +++ b/test/multi_thread/readme.md @@ -1,12 +1,12 @@ -# Test about multi thread - -* delete - * Test that delete operations work in parallel. -* put - * Test that put operations work in parallel. -* put_delete - * Test that put / delete operations work in parallel. -* put_delete_get - * Test that put / delete / get operations work in parallel. -* put_delete_scan - * Test that put / delete / scan operations work in parallel. +# Test about multi thread + +* delete + * Test that delete operations work in parallel. +* put + * Test that put operations work in parallel. +* put_delete + * Test that put / delete operations work in parallel. +* put_delete_get + * Test that put / delete / get operations work in parallel. +* put_delete_scan + * Test that put / delete / scan operations work in parallel. diff --git a/test/put_get/readme.md b/test/put_get/readme.md index ac4cc43..c47d089 100644 --- a/test/put_get/readme.md +++ b/test/put_get/readme.md @@ -1,15 +1,15 @@ -# Test about put / get - -* put_get_one_key_test.cpp - * Test the operation on putting one key. -* put_get_test.cpp - * Others. - -## Restriction - -Prefix the test file with put_get_ to avoid duplicate executable names. - -## Todo - -Separate files when put_get_test.cpp consumes a lot of time. -Add kindly documents. +# Test about put / get + +* put_get_one_key_test.cpp + * Test the operation on putting one key. +* put_get_test.cpp + * Others. + +## Restriction + +Prefix the test file with put_get_ to avoid duplicate executable names. + +## Todo + +Separate files when put_get_test.cpp consumes a lot of time. +Add kindly documents. diff --git a/test/scan/readme.md b/test/scan/readme.md index f1387dd..32a07b2 100644 --- a/test/scan/readme.md +++ b/test/scan/readme.md @@ -1,22 +1,22 @@ -# Test about scan - -* scan_basic_usage_test.cpp - * Test basic usage. -* scan_max_num_test.cpp - * Test with maximum number specified. -* scan_no_elem_nodes_test.cpp - * Test the operation on some border nodes. 
- One of them has elements in the range, but some border nodes in the range. -* scan_one_border_test.cpp - * Test the operation on one border node. -* scan_test.cpp - * Others. - -## Restriction - -Prefix the test file with scan_ to avoid duplicate executable names. - -## Todo - -Separate files when scan_test.cpp consumes a lot of time. -Add kindly documents. +# Test about scan + +* scan_basic_usage_test.cpp + * Test basic usage. +* scan_max_num_test.cpp + * Test with maximum number specified. +* scan_no_elem_nodes_test.cpp + * Test the operation on some border nodes. + One of them has elements in the range, but some border nodes in the range. +* scan_one_border_test.cpp + * Test the operation on one border node. +* scan_test.cpp + * Others. + +## Restriction + +Prefix the test file with scan_ to avoid duplicate executable names. + +## Todo + +Separate files when scan_test.cpp consumes a lot of time. +Add kindly documents.
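
All of the *_put_delete_scan_* files above repeat the same per-thread body, so as a reference while reading the re-encoded hunks, here is a minimal stand-alone sketch of that loop. It is not part of the patch: it reuses only the yakushima calls visible in the hunks (enter/put/scan/remove/leave, Token, status, scan_endpoint), the helper name put_scan_remove_once is invented for illustration, and the scan result tuple layout <key, value pointer, value length> is an assumption inferred from how the tests pass elements to memcmp.

// Minimal sketch (not part of the patch) of the per-thread loop shared by the
// *_put_delete_scan_* tests above. Assumption: scan fills tuples laid out as
// <key, value pointer, value length>, inferred from the memcmp calls in the tests.
#include <cstdlib>
#include <cstring>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

#include "kvs.h"

using namespace yakushima;

void put_scan_remove_once(std::string const& storage,
                          std::vector<std::pair<std::string, std::string>> const& kv) {
    Token token{};
    while (enter(token) != status::OK) {} // retry until a session slot is free

    // put every key/value pair owned by this worker
    for (auto const& elem : kv) {
        std::string k(elem.first);
        std::string v(elem.second);
        if (put(token, storage, k, v.data(), v.size()) != status::OK) { std::abort(); }
    }

    // full-range scan; every owned value must be visible (other workers may add more)
    std::vector<std::tuple<std::string, char*, std::size_t>> tuple_list;
    if (scan(storage, "", scan_endpoint::INF, "", scan_endpoint::INF, tuple_list) !=
        status::OK) {
        std::abort();
    }
    std::size_t check_ctr{0};
    for (auto const& t : tuple_list) {
        if (check_ctr == kv.size()) { break; }
        for (auto const& elem : kv) {
            if (elem.second.size() == std::get<2>(t) &&
                std::memcmp(elem.second.data(), std::get<1>(t), std::get<2>(t)) == 0) {
                ++check_ctr;
                break;
            }
        }
    }
    if (check_ctr != kv.size()) { std::abort(); }

    // remove everything again so the next round starts from the pre-put state
    for (auto const& elem : kv) {
        std::string k(elem.first);
        if (remove(token, storage, k) != status::OK) { std::abort(); }
    }

    leave(token);
}

Each test partitions [0, ary_size) across at most std::thread::hardware_concurrency() workers, runs this body several times (with the data shuffled in the *_shuffle variants), re-inserts the keys once more, joins the workers, and finishes with a single full-range scan that checks the values come back in key order.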