From 625f90cc729f71575c94c3a65ebc2a87b826a148 Mon Sep 17 00:00:00 2001 From: Ryan Richard Date: Fri, 10 Jan 2025 21:09:29 -0600 Subject: [PATCH] DSL Support for Layout and Buffer classes (#192) * backup [skip ci] * backup [skip ci] * works, let's see what GCC hates... * Committing clang-format changes * remove ambiguity in string_type --------- Co-authored-by: github-actions[bot] --- include/tensorwrapper/buffer/buffer_base.hpp | 40 +- include/tensorwrapper/buffer/eigen.hpp | 28 +- include/tensorwrapper/dsl/dummy_indices.hpp | 94 ++++ include/tensorwrapper/layout/layout_base.hpp | 118 ++-- .../tensor/detail_/tensor_input.hpp | 6 +- src/tensorwrapper/buffer/buffer_base.cpp | 56 +- src/tensorwrapper/buffer/eigen.cpp | 246 ++++++-- .../buffer/eigen_contraction.cpp | 137 +++++ .../buffer/eigen_contraction.hpp | 41 ++ src/tensorwrapper/layout/layout_base.cpp | 85 +++ .../tensor/detail_/tensor_factory.cpp | 8 +- .../tensorwrapper/allocator/eigen.cpp | 15 +- .../tensorwrapper/buffer/buffer_base.cpp | 8 +- .../unit_tests/tensorwrapper/buffer/eigen.cpp | 529 +++++++++++++++++- .../buffer/eigen_contraction.cpp | 78 +++ .../tensorwrapper/detail_/dsl_base.cpp | 2 +- .../detail_/unique_ptr_utilities.cpp | 2 +- .../tensorwrapper/detail_/view_traits.cpp | 2 +- .../cxx/unit_tests/tensorwrapper/dsl/dsl.cpp | 86 ++- .../tensorwrapper/dsl/dummy_indices.cpp | 77 +++ .../unit_tests/tensorwrapper/dsl/labeled.cpp | 4 +- .../tensorwrapper/dsl/pairwise_parser.cpp | 83 ++- .../tensorwrapper/layout/layout_base.cpp | 120 +++- .../tensorwrapper/layout/logical.cpp | 8 +- .../tensorwrapper/layout/physical.cpp | 8 +- .../shape/detail_/smooth_alias.cpp | 2 +- .../shape/detail_/smooth_view_pimpl.cpp | 2 +- .../unit_tests/tensorwrapper/shape/smooth.cpp | 2 +- .../tensorwrapper/shape/smooth_view.cpp | 2 +- .../tensorwrapper/sparsity/pattern.cpp | 2 +- .../tensorwrapper/symmetry/group.cpp | 2 +- .../tensorwrapper/symmetry/permutation.cpp | 2 +- .../tensor/detail_/tensor_factory.cpp | 28 +- .../tensor/detail_/tensor_input.cpp | 5 +- .../tensor/detail_/tensor_pimpl.cpp | 11 +- .../tensor/detail_/unwrap_il.cpp | 2 +- .../tensorwrapper/tensor/tensor_class.cpp | 32 +- .../unit_tests/tensorwrapper/testing/dsl.hpp | 48 ++ .../tensorwrapper/testing/eigen_buffers.hpp | 22 +- .../tensorwrapper/{ => testing}/helpers.hpp | 1 + .../tensorwrapper/{ => testing}/inputs.hpp | 12 +- .../tensorwrapper/testing/layouts.hpp | 65 +++ .../tensorwrapper/testing/shapes.hpp | 4 +- .../tensorwrapper/testing/testing.hpp | 6 +- 44 files changed, 1850 insertions(+), 281 deletions(-) create mode 100644 src/tensorwrapper/buffer/eigen_contraction.cpp create mode 100644 src/tensorwrapper/buffer/eigen_contraction.hpp create mode 100644 src/tensorwrapper/layout/layout_base.cpp create mode 100644 tests/cxx/unit_tests/tensorwrapper/buffer/eigen_contraction.cpp create mode 100644 tests/cxx/unit_tests/tensorwrapper/testing/dsl.hpp rename tests/cxx/unit_tests/tensorwrapper/{ => testing}/helpers.hpp (98%) rename tests/cxx/unit_tests/tensorwrapper/{ => testing}/inputs.hpp (90%) create mode 100644 tests/cxx/unit_tests/tensorwrapper/testing/layouts.hpp diff --git a/include/tensorwrapper/buffer/buffer_base.hpp b/include/tensorwrapper/buffer/buffer_base.hpp index c0801afc..cedd2800 100644 --- a/include/tensorwrapper/buffer/buffer_base.hpp +++ b/include/tensorwrapper/buffer/buffer_base.hpp @@ -15,6 +15,7 @@ */ #pragma once +#include #include #include #include @@ -25,30 +26,33 @@ namespace tensorwrapper::buffer { * * All classes which wrap existing tensor libraries derive 
from this class. */ -class BufferBase : public detail_::PolymorphicBase { +class BufferBase : public detail_::PolymorphicBase, + public detail_::DSLBase { private: /// Type of *this using my_type = BufferBase; +protected: /// Type *this inherits from - using my_base_type = detail_::PolymorphicBase; + using polymorphic_base = detail_::PolymorphicBase; public: /// Type all buffers inherit from - using buffer_base_type = typename my_base_type::base_type; + using buffer_base_type = typename polymorphic_base::base_type; /// Type of a mutable reference to a buffer_base_type object - using buffer_base_reference = typename my_base_type::base_reference; + using buffer_base_reference = typename polymorphic_base::base_reference; /// Type of a read-only reference to a buffer_base_type object using const_buffer_base_reference = - typename my_base_type::const_base_reference; + typename polymorphic_base::const_base_reference; /// Type of a pointer to an object of type buffer_base_type - using buffer_base_pointer = typename my_base_type::base_pointer; + using buffer_base_pointer = typename polymorphic_base::base_pointer; /// Type of a pointer to a read-only object of type buffer_base_type - using const_buffer_base_pointer = typename my_base_type::const_base_pointer; + using const_buffer_base_pointer = + typename polymorphic_base::const_base_pointer; /// Type of the class describing the physical layout of the buffer using layout_type = layout::LayoutBase; @@ -59,6 +63,9 @@ class BufferBase : public detail_::PolymorphicBase { /// Type of a pointer to the layout using layout_pointer = typename layout_type::layout_pointer; + /// Type used to represent the tensor's rank + using rank_type = typename layout_type::size_type; + // ------------------------------------------------------------------------- // -- Accessors // ------------------------------------------------------------------------- @@ -90,6 +97,10 @@ class BufferBase : public detail_::PolymorphicBase { return *m_layout_; } + rank_type rank() const noexcept { + return has_layout() ? 
layout().rank() : 0; } + // ------------------------------------------------------------------------- // -- Utility methods // ------------------------------------------------------------------------- @@ -191,6 +202,21 @@ class BufferBase : public detail_::PolymorphicBase { return *this; } + dsl_reference addition_assignment_(label_type this_labels, + const_labeled_reference lhs, + const_labeled_reference rhs) override; + + dsl_reference subtraction_assignment_(label_type this_labels, + const_labeled_reference lhs, + const_labeled_reference rhs) override; + + dsl_reference multiplication_assignment_( + label_type this_labels, const_labeled_reference lhs, + const_labeled_reference rhs) override; + + dsl_reference permute_assignment_(label_type this_labels, + const_labeled_reference rhs) override; + private: /// Throws std::runtime_error when there is no layout void assert_layout_() const { diff --git a/include/tensorwrapper/buffer/eigen.hpp b/include/tensorwrapper/buffer/eigen.hpp index b14ee00f..320e7f0d 100644 --- a/include/tensorwrapper/buffer/eigen.hpp +++ b/include/tensorwrapper/buffer/eigen.hpp @@ -180,10 +180,36 @@ class Eigen : public Replicated { return my_base_type::are_equal_impl_(rhs); } + /// Implements addition_assignment by calling addition_assignment on state + dsl_reference addition_assignment_(label_type this_labels, + const_labeled_reference lhs, + const_labeled_reference rhs) override; + + /// Calls subtraction_assignment on each member + dsl_reference subtraction_assignment_(label_type this_labels, + const_labeled_reference lhs, + const_labeled_reference rhs) override; + + /// Calls multiplication_assignment on each member + dsl_reference multiplication_assignment_( + label_type this_labels, const_labeled_reference lhs, + const_labeled_reference rhs) override; + + /// Calls permute_assignment on each member + dsl_reference permute_assignment_(label_type this_labels, + const_labeled_reference rhs) override; + /// Implements to_string - typename my_base_type::string_type to_string_() const override; + typename polymorphic_base::string_type to_string_() const override; private: + dsl_reference hadamard_(label_type this_labels, const_labeled_reference lhs, + const_labeled_reference rhs); + + dsl_reference contraction_(label_type this_labels, + const_labeled_reference lhs, + const_labeled_reference rhs); + /// The actual Eigen tensor data_type m_tensor_; }; diff --git a/include/tensorwrapper/dsl/dummy_indices.hpp b/include/tensorwrapper/dsl/dummy_indices.hpp index 81af1a31..1a557dff 100644 --- a/include/tensorwrapper/dsl/dummy_indices.hpp +++ b/include/tensorwrapper/dsl/dummy_indices.hpp @@ -163,6 +163,51 @@ class DummyIndices return true; } + /** @brief Is a triple of DummyIndices consistent with a pure element-wise + * product? + * + * In generalized Einstein notation a pure element-wise (also commonly + * termed Hadamard) product is denoted by *this, @p lhs, and @p rhs + * having the same ordered set of dummy indices, up to permutation. + * Additionally, the dummy indices associated with any given tensor may + * not include a repeated index. + * + * @param[in] lhs The dummy indices associated with the tensor to the + * left of the times operator. + * @param[in] rhs The dummy indices associated with the tensor to the + * right of the times operator. + * + * @return True if the dummy indices given by *this, @p lhs, and @p rhs + * are consistent with a purely element-wise product of the tensors + * that @p lhs and @p rhs label. + * + * @throw None No throw guarantee.
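+ *
+ * For example (hypothetical labels): if *this holds "i,j", then lhs ==
+ * "i,j" with rhs == "j,i" is consistent with a Hadamard product, whereas
+ * lhs == "i,k" with rhs == "k,j" is not (the index k would be summed over).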
+ */ + bool is_hadamard_product(const DummyIndices& lhs, + const DummyIndices& rhs) const noexcept; + + /** @brief Does a triple of DummyIndices indicate a product is a pure + * contraction? + * + * In generalized Einstein notation a pure contraction is an operation + * where indices common to @p lhs and @p rhs are summed over and do NOT + * appear in the result, i.e., *this. Additionally, we stipulate that + * there must be at least one index summed over (if no index is summed + * over, the operation is a pure direct product). + * + * @param[in] lhs The dummy indices associated with the tensor to the + * left of the times operator. + * @param[in] rhs The dummy indices associated with the tensor to the + * right of the times operator. + * + * @return True if the indices associated with *this, @p lhs, and @p rhs + * are consistent with a contraction and false otherwise. + * + * @throw None No throw guarantee. + */ + bool is_contraction(const DummyIndices& lhs, + const DummyIndices& rhs) const noexcept; + /** @brief Computes the permutation needed to convert *this into @p other. * * Each DummyIndices object is viewed as an ordered set of objects. If @@ -366,6 +411,31 @@ class DummyIndices return rv; } + /** @brief Returns the set difference of *this and @p other. + * + * The set difference of *this with @p other is the set of indices which + * appear in *this, but not in @p other. This method returns that set; + * indices which appear more than once in *this will only appear once + * in the result. + * + * @param[in] other The set to remove from *this. + * + * @return The set difference of *this and @p other. + * + * @throw std::bad_alloc if there is a problem allocating the return. + * Strong throw guarantee. + */ + DummyIndices difference(const DummyIndices& other) const { + DummyIndices rv; + for(const auto& x : *this) { + if(other.count(x)) continue; + if(rv.count(x)) continue; + rv.m_dummy_indices_.push_back(x); + } + return rv; + } + protected: /// Main ctor for setting the value, throws if any index is empty explicit DummyIndices(split_string_type split_dummy_indices) : @@ -401,4 +471,28 @@ class DummyIndices split_string_type m_dummy_indices_; }; +template +bool DummyIndices::is_hadamard_product( + const DummyIndices& lhs, const DummyIndices& rhs) const noexcept { + if(has_repeated_indices()) return false; + if(lhs.has_repeated_indices()) return false; + if(rhs.has_repeated_indices()) return false; + if(!is_permutation(lhs)) return false; + if(!is_permutation(rhs)) return false; + return true; +} + +template +bool DummyIndices::is_contraction( + const DummyIndices& lhs, const DummyIndices& rhs) const noexcept { + if(has_repeated_indices()) return false; + if(lhs.has_repeated_indices()) return false; + if(rhs.has_repeated_indices()) return false; + auto lhs_cap_rhs = lhs.intersection(rhs); + if(lhs_cap_rhs.empty()) return false; // No common indices + if(!intersection(lhs_cap_rhs).empty()) + return false; // Common index not summed + return true; +} + } // namespace tensorwrapper::dsl \ No newline at end of file diff --git a/include/tensorwrapper/layout/layout_base.hpp b/include/tensorwrapper/layout/layout_base.hpp index 6ab86225..7ac8f6ae 100644 --- a/include/tensorwrapper/layout/layout_base.hpp +++ b/include/tensorwrapper/layout/layout_base.hpp @@ -15,6 +15,7 @@ */ #pragma once +#include #include #include #include @@ -25,7 +26,8 @@ namespace tensorwrapper::layout { /** @brief Common base class for all layouts.
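 *
 * A layout pairs a shape with symmetry and sparsity objects of matching
 * rank (checked at construction) and, via detail_::DSLBase, supports the
 * labeled assignment operations used by the DSL.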
* */ -class LayoutBase : public detail_::PolymorphicBase { +class LayoutBase : public tensorwrapper::detail_::PolymorphicBase, + public tensorwrapper::detail_::DSLBase { public: /// Type all layouts derive from using layout_base = LayoutBase; @@ -87,8 +89,7 @@ class LayoutBase : public detail_::PolymorphicBase { */ LayoutBase(const_shape_reference shape, const_symmetry_reference symmetry, const_sparsity_reference sparsity) : - LayoutBase(shape.clone(), std::make_unique(symmetry), - std::make_unique(sparsity)) {} + LayoutBase(shape.clone(), symmetry.clone(), sparsity.clone()) {} /** @brief Initialize by shape copy ctor * * @throw std::bad_alloc if there is a problem copying @p shape. * Strong throw guarantee. */ LayoutBase(const_shape_reference shape) : - LayoutBase(shape.clone(), std::make_unique(), - std::make_unique()) {} - - /** @brief Initialize by move ctor - * - * This ctor is used when the user wants *this to take ownership of the - * objects being used to initialize * this. - * - * @throw std::runtime_error if @p shape, @p symmetry, or @p sparsity is - * a nullptr. Strong throw guarantee. - */ - LayoutBase(shape_pointer shape, symmetry_pointer symmetry, - sparsity_pointer sparsity) : - m_shape_(std::move(shape)), - m_symmetry_(std::move(symmetry)), - m_sparsity_(std::move(sparsity)) { - if(m_shape_ == nullptr) throw std::runtime_error("Shape can't be null"); - if(m_symmetry_ == nullptr) - throw std::runtime_error("Symmetry can't be null"); - if(m_sparsity_ == nullptr) - throw std::runtime_error("Sparsity can't be null"); - } + LayoutBase(shape.clone(), std::make_unique(shape.rank()), + std::make_unique(shape.rank())) {} /** @brief Initialize by shape move ctor * * This ctor is the same as the shape, symmetry, and sparsity move ctor * except that the symmetry and sparsity are set to default values. * * @throw std::runtime_error if @p shape is a nullptr. Strong throw * guarantee. */ - LayoutBase(shape_pointer shape) : m_shape_(std::move(shape)) { - if(m_shape_ == nullptr) throw std::runtime_error("Shape can't be null"); - m_symmetry_ = std::make_unique(); - m_sparsity_ = std::make_unique(); + explicit LayoutBase(shape_pointer shape) : + m_shape_(std::move(shape)), + m_symmetry_(m_shape_ ? std::make_unique(m_shape_->rank()) : + nullptr), + m_sparsity_(m_shape_ ? std::make_unique(m_shape_->rank()) : + nullptr) { + assert_valid_state_(); + } + + /** @brief Initialize by move ctor + * + * This ctor is used when the user wants *this to take ownership of the + * objects being used to initialize *this. + * + * @throw std::runtime_error if @p shape, @p symmetry, or @p sparsity is + * a nullptr. Strong throw guarantee. + */ + LayoutBase(shape_pointer shape, symmetry_pointer symmetry, + sparsity_pointer sparsity) : + m_shape_(std::move(shape)), + m_symmetry_(std::move(symmetry)), + m_sparsity_(std::move(sparsity)) { + assert_valid_state_(); } /// Defaulted polymorphic dtor @@ -174,6 +174,18 @@ class LayoutBase : public detail_::PolymorphicBase { */ const_sparsity_reference sparsity() const { return *m_sparsity_; } + /** @brief The rank of the tensor this layout describes. + * + * This method is a convenience function for calling the rank methods on + * one of the members (ensuring they all have the same rank is done at + * construction). + * + * @return The rank of the tensor associated with *this. + * + * @throw None No throw guarantee.
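+ *
+ * For example, a layout whose shape is shape::Smooth{2, 3} (a 2-by-3
+ * matrix) has a rank() of 2.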
+ */ + size_type rank() const noexcept { return m_shape_->rank(); } + // ------------------------------------------------------------------------- // -- Utility methods // ------------------------------------------------------------------------- @@ -190,9 +202,10 @@ class LayoutBase : public detail_::PolymorphicBase { * @throw None No throw guarantee. */ bool operator==(const layout_base& rhs) const noexcept { - if(!m_shape_->are_equal(*rhs.m_shape_)) return false; - return std::tie(*m_symmetry_, *m_sparsity_) == - std::tie(*rhs.m_symmetry_, *rhs.m_sparsity_); + if(m_shape_->are_different(*rhs.m_shape_)) return false; + if(m_symmetry_->are_different(*rhs.m_symmetry_)) return false; + if(m_sparsity_->are_different(*rhs.m_sparsity_)) return false; + return true; } /** @brief Is *this different from @p rhs? @@ -223,13 +236,38 @@ class LayoutBase : public detail_::PolymorphicBase { */ LayoutBase(const LayoutBase& other) : m_shape_(other.m_shape_->clone()), - m_symmetry_(std::make_unique(*other.m_symmetry_)), - m_sparsity_(std::make_unique(*other.m_sparsity_)) {} + m_symmetry_(other.m_symmetry_->clone()), + m_sparsity_(other.m_sparsity_->clone()) {} + /// Deleted to help avoid slicing + ///@{ LayoutBase& operator=(const LayoutBase&) = delete; LayoutBase& operator=(LayoutBase&&) = delete; + ///@} + + /// Implements addition_assignment by calling addition_assignment on state + dsl_reference addition_assignment_(label_type this_labels, + const_labeled_reference lhs, + const_labeled_reference rhs) override; + + /// Calls subtraction_assignment on each member + dsl_reference subtraction_assignment_(label_type this_labels, + const_labeled_reference lhs, + const_labeled_reference rhs) override; + + /// Calls multiplication_assignment on each member + dsl_reference multiplication_assignment_( + label_type this_labels, const_labeled_reference lhs, + const_labeled_reference rhs) override; + + /// Calls permute_assignment on each member + dsl_reference permute_assignment_(label_type this_labels, + const_labeled_reference rhs) override; private: + /// Asserts that *this is in a valid state + void assert_valid_state_() const; + /// The actual shape of the tensor shape_pointer m_shape_; @@ -240,4 +278,16 @@ class LayoutBase : public detail_::PolymorphicBase { sparsity_pointer m_sparsity_; }; +inline void LayoutBase::assert_valid_state_() const { + if(m_shape_ == nullptr) throw std::runtime_error("Shape can't be null"); + if(m_symmetry_ == nullptr) + throw std::runtime_error("Symmetry can't be null"); + if(m_sparsity_ == nullptr) + throw std::runtime_error("Sparsity can't be null"); + + bool same_rank = m_shape_->rank() == m_symmetry_->rank(); + same_rank = same_rank && m_shape_->rank() == m_sparsity_->rank(); + if(!same_rank) throw std::runtime_error("Must have same rank"); +} + } // namespace tensorwrapper::layout diff --git a/include/tensorwrapper/tensor/detail_/tensor_input.hpp b/include/tensorwrapper/tensor/detail_/tensor_input.hpp index b874e311..825ce648 100644 --- a/include/tensorwrapper/tensor/detail_/tensor_input.hpp +++ b/include/tensorwrapper/tensor/detail_/tensor_input.hpp @@ -151,8 +151,7 @@ struct TensorInput { template TensorInput(const_symmetry_reference symmetry, Args&&... args) : - TensorInput(std::make_unique(symmetry), - std::forward(args)...) {} + TensorInput(symmetry.clone(), std::forward(args)...) {} template TensorInput(symmetry_pointer psymmetry, Args&&... args) : @@ -162,8 +161,7 @@ struct TensorInput { template TensorInput(const_sparsity_reference sparsity, Args&&... 
args) : - TensorInput(std::make_unique(), - std::forward(args)...) {} + TensorInput(sparsity.clone(), std::forward(args)...) {} template TensorInput(sparsity_pointer psparsity, Args&&... args) : diff --git a/src/tensorwrapper/buffer/buffer_base.cpp b/src/tensorwrapper/buffer/buffer_base.cpp index e82461fb..dbc923e7 100644 --- a/src/tensorwrapper/buffer/buffer_base.cpp +++ b/src/tensorwrapper/buffer/buffer_base.cpp @@ -16,4 +16,58 @@ #include -namespace tensorwrapper::buffer {} // namespace tensorwrapper::buffer \ No newline at end of file +namespace tensorwrapper::buffer { + +using dsl_reference = typename BufferBase::dsl_reference; + +dsl_reference BufferBase::addition_assignment_(label_type this_labels, + const_labeled_reference lhs, + const_labeled_reference rhs) { + auto llayout = lhs.object().layout()(lhs.labels()); + auto rlayout = rhs.object().layout()(rhs.labels()); + + if(!has_layout()) m_layout_ = lhs.object().layout().clone(); + + m_layout_->addition_assignment(this_labels, llayout, rlayout); + + return *this; +} + +dsl_reference BufferBase::subtraction_assignment_(label_type this_labels, + const_labeled_reference lhs, + const_labeled_reference rhs) { + auto llayout = lhs.object().layout()(lhs.labels()); + auto rlayout = rhs.object().layout()(rhs.labels()); + + if(!has_layout()) m_layout_ = lhs.object().layout().clone(); + + m_layout_->subtraction_assignment(this_labels, llayout, rlayout); + + return *this; +} + +dsl_reference BufferBase::multiplication_assignment_( + label_type this_labels, const_labeled_reference lhs, + const_labeled_reference rhs) { + auto llayout = lhs.object().layout()(lhs.labels()); + auto rlayout = rhs.object().layout()(rhs.labels()); + + if(!has_layout()) m_layout_ = lhs.object().layout().clone(); + + m_layout_->multiplication_assignment(this_labels, llayout, rlayout); + + return *this; +} + +dsl_reference BufferBase::permute_assignment_(label_type this_labels, + const_labeled_reference rhs) { + auto rlayout = rhs.object().layout()(rhs.labels()); + + if(!has_layout()) m_layout_ = rhs.object().layout().clone(); + + m_layout_->permute_assignment(this_labels, rlayout); + + return *this; +} + +} // namespace tensorwrapper::buffer \ No newline at end of file diff --git a/src/tensorwrapper/buffer/eigen.cpp b/src/tensorwrapper/buffer/eigen.cpp index c5d90da4..81b726ff 100644 --- a/src/tensorwrapper/buffer/eigen.cpp +++ b/src/tensorwrapper/buffer/eigen.cpp @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - +#include "eigen_contraction.hpp" #include #include #include @@ -21,65 +21,211 @@ namespace tensorwrapper::buffer { -using dummy_indices_type = dsl::DummyIndices; - #define TPARAMS template #define EIGEN Eigen -// TPARAMS -// typename EIGEN::buffer_base_reference EIGEN::addition_assignment_( -// label_type this_labels, const_labeled_buffer_reference rhs) { -// // TODO layouts -// if(layout() != rhs.lhs().layout()) -// throw std::runtime_error("Layouts must be the same (for now)"); - -// dummy_indices_type llabels(this_labels); -// dummy_indices_type rlabels(rhs.rhs()); - -// using allocator_type = allocator::Eigen; -// const auto& rhs_downcasted = allocator_type::rebind(rhs.lhs()); - -// if(llabels != rlabels) { -// auto r_to_l = rlabels.permutation(llabels); -// std::vector r_to_l2(r_to_l.begin(), r_to_l.end()); -// m_tensor_ += rhs_downcasted.value().shuffle(r_to_l2); -// } else { -// m_tensor_ += rhs_downcasted.value(); -// } - -// return *this; -// } - -// TPARAMS -// typename EIGEN::buffer_base_reference EIGEN::permute_assignment_( -// label_type this_labels, const_labeled_buffer_reference rhs) { -// dummy_indices_type llabels(this_labels); -// dummy_indices_type rlabels(rhs.rhs()); - -// using allocator_type = allocator::Eigen; -// const auto& rhs_downcasted = allocator_type::rebind(rhs.lhs()); - -// if(llabels != rlabels) { // We need to permute rhs before assignment -// auto r_to_l = rlabels.permutation(llabels); -// // Eigen wants int objects -// std::vector r_to_l2(r_to_l.begin(), r_to_l.end()); -// m_tensor_ = rhs_downcasted.value().shuffle(r_to_l2); -// } else { -// m_tensor_ = rhs_downcasted.value(); -// } - -// // TODO: permute layout - -// return *this; -// } +using const_labeled_reference = + typename Eigen::const_labeled_reference; +using dsl_reference = typename Eigen::dsl_reference; + +TPARAMS +typename EIGEN::dsl_reference EIGEN::addition_assignment_( + label_type this_labels, const_labeled_reference lhs, + const_labeled_reference rhs) { + BufferBase::addition_assignment_(this_labels, lhs, rhs); + + using allocator_type = allocator::Eigen; + const auto& lhs_downcasted = allocator_type::rebind(lhs.object()); + const auto& rhs_downcasted = allocator_type::rebind(rhs.object()); + const auto& lhs_eigen = lhs_downcasted.value(); + const auto& rhs_eigen = rhs_downcasted.value(); + + const auto& lhs_labels = lhs.labels(); + const auto& rhs_labels = rhs.labels(); + + bool this_matches_lhs = (this_labels == lhs_labels); + bool this_matches_rhs = (this_labels == rhs_labels); + bool lhs_matches_rhs = (lhs_labels == rhs_labels); + + auto get_permutation = [](auto&& lhs_, auto&& rhs_) { + auto l_to_r = lhs_.permutation(rhs_); + return std::vector(l_to_r.begin(), l_to_r.end()); + }; + + auto r_to_l = get_permutation(rhs_labels, lhs_labels); + auto l_to_r = get_permutation(lhs_labels, rhs_labels); + auto this_to_r = get_permutation(this_labels, rhs_labels); + + if(this_matches_lhs && this_matches_rhs) { // No permutations + m_tensor_ = lhs_eigen + rhs_eigen; + } else if(this_matches_lhs) { // RHS needs permuted + m_tensor_ = lhs_eigen + rhs_eigen.shuffle(r_to_l); + } else if(this_matches_rhs) { // LHS needs permuted + m_tensor_ = lhs_eigen.shuffle(l_to_r) + rhs_eigen; + } else if(lhs_matches_rhs) { // This needs permuted + m_tensor_ = (lhs_eigen + rhs_eigen).shuffle(this_to_r); + } else { // Everything needs permuted + m_tensor_ = (lhs_eigen.shuffle(l_to_r) + rhs_eigen).shuffle(this_to_r); + } + + return *this; +} + +TPARAMS +typename EIGEN::dsl_reference 
EIGEN::subtraction_assignment_( + label_type this_labels, const_labeled_reference lhs, + const_labeled_reference rhs) { + BufferBase::subtraction_assignment_(this_labels, lhs, rhs); + + using allocator_type = allocator::Eigen; + const auto& lhs_downcasted = allocator_type::rebind(lhs.object()); + const auto& rhs_downcasted = allocator_type::rebind(rhs.object()); + const auto& lhs_eigen = lhs_downcasted.value(); + const auto& rhs_eigen = rhs_downcasted.value(); + + const auto& lhs_labels = lhs.labels(); + const auto& rhs_labels = rhs.labels(); + + bool this_matches_lhs = (this_labels == lhs_labels); + bool this_matches_rhs = (this_labels == rhs_labels); + bool lhs_matches_rhs = (lhs_labels == rhs_labels); + + auto get_permutation = [](auto&& lhs_, auto&& rhs_) { + auto l_to_r = lhs_.permutation(rhs_); + return std::vector(l_to_r.begin(), l_to_r.end()); + }; + + auto r_to_l = get_permutation(rhs_labels, lhs_labels); + auto l_to_r = get_permutation(lhs_labels, rhs_labels); + auto this_to_r = get_permutation(this_labels, rhs_labels); + + if(this_matches_lhs && this_matches_rhs) { // No permutations + m_tensor_ = lhs_eigen - rhs_eigen; + } else if(this_matches_lhs) { // RHS needs permuted + m_tensor_ = lhs_eigen - rhs_eigen.shuffle(r_to_l); + } else if(this_matches_rhs) { // LHS needs permuted + m_tensor_ = lhs_eigen.shuffle(l_to_r) - rhs_eigen; + } else if(lhs_matches_rhs) { // This needs permuted + m_tensor_ = (lhs_eigen - rhs_eigen).shuffle(this_to_r); + } else { // Everything needs permuted + m_tensor_ = (lhs_eigen.shuffle(l_to_r) - rhs_eigen).shuffle(this_to_r); + } + + return *this; +} + +TPARAMS +typename EIGEN::dsl_reference EIGEN::multiplication_assignment_( + label_type this_labels, const_labeled_reference lhs, + const_labeled_reference rhs) { + BufferBase::multiplication_assignment_(this_labels, lhs, rhs); + + if(this_labels.is_hadamard_product(lhs.labels(), rhs.labels())) + return hadamard_(this_labels, lhs, rhs); + else if(this_labels.is_contraction(lhs.labels(), rhs.labels())) + return contraction_(this_labels, lhs, rhs); + else + throw std::runtime_error("Mixed products NYI"); +} TPARAMS -typename EIGEN::string_type EIGEN::to_string_() const { +typename EIGEN::dsl_reference EIGEN::permute_assignment_( + label_type this_labels, const_labeled_reference rhs) { + BufferBase::permute_assignment_(this_labels, rhs); + + using allocator_type = allocator::Eigen; + const auto& rhs_downcasted = allocator_type::rebind(rhs.object()); + + const auto& rlabels = rhs.labels(); + + if(this_labels != rlabels) { // We need to permute rhs before assignment + auto r_to_l = rhs.labels().permutation(this_labels); + // Eigen wants int objects + std::vector r_to_l2(r_to_l.begin(), r_to_l.end()); + m_tensor_ = rhs_downcasted.value().shuffle(r_to_l2); + } else { + m_tensor_ = rhs_downcasted.value(); + } + + return *this; +} + +TPARAMS +typename detail_::PolymorphicBase::string_type EIGEN::to_string_() + const { std::stringstream ss; ss << m_tensor_; return ss.str(); } +TPARAMS +typename EIGEN::dsl_reference EIGEN::hadamard_(label_type this_labels, + const_labeled_reference lhs, + const_labeled_reference rhs) { + using allocator_type = allocator::Eigen; + const auto& lhs_downcasted = allocator_type::rebind(lhs.object()); + const auto& rhs_downcasted = allocator_type::rebind(rhs.object()); + const auto& lhs_eigen = lhs_downcasted.value(); + const auto& rhs_eigen = rhs_downcasted.value(); + + const auto& lhs_labels = lhs.labels(); + const auto& rhs_labels = rhs.labels(); + + bool this_matches_lhs = 
(this_labels == lhs_labels); + bool this_matches_rhs = (this_labels == rhs_labels); + bool lhs_matches_rhs = (lhs_labels == rhs_labels); + + auto get_permutation = [](auto&& lhs_, auto&& rhs_) { + auto l_to_r = lhs_.permutation(rhs_); + return std::vector(l_to_r.begin(), l_to_r.end()); + }; + + auto r_to_l = get_permutation(rhs_labels, lhs_labels); + auto l_to_r = get_permutation(lhs_labels, rhs_labels); + auto this_to_r = get_permutation(this_labels, rhs_labels); + + if(this_matches_lhs && this_matches_rhs) { // No permutations + m_tensor_ = lhs_eigen * rhs_eigen; + } else if(this_matches_lhs) { // RHS needs permuted + m_tensor_ = lhs_eigen * rhs_eigen.shuffle(r_to_l); + } else if(this_matches_rhs) { // LHS needs permuted + m_tensor_ = lhs_eigen.shuffle(l_to_r) * rhs_eigen; + } else if(lhs_matches_rhs) { // This needs permuted + m_tensor_ = (lhs_eigen * rhs_eigen).shuffle(this_to_r); + } else { // Everything needs permuted + m_tensor_ = (lhs_eigen.shuffle(l_to_r) * rhs_eigen).shuffle(this_to_r); + } + + return *this; +} + +TPARAMS typename EIGEN::dsl_reference EIGEN::contraction_( + label_type this_labels, const_labeled_reference lhs, + const_labeled_reference rhs) { + const auto& llabels = lhs.labels(); + const auto& lobject = lhs.object(); + const auto& rlabels = rhs.labels(); + const auto& robject = rhs.object(); + + // N.b. this is a pure contraction, so common indices are summed over + auto common = llabels.intersection(rlabels); + + // -- This block converts string indices to mode offsets + using rank_type = unsigned short; + using pair_type = std::pair; + std::vector modes; + auto rank = common.size(); + for(decltype(rank) i = 0; i < rank; ++i) { + const auto& index_i = common.at(i); + // N.b. pure contraction, so there are no repeats within a tensor's label + auto lindex = llabels.find(index_i)[0]; + auto rindex = rlabels.find(index_i)[0]; + modes.push_back(pair_type(lindex, rindex)); + } + + return eigen_contraction(*this, lobject, robject, modes); +} + #undef EIGEN #undef TPARAMS diff --git a/src/tensorwrapper/buffer/eigen_contraction.cpp b/src/tensorwrapper/buffer/eigen_contraction.cpp new file mode 100644 index 00000000..23b729cf --- /dev/null +++ b/src/tensorwrapper/buffer/eigen_contraction.cpp @@ -0,0 +1,137 @@ +/* + * Copyright 2025 NWChemEx-Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "eigen_contraction.hpp" +#include +#include + +namespace tensorwrapper::buffer { + +using rank_type = unsigned short; +using base_reference = BufferBase::base_reference; +using const_base_reference = BufferBase::const_base_reference; +using return_type = BufferBase::dsl_reference; +using pair_type = std::pair; +using vector_type = std::vector; + +// N.b. will create about max_rank**3 instantiations of eigen_contraction +static constexpr unsigned int max_rank = 6; + +/// Wraps the contraction once we've worked out all of the template params.
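+///
+/// For illustration (hypothetical shapes): for two rank-2 tensors, passing
+/// sum_modes == {{1, 1}} contracts mode 1 of lhs with mode 1 of rhs, i.e.,
+/// it computes rv(i,j) = sum_k lhs(i,k) * rhs(j,k) by forwarding to
+/// lhs.value().contract(rhs.value(), sum_modes).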
+template +return_type eigen_contraction(RVType&& rv, LHSType&& lhs, RHSType&& rhs, + ModesType&& sum_modes) { + rv.value() = lhs.value().contract(rhs.value(), sum_modes); + return rv; +} + +/// This function converts @p sum_modes to a statically sized array +template +return_type n_contraction_modes(buffer::Eigen& rv, + const buffer::Eigen& lhs, + const buffer::Eigen& rhs, + const vector_type& sum_modes) { + // Can't contract more modes than a tensor has (this is recursion end point) + constexpr auto max_n = std::min({LHSRank, RHSRank}); + if constexpr(N == max_n + 1) { + throw std::runtime_error("Contracted more modes than a tensor has!!?"); + } else { + if(N == sum_modes.size()) { + std::array temp; + for(std::size_t i = 0; i < temp.size(); ++i) temp[i] = sum_modes[i]; + return eigen_contraction(rv, lhs, rhs, std::move(temp)); + } else { + return n_contraction_modes(rv, lhs, rhs, sum_modes); + } + } +} + +/// This function works out the rank of RHS +template +return_type rhs_rank(buffer::Eigen& rv, + const buffer::Eigen& lhs, + const_base_reference rhs, const vector_type& sum_modes) { + if constexpr(RHSRank == max_rank + 1) { + throw std::runtime_error("RHS has rank > max_rank"); + } else { + if(RHSRank == rhs.rank()) { + using allocator_type = allocator::Eigen; + const auto& rhs_eigen = allocator_type::rebind(rhs); + return n_contraction_modes(rv, lhs, rhs_eigen, sum_modes); + } else { + return rhs_rank(rv, lhs, rhs, sum_modes); + } + } +} + +/// This function works out the rank of LHS +template +return_type lhs_rank(buffer::Eigen& rv, + const_base_reference lhs, const_base_reference rhs, + const vector_type& sum_modes) { + if constexpr(LHSRank == max_rank + 1) { + throw std::runtime_error("LHS has rank > max_rank"); + } else { + if(LHSRank == lhs.rank()) { + using allocator_type = allocator::Eigen; + const auto& lhs_eigen = allocator_type::rebind(lhs); + return rhs_rank(rv, lhs_eigen, rhs, sum_modes); + } else { + return lhs_rank(rv, lhs, rhs, sum_modes); + } + } +} + +/// This function works out the rank of rv +template +return_type eigen_contraction_(base_reference rv, const_base_reference lhs, + const_base_reference rhs, + const vector_type& sum_modes) { + if constexpr(RVRank == max_rank + 1) { + throw std::runtime_error("Return has rank > max_rank"); + } else { + if(RVRank == rv.rank()) { + using allocator_type = allocator::Eigen; + auto& rv_eigen = allocator_type::rebind(rv); + return lhs_rank(rv_eigen, lhs, rhs, sum_modes); + } else { + constexpr auto RVp1 = RVRank + 1; + return eigen_contraction_(rv, lhs, rhs, sum_modes); + } + } +} + +template +return_type eigen_contraction(base_reference rv, const_base_reference lhs, + const_base_reference rhs, + const vector_type& sum_modes) { + return eigen_contraction_(rv, lhs, rhs, sum_modes); +} + +#define EIGEN_CONTRACTION(FLOAT_TYPE) \ + template return_type eigen_contraction( \ + base_reference, const_base_reference, const_base_reference, \ + const vector_type&) + +EIGEN_CONTRACTION(float); +EIGEN_CONTRACTION(double); + +#undef EIGEN_CONTRACTION + +} // namespace tensorwrapper::buffer \ No newline at end of file diff --git a/src/tensorwrapper/buffer/eigen_contraction.hpp b/src/tensorwrapper/buffer/eigen_contraction.hpp new file mode 100644 index 00000000..5160cab3 --- /dev/null +++ b/src/tensorwrapper/buffer/eigen_contraction.hpp @@ -0,0 +1,41 @@ +/* + * Copyright 2025 NWChemEx-Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once +#include + +namespace tensorwrapper::buffer { + +/** @brief Relatively template-free API for Eigen tensor contraction. + * + * Eigen's tensor library relies on a heavy amount of template meta-programming + * to implement contraction. TensorWrapper strives to do things at runtime. + * Ultimately, to have it both ways we need to create contraction dispatch + * instantiations for every combination of template parameters that Eigen may + * end up seeing; that's what the functions in this header do. + * + * The entry point into this infrastructure is currently the eigen_contraction + * function, which kicks the process off by working out the rank of the tensor + * which will hold the result. + */ +template +BufferBase::dsl_reference eigen_contraction( + BufferBase::base_reference rv, BufferBase::const_base_reference lhs, + BufferBase::const_base_reference rhs, + const std::vector>& sum_modes); + +} // namespace tensorwrapper::buffer \ No newline at end of file diff --git a/src/tensorwrapper/layout/layout_base.cpp b/src/tensorwrapper/layout/layout_base.cpp new file mode 100644 index 00000000..26fa41c0 --- /dev/null +++ b/src/tensorwrapper/layout/layout_base.cpp @@ -0,0 +1,85 @@ +/* + * Copyright 2025 NWChemEx-Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#include + +namespace tensorwrapper::layout { + +using dsl_reference = typename LayoutBase::dsl_reference; + +dsl_reference LayoutBase::addition_assignment_(label_type this_labels, + const_labeled_reference lhs, + const_labeled_reference rhs) { + const auto& lobject = lhs.object(); + const auto& llabels = lhs.labels(); + const auto& robject = rhs.object(); + const auto& rlabels = rhs.labels(); + + m_shape_->addition_assignment(this_labels, lobject.shape()(llabels), + robject.shape()(rlabels)); + m_sparsity_->addition_assignment(this_labels, lobject.sparsity()(llabels), + robject.sparsity()(rlabels)); + m_symmetry_->addition_assignment(this_labels, lobject.symmetry()(llabels), + robject.symmetry()(rlabels)); + return *this; +} + +dsl_reference LayoutBase::subtraction_assignment_(label_type this_labels, + const_labeled_reference lhs, + const_labeled_reference rhs) { + const auto& lobject = lhs.object(); + const auto& llabels = lhs.labels(); + const auto& robject = rhs.object(); + const auto& rlabels = rhs.labels(); + + m_shape_->subtraction_assignment(this_labels, lobject.shape()(llabels), + robject.shape()(rlabels)); + m_sparsity_->subtraction_assignment( + this_labels, lobject.sparsity()(llabels), robject.sparsity()(rlabels)); + m_symmetry_->subtraction_assignment( + this_labels, lobject.symmetry()(llabels), robject.symmetry()(rlabels)); + return *this; +} + +dsl_reference LayoutBase::multiplication_assignment_( + label_type this_labels, const_labeled_reference lhs, + const_labeled_reference rhs) { + const auto& lobject = lhs.object(); + const auto& llabels = lhs.labels(); + const auto& robject = rhs.object(); + const auto& rlabels = rhs.labels(); + + m_shape_->multiplication_assignment(this_labels, lobject.shape()(llabels), + robject.shape()(rlabels)); + m_sparsity_->multiplication_assignment( + this_labels, lobject.sparsity()(llabels), robject.sparsity()(rlabels)); + m_symmetry_->multiplication_assignment( + this_labels, lobject.symmetry()(llabels), robject.symmetry()(rlabels)); + return *this; +} + +dsl_reference LayoutBase::permute_assignment_(label_type this_labels, + const_labeled_reference rhs) { + const auto& robject = rhs.object(); + const auto& rlabels = rhs.labels(); + + m_shape_->permute_assignment(this_labels, robject.shape()(rlabels)); + m_sparsity_->permute_assignment(this_labels, robject.sparsity()(rlabels)); + m_symmetry_->permute_assignment(this_labels, robject.symmetry()(rlabels)); + return *this; +} + +} // namespace tensorwrapper::layout \ No newline at end of file diff --git a/src/tensorwrapper/tensor/detail_/tensor_factory.cpp b/src/tensorwrapper/tensor/detail_/tensor_factory.cpp index c415b13b..5ec9efc7 100644 --- a/src/tensorwrapper/tensor/detail_/tensor_factory.cpp +++ b/src/tensorwrapper/tensor/detail_/tensor_factory.cpp @@ -39,15 +39,15 @@ using buffer_pointer = typename pimpl_type::buffer_pointer; // ----------------------------------------------------------------------------- symmetry_pointer TensorFactory::default_logical_symmetry( - const_shape_reference) { + const_shape_reference shape) { // Symmetry is at present NOT polymorphic - return std::make_unique(); + return std::make_unique(shape.rank()); } sparsity_pointer TensorFactory::default_logical_sparsity( - const_shape_reference, const_symmetry_reference) { + const_shape_reference shape, const_symmetry_reference) { // Sparsity is at present NOT polymorphic - return std::make_unique(); + return std::make_unique(shape.rank()); } logical_layout_pointer TensorFactory::default_logical_layout( diff 
--git a/tests/cxx/unit_tests/tensorwrapper/allocator/eigen.cpp b/tests/cxx/unit_tests/tensorwrapper/allocator/eigen.cpp index ef83631c..dedd8e79 100644 --- a/tests/cxx/unit_tests/tensorwrapper/allocator/eigen.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/allocator/eigen.cpp @@ -14,7 +14,7 @@ * limitations under the License. */ -#include "../helpers.hpp" +#include "../testing/testing.hpp" #include #include #include @@ -26,10 +26,6 @@ TEMPLATE_TEST_CASE("EigenAllocator", "", float, double) { using scalar_alloc_type = allocator::Eigen; using vector_alloc_type = allocator::Eigen; using matrix_alloc_type = allocator::Eigen; - using layout_type = typename scalar_alloc_type::eigen_layout_type; - using shape_type = typename shape::Smooth; - using symmetry_type = typename layout_type::symmetry_type; - using sparsity_type = typename layout_type::sparsity_type; using eigen_buffer_scalar = typename scalar_alloc_type::eigen_buffer_type; using eigen_buffer_vector = typename vector_alloc_type::eigen_buffer_type; using eigen_buffer_matrix = typename matrix_alloc_type::eigen_buffer_type; @@ -39,11 +35,10 @@ TEMPLATE_TEST_CASE("EigenAllocator", "", float, double) { parallelzone::runtime::RuntimeView rv; - symmetry_type g; - sparsity_type sparsity; - layout_type scalar_layout(shape_type{}, g, sparsity); - layout_type vector_layout(shape_type{2}, g, sparsity); - layout_type matrix_layout(shape_type{2, 2}, g, sparsity); + auto scalar_layout = testing::scalar_physical(); + auto vector_layout = testing::vector_physical(2); + auto matrix_layout = testing::matrix_physical(2, 2); + using layout_type = decltype(scalar_layout); scalar_alloc_type scalar_alloc(rv); vector_alloc_type vector_alloc(rv); diff --git a/tests/cxx/unit_tests/tensorwrapper/buffer/buffer_base.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/buffer_base.cpp index 74e415f6..a27a24c3 100644 --- a/tests/cxx/unit_tests/tensorwrapper/buffer/buffer_base.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/buffer/buffer_base.cpp @@ -14,7 +14,7 @@ * limitations under the License. */ -#include "../helpers.hpp" +#include "../testing/testing.hpp" #include #include #include @@ -45,10 +45,8 @@ TEST_CASE("BufferBase") { eigen_vector(0) = 1.0; eigen_vector(1) = 2.0; - symmetry::Group g; - sparsity::Pattern p; - layout::Physical scalar_layout(shape::Smooth{}, g, p); - layout::Physical vector_layout(shape::Smooth{2}, g, p); + auto scalar_layout = testing::scalar_physical(); + auto vector_layout = testing::vector_physical(2); vector_buffer defaulted; scalar_buffer scalar(eigen_scalar, scalar_layout); diff --git a/tests/cxx/unit_tests/tensorwrapper/buffer/eigen.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/eigen.cpp index 73572c8e..1ab164da 100644 --- a/tests/cxx/unit_tests/tensorwrapper/buffer/eigen.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/buffer/eigen.cpp @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "../helpers.hpp" +#include "../testing/testing.hpp" #include #include #include @@ -25,12 +25,11 @@ using namespace testing; namespace { template -bool compare_eigen(const LHSType& lhs, const RHSType& rhs) { +void compare_eigen(const LHSType& lhs, const RHSType& rhs) { using r_type = Eigen::Tensor; auto d = lhs - rhs; r_type r = d.sum(); - - return (r() == 0.0); + REQUIRE_THAT(r() + 1.0, Catch::Matchers::WithinAbs(1.0, 1E-6)); } } // namespace @@ -40,41 +39,50 @@ TEMPLATE_TEST_CASE("Eigen", "", float, double) { using scalar_buffer = buffer::Eigen; using vector_buffer = buffer::Eigen; using matrix_buffer = buffer::Eigen; + using tensor_buffer = buffer::Eigen; typename scalar_buffer::data_type eigen_scalar; - eigen_scalar() = 1.0; + eigen_scalar() = 10.0; typename vector_buffer::data_type eigen_vector(2); - eigen_vector(0) = 1.0; - eigen_vector(1) = 2.0; + eigen_vector(0) = 10.0; + eigen_vector(1) = 20.0; typename matrix_buffer::data_type eigen_matrix(2, 3); - eigen_matrix(0, 0) = 1.0; - eigen_matrix(0, 1) = 2.0; - eigen_matrix(0, 2) = 3.0; - eigen_matrix(1, 0) = 4.0; - eigen_matrix(1, 1) = 5.0; - eigen_matrix(1, 2) = 6.0; - - symmetry::Group g; - sparsity::Pattern p; - layout::Physical scalar_layout(shape::Smooth{}, g, p); - layout::Physical vector_layout(shape::Smooth{2}, g, p); - layout::Physical matrix_layout(shape::Smooth{2, 3}, g, p); + eigen_matrix(0, 0) = 10.0; + eigen_matrix(0, 1) = 20.0; + eigen_matrix(0, 2) = 30.0; + eigen_matrix(1, 0) = 40.0; + eigen_matrix(1, 1) = 50.0; + eigen_matrix(1, 2) = 60.0; + + typename tensor_buffer::data_type eigen_tensor(1, 2, 3); + eigen_tensor(0, 0, 0) = 10.0; + eigen_tensor(0, 0, 1) = 20.0; + eigen_tensor(0, 0, 2) = 30.0; + eigen_tensor(0, 1, 0) = 40.0; + eigen_tensor(0, 1, 1) = 50.0; + eigen_tensor(0, 1, 2) = 60.0; + + auto scalar_layout = scalar_physical(); + auto vector_layout = vector_physical(2); + auto matrix_layout = matrix_physical(2, 3); + auto tensor_layout = tensor_physical(1, 2, 3); scalar_buffer scalar(eigen_scalar, scalar_layout); vector_buffer vector(eigen_vector, vector_layout); matrix_buffer matrix(eigen_matrix, matrix_layout); + tensor_buffer tensor(eigen_tensor, tensor_layout); SECTION("ctors, assignment") { SECTION("value ctor") { - REQUIRE(compare_eigen(scalar.value(), eigen_scalar)); + compare_eigen(scalar.value(), eigen_scalar); REQUIRE(scalar.layout().are_equal(scalar_layout)); - REQUIRE(compare_eigen(vector.value(), eigen_vector)); + compare_eigen(vector.value(), eigen_vector); REQUIRE(vector.layout().are_equal(vector_layout)); - REQUIRE(compare_eigen(matrix.value(), eigen_matrix)); + compare_eigen(matrix.value(), eigen_matrix); REQUIRE(matrix.layout().are_equal(matrix_layout)); } @@ -82,18 +90,18 @@ TEMPLATE_TEST_CASE("Eigen", "", float, double) { } SECTION("value()") { - REQUIRE(compare_eigen(scalar.value(), eigen_scalar)); - REQUIRE(compare_eigen(vector.value(), eigen_vector)); - REQUIRE(compare_eigen(matrix.value(), eigen_matrix)); + compare_eigen(scalar.value(), eigen_scalar); + compare_eigen(vector.value(), eigen_vector); + compare_eigen(matrix.value(), eigen_matrix); } SECTION("value() const") { const auto& cscalar = scalar; const auto& cvector = vector; const auto& cmatrix = matrix; - REQUIRE(compare_eigen(cscalar.value(), eigen_scalar)); - REQUIRE(compare_eigen(cvector.value(), eigen_vector)); - REQUIRE(compare_eigen(cmatrix.value(), eigen_matrix)); + compare_eigen(cscalar.value(), eigen_scalar); + compare_eigen(cvector.value(), eigen_vector); + compare_eigen(cmatrix.value(), eigen_matrix); } 
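+
+        // N.b. in the DSL sections below an expression like matrix("i,j")
+        // pairs the buffer with dummy indices; the *_assignment methods
+        // compare the three label sets to decide which operands (if any)
+        // must be shuffled before the values are combined.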
SECTION("operator==") { @@ -103,7 +111,7 @@ TEMPLATE_TEST_CASE("Eigen", "", float, double) { // tensor same layout. typename scalar_buffer::data_type eigen_scalar2; - eigen_scalar2() = 1.0; + eigen_scalar2() = 10.0; // Everything the same REQUIRE(scalar == scalar_buffer(eigen_scalar2, scalar_layout)); @@ -130,7 +138,7 @@ TEMPLATE_TEST_CASE("Eigen", "", float, double) { // This just negates operator== so spot-checking is okay typename scalar_buffer::data_type eigen_scalar2; - eigen_scalar2() = 1.0; + eigen_scalar2() = 10.0; // Everything the same scalar_buffer scalar2(eigen_scalar2, scalar_layout); @@ -160,5 +168,466 @@ TEMPLATE_TEST_CASE("Eigen", "", float, double) { REQUIRE_FALSE(pmatrix.are_equal(scalar2)); } } + + SECTION("addition_assignment_") { + SECTION("scalar") { + scalar_buffer scalar2(eigen_scalar, scalar_layout); + scalar2.value()() = 42.0; + + auto s = scalar(""); + auto pscalar2 = &(scalar2.addition_assignment("", s, s)); + + scalar_buffer scalar_corr(eigen_scalar, scalar_layout); + scalar_corr.value()() = 20.0; + REQUIRE(pscalar2 == &scalar2); + REQUIRE(scalar2 == scalar_corr); + } + + SECTION("vector") { + auto vector2 = testing::eigen_vector(); + + auto vi = vector("i"); + auto pvector2 = &(vector2.addition_assignment("i", vi, vi)); + + vector_buffer vector_corr(eigen_vector, vector_layout); + vector_corr.value()(0) = 20.0; + vector_corr.value()(1) = 40.0; + + REQUIRE(pvector2 == &vector2); + REQUIRE(vector2 == vector_corr); + } + + SECTION("matrix : no permutation") { + auto matrix2 = testing::eigen_matrix(); + + auto mij = matrix("i,j"); + auto pmatrix2 = &(matrix2.addition_assignment("i,j", mij, mij)); + + matrix_buffer matrix_corr(eigen_matrix, matrix_layout); + + matrix_corr.value()(0, 0) = 20.0; + matrix_corr.value()(0, 1) = 40.0; + matrix_corr.value()(0, 2) = 60.0; + matrix_corr.value()(1, 0) = 80.0; + matrix_corr.value()(1, 1) = 100.0; + matrix_corr.value()(1, 2) = 120.0; + + REQUIRE(pmatrix2 == &matrix2); + REQUIRE(matrix2 == matrix_corr); + } + + SECTION("matrix: permutations") { + auto matrix2 = testing::eigen_matrix(); + auto l = testing::matrix_physical(3, 2); + std::array p10{1, 0}; + auto eigen_matrix_t = eigen_matrix.shuffle(p10); + matrix_buffer matrix1(eigen_matrix_t, l); + + auto mij = matrix("i,j"); + auto mji = matrix1("j,i"); + + matrix_buffer matrix_corr(eigen_matrix, matrix_layout); + + matrix_corr.value()(0, 0) = 20.0; + matrix_corr.value()(0, 1) = 40.0; + matrix_corr.value()(0, 2) = 60.0; + matrix_corr.value()(1, 0) = 80.0; + matrix_corr.value()(1, 1) = 100.0; + matrix_corr.value()(1, 2) = 120.0; + + SECTION("permute this") { + matrix2.addition_assignment("j,i", mij, mij); + + matrix_buffer corr(eigen_matrix_t, l); + corr.value()(0, 0) = 20.0; + corr.value()(0, 1) = 80.0; + corr.value()(1, 0) = 40.0; + corr.value()(1, 1) = 100.0; + corr.value()(2, 0) = 60.0; + corr.value()(2, 1) = 120.0; + + REQUIRE(matrix2 == corr); + } + + SECTION("permute LHS") { + matrix2.addition_assignment("i,j", mji, mij); + REQUIRE(matrix2 == matrix_corr); + } + + SECTION("permute RHS") { + matrix2.addition_assignment("i,j", mij, mji); + REQUIRE(matrix2 == matrix_corr); + } + } + + SECTION("tensor (must permute all)") { + auto tensor2 = testing::eigen_tensor3(); + + std::array p102{1, 0, 2}; + auto l102 = testing::tensor_physical(2, 1, 3); + tensor_buffer tensor102(eigen_tensor.shuffle(p102), l102); + + auto tijk = tensor("i,j,k"); + auto tjik = tensor102("j,i,k"); + + tensor2.addition_assignment("k,j,i", tijk, tjik); + + std::array p210{2, 1, 0}; + auto l210 = 
testing::tensor_physical(3, 2, 1); + tensor_buffer corr(eigen_tensor.shuffle(p210), l210); + corr.value()(0, 0, 0) = 20.0; + corr.value()(0, 1, 0) = 80.0; + corr.value()(1, 0, 0) = 40.0; + corr.value()(1, 1, 0) = 100.0; + corr.value()(2, 0, 0) = 60.0; + corr.value()(2, 1, 0) = 120.0; + REQUIRE(tensor2 == corr); + } + } + + SECTION("subtraction_assignment_") { + SECTION("scalar") { + scalar_buffer scalar2(eigen_scalar, scalar_layout); + scalar2.value()() = 42.0; + + auto s = scalar(""); + auto pscalar2 = &(scalar2.subtraction_assignment("", s, s)); + + scalar_buffer scalar_corr(eigen_scalar, scalar_layout); + scalar_corr.value()() = 0.0; + REQUIRE(pscalar2 == &scalar2); + REQUIRE(scalar2 == scalar_corr); + } + + SECTION("vector") { + auto vector2 = testing::eigen_vector(); + + auto vi = vector("i"); + auto pvector2 = &(vector2.subtraction_assignment("i", vi, vi)); + + vector_buffer vector_corr(eigen_vector, vector_layout); + vector_corr.value()(0) = 0.0; + vector_corr.value()(1) = 0.0; + + REQUIRE(pvector2 == &vector2); + REQUIRE(vector2 == vector_corr); + } + + SECTION("matrix : no permutation") { + auto matrix2 = testing::eigen_matrix(); + + auto mij = matrix("i,j"); + auto pmatrix2 = + &(matrix2.subtraction_assignment("i,j", mij, mij)); + + matrix_buffer matrix_corr(eigen_matrix, matrix_layout); + + matrix_corr.value()(0, 0) = 0.0; + matrix_corr.value()(0, 1) = 0.0; + matrix_corr.value()(0, 2) = 0.0; + matrix_corr.value()(1, 0) = 0.0; + matrix_corr.value()(1, 1) = 0.0; + matrix_corr.value()(1, 2) = 0.0; + + REQUIRE(pmatrix2 == &matrix2); + REQUIRE(matrix2 == matrix_corr); + } + + SECTION("matrix: permutations") { + auto matrix2 = testing::eigen_matrix(); + auto l = testing::matrix_physical(3, 2); + std::array p10{1, 0}; + auto eigen_matrix_t = eigen_matrix.shuffle(p10); + matrix_buffer matrix1(eigen_matrix_t, l); + + auto mij = matrix("i,j"); + auto mji = matrix1("j,i"); + + matrix_buffer matrix_corr(eigen_matrix, matrix_layout); + + matrix_corr.value()(0, 0) = 0.0; + matrix_corr.value()(0, 1) = 0.0; + matrix_corr.value()(0, 2) = 0.0; + matrix_corr.value()(1, 0) = 0.0; + matrix_corr.value()(1, 1) = 0.0; + matrix_corr.value()(1, 2) = 0.0; + + SECTION("permute this") { + matrix2.subtraction_assignment("j,i", mij, mij); + + matrix_buffer corr(eigen_matrix_t, l); + corr.value()(0, 0) = 0.0; + corr.value()(0, 1) = 0.0; + corr.value()(1, 0) = 0.0; + corr.value()(1, 1) = 0.0; + corr.value()(2, 0) = 0.0; + corr.value()(2, 1) = 0.0; + + REQUIRE(matrix2 == corr); + } + + SECTION("permute LHS") { + matrix2.subtraction_assignment("i,j", mji, mij); + REQUIRE(matrix2 == matrix_corr); + } + + SECTION("permute RHS") { + matrix2.subtraction_assignment("i,j", mij, mji); + REQUIRE(matrix2 == matrix_corr); + } + } + + SECTION("tensor (must permute all)") { + auto tensor2 = testing::eigen_tensor3(); + + std::array p102{1, 0, 2}; + auto l102 = testing::tensor_physical(2, 1, 3); + tensor_buffer tensor102(eigen_tensor.shuffle(p102), l102); + + auto tijk = tensor("i,j,k"); + auto tjik = tensor102("j,i,k"); + + tensor2.subtraction_assignment("k,j,i", tijk, tjik); + + std::array p210{2, 1, 0}; + auto l210 = testing::tensor_physical(3, 2, 1); + tensor_buffer corr(eigen_tensor.shuffle(p210), l210); + corr.value()(0, 0, 0) = 0.0; + corr.value()(0, 1, 0) = 0.0; + corr.value()(1, 0, 0) = 0.0; + corr.value()(1, 1, 0) = 0.0; + corr.value()(2, 0, 0) = 0.0; + corr.value()(2, 1, 0) = 0.0; + REQUIRE(tensor2 == corr); + } + } + + SECTION("multiplication_assignment_") { + // Multiplication just dispatches to hadamard_ or 
contraction_ + // Here we test the error-handling + + // Must be either a pure hadamard or a pure contraction + auto matrix2 = testing::eigen_matrix(); + auto mij = matrix("i,j"); + + REQUIRE_THROWS_AS(matrix2.multiplication_assignment("i", mij, mij), + std::runtime_error); + } + + SECTION("permute_assignment_") { + SECTION("scalar") { + auto scalar2 = testing::eigen_scalar(); + scalar2.value()() = 42.0; + + auto s = scalar(""); + auto pscalar2 = &(scalar2.permute_assignment("", s)); + REQUIRE(pscalar2 == &scalar2); + REQUIRE(scalar2 == scalar); + } + + SECTION("vector") { + auto vector2 = testing::eigen_vector(); + + auto vi = vector("i"); + auto pvector2 = &(vector2.permute_assignment("i", vi)); + + REQUIRE(pvector2 == &vector2); + REQUIRE(vector2 == vector); + } + + SECTION("matrix : no permutation") { + auto matrix2 = testing::eigen_matrix(); + + auto mij = matrix("i,j"); + auto pmatrix2 = &(matrix2.permute_assignment("i,j", mij)); + + REQUIRE(pmatrix2 == &matrix2); + REQUIRE(matrix2 == matrix); + } + + SECTION("matrix: permutation") { + auto matrix2 = testing::eigen_matrix(); + auto p = &(matrix2.permute_assignment("j,i", matrix("i,j"))); + + auto corr = testing::eigen_matrix(3, 2); + corr.value()(0, 0) = 10.0; + corr.value()(1, 0) = 20.0; + corr.value()(2, 0) = 30.0; + corr.value()(0, 1) = 40.0; + corr.value()(1, 1) = 50.0; + corr.value()(2, 1) = 60.0; + REQUIRE(p == &matrix2); + compare_eigen(corr.value(), matrix2.value()); + } + } + + SECTION("hadamard_") { + SECTION("scalar") { + scalar_buffer scalar2(eigen_scalar, scalar_layout); + scalar2.value()() = 42.0; + + auto s = scalar(""); + auto pscalar2 = &(scalar2.multiplication_assignment("", s, s)); + + scalar_buffer scalar_corr(eigen_scalar, scalar_layout); + scalar_corr.value()() = 100.0; + REQUIRE(pscalar2 == &scalar2); + REQUIRE(scalar2 == scalar_corr); + } + + SECTION("vector") { + auto vector2 = testing::eigen_vector(); + + auto vi = vector("i"); + auto pvector2 = + &(vector2.multiplication_assignment("i", vi, vi)); + + vector_buffer vector_corr(eigen_vector, vector_layout); + vector_corr.value()(0) = 100.0; + vector_corr.value()(1) = 400.0; + + REQUIRE(pvector2 == &vector2); + REQUIRE(vector2 == vector_corr); + } + + SECTION("matrix : no permutation") { + auto matrix2 = testing::eigen_matrix(); + + auto mij = matrix("i,j"); + auto pmatrix2 = + &(matrix2.multiplication_assignment("i,j", mij, mij)); + + matrix_buffer matrix_corr(eigen_matrix, matrix_layout); + + matrix_corr.value()(0, 0) = 100.0; + matrix_corr.value()(0, 1) = 400.0; + matrix_corr.value()(0, 2) = 900.0; + matrix_corr.value()(1, 0) = 1600.0; + matrix_corr.value()(1, 1) = 2500.0; + matrix_corr.value()(1, 2) = 3600.0; + + REQUIRE(pmatrix2 == &matrix2); + REQUIRE(matrix2 == matrix_corr); + } + + SECTION("matrix: permutations") { + auto matrix2 = testing::eigen_matrix(); + auto l = testing::matrix_physical(3, 2); + std::array p10{1, 0}; + auto eigen_matrix_t = eigen_matrix.shuffle(p10); + matrix_buffer matrix1(eigen_matrix_t, l); + + auto mij = matrix("i,j"); + auto mji = matrix1("j,i"); + + matrix_buffer matrix_corr(eigen_matrix, matrix_layout); + + matrix_corr.value()(0, 0) = 100.0; + matrix_corr.value()(0, 1) = 400.0; + matrix_corr.value()(0, 2) = 900.0; + matrix_corr.value()(1, 0) = 1600.0; + matrix_corr.value()(1, 1) = 2500.0; + matrix_corr.value()(1, 2) = 3600.0; + + SECTION("permute this") { + matrix2.multiplication_assignment("j,i", mij, mij); + + matrix_buffer corr(eigen_matrix_t, l); + corr.value()(0, 0) = 100.0; + corr.value()(0, 1) = 1600.0;
corr.value()(1, 0) = 400.0; + corr.value()(1, 1) = 2500.0; + corr.value()(2, 0) = 900.0; + corr.value()(2, 1) = 3600.0; + + REQUIRE(matrix2 == corr); + } + + SECTION("permute LHS") { + matrix2.multiplication_assignment("i,j", mji, mij); + REQUIRE(matrix2 == matrix_corr); + } + + SECTION("permute RHS") { + matrix2.multiplication_assignment("i,j", mij, mji); + REQUIRE(matrix2 == matrix_corr); + } + } + + SECTION("tensor (must permute all)") { + auto tensor2 = testing::eigen_tensor3(); + + std::array p102{1, 0, 2}; + auto l102 = testing::tensor_physical(2, 1, 3); + tensor_buffer tensor102(eigen_tensor.shuffle(p102), l102); + + auto tijk = tensor("i,j,k"); + auto tjik = tensor102("j,i,k"); + + tensor2.multiplication_assignment("k,j,i", tijk, tjik); + + std::array p210{2, 1, 0}; + auto l210 = testing::tensor_physical(3, 2, 1); + tensor_buffer corr(eigen_tensor.shuffle(p210), l210); + corr.value()(0, 0, 0) = 100.0; + corr.value()(0, 1, 0) = 1600.0; + corr.value()(1, 0, 0) = 400.0; + corr.value()(1, 1, 0) = 2500.0; + corr.value()(2, 0, 0) = 900.0; + corr.value()(2, 1, 0) = 3600.0; + REQUIRE(tensor2 == corr); + } + } + + SECTION("contraction_") { + auto vi = vector("i"); + auto mij = matrix("i,j"); + auto mik = matrix("i,k"); + auto mjk = matrix("j,k"); + + SECTION("vector with vector") { + auto p = &(scalar.multiplication_assignment("", vi, vi)); + + auto scalar_corr = testing::eigen_scalar(); + scalar_corr.value()() = 500.0; // 10*10 + 20*20 + REQUIRE(p == &scalar); + compare_eigen(scalar_corr.value(), scalar.value()); + } + + SECTION("ij,ij->") { + auto p = &(scalar.multiplication_assignment("", mij, mij)); + + auto scalar_corr = testing::eigen_scalar(); + scalar_corr.value()() = 9100.0; // 1400 + 7700 + REQUIRE(p == &scalar); + compare_eigen(scalar_corr.value(), scalar.value()); + } + + SECTION("ki,kj->ij") { + auto buffer2 = testing::eigen_matrix(); + auto p = &(buffer2.multiplication_assignment("i,j", mik, mjk)); + + auto matrix_corr = testing::eigen_matrix(2, 2); + matrix_corr.value()(0, 0) = 1400.0; // 100 + 400 + 900 + matrix_corr.value()(0, 1) = 3200.0; // 400 + 1000 + 1800 + matrix_corr.value()(1, 0) = 3200.0; // 400 + 1000 + 1800 + matrix_corr.value()(1, 1) = 7700.0; // 1600 + 2500 + 3600 + + REQUIRE(p == &buffer2); + compare_eigen(matrix_corr.value(), buffer2.value()); + } + + SECTION("ij,i->j") { + auto buffer1 = testing::eigen_vector(); + auto p = &(buffer1.multiplication_assignment("j", mij, vi)); + + auto vector_corr = testing::eigen_vector(3); + vector_corr.value()(0) = 900.0; // 10(10) + 20(40) + vector_corr.value()(1) = 1200.0; // 10(20) + 20(50) + vector_corr.value()(2) = 1500.0; // 10(30) + 20(60) + REQUIRE(p == &buffer1); + compare_eigen(vector_corr.value(), buffer1.value()); + } + } } } diff --git a/tests/cxx/unit_tests/tensorwrapper/buffer/eigen_contraction.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/eigen_contraction.cpp new file mode 100644 index 00000000..15ce98ca --- /dev/null +++ b/tests/cxx/unit_tests/tensorwrapper/buffer/eigen_contraction.cpp @@ -0,0 +1,78 @@ +/* + * Copyright 2025 NWChemEx-Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "../testing/testing.hpp" +#include +#include + +using namespace tensorwrapper; +using namespace buffer; + +TEMPLATE_TEST_CASE("eigen_contraction", "", float, double) { + using float_t = TestType; + using mode_type = unsigned short; + using pair_type = std::pair; + using mode_array = std::vector; + + // Inputs + auto scalar = testing::eigen_scalar(); + auto vector = testing::eigen_vector(); + auto vector2 = testing::eigen_vector(2); + auto matrix = testing::eigen_matrix(); + + mode_array m00{pair_type{0, 0}}; + mode_array m11{pair_type{1, 1}}; + mode_array m00_11{pair_type{0, 0}, pair_type{1, 1}}; + + auto scalar_corr = testing::eigen_scalar(); + scalar_corr.value()() = 30.0; + + auto vector_corr = testing::eigen_vector(2); + vector_corr.value()(0) = 3.0; + vector_corr.value()(1) = 4.0; + + auto matrix_corr = testing::eigen_matrix(2, 2); + matrix_corr.value()(0, 0) = 10.0; + matrix_corr.value()(0, 1) = 14.0; + matrix_corr.value()(1, 0) = 14.0; + matrix_corr.value()(1, 1) = 20.0; + + SECTION("vector with vector") { + auto& rv = eigen_contraction(scalar, vector, vector, m00); + REQUIRE(&rv == static_cast(&scalar)); + REQUIRE(scalar_corr.are_equal(scalar)); + } + + SECTION("ij,ij->") { + auto& rv = eigen_contraction(scalar, matrix, matrix, m00_11); + REQUIRE(&rv == static_cast(&scalar)); + REQUIRE(scalar_corr.are_equal(scalar)); + } + + SECTION("ki,kj->ij") { + auto buffer = testing::eigen_matrix(); + auto& rv = eigen_contraction(buffer, matrix, matrix, m00); + REQUIRE(&rv == static_cast(&buffer)); + REQUIRE(matrix_corr.are_equal(buffer)); + } + + SECTION("ij,i->j") { + auto buffer = testing::eigen_vector(2); + auto& rv = eigen_contraction(buffer, matrix, vector2, m00); + REQUIRE(&rv == static_cast(&buffer)); + REQUIRE(vector_corr.are_equal(rv)); + } +} \ No newline at end of file diff --git a/tests/cxx/unit_tests/tensorwrapper/detail_/dsl_base.cpp b/tests/cxx/unit_tests/tensorwrapper/detail_/dsl_base.cpp index 3e640339..3f162775 100644 --- a/tests/cxx/unit_tests/tensorwrapper/detail_/dsl_base.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/detail_/dsl_base.cpp @@ -43,7 +43,7 @@ TEMPLATE_LIST_TEST_CASE("DSLBase", "", test_types) { using label_type = typename object_type::label_type; test_types default_values{shape::Smooth{}}; - test_types values{test_tensorwrapper::smooth_matrix()}; + test_types values{testing::smooth_matrix()}; auto default_value = std::get(default_values); auto value = std::get(values); diff --git a/tests/cxx/unit_tests/tensorwrapper/detail_/unique_ptr_utilities.cpp b/tests/cxx/unit_tests/tensorwrapper/detail_/unique_ptr_utilities.cpp index 650bbeea..32115a1c 100644 --- a/tests/cxx/unit_tests/tensorwrapper/detail_/unique_ptr_utilities.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/detail_/unique_ptr_utilities.cpp @@ -14,7 +14,7 @@ * limitations under the License. */ -#include "../helpers.hpp" +#include "../testing/helpers.hpp" #include #include diff --git a/tests/cxx/unit_tests/tensorwrapper/detail_/view_traits.cpp b/tests/cxx/unit_tests/tensorwrapper/detail_/view_traits.cpp index 22699691..a15a7659 100644 --- a/tests/cxx/unit_tests/tensorwrapper/detail_/view_traits.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/detail_/view_traits.cpp @@ -14,7 +14,7 @@ * limitations under the License. 
 */
-#include "../helpers.hpp"
+#include "../testing/helpers.hpp"
 #include
 
 using namespace tensorwrapper::detail_;
diff --git a/tests/cxx/unit_tests/tensorwrapper/dsl/dsl.cpp b/tests/cxx/unit_tests/tensorwrapper/dsl/dsl.cpp
index 8b72b415..f8f3a4ab 100644
--- a/tests/cxx/unit_tests/tensorwrapper/dsl/dsl.cpp
+++ b/tests/cxx/unit_tests/tensorwrapper/dsl/dsl.cpp
@@ -19,17 +19,15 @@
 
 using namespace tensorwrapper;
 
-using test_types =
-  std::tuple;
-
-TEMPLATE_LIST_TEST_CASE("DSL", "", test_types) {
+TEMPLATE_LIST_TEST_CASE("DSL", "", testing::dsl_types) {
     using object_type = TestType;
 
-    test_types scalar_values{test_tensorwrapper::smooth_scalar(),
-                             symmetry::Group(0), sparsity::Pattern(0)};
-    test_types matrix_values{test_tensorwrapper::smooth_matrix(),
-                             symmetry::Group(2), sparsity::Pattern(2)};
+    auto scalar_values = testing::scalar_values();
+    auto vector_values = testing::vector_values();
+    auto matrix_values = testing::matrix_values();
 
+    auto value0 = std::get(scalar_values);
+    auto value1 = std::get(vector_values);
     auto value2 = std::get(matrix_values);
 
     SECTION("assignment") {
@@ -40,32 +38,80 @@ TEMPLATE_LIST_TEST_CASE("DSL", "", test_types) {
 
     SECTION("permutation") {
         value0("j,i") = value2("i,j");
-        object_type corr{};
-        corr.permute_assignment("i,j", value2("j,i"));
-        REQUIRE(corr.are_equal(value0));
+        value1.permute_assignment("i,j", value2("j,i"));
+        REQUIRE(value1.are_equal(value0));
     }
 
     SECTION("addition") {
         value0("i,j") = value2("i,j") + value2("i,j");
-        object_type corr{};
-        corr.addition_assignment("i,j", value2("i,j"), value2("i,j"));
-        REQUIRE(corr.are_equal(value0));
+        value1.addition_assignment("i,j", value2("i,j"), value2("i,j"));
+        REQUIRE(value1.are_equal(value0));
     }
 
     SECTION("subtraction") {
         value0("i,j") = value2("i,j") - value2("i,j");
-        object_type corr{};
-        corr.subtraction_assignment("i,j", value2("i,j"), value2("i,j"));
-        REQUIRE(corr.are_equal(value0));
+        value1.subtraction_assignment("i,j", value2("i,j"), value2("i,j"));
+        REQUIRE(value1.are_equal(value0));
     }
 
     SECTION("multiplication") {
         value0("i,j") = value2("i,j") * value2("i,j");
-        object_type corr{};
-        corr.multiplication_assignment("i,j", value2("i,j"), value2("i,j"));
-        REQUIRE(corr.are_equal(value0));
+        value1.multiplication_assignment("i,j", value2("i,j"), value2("i,j"));
+        REQUIRE(value1.are_equal(value0));
+    }
+}
+
+// Since Eigen buffers are templated on the rank there isn't an easy way to
+// include them in dsl_types
+TEST_CASE("DSL : buffer::Eigen") {
+    auto scalar0 = testing::eigen_scalar();
+    auto scalar1 = testing::eigen_scalar();
+    auto scalar2 = testing::eigen_scalar();
+    auto corr = testing::eigen_scalar();
+
+    scalar0.value()() = 1.0;
+    scalar1.value()() = 2.0;
+    scalar2.value()() = 3.0;
+
+    SECTION("assignment") {
+        SECTION("scalar") {
+            scalar0("") = scalar1("");
+            corr.permute_assignment("", scalar1(""));
+            REQUIRE(corr.are_equal(scalar0));
+        }
+    }
+
+    SECTION("addition") {
+        SECTION("scalar") {
+            scalar0("") = scalar1("") + scalar2("");
+            corr.addition_assignment("", scalar1(""), scalar2(""));
+            REQUIRE(corr.are_equal(scalar0));
+        }
+    }
+
+    SECTION("subtraction") {
+        SECTION("scalar") {
+            scalar0("") = scalar1("") - scalar2("");
+            corr.subtraction_assignment("", scalar1(""), scalar2(""));
+            REQUIRE(corr.are_equal(scalar0));
+        }
+    }
+
+    SECTION("multiplication") {
+        SECTION("scalar") {
+            scalar0("") = scalar1("") * scalar2("");
+            corr.multiplication_assignment("", scalar1(""), scalar2(""));
+            REQUIRE(corr.are_equal(scalar0));
+        }
+    }
+
+    SECTION("scalar_multiplication") {
+        // This should
actually work. Will fix in a future PR + using error_t = std::runtime_error; + + REQUIRE_THROWS_AS(scalar0("") = scalar0("") * 1.0, error_t); } } \ No newline at end of file diff --git a/tests/cxx/unit_tests/tensorwrapper/dsl/dummy_indices.cpp b/tests/cxx/unit_tests/tensorwrapper/dsl/dummy_indices.cpp index efa42ab2..078a54fa 100644 --- a/tests/cxx/unit_tests/tensorwrapper/dsl/dummy_indices.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/dsl/dummy_indices.cpp @@ -86,6 +86,65 @@ TEST_CASE("DummyIndices") { REQUIRE_FALSE(matrix.is_permutation(dummy_indices_type("i,k"))); } + SECTION("is_hadamard_product") { + REQUIRE(scalar.is_hadamard_product(scalar, scalar)); + REQUIRE(vector.is_hadamard_product(vector, vector)); + REQUIRE(matrix.is_hadamard_product(matrix, matrix)); + REQUIRE(tensor.is_hadamard_product(tensor, tensor)); + + // Permutations are fine + REQUIRE(matrix.is_hadamard_product(matrix, dummy_indices_type("j,i"))); + + // *this, LHS, and RHS can't have repeated index + dummy_indices_type ii("i,i"); + REQUIRE_FALSE(ii.is_hadamard_product(matrix, matrix)); + REQUIRE_FALSE(matrix.is_hadamard_product(ii, matrix)); + REQUIRE_FALSE(matrix.is_hadamard_product(matrix, ii)); + + // Can't be contraction + dummy_indices_type ik("i,k"); + dummy_indices_type kj("k,j"); + REQUIRE_FALSE(matrix.is_hadamard_product(ik, kj)); + REQUIRE_FALSE(matrix.is_hadamard_product(kj, ik)); + + // Can't be mixed Hadamard/contraction product + dummy_indices_type ijk("i,j,k"); + dummy_indices_type ikj("i,k,j"); + REQUIRE_FALSE(matrix.is_hadamard_product(ijk, ikj)); + + // Can't be direct product + dummy_indices_type kl("k,l"); + dummy_indices_type ijkl("i,j,k,l"); + REQUIRE_FALSE(ijkl.is_hadamard_product(matrix, kl)); + } + + SECTION("is_contraction") { + dummy_indices_type ik("i,k"); + dummy_indices_type kj("k,j"); + REQUIRE(matrix.is_contraction(ik, kj)); + REQUIRE(matrix.is_contraction(kj, ik)); + + // Hadamard products are not contractions + REQUIRE_FALSE(matrix.is_contraction(matrix, matrix)); + REQUIRE_FALSE(matrix.is_contraction(matrix, dummy_indices_type("j,i"))); + + // *this, LHS, and RHS can't have repeated index + dummy_indices_type ii("i,i"); + REQUIRE_FALSE(ii.is_contraction(matrix, matrix)); + REQUIRE_FALSE(matrix.is_contraction(ii, matrix)); + REQUIRE_FALSE(matrix.is_contraction(matrix, ii)); + + // Can't be mixed Hadamard/contraction product + dummy_indices_type ijk("i,j,k"); + dummy_indices_type ikj("i,k,j"); + REQUIRE_FALSE(matrix.is_contraction(ijk, ikj)); + + // Can't be direct product + dummy_indices_type kl("k,l"); + dummy_indices_type ijkl("i,j,k,l"); + REQUIRE_FALSE(ijkl.is_contraction(matrix, kl)); + } + SECTION("permutation") { using offset_vector = typename dummy_indices_type::offset_vector; @@ -240,4 +299,22 @@ TEST_CASE("DummyIndices") { REQUIRE(matrix.intersection(matrix) == dummy_indices_type("i,j")); REQUIRE(matrix.intersection(matrix2) == dummy_indices_type("")); } + + SECTION("difference") { + dummy_indices_type matrix2("k,l"); + REQUIRE(scalar.difference(scalar) == dummy_indices_type("")); + REQUIRE(scalar.difference(vector) == dummy_indices_type("")); + REQUIRE(scalar.difference(matrix) == dummy_indices_type("")); + REQUIRE(scalar.difference(matrix2) == dummy_indices_type("")); + + REQUIRE(vector.difference(scalar) == dummy_indices_type("i")); + REQUIRE(vector.difference(vector) == dummy_indices_type("")); + REQUIRE(vector.difference(matrix) == dummy_indices_type("")); + REQUIRE(vector.difference(matrix2) == dummy_indices_type("i")); + + REQUIRE(matrix.difference(scalar) == 
dummy_indices_type("i,j")); + REQUIRE(matrix.difference(vector) == dummy_indices_type("j")); + REQUIRE(matrix.difference(matrix) == dummy_indices_type("")); + REQUIRE(matrix.difference(matrix2) == dummy_indices_type("i,j")); + } } diff --git a/tests/cxx/unit_tests/tensorwrapper/dsl/labeled.cpp b/tests/cxx/unit_tests/tensorwrapper/dsl/labeled.cpp index be49f8e1..1510a094 100644 --- a/tests/cxx/unit_tests/tensorwrapper/dsl/labeled.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/dsl/labeled.cpp @@ -28,7 +28,7 @@ TEMPLATE_LIST_TEST_CASE("Labeled", "", test_types) { using labels_type = typename labeled_type::label_type; test_types defaulted_values{shape::Smooth{}}; - test_types values{test_tensorwrapper::smooth_matrix()}; + test_types values{testing::smooth_matrix()}; labels_type scalar; labels_type ij("i,j"); @@ -130,7 +130,7 @@ TEMPLATE_LIST_TEST_CASE("Labeled", "", test_types) { REQUIRE(clabeled_default == labeled_default); // Different object, same labels - auto value2 = test_tensorwrapper::smooth_matrix(20, 10); + auto value2 = testing::smooth_matrix(20, 10); REQUIRE_FALSE(labeled_value == labeled_type(value2, ij)); // Same object, different labels diff --git a/tests/cxx/unit_tests/tensorwrapper/dsl/pairwise_parser.cpp b/tests/cxx/unit_tests/tensorwrapper/dsl/pairwise_parser.cpp index 59d34a13..7478335d 100644 --- a/tests/cxx/unit_tests/tensorwrapper/dsl/pairwise_parser.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/dsl/pairwise_parser.cpp @@ -17,25 +17,22 @@ using namespace tensorwrapper; -using test_types = - std::tuple; - -TEMPLATE_LIST_TEST_CASE("PairwiseParser", "", test_types) { +TEMPLATE_LIST_TEST_CASE("PairwiseParser", "", testing::dsl_types) { using object_type = TestType; - test_types scalar_values{test_tensorwrapper::smooth_scalar(), - symmetry::Group(0), sparsity::Pattern(0)}; - test_types matrix_values{test_tensorwrapper::smooth_matrix(), - symmetry::Group(2), sparsity::Pattern(2)}; + auto scalar_values = testing::scalar_values(); + auto vector_values = testing::vector_values(); + auto matrix_values = testing::matrix_values(); auto value0 = std::get(scalar_values); + auto value1 = std::get(vector_values); auto value2 = std::get(matrix_values); dsl::PairwiseParser p; SECTION("assignment") { - object_type rv{}; - object_type corr{}; + object_type rv(value1); + object_type corr(value1); SECTION("scalar") { p.dispatch(rv(""), value0("")); corr.permute_assignment("", value0("")); @@ -50,8 +47,8 @@ TEMPLATE_LIST_TEST_CASE("PairwiseParser", "", test_types) { } SECTION("addition") { - object_type rv{}; - object_type corr{}; + object_type rv(value1); + object_type corr(value1); SECTION("scalar") { p.dispatch(rv(""), value0("") + value0("")); corr.addition_assignment("", value0(""), value0("")); @@ -66,8 +63,8 @@ TEMPLATE_LIST_TEST_CASE("PairwiseParser", "", test_types) { } SECTION("subtraction") { - object_type rv{}; - object_type corr{}; + object_type rv(value1); + object_type corr(value1); SECTION("scalar") { p.dispatch(rv(""), value0("") - value0("")); corr.subtraction_assignment("", value0(""), value0("")); @@ -82,8 +79,8 @@ TEMPLATE_LIST_TEST_CASE("PairwiseParser", "", test_types) { } SECTION("multiplication") { - object_type rv{}; - object_type corr{}; + object_type rv(value1); + object_type corr(value1); SECTION("scalar") { p.dispatch(rv(""), value0("") * value0("")); corr.multiplication_assignment("", value0(""), value0("")); @@ -104,4 +101,58 @@ TEMPLATE_LIST_TEST_CASE("PairwiseParser", "", test_types) { REQUIRE_THROWS_AS(p.dispatch(value0(""), value0("") * 1.0), error_t); } +} + 
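+
+// N.b. the dispatch calls checked above all funnel through
+// PairwiseParser::dispatch, which inspects the parsed right-hand side and
+// forwards to the matching xxx_assignment hook on the assigned-to object.
+// A rough, hypothetical sketch of that flow (names invented for exposition
+// only; this is not the actual implementation):
+//
+//     // out = labeled result; rhs = parsed expression; l, r = its terms
+//     if(rhs is an addition)          out.addition_assignment(labels, l, r);
+//     else if(rhs is a subtraction)   out.subtraction_assignment(labels, l, r);
+//     else if(rhs is a multiplication) out.multiplication_assignment(labels, l, r);
+//     else /* plain labeled object */ out.permute_assignment(labels, rhs);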
+// Since Eigen buffers are templated on the rank there isn't an easy way to +// include them in dsl_types +TEST_CASE("PairwiseParser : buffer::Eigen") { + auto scalar0 = testing::eigen_scalar(); + auto scalar1 = testing::eigen_scalar(); + auto scalar2 = testing::eigen_scalar(); + auto corr = testing::eigen_scalar(); + + scalar0.value()() = 1.0; + scalar1.value()() = 2.0; + scalar2.value()() = 3.0; + + dsl::PairwiseParser p; + + SECTION("assignment") { + SECTION("scalar") { + p.dispatch(scalar0(""), scalar1("")); + corr.permute_assignment("", scalar1("")); + REQUIRE(corr.are_equal(scalar0)); + } + } + + SECTION("addition") { + SECTION("scalar") { + p.dispatch(scalar0(""), scalar1("") + scalar2("")); + corr.addition_assignment("", scalar1(""), scalar2("")); + REQUIRE(corr.are_equal(scalar0)); + } + } + + SECTION("subtraction") { + SECTION("scalar") { + p.dispatch(scalar0(""), scalar1("") - scalar2("")); + corr.subtraction_assignment("", scalar1(""), scalar2("")); + REQUIRE(corr.are_equal(scalar0)); + } + } + + SECTION("multiplication") { + SECTION("scalar") { + p.dispatch(scalar0(""), scalar1("") * scalar2("")); + corr.multiplication_assignment("", scalar1(""), scalar2("")); + REQUIRE(corr.are_equal(scalar0)); + } + } + + SECTION("scalar_multiplication") { + // This should actually work. Will fix in a future PR + using error_t = std::runtime_error; + + REQUIRE_THROWS_AS(p.dispatch(scalar0(""), scalar0("") * 1.0), error_t); + } } \ No newline at end of file diff --git a/tests/cxx/unit_tests/tensorwrapper/layout/layout_base.cpp b/tests/cxx/unit_tests/tensorwrapper/layout/layout_base.cpp index 458f822a..6e03dd3e 100644 --- a/tests/cxx/unit_tests/tensorwrapper/layout/layout_base.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/layout/layout_base.cpp @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
 */
-#include "../helpers.hpp"
+#include "../testing/testing.hpp"
 #include
 #include
 #include
@@ -31,9 +31,9 @@
 using namespace layout;
 
 TEST_CASE("LayoutBase") {
     shape::Smooth matrix_shape{2, 3};
-    symmetry::Permutation p01{0, 1};
-    symmetry::Group no_symm, symm{p01};
-    sparsity::Pattern no_sparsity;
+    symmetry::Permutation p01{1, 0};
+    symmetry::Group no_symm(2), symm{p01};
+    sparsity::Pattern no_sparsity(2);
 
     Physical phys_copy_no_sym(matrix_shape, no_symm, no_sparsity);
     Physical phys_copy_has_sym(matrix_shape, symm, no_sparsity);
@@ -44,34 +44,45 @@
     LayoutBase& base_copy_has_sym = phys_copy_has_sym;
     LayoutBase& base_copy_just_shape = phys_copy_just_shape;
 
-    SECTION("Ctors and assignment") {
+    SECTION("Ctors") {
         SECTION("Copy state") {
             REQUIRE(base_copy_no_sym.shape().are_equal(matrix_shape));
-            REQUIRE(base_copy_no_sym.symmetry() == no_symm);
-            REQUIRE(base_copy_no_sym.sparsity() == no_sparsity);
+            REQUIRE(base_copy_no_sym.symmetry().are_equal(no_symm));
+            REQUIRE(base_copy_no_sym.sparsity().are_equal(no_sparsity));
 
             REQUIRE(base_copy_has_sym.shape().are_equal(matrix_shape));
-            REQUIRE(base_copy_has_sym.symmetry() == symm);
-            REQUIRE(base_copy_has_sym.sparsity() == no_sparsity);
+            REQUIRE(base_copy_has_sym.symmetry().are_equal(symm));
+            REQUIRE(base_copy_has_sym.sparsity().are_equal(no_sparsity));
 
             REQUIRE(base_copy_just_shape.shape().are_equal(matrix_shape));
-            REQUIRE(base_copy_just_shape.symmetry() == no_symm);
-            REQUIRE(base_copy_just_shape.sparsity() == no_sparsity);
+            REQUIRE(base_copy_just_shape.symmetry().are_equal(no_symm));
+            REQUIRE(base_copy_just_shape.sparsity().are_equal(no_sparsity));
+        }
+
+        SECTION("Copy shape, default others") {
+            Physical only_shape(matrix_shape);
+            REQUIRE(only_shape.shape().are_equal(matrix_shape));
+            REQUIRE(only_shape.symmetry().are_equal(no_symm));
+            REQUIRE(only_shape.sparsity().are_equal(no_sparsity));
+        }
+
+        SECTION("Move shape, default others") {
+            Physical only_shape(matrix_shape.clone());
+            REQUIRE(only_shape.shape().are_equal(matrix_shape));
+            REQUIRE(only_shape.symmetry().are_equal(no_symm));
+            REQUIRE(only_shape.sparsity().are_equal(no_sparsity));
         }
 
         SECTION("Move state") {
             auto pshape = matrix_shape.clone();
-            auto psymm = std::make_unique(no_symm);
-            auto psparse = std::make_unique(no_sparsity);
+            auto psymm = no_symm.clone();
+            auto psparse = no_sparsity.clone();
 
             SECTION("All non-null") {
                 Physical rhs(std::move(pshape), std::move(psymm),
                              std::move(psparse));
                 REQUIRE(base_copy_no_sym == rhs);
             }
 
-            SECTION("Only Shape input") {
-                Physical rhs(std::move(pshape));
-                REQUIRE(base_copy_just_shape == rhs);
-            }
+
             SECTION("Shape is null") {
                 REQUIRE_THROWS_AS(
                   Physical(nullptr, std::move(psymm), std::move(psparse)),
@@ -88,6 +99,25 @@
                   Physical(std::move(pshape), std::move(psymm), nullptr),
                   std::runtime_error);
             }
+            SECTION("Shape rank is inconsistent") {
+                shape::Smooth s0{};
+                REQUIRE_THROWS_AS(
+                  Physical(s0.clone(), std::move(psymm), std::move(psparse)),
+                  std::runtime_error);
+            }
+            SECTION("Symmetry rank is inconsistent") {
+                symmetry::Group g0(0);
+                REQUIRE_THROWS_AS(
+                  Physical(std::move(pshape), g0.clone(), std::move(psparse)),
+                  std::runtime_error);
+            }
+
+            SECTION("Sparsity rank is inconsistent") {
+                sparsity::Pattern p3(3);
+                REQUIRE_THROWS_AS(
+                  Physical(std::move(pshape), std::move(psymm), p3.clone()),
+                  std::runtime_error);
+            }
         }
     }
 
@@ -106,17 +136,69 @@
 
        REQUIRE(base_copy_has_sym.sparsity() == no_sparsity);
     }
 
+    SECTION("rank") {
+        REQUIRE(base_copy_no_sym.rank() == 2);
+        REQUIRE(base_copy_has_sym.rank() == 2);
+    }
+
     SECTION("operator==") {
         // Same
         REQUIRE(base_copy_no_sym ==
                 Physical(matrix_shape, no_symm, no_sparsity));
 
         // Different shape
-        shape::Smooth vector_shape{2};
+        shape::Smooth matrix_shape2{3, 2};
         REQUIRE_FALSE(base_copy_no_sym ==
-                      Physical(vector_shape, no_symm, no_sparsity));
+                      Physical(matrix_shape2, no_symm, no_sparsity));
 
         // Different symmetry
         REQUIRE_FALSE(base_copy_no_sym == base_copy_has_sym);
+
+        // N.b. presently not possible to have different sparsities w/o
+        // different ranks
+    }
+
+    SECTION("operator!=") {
+        REQUIRE_FALSE(base_copy_no_sym !=
+                      Physical(matrix_shape, no_symm, no_sparsity));
+        REQUIRE(base_copy_no_sym != base_copy_has_sym);
+    }
+
+    SECTION("xxx_assignment_") {
+        // Layout just calls the equivalent xxx_assignment_ method on its
+        // shape, symmetry, and sparsity objects. Spot checking works here
+        // since the called methods are tested
+        auto scalar_layout = testing::scalar_physical();
+        auto matrix_layout = testing::matrix_physical();
+
+        SECTION("addition_assignment_") {
+            auto lij = matrix_layout("i,j");
+            auto pout = &(scalar_layout.addition_assignment("i,j", lij, lij));
+            REQUIRE(pout == &scalar_layout);
+            REQUIRE(scalar_layout == matrix_layout);
+        }
+
+        SECTION("subtraction_assignment_") {
+            auto lij = matrix_layout("i,j");
+            auto pout =
+              &(scalar_layout.subtraction_assignment("i,j", lij, lij));
+            REQUIRE(pout == &scalar_layout);
+            REQUIRE(scalar_layout == matrix_layout);
+        }
+
+        SECTION("multiplication_assignment_") {
+            auto lij = matrix_layout("i,j");
+            auto pout =
+              &(scalar_layout.multiplication_assignment("i,j", lij, lij));
+            REQUIRE(pout == &scalar_layout);
+            REQUIRE(scalar_layout == matrix_layout);
+        }
+
+        SECTION("permute_assignment_") {
+            auto lij = matrix_layout("i,j");
+            auto pout = &(scalar_layout.permute_assignment("i,j", lij));
+            REQUIRE(pout == &scalar_layout);
+            REQUIRE(scalar_layout == matrix_layout);
+        }
+    }
 }
diff --git a/tests/cxx/unit_tests/tensorwrapper/layout/logical.cpp b/tests/cxx/unit_tests/tensorwrapper/layout/logical.cpp
index de4e0fb4..c48833db 100644
--- a/tests/cxx/unit_tests/tensorwrapper/layout/logical.cpp
+++ b/tests/cxx/unit_tests/tensorwrapper/layout/logical.cpp
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#include "../helpers.hpp"
+#include "../testing/testing.hpp"
 #include
 #include
 #include
@@ -32,9 +32,9 @@ using namespace layout;
  */
 TEST_CASE("Logical") {
     shape::Smooth matrix_shape{2, 3};
-    symmetry::Permutation p01{0, 1};
-    symmetry::Group no_symm, symm{p01};
-    sparsity::Pattern no_sparsity;
+    symmetry::Permutation p01{1, 0};
+    symmetry::Group no_symm(2), symm{p01};
+    sparsity::Pattern no_sparsity(2);
 
     Logical logi_copy_no_sym(matrix_shape, no_symm, no_sparsity);
     Logical logi_copy_has_sym(matrix_shape, symm, no_sparsity);
diff --git a/tests/cxx/unit_tests/tensorwrapper/layout/physical.cpp b/tests/cxx/unit_tests/tensorwrapper/layout/physical.cpp
index 02e28b4a..84e68a82 100644
--- a/tests/cxx/unit_tests/tensorwrapper/layout/physical.cpp
+++ b/tests/cxx/unit_tests/tensorwrapper/layout/physical.cpp
@@ -14,7 +14,7 @@
  * limitations under the License.
*/ -#include "../helpers.hpp" +#include "../testing/testing.hpp" #include #include #include @@ -32,9 +32,9 @@ using namespace layout; */ TEST_CASE("Physical") { shape::Smooth matrix_shape{2, 3}; - symmetry::Permutation p01{0, 1}; - symmetry::Group no_symm, symm{p01}; - sparsity::Pattern no_sparsity; + symmetry::Permutation p01{1, 0}; + symmetry::Group no_symm(2), symm{p01}; + sparsity::Pattern no_sparsity(2); Physical phys_copy_no_sym(matrix_shape, no_symm, no_sparsity); Physical phys_copy_has_sym(matrix_shape, symm, no_sparsity); diff --git a/tests/cxx/unit_tests/tensorwrapper/shape/detail_/smooth_alias.cpp b/tests/cxx/unit_tests/tensorwrapper/shape/detail_/smooth_alias.cpp index 224c4812..a272ae12 100644 --- a/tests/cxx/unit_tests/tensorwrapper/shape/detail_/smooth_alias.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/shape/detail_/smooth_alias.cpp @@ -14,7 +14,7 @@ * limitations under the License. */ -#include "../../helpers.hpp" +#include "../../testing/testing.hpp" #include using namespace tensorwrapper::shape; diff --git a/tests/cxx/unit_tests/tensorwrapper/shape/detail_/smooth_view_pimpl.cpp b/tests/cxx/unit_tests/tensorwrapper/shape/detail_/smooth_view_pimpl.cpp index 5c07e15b..cf5ff1d8 100644 --- a/tests/cxx/unit_tests/tensorwrapper/shape/detail_/smooth_view_pimpl.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/shape/detail_/smooth_view_pimpl.cpp @@ -14,7 +14,7 @@ * limitations under the License. */ -#include "../../helpers.hpp" +#include "../../testing/testing.hpp" #include /* Testing Strategy. diff --git a/tests/cxx/unit_tests/tensorwrapper/shape/smooth.cpp b/tests/cxx/unit_tests/tensorwrapper/shape/smooth.cpp index 0df4c693..208f72ee 100644 --- a/tests/cxx/unit_tests/tensorwrapper/shape/smooth.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/shape/smooth.cpp @@ -14,7 +14,7 @@ * limitations under the License. */ -#include "../helpers.hpp" +#include "../testing/testing.hpp" #include #include diff --git a/tests/cxx/unit_tests/tensorwrapper/shape/smooth_view.cpp b/tests/cxx/unit_tests/tensorwrapper/shape/smooth_view.cpp index 48587b86..e47c59fd 100644 --- a/tests/cxx/unit_tests/tensorwrapper/shape/smooth_view.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/shape/smooth_view.cpp @@ -14,7 +14,7 @@ * limitations under the License. */ -#include "../helpers.hpp" +#include "../testing/testing.hpp" #include #include #include diff --git a/tests/cxx/unit_tests/tensorwrapper/sparsity/pattern.cpp b/tests/cxx/unit_tests/tensorwrapper/sparsity/pattern.cpp index bbf8fa95..bb3fdb3e 100644 --- a/tests/cxx/unit_tests/tensorwrapper/sparsity/pattern.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/sparsity/pattern.cpp @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#include "../helpers.hpp" +#include "../testing/testing.hpp" #include using namespace tensorwrapper::testing; diff --git a/tests/cxx/unit_tests/tensorwrapper/symmetry/group.cpp b/tests/cxx/unit_tests/tensorwrapper/symmetry/group.cpp index 14792008..3d611bd9 100644 --- a/tests/cxx/unit_tests/tensorwrapper/symmetry/group.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/symmetry/group.cpp @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "../helpers.hpp" +#include "../testing/testing.hpp" #include #include diff --git a/tests/cxx/unit_tests/tensorwrapper/symmetry/permutation.cpp b/tests/cxx/unit_tests/tensorwrapper/symmetry/permutation.cpp index 61cb8d0a..a99489e4 100644 --- a/tests/cxx/unit_tests/tensorwrapper/symmetry/permutation.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/symmetry/permutation.cpp @@ -14,7 +14,7 @@ * limitations under the License. */ -#include "../helpers.hpp" +#include "../testing/testing.hpp" #include using namespace tensorwrapper::testing; diff --git a/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_factory.cpp b/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_factory.cpp index d0156e7d..d4a74d62 100644 --- a/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_factory.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_factory.cpp @@ -14,8 +14,7 @@ * limitations under the License. */ -#include "../../helpers.hpp" -#include "../../inputs.hpp" +#include "../../testing/testing.hpp" #include #include @@ -59,16 +58,16 @@ TEST_CASE("TensorFactory") { SECTION("default_logical_symmetry") { // N.B. at moment default symmetry is no-symmetry, i.e., an empty Group symmetry::Group corr; - auto i = testing::smooth_scalar(); + auto i = testing::smooth_scalar_input(); auto result = TensorFactory::default_logical_symmetry(*i.m_pshape); REQUIRE((*result) == corr); } SECTION("default_logical_sparsity") { // N.B. at moment default symmetry is no sparsity - sparsity::Pattern corr; + sparsity::Pattern corr(2); - auto i = testing::smooth_symmetric_matrix(); + auto i = testing::smooth_symmetric_matrix_input(); auto result = TensorFactory::default_logical_sparsity(*i.m_pshape, *i.m_psymmetry); REQUIRE((*result) == corr); @@ -116,7 +115,7 @@ TEST_CASE("TensorFactory") { SECTION("construct(scalar_il_type)") { auto ppimpl = TensorFactory::construct(42.0); - auto corr = TensorFactory::construct(testing::smooth_scalar()); + auto corr = TensorFactory::construct(testing::smooth_scalar_input()); REQUIRE(*ppimpl == *corr); } @@ -124,7 +123,7 @@ TEST_CASE("TensorFactory") { using vector_il_type = typename TensorFactory::vector_il_type; vector_il_type il{0.0, 1.0, 2.0, 3.0, 4.0}; auto ppimpl = TensorFactory::construct(il); - auto corr = TensorFactory::construct(testing::smooth_vector()); + auto corr = TensorFactory::construct(testing::smooth_vector_input()); REQUIRE(*ppimpl == *corr); } @@ -132,7 +131,7 @@ TEST_CASE("TensorFactory") { using matrix_il_type = typename TensorFactory::matrix_il_type; matrix_il_type il{{1.0, 2.0}, {3.0, 4.0}}; auto ppimpl = TensorFactory::construct(il); - auto corr = TensorFactory::construct(testing::smooth_matrix()); + auto corr = TensorFactory::construct(testing::smooth_matrix_input()); REQUIRE(*ppimpl == *corr); } @@ -140,7 +139,7 @@ TEST_CASE("TensorFactory") { using tensor3_il_type = typename TensorFactory::tensor3_il_type; tensor3_il_type il{{{1.0, 2.0}, {3.0, 4.0}}, {{5.0, 6.0}, {7.0, 8.0}}}; auto ppimpl = TensorFactory::construct(il); - auto corr = TensorFactory::construct(testing::smooth_tensor3()); + auto corr = TensorFactory::construct(testing::smooth_tensor3_input()); REQUIRE(*ppimpl == *corr); } @@ -150,7 +149,7 @@ TEST_CASE("TensorFactory") { {{{1.0, 2.0}, {3.0, 4.0}}, {{5.0, 6.0}, {7.0, 8.0}}}, {{{9.0, 10.0}, {11.0, 12.0}}, {{13.0, 14.0}, {15.0, 16.0}}}}; auto ppimpl = TensorFactory::construct(il); - auto corr = TensorFactory::construct(testing::smooth_tensor4()); + auto corr = TensorFactory::construct(testing::smooth_tensor4_input()); 
REQUIRE(*ppimpl == *corr); } @@ -164,9 +163,10 @@ TEST_CASE("TensorFactory") { SECTION("assert_valid") { TensorFactory f; - REQUIRE_NOTHROW(f.assert_valid(testing::smooth_scalar())); - REQUIRE_NOTHROW(f.assert_valid(testing::smooth_vector())); - REQUIRE_NOTHROW(f.assert_valid(testing::smooth_symmetric_matrix())); + REQUIRE_NOTHROW(f.assert_valid(testing::smooth_scalar_input())); + REQUIRE_NOTHROW(f.assert_valid(testing::smooth_vector_input())); + REQUIRE_NOTHROW( + f.assert_valid(testing::smooth_symmetric_matrix_input())); using e_t = std::runtime_error; @@ -182,7 +182,7 @@ TEST_CASE("TensorFactory") { } SECTION("Buffer with incompatible physical layout") { - layout::Physical p(shape::Smooth{3, 3}, g, sparsity); + layout::Physical p(shape::Smooth{3, 3}); TensorInput i(std::move(pbuffer), p); REQUIRE_THROWS_AS(f.assert_valid(i), e_t); } diff --git a/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_input.cpp b/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_input.cpp index d453cc77..9814fde5 100644 --- a/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_input.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_input.cpp @@ -14,8 +14,7 @@ * limitations under the License. */ -#include "../../helpers.hpp" -#include "../../inputs.hpp" +#include "../../testing/testing.hpp" using namespace tensorwrapper; @@ -34,7 +33,7 @@ TEST_CASE("TensorInput") { parallelzone::runtime::RuntimeView rv; shape::Smooth shape{3, 3}; symmetry::Group g{symmetry::Permutation{0, 1}}; - sparsity::Pattern sparsity; + sparsity::Pattern sparsity(2); layout::Logical logical(shape, g, sparsity); layout::Physical physical(shape, g, sparsity); allocator::Eigen alloc(rv); diff --git a/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_pimpl.cpp b/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_pimpl.cpp index c91a537f..26c45120 100644 --- a/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_pimpl.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_pimpl.cpp @@ -14,8 +14,7 @@ * limitations under the License. */ -#include "../../helpers.hpp" -#include "../../inputs.hpp" +#include "../../testing/testing.hpp" #include #include #include @@ -25,9 +24,9 @@ using namespace tensorwrapper; using buffer_type = buffer::Eigen; TEST_CASE("TensorPIMPL") { - auto input = testing::smooth_vector(); - symmetry::Group g; - sparsity::Pattern sparsity; + auto input = testing::smooth_vector_input(); + symmetry::Group g(1); + sparsity::Pattern sparsity(1); layout::Logical logical_corr(*input.m_pshape, g, sparsity); auto pbuffer_corr = input.m_pbuffer->clone(); @@ -90,7 +89,7 @@ TEST_CASE("TensorPIMPL") { SECTION("Different logical layout") { shape::Smooth scalar{}; - auto pl2 = std::make_unique(scalar, g, sparsity); + auto pl2 = std::make_unique(scalar); detail_::TensorPIMPL diff(std::move(pl2), std::move(pbuffer2)); REQUIRE_FALSE(value == diff); } diff --git a/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/unwrap_il.cpp b/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/unwrap_il.cpp index 6bf6a849..f084ec75 100644 --- a/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/unwrap_il.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/unwrap_il.cpp @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "../../helpers.hpp" +#include "../../testing/testing.hpp" #include TEST_CASE("unwrap_il") { diff --git a/tests/cxx/unit_tests/tensorwrapper/tensor/tensor_class.cpp b/tests/cxx/unit_tests/tensorwrapper/tensor/tensor_class.cpp index 05a59f96..f6161410 100644 --- a/tests/cxx/unit_tests/tensorwrapper/tensor/tensor_class.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/tensor/tensor_class.cpp @@ -17,21 +17,23 @@ #include #include #include + using namespace tensorwrapper; +using namespace testing; TEST_CASE("Tensor") { using detail_::TensorFactory; Tensor defaulted; - Tensor scalar(testing::smooth_scalar()); - Tensor vector(testing::smooth_vector()); + Tensor scalar(smooth_scalar_input()); + Tensor vector(smooth_vector_input()); // We know TensorFactory works from unit testing it - auto scalar_corr = TensorFactory::construct(testing::smooth_scalar()); + auto scalar_corr = TensorFactory::construct(smooth_scalar_input()); auto& scalar_layout_corr = scalar_corr->logical_layout(); auto& scalar_buffer_corr = scalar_corr->buffer(); - auto vector_corr = TensorFactory::construct(testing::smooth_vector()); + auto vector_corr = TensorFactory::construct(smooth_vector_input()); auto& vector_layout_corr = vector_corr->logical_layout(); auto& vector_buffer_corr = vector_corr->buffer(); @@ -46,7 +48,7 @@ TEST_CASE("Tensor") { SECTION("scalar_il_type") { Tensor t(42.0); - Tensor corr(testing::smooth_scalar()); + Tensor corr(testing::smooth_scalar_input()); REQUIRE(t == corr); } @@ -54,7 +56,7 @@ TEST_CASE("Tensor") { using vector_il_type = typename Tensor::vector_il_type; vector_il_type il{0.0, 1.0, 2.0, 3.0, 4.0}; Tensor t(il); - Tensor corr(testing::smooth_vector()); + Tensor corr(smooth_vector_input()); REQUIRE(t == corr); } @@ -62,7 +64,7 @@ TEST_CASE("Tensor") { using matrix_il_type = typename Tensor::matrix_il_type; matrix_il_type il{{1.0, 2.0}, {3.0, 4.0}}; Tensor t(il); - Tensor corr(testing::smooth_matrix()); + Tensor corr(smooth_matrix_input()); REQUIRE(t == corr); } @@ -71,7 +73,7 @@ TEST_CASE("Tensor") { tensor3_il_type il{{{1.0, 2.0}, {3.0, 4.0}}, {{5.0, 6.0}, {7.0, 8.0}}}; Tensor t(il); - Tensor corr(testing::smooth_tensor3()); + Tensor corr(smooth_tensor3_input()); REQUIRE(t == corr); } @@ -81,7 +83,7 @@ TEST_CASE("Tensor") { {{{1.0, 2.0}, {3.0, 4.0}}, {{5.0, 6.0}, {7.0, 8.0}}}, {{{9.0, 10.0}, {11.0, 12.0}}, {{13.0, 14.0}, {15.0, 16.0}}}}; Tensor t(il); - Tensor corr(testing::smooth_tensor4()); + Tensor corr(smooth_tensor4_input()); REQUIRE(t == corr); } @@ -132,16 +134,16 @@ TEST_CASE("Tensor") { SECTION("operator==") { REQUIRE(defaulted == Tensor{}); - Tensor other_scalar(testing::smooth_scalar()); - Tensor other_vector(testing::smooth_vector()); + Tensor other_scalar(smooth_scalar_input()); + Tensor other_vector(smooth_vector_input()); REQUIRE(scalar == other_scalar); REQUIRE(vector == other_vector); SECTION("Different layout") { - auto vector_input = testing::smooth_vector(); + auto vector_input = smooth_vector_input(); shape::Smooth alt_shape{5, 1}; - symmetry::Group g; - sparsity::Pattern sparsity; + symmetry::Group g(2); + sparsity::Pattern sparsity(2); auto p = std::make_unique(alt_shape, g, sparsity); vector_input.m_pshape = nullptr; vector_input.m_plogical.swap(p); @@ -156,7 +158,7 @@ TEST_CASE("Tensor") { SECTION("operator!=") { // Implemented in terms of operator==, just spot check - Tensor other_scalar(testing::smooth_scalar()); + Tensor other_scalar(smooth_scalar_input()); REQUIRE_FALSE(scalar != other_scalar); REQUIRE(scalar != vector); diff --git 
a/tests/cxx/unit_tests/tensorwrapper/testing/dsl.hpp b/tests/cxx/unit_tests/tensorwrapper/testing/dsl.hpp
new file mode 100644
index 00000000..4b7ffe51
--- /dev/null
+++ b/tests/cxx/unit_tests/tensorwrapper/testing/dsl.hpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2025 NWChemEx-Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+#include "layouts.hpp"
+#include "shapes.hpp"
+#include
+
+namespace tensorwrapper::testing {
+
+/// Types that participate in the DSL
+using dsl_types =
+  std::tuple<tensorwrapper::shape::Smooth, tensorwrapper::symmetry::Group,
+             tensorwrapper::sparsity::Pattern, tensorwrapper::layout::Logical,
+             tensorwrapper::layout::Physical>;
+
+inline auto scalar_values() {
+    return dsl_types{smooth_scalar(), tensorwrapper::symmetry::Group(0),
+                     tensorwrapper::sparsity::Pattern(0), scalar_logical(),
+                     scalar_physical()};
+}
+
+inline auto vector_values() {
+    return dsl_types{smooth_vector(), tensorwrapper::symmetry::Group(1),
+                     tensorwrapper::sparsity::Pattern(1), vector_logical(),
+                     vector_physical()};
+}
+
+inline auto matrix_values() {
+    return dsl_types{smooth_matrix(), tensorwrapper::symmetry::Group(2),
+                     tensorwrapper::sparsity::Pattern(2), matrix_logical(),
+                     matrix_physical()};
+}
+
+} // namespace tensorwrapper::testing
\ No newline at end of file
diff --git a/tests/cxx/unit_tests/tensorwrapper/testing/eigen_buffers.hpp b/tests/cxx/unit_tests/tensorwrapper/testing/eigen_buffers.hpp
index 84643484..97b5811c 100644
--- a/tests/cxx/unit_tests/tensorwrapper/testing/eigen_buffers.hpp
+++ b/tests/cxx/unit_tests/tensorwrapper/testing/eigen_buffers.hpp
@@ -48,26 +48,26 @@ auto eigen_scalar() {
 }
 
 template
-auto eigen_vector() {
+auto eigen_vector(std::size_t n = 5) {
     using buffer_type = buffer::Eigen;
     using data_type = typename buffer_type::data_type;
-    data_type vector(5);
-    for(std::size_t i = 0; i < 5; ++i) vector(i) = i;
-    shape::Smooth shape{5};
+    data_type vector(n);
+    for(std::size_t i = 0; i < n; ++i) vector(i) = i;
+    shape::Smooth shape{n};
     layout::Physical l(shape);
     return buffer_type(vector, l);
 }
 
 template
-auto eigen_matrix() {
+auto eigen_matrix(std::size_t n = 2, std::size_t m = 2) {
     using buffer_type = buffer::Eigen;
     using data_type = typename buffer_type::data_type;
-    data_type matrix(2, 2);
-    matrix(0, 0) = 1.0;
-    matrix(0, 1) = 2.0;
-    matrix(1, 0) = 3.0;
-    matrix(1, 1) = 4.0;
-    shape::Smooth shape{2, 2};
+    data_type matrix(n, m);
+    double counter = 1.0;
+    for(std::size_t i = 0; i < n; ++i)
+        for(std::size_t j = 0; j < m; ++j) matrix(i, j) = counter++;
+
+    shape::Smooth shape{n, m};
     layout::Physical l(shape);
     return buffer_type(matrix, l);
 }
diff --git a/tests/cxx/unit_tests/tensorwrapper/helpers.hpp b/tests/cxx/unit_tests/tensorwrapper/testing/helpers.hpp
similarity index 98%
rename from tests/cxx/unit_tests/tensorwrapper/helpers.hpp
rename to tests/cxx/unit_tests/tensorwrapper/testing/helpers.hpp
index ec299426..9978dccb 100644
--- a/tests/cxx/unit_tests/tensorwrapper/helpers.hpp
+++ b/tests/cxx/unit_tests/tensorwrapper/testing/helpers.hpp
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#include
 
 namespace tensorwrapper::testing {
 
diff --git a/tests/cxx/unit_tests/tensorwrapper/inputs.hpp
b/tests/cxx/unit_tests/tensorwrapper/testing/inputs.hpp similarity index 90% rename from tests/cxx/unit_tests/tensorwrapper/inputs.hpp rename to tests/cxx/unit_tests/tensorwrapper/testing/inputs.hpp index b564969f..ae3151b8 100644 --- a/tests/cxx/unit_tests/tensorwrapper/inputs.hpp +++ b/tests/cxx/unit_tests/tensorwrapper/testing/inputs.hpp @@ -33,7 +33,7 @@ inline auto smooth_scalar_() { return detail_::TensorInput(shape, std::move(buffer)); } -inline auto smooth_scalar() { return smooth_scalar_(); } +inline auto smooth_scalar_input() { return smooth_scalar_(); } /// 5 element vector such that element i is i template @@ -43,7 +43,7 @@ inline auto smooth_vector_() { return detail_::TensorInput(shape, std::move(buffer)); } -inline auto smooth_vector() { return smooth_vector_(); } +inline auto smooth_vector_input() { return smooth_vector_(); } /// 5 element vector internally stored as a 5 by 1 matrix inline auto smooth_vector_alt() { @@ -63,9 +63,9 @@ inline auto smooth_matrix_() { return detail_::TensorInput(shape, std::move(buffer)); } -inline auto smooth_matrix() { return smooth_matrix_(); } +inline auto smooth_matrix_input() { return smooth_matrix_(); } -inline auto smooth_symmetric_matrix() { +inline auto smooth_symmetric_matrix_input() { using buffer_type = buffer::Eigen; using data_type = typename buffer_type::data_type; shape::Smooth shape{3, 3}; @@ -92,9 +92,9 @@ inline auto smooth_tensor3_() { return detail_::TensorInput(shape, std::move(buffer)); } -inline auto smooth_tensor3() { return smooth_tensor3_(); } +inline auto smooth_tensor3_input() { return smooth_tensor3_(); } -inline auto smooth_tensor4() { +inline auto smooth_tensor4_input() { using buffer_type = buffer::Eigen; using data_type = typename buffer_type::data_type; shape::Smooth shape{2, 2, 2, 2}; diff --git a/tests/cxx/unit_tests/tensorwrapper/testing/layouts.hpp b/tests/cxx/unit_tests/tensorwrapper/testing/layouts.hpp new file mode 100644 index 00000000..f1f71247 --- /dev/null +++ b/tests/cxx/unit_tests/tensorwrapper/testing/layouts.hpp @@ -0,0 +1,65 @@ +/* + * Copyright 2025 NWChemEx-Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once +#include "shapes.hpp" +#include + +namespace tensorwrapper::testing { + +// ----------------------------------------------------------------------------- +// -- Logical layouts +// ----------------------------------------------------------------------------- + +inline auto scalar_logical() { + return tensorwrapper::layout::Logical(smooth_scalar()); +} + +inline auto vector_logical(std::size_t i = 10) { + return tensorwrapper::layout::Logical(smooth_vector(i)); +} + +inline auto matrix_logical(std::size_t i = 10, std::size_t j = 10) { + return tensorwrapper::layout::Logical(smooth_matrix(i, j)); +} + +inline auto tensor_logical(std::size_t i = 10, std::size_t j = 10, + std::size_t k = 10) { + return tensorwrapper::layout::Logical(smooth_tensor(i, j, k)); +} + +// ----------------------------------------------------------------------------- +// -- Physical layouts +// ----------------------------------------------------------------------------- + +inline auto scalar_physical() { + return tensorwrapper::layout::Physical(smooth_scalar()); +} + +inline auto vector_physical(std::size_t i = 10) { + return tensorwrapper::layout::Physical(smooth_vector(i)); +} + +inline auto matrix_physical(std::size_t i = 10, std::size_t j = 10) { + return tensorwrapper::layout::Physical(smooth_matrix(i, j)); +} + +inline auto tensor_physical(std::size_t i = 10, std::size_t j = 10, + std::size_t k = 10) { + return tensorwrapper::layout::Physical(smooth_tensor(i, j, k)); +} + +} // namespace tensorwrapper::testing \ No newline at end of file diff --git a/tests/cxx/unit_tests/tensorwrapper/testing/shapes.hpp b/tests/cxx/unit_tests/tensorwrapper/testing/shapes.hpp index 1e25ec0c..074dde12 100644 --- a/tests/cxx/unit_tests/tensorwrapper/testing/shapes.hpp +++ b/tests/cxx/unit_tests/tensorwrapper/testing/shapes.hpp @@ -22,7 +22,7 @@ #pragma once #include -namespace test_tensorwrapper { +namespace tensorwrapper::testing { inline auto smooth_scalar() { return tensorwrapper::shape::Smooth{}; } @@ -39,4 +39,4 @@ inline auto smooth_tensor(std::size_t i = 10, std::size_t j = 10, return tensorwrapper::shape::Smooth{i, j, k}; } -} // namespace test_tensorwrapper \ No newline at end of file +} // namespace tensorwrapper::testing \ No newline at end of file diff --git a/tests/cxx/unit_tests/tensorwrapper/testing/testing.hpp b/tests/cxx/unit_tests/tensorwrapper/testing/testing.hpp index 47e6de70..cb1ddf7d 100644 --- a/tests/cxx/unit_tests/tensorwrapper/testing/testing.hpp +++ b/tests/cxx/unit_tests/tensorwrapper/testing/testing.hpp @@ -15,7 +15,9 @@ */ #pragma once -#include "../helpers.hpp" -#include "../inputs.hpp" +#include "dsl.hpp" #include "eigen_buffers.hpp" +#include "helpers.hpp" +#include "inputs.hpp" +#include "layouts.hpp" #include "shapes.hpp" \ No newline at end of file
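
A usage sketch of the new testing helpers, illustrative only: the explicit
float template argument and the include path below are assumptions made so
the example is self-contained, not part of this patch.

    #include "testing/testing.hpp"
    using namespace tensorwrapper;

    // Three 2x2 buffers; eigen_matrix() fills 1.0, 2.0, 3.0, 4.0 row by row.
    auto a = testing::eigen_matrix<float>();
    auto b = testing::eigen_matrix<float>();
    auto c = testing::eigen_matrix<float>();

    // Matching dummy indices on all three terms make this a pure Hadamard
    // product, so the DSL dispatches to multiplication_assignment:
    c("i,j") = a("i,j") * b("i,j");
    // c.value()(0, 0) == 1.0, (0, 1) == 4.0, (1, 0) == 9.0, (1, 1) == 16.0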