RcppArmadillo 14.1.99-1 with updated 'rc0' of Armadillo 14.2.0
eddelbuettel committed Nov 7, 2024
1 parent 8645cba commit 4e639e0
Showing 14 changed files with 59 additions and 32 deletions.
6 changes: 5 additions & 1 deletion ChangeLog
@@ -1,4 +1,4 @@
2024-11-05 Dirk Eddelbuettel <[email protected]>
2024-11-07 Dirk Eddelbuettel <[email protected]>

* DESCRIPTION (Version, Date): RcppArmadillo 14.1.99-1
* inst/NEWS.Rd: Idem
@@ -10,6 +10,10 @@

* inst/include/armadillo_bits/: Re-sync Armadillo 14.2.0-rc0

2024-11-05 Dirk Eddelbuettel <[email protected]>

* inst/include/armadillo_bits/: Re-sync Armadillo 14.2.0-rc0

2024-11-03 Dirk Eddelbuettel <[email protected]>

* inst/include/armadillo_bits/: Armadillo 14.2.0-rc0
2 changes: 1 addition & 1 deletion DESCRIPTION
@@ -2,7 +2,7 @@ Package: RcppArmadillo
Type: Package
Title: 'Rcpp' Integration for the 'Armadillo' Templated Linear Algebra Library
Version: 14.1.99-1
Date: 2024-11-05
Date: 2024-11-07
Authors@R: c(person("Dirk", "Eddelbuettel", role = c("aut", "cre"), email = "[email protected]",
comment = c(ORCID = "0000-0001-6419-907X")),
person("Romain", "Francois", role = "aut",
2 changes: 1 addition & 1 deletion inst/NEWS.Rd
@@ -3,7 +3,7 @@
\newcommand{\ghpr}{\href{https://github.com/RcppCore/RcppArmadillo/pull/#1}{##1}}
\newcommand{\ghit}{\href{https://github.com/RcppCore/RcppArmadillo/issues/#1}{##1}}

\section{Changes in RcppArmadillo version 14.1.99-1 (2024-11-05)}{
\section{Changes in RcppArmadillo version 14.1.99-1 (2024-11-07)}{
\itemize{
\item Upgraded to Armadillo release 14.2.0-rc0 (Stochastic Parrot)
\itemize{
2 changes: 1 addition & 1 deletion inst/include/armadillo_bits/auxlib_meat.hpp
@@ -28,7 +28,7 @@ auxlib::inv(Mat<eT>& A)
{
arma_debug_sigprint();

// NOTE: given a matrix with NaN values, getrf() and getri() do not necessarily fail,
// NOTE: given a matrix with NaN values, lapack::getrf() and lapack::getri() do not necessarily fail,
// NOTE: and can produce matrices with NaN values.
// NOTE: we're not checking for non-finite values to avoid breaking existing user code.

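The comment in the hunk above means that inv() can succeed yet return a matrix containing NaN values, because lapack::getrf() and lapack::getri() do not necessarily fail on non-finite input. A minimal caller-side sketch of a guard (safe_inv() is a hypothetical helper, not part of this commit):

#include <armadillo>
#include <stdexcept>

// hypothetical caller-side guard: inv() may silently propagate NaN values,
// so check the result explicitly before using it
arma::mat safe_inv(const arma::mat& A)
  {
  arma::mat B;
  
  const bool ok = arma::inv(B, A);   // returns false only on outright failure
  
  if( (ok == false) || (B.is_finite() == false) )
    {
    throw std::runtime_error("safe_inv(): inversion failed or produced non-finite values");
    }
  
  return B;
  }
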
8 changes: 4 additions & 4 deletions inst/include/armadillo_bits/config.hpp
@@ -95,16 +95,16 @@
#error "use ARMA_USE_FFTW3 instead of ARMA_USE_FFTW"
#endif

// #define ARMA_BLAS_64BIT_INT
//// Uncomment the above line if your BLAS and LAPACK libraries use 64 bit integers

// #define ARMA_BLAS_CAPITALS
//// Uncomment the above line if your BLAS and LAPACK libraries have capitalised function names

#define ARMA_BLAS_UNDERSCORE
//// Uncomment the above line if your BLAS and LAPACK libraries have function names with a trailing underscore.
//// Conversely, comment it out if the function names don't have a trailing underscore.

// #define ARMA_BLAS_LONG_LONG
//// Uncomment the above line if your BLAS and LAPACK libraries use 64 bit integers, ie. "long long" instead of "int"

// #define ARMA_BLAS_NOEXCEPT
//// Uncomment the above line if you require BLAS functions to have the 'noexcept' specification

@@ -128,7 +128,7 @@
//// Uncomment the above line to use Intel MKL types for complex numbers.
//// You will need to include appropriate MKL headers before the Armadillo header.
//// You may also need to enable or disable the following options:
//// ARMA_BLAS_LONG, ARMA_BLAS_LONG_LONG, ARMA_USE_FORTRAN_HIDDEN_ARGS
//// ARMA_BLAS_LONG_LONG, ARMA_USE_FORTRAN_HIDDEN_ARGS

#if !defined(ARMA_USE_OPENMP)
// #define ARMA_USE_OPENMP
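The config.hpp hunk renames the 64-bit integer option: ARMA_BLAS_LONG_LONG is replaced by ARMA_BLAS_64BIT_INT. A rough sketch of how a user translation unit might enable it, assuming (as with other ARMA_* options) that the macro can be defined before the header is included and that the linked BLAS/LAPACK is an ILP64 build:

// sketch: enable 64-bit BLAS/LAPACK integers before pulling in Armadillo;
// only appropriate when the linked BLAS/LAPACK actually uses 64-bit integers
#define ARMA_BLAS_64BIT_INT

#include <armadillo>

int main()
  {
  arma::mat A(4, 4, arma::fill::randu);
  
  arma::mat B = arma::inv(A);   // underlying LAPACK calls now pass 64-bit integer arguments
  
  B.print("inv(A):");
  
  return 0;
  }
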
2 changes: 1 addition & 1 deletion inst/include/armadillo_bits/op_cond_meat.hpp
@@ -37,7 +37,7 @@ op_cond::apply(const Base<typename T1::elem_type, T1>& X)

if(is_op_diagmat<T1>::value || A.is_diagmat())
{
arma_debug_print("op_cond::apply(): detected diagonal matrix");
arma_debug_print("op_cond::apply(): diag optimisation");

return op_cond::apply_diag(A);
}
4 changes: 2 additions & 2 deletions inst/include/armadillo_bits/op_expmat_meat.hpp
@@ -75,7 +75,7 @@ op_expmat::apply_direct(Mat<typename T1::elem_type>& out, const Base<typename T1

if(A.is_diagmat())
{
arma_debug_print("op_expmat: detected diagonal matrix");
arma_debug_print("op_expmat: diag optimisation");

const uword N = (std::min)(A.n_rows, A.n_cols);

@@ -195,7 +195,7 @@ op_expmat_sym::apply_direct(Mat<typename T1::elem_type>& out, const Base<typenam

if(is_op_diagmat<T1>::value || X.is_diagmat())
{
arma_debug_print("op_expmat_sym: detected diagonal matrix");
arma_debug_print("op_expmat_sym: diag optimisation");

out = X;

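The repeated message change from "detected diagonal matrix" to "diag optimisation" refers to the shortcut taken for diagonal inputs; for expmat() the matrix exponential of a diagonal matrix is just exp() applied to each diagonal element, so the dense algorithm is skipped. A small user-level illustration of that equivalence (not part of the diff):

#include <armadillo>

int main()
  {
  arma::vec d = {1.0, 2.0, 3.0};
  
  arma::mat D = arma::diagmat(d);
  
  arma::mat E1 = arma::expmat(D);              // takes the diagonal shortcut
  arma::mat E2 = arma::diagmat(arma::exp(d));  // elementwise exp on the diagonal
  
  // the two results should agree to machine precision
  E1.print("expmat(D):");
  E2.print("diagmat(exp(d)):");
  
  return 0;
  }
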
4 changes: 2 additions & 2 deletions inst/include/armadillo_bits/op_inv_spd_meat.hpp
@@ -191,7 +191,7 @@ op_inv_spd_full::apply_direct(Mat<typename T1::elem_type>& out, const Base<typen

if(is_op_diagmat<T1>::value || out.is_diagmat())
{
arma_debug_print("op_inv_spd_full: detected diagonal matrix");
arma_debug_print("op_inv_spd_full: diag optimisation");

eT* colmem = out.memptr();

@@ -300,7 +300,7 @@ op_inv_spd_rcond::apply_direct(Mat<typename T1::elem_type>& out, op_inv_spd_stat

if(is_op_diagmat<T1>::value || out.is_diagmat())
{
arma_debug_print("op_inv_spd_rcond: detected diagonal matrix");
arma_debug_print("op_inv_spd_rcond: diag optimisation");

out_state.is_diag = true;

2 changes: 1 addition & 1 deletion inst/include/armadillo_bits/op_log_det_meat.hpp
@@ -202,7 +202,7 @@ op_log_det_sympd::apply_direct(typename T1::pod_type& out_val, const Base<typena

if(is_op_diagmat<T1>::value || A.is_diagmat())
{
arma_debug_print("op_log_det_sympd: detected diagonal matrix");
arma_debug_print("op_log_det_sympd: diag optimisation");

eT* colmem = A.memptr();

6 changes: 3 additions & 3 deletions inst/include/armadillo_bits/op_logmat_meat.hpp
@@ -111,7 +111,7 @@ op_logmat::apply_direct(Mat< std::complex<typename T1::elem_type> >& out, const

if(A.is_diagmat())
{
arma_debug_print("op_logmat: detected diagonal matrix");
arma_debug_print("op_logmat: diag optimisation");

const uword N = A.n_rows;

@@ -292,7 +292,7 @@ op_logmat_cx::apply_direct(Mat<typename T1::elem_type>& out, const Base<typename

if(S.is_diagmat())
{
arma_debug_print("op_logmat_cx: detected diagonal matrix");
arma_debug_print("op_logmat_cx: diag optimisation");

const uword N = S.n_rows;

@@ -512,7 +512,7 @@ op_logmat_sympd::apply_direct(Mat<typename T1::elem_type>& out, const Base<typen

if(is_op_diagmat<T1>::value || X.is_diagmat())
{
arma_debug_print("op_logmat_sympd: detected diagonal matrix");
arma_debug_print("op_logmat_sympd: diag optimisation");

out = X;

2 changes: 1 addition & 1 deletion inst/include/armadillo_bits/op_pinv_meat.hpp
@@ -106,7 +106,7 @@ op_pinv::apply_direct(Mat<typename T1::elem_type>& out, const Base<typename T1::

if(is_op_diagmat<T1>::value || A.is_diagmat())
{
arma_debug_print("op_pinv: detected diagonal matrix");
arma_debug_print("op_pinv: diag optimisation");

return op_pinv::apply_diag(out, A, tol);
}
43 changes: 33 additions & 10 deletions inst/include/armadillo_bits/op_powmat_meat.hpp
@@ -96,7 +96,7 @@ op_powmat::apply_direct_positive(Mat<eT>& out, const Mat<eT>& X, const uword y)

if(X.is_diagmat())
{
arma_debug_print("op_powmat: detected diagonal matrix");
arma_debug_print("op_powmat: diag optimisation");

podarray<eT> tmp(N); // use temporary array in case we have aliasing

@@ -194,11 +194,11 @@ op_powmat_cx::apply_direct(Mat< std::complex<typename T1::pod_type> >& out, cons

if(A.is_diagmat())
{
arma_debug_print("op_powmat_cx: detected diagonal matrix");
arma_debug_print("op_powmat_cx: diag optimisation");

podarray<out_eT> tmp(N); // use temporary array in case we have aliasing

for(uword i=0; i<N; ++i) { tmp[i] = eop_aux::pow( std::complex<in_T>(A.at(i,i)), y) ; }
for(uword i=0; i<N; ++i) { tmp[i] = eop_aux::pow( std::complex<in_T>(A.at(i,i)), y ); }

out.zeros(N,N);

@@ -207,11 +207,11 @@ op_powmat_cx::apply_direct(Mat< std::complex<typename T1::pod_type> >& out, cons
return true;
}

const bool try_sympd = arma_config::optimise_sym && sym_helper::guess_sympd(A);
const bool try_sym = arma_config::optimise_sym && sym_helper::is_approx_sym(A);

if(try_sympd)
if(try_sym)
{
arma_debug_print("op_powmat_cx: attempting sympd optimisation");
arma_debug_print("op_powmat_cx: symmetric/hermitian optimisation");

Col<in_T> eigval;
Mat<in_eT> eigvec;
@@ -220,16 +220,39 @@ op_powmat_cx::apply_direct(Mat< std::complex<typename T1::pod_type> >& out, cons

if(eig_status)
{
eigval = pow(eigval, y);
bool all_pos = true;

const Mat<in_eT> tmp = diagmat(eigval) * eigvec.t();
for(uword i=0; i<N; ++i) { all_pos = (eigval[i] <= in_T(0)) ? false : all_pos; }

out = conv_to< Mat<out_eT> >::from(eigvec * tmp);
if(all_pos)
{
arma_debug_print("op_powmat_cx: all_pos = true");

eigval = pow(eigval, y);

const Mat<in_eT> tmp = eigvec * diagmat(eigval);

out = conv_to< Mat<out_eT> >::from(tmp * eigvec.t());
}
else
{
arma_debug_print("op_powmat_cx: all_pos = false");

Col<out_eT> cx_eigval_pow(N, arma_nozeros_indicator());

for(uword i=0; i<N; ++i) { cx_eigval_pow[i] = eop_aux::pow( std::complex<in_T>(eigval[i]), y ); }

const Mat<out_eT> cx_eigvec = conv_to< Mat<out_eT> >::from(eigvec);

const Mat<out_eT> tmp = cx_eigvec * diagmat(cx_eigval_pow);

out = tmp * cx_eigvec.t();
}

return true;
}

arma_debug_print("op_powmat_cx: sympd optimisation failed");
arma_debug_print("op_powmat_cx: symmetric/hermitian optimisation failed");

// fallthrough if optimisation failed
}
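The rewritten branch above widens the old sympd-only path: after eig_sym(), the eigenvalues are raised to the power y in the real domain when they are all positive, and in the complex domain otherwise, since a negative base with a non-integer exponent only has a complex result. A rough standalone sketch of the same idea (sym_powmat() is a hypothetical helper, not the code added here):

#include <armadillo>

// sketch: raise a real symmetric matrix to a real power via its eigendecomposition,
// mirroring the branch above: non-positive eigenvalues force the complex path
arma::cx_mat sym_powmat(const arma::mat& A, const double y)
  {
  arma::vec eigval;
  arma::mat eigvec;
  
  arma::eig_sym(eigval, eigvec, A);
  
  if( arma::all(eigval > 0.0) )
    {
    // all eigenvalues positive: pow() stays real
    const arma::mat R = eigvec * arma::diagmat(arma::pow(eigval, y)) * eigvec.t();
    
    return arma::conv_to<arma::cx_mat>::from(R);
    }
  
  // otherwise raise the eigenvalues to the power in the complex domain
  const arma::cx_vec cx_eigval = arma::pow(arma::conv_to<arma::cx_vec>::from(eigval), y);
  const arma::cx_mat cx_eigvec = arma::conv_to<arma::cx_mat>::from(eigvec);
  
  return cx_eigvec * arma::diagmat(cx_eigval) * cx_eigvec.t();
  }
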
2 changes: 1 addition & 1 deletion inst/include/armadillo_bits/op_rank_meat.hpp
@@ -37,7 +37,7 @@ op_rank::apply(uword& out, const Base<typename T1::elem_type,T1>& expr, const ty

if(is_op_diagmat<T1>::value || A.is_diagmat())
{
arma_debug_print("op_rank::apply(): detected diagonal matrix");
arma_debug_print("op_rank::apply(): diag optimisation");

return op_rank::apply_diag(out, A, tol);
}
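For op_rank the same diag shortcut means the rank of a diagonal matrix can be obtained by counting diagonal entries whose magnitude exceeds the tolerance, with no SVD needed. A small user-level illustration (assumed behaviour, not part of the diff):

#include <armadillo>
#include <iostream>

int main()
  {
  arma::mat D = arma::diagmat(arma::vec({3.0, 1e-16, 2.0}));
  
  // for a diagonal matrix, rank() can count the diagonal entries above the
  // tolerance instead of running a full SVD; the tiny entry counts as zero
  const arma::uword r = arma::rank(D);
  
  std::cout << "rank(D) = " << r << std::endl;
  
  return 0;
  }
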
6 changes: 3 additions & 3 deletions inst/include/armadillo_bits/op_sqrtmat_meat.hpp
@@ -116,7 +116,7 @@ op_sqrtmat::apply_direct(Mat< std::complex<typename T1::elem_type> >& out, const

if(A.is_diagmat())
{
arma_debug_print("op_sqrtmat: detected diagonal matrix");
arma_debug_print("op_sqrtmat: diag optimisation");

const uword N = A.n_rows;

@@ -325,7 +325,7 @@ op_sqrtmat_cx::apply_direct(Mat<typename T1::elem_type>& out, const Base<typenam

if(S.is_diagmat())
{
arma_debug_print("op_sqrtmat_cx: detected diagonal matrix");
arma_debug_print("op_sqrtmat_cx: diag optimisation");

const uword N = S.n_rows;

@@ -489,7 +489,7 @@ op_sqrtmat_sympd::apply_direct(Mat<typename T1::elem_type>& out, const Base<type

if(is_op_diagmat<T1>::value || X.is_diagmat())
{
arma_debug_print("op_sqrtmat_sympd: detected diagonal matrix");
arma_debug_print("op_sqrtmat_sympd: diag optimisation");

out = X;

