Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Coot 2: Change arma::function to function #391

Closed
wants to merge 22 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions include/ensmallen_bits/ada_belief/ada_belief_update.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -113,13 +113,13 @@ class AdaBeliefUpdate
m += (1 - parent.beta1) * gradient;

s *= parent.beta2;
s += (1 - parent.beta2) * arma::pow(gradient - m, 2.0) + parent.epsilon;
s += (1 - parent.beta2) * pow(gradient - m, 2.0) + parent.epsilon;

const double biasCorrection1 = 1.0 - std::pow(parent.beta1, iteration);
const double biasCorrection2 = 1.0 - std::pow(parent.beta2, iteration);

// And update the iterate.
iterate -= ((m / biasCorrection1) * stepSize) / (arma::sqrt(s /
iterate -= ((m / biasCorrection1) * stepSize) / (sqrt(s /
biasCorrection2) + parent.epsilon);
}

Expand Down
4 changes: 2 additions & 2 deletions include/ensmallen_bits/ada_bound/ada_bound_update.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -150,8 +150,8 @@ class AdaBoundUpdate
const ElemType upper = fl * (1.0 + 1.0 / (parent.gamma * iteration));

// Applies bounds on actual learning rate.
iterate -= arma::clamp((stepSize *
std::sqrt(biasCorrection2) / biasCorrection1) / (arma::sqrt(v) +
iterate -= clamp((stepSize *
std::sqrt(biasCorrection2) / biasCorrection1) / (sqrt(v) +
parent.epsilon), lower, upper) % m;
}

Expand Down
4 changes: 2 additions & 2 deletions include/ensmallen_bits/ada_bound/ams_bound_update.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -154,9 +154,9 @@ class AMSBoundUpdate
vImproved = arma::max(vImproved, v);

// Applies bounds on actual learning rate.
iterate -= arma::clamp((stepSize *
iterate -= clamp((stepSize *
std::sqrt(biasCorrection2) / biasCorrection1) /
(arma::sqrt(vImproved) + parent.epsilon), lower, upper) % m;
(sqrt(vImproved) + parent.epsilon), lower, upper) % m;
}

private:
Expand Down
2 changes: 1 addition & 1 deletion include/ensmallen_bits/ada_delta/ada_delta_update.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ class AdaDeltaUpdate
// Accumulate gradient.
meanSquaredGradient *= parent.rho;
meanSquaredGradient += (1 - parent.rho) * (gradient % gradient);
GradType dx = arma::sqrt((meanSquaredGradientDx + parent.epsilon) /
GradType dx = sqrt((meanSquaredGradientDx + parent.epsilon) /
(meanSquaredGradient + parent.epsilon)) % gradient;

// Accumulate updates.
Expand Down
2 changes: 1 addition & 1 deletion include/ensmallen_bits/ada_grad/ada_grad_update.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -96,7 +96,7 @@ class AdaGradUpdate
const GradType& gradient)
{
squaredGradient += (gradient % gradient);
iterate -= (stepSize * gradient) / (arma::sqrt(squaredGradient) +
iterate -= (stepSize * gradient) / (sqrt(squaredGradient) +
parent.epsilon);
}

Expand Down
2 changes: 1 addition & 1 deletion include/ensmallen_bits/ada_sqrt/ada_sqrt_update.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,7 @@ class AdaSqrtUpdate
{
++iteration;

squaredGradient += arma::square(gradient);
squaredGradient += square(gradient);

iterate -= stepSize * std::sqrt(iteration) * gradient /
(squaredGradient + parent.epsilon);
Expand Down
6 changes: 3 additions & 3 deletions include/ensmallen_bits/adam/adam_update.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -123,12 +123,12 @@ class AdamUpdate
const double biasCorrection2 = 1.0 - std::pow(parent.beta2, iteration);

/**
* It should be noted that the term, m / (arma::sqrt(v) + eps), in the
* It should be noted that the term, m / (sqrt(v) + eps), in the
* following expression is an approximation of the following actual term;
* m / (arma::sqrt(v) + arma::sqrt(biasCorrection2) * eps).
* m / (sqrt(v) + sqrt(biasCorrection2) * eps).
*/
iterate -= (stepSize * std::sqrt(biasCorrection2) / biasCorrection1) *
m / (arma::sqrt(v) + parent.epsilon);
m / (sqrt(v) + parent.epsilon);
}

private:
Expand Down
2 changes: 1 addition & 1 deletion include/ensmallen_bits/adam/adamax_update.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -120,7 +120,7 @@ class AdaMaxUpdate

// Update the exponentially weighted infinity norm.
u *= parent.beta2;
u = arma::max(u, arma::abs(gradient));
u = arma::max(u, abs(gradient));

const double biasCorrection1 = 1.0 - std::pow(parent.beta1, iteration);

Expand Down
2 changes: 1 addition & 1 deletion include/ensmallen_bits/adam/amsgrad_update.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -122,7 +122,7 @@ class AMSGradUpdate
vImproved = arma::max(vImproved, v);

iterate -= (stepSize * std::sqrt(biasCorrection2) / biasCorrection1) *
m / (arma::sqrt(vImproved) + parent.epsilon);
m / (sqrt(vImproved) + parent.epsilon);
}

private:
Expand Down
6 changes: 3 additions & 3 deletions include/ensmallen_bits/adam/nadam_update.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -135,12 +135,12 @@ class NadamUpdate
const double biasCorrection2 = 1.0 - std::pow(parent.beta2, iteration);
const double biasCorrection3 = 1.0 - (cumBeta1 * beta1T1);

/* Note :- arma::sqrt(v) + epsilon * sqrt(biasCorrection2) is approximated
* as arma::sqrt(v) + epsilon
/* Note :- sqrt(v) + epsilon * sqrt(biasCorrection2) is approximated
* as sqrt(v) + epsilon
*/
iterate -= (stepSize * (((1 - beta1T) / biasCorrection1) * gradient
+ (beta1T1 / biasCorrection3) * m) * sqrt(biasCorrection2))
/ (arma::sqrt(v) + parent.epsilon);
/ (sqrt(v) + parent.epsilon);
}

private:
Expand Down
2 changes: 1 addition & 1 deletion include/ensmallen_bits/adam/nadamax_update.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -120,7 +120,7 @@ class NadaMaxUpdate
m *= parent.beta1;
m += (1 - parent.beta1) * gradient;

u = arma::max(u * parent.beta2, arma::abs(gradient));
u = arma::max(u * parent.beta2, abs(gradient));

double beta1T = parent.beta1 * (1 - (0.5 *
std::pow(0.96, iteration * parent.scheduleDecay)));
Expand Down
4 changes: 2 additions & 2 deletions include/ensmallen_bits/adam/optimisticadam_update.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -117,12 +117,12 @@ class OptimisticAdamUpdate
m += (1 - parent.beta1) * gradient;

v *= parent.beta2;
v += (1 - parent.beta2) * arma::square(gradient);
v += (1 - parent.beta2) * square(gradient);

GradType mCorrected = m / (1.0 - std::pow(parent.beta1, iteration));
GradType vCorrected = v / (1.0 - std::pow(parent.beta2, iteration));

GradType update = mCorrected / (arma::sqrt(vCorrected) + parent.epsilon);
GradType update = mCorrected / (sqrt(vCorrected) + parent.epsilon);

iterate -= (2 * stepSize * update - stepSize * g);

Expand Down
2 changes: 1 addition & 1 deletion include/ensmallen_bits/bigbatch_sgd/adaptive_stepsize.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -149,7 +149,7 @@ class AdaptiveStepsize
2.0);

// Compute curvature.
double v = arma::trace(arma::trans(iterate - iteratePrev) *
double v = arma::trace(trans(iterate - iteratePrev) *
(gradient - gradPrevIterate)) /
std::pow(arma::norm(iterate - iteratePrev, 2), 2.0);

Expand Down
2 changes: 1 addition & 1 deletion include/ensmallen_bits/callbacks/grad_clip_by_value.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ class GradClipByValue
const MatType& /* coordinates */,
MatType& gradient)
{
gradient = arma::clamp(gradient, lower, upper);
gradient = clamp(gradient, lower, upper);
return false;
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ class GreedyDescent

function.PartialGradient(iterate, i, fGrad);

ElemType descent = arma::accu(fGrad);
ElemType descent = accu(fGrad);
if (descent > bestDescent)
{
bestFeature = i;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ class RandomDescent
const MatType& /* iterate */,
const ResolvableFunctionType& function)
{
return arma::as_scalar(arma::randi<arma::uvec>(
return as_scalar(arma::randi<arma::uvec>(
1, arma::distr_param(0, function.NumFeatures() - 1)));
}
};
Expand Down
8 changes: 4 additions & 4 deletions include/ensmallen_bits/cmaes/active_cmaes_impl.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -191,10 +191,10 @@ typename MatType::elem_type ActiveCMAES<SelectionPolicyType,
// Perform Cholesky decomposition. If the matrix is not positive definite,
// add a small value and try again.
BaseMatType covLower;
while (!arma::chol(covLower, C[idx0], "lower"))
while (!chol(covLower, C[idx0], "lower"))
C[idx0].diag() += std::numeric_limits<ElemType>::epsilon();

arma::eig_sym(eigval, eigvec, C[idx0]);
eig_sym(eigval, eigvec, C[idx0]);

for (size_t j = 0; j < lambda; ++j)
{
Expand All @@ -218,7 +218,7 @@ typename MatType::elem_type ActiveCMAES<SelectionPolicyType,
}

// Sort population.
idx = arma::sort_index(pObjective);
idx = sort_index(pObjective);

step = w * pStep[idx(0)];
for (size_t j = 1; j < mu; ++j)
Expand Down Expand Up @@ -308,7 +308,7 @@ typename MatType::elem_type ActiveCMAES<SelectionPolicyType,
}
}

arma::eig_sym(eigval, eigvec, C[idx1]);
eig_sym(eigval, eigvec, C[idx1]);
const arma::uvec negativeEigval = arma::find(eigval < 0, 1);
if (!negativeEigval.is_empty())
{
Expand Down
14 changes: 7 additions & 7 deletions include/ensmallen_bits/cmaes/cmaes_impl.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -95,12 +95,12 @@ typename MatType::elem_type CMAES<SelectionPolicyType,

// Parent weights.
const size_t mu = std::round(lambda / 2);
BaseMatType w = std::log(mu + 0.5) - arma::log(
BaseMatType w = std::log(mu + 0.5) - log(
arma::linspace<BaseMatType>(0, mu - 1, mu) + 1.0);
w /= arma::accu(w);
w /= accu(w);

// Number of effective solutions.
const double muEffective = 1 / arma::accu(arma::pow(w, 2));
const double muEffective = 1 / accu(pow(w, 2));

// Step size control parameters.
BaseMatType sigma(2, 1); // sigma is vector-shaped.
Expand Down Expand Up @@ -195,10 +195,10 @@ typename MatType::elem_type CMAES<SelectionPolicyType,
// Perform Cholesky decomposition. If the matrix is not positive definite,
// add a small value and try again.
BaseMatType covLower;
while (!arma::chol(covLower, C[idx0], "lower"))
while (!chol(covLower, C[idx0], "lower"))
C[idx0].diag() += std::numeric_limits<ElemType>::epsilon();

arma::eig_sym(eigval, eigvec, C[idx0]);
eig_sym(eigval, eigvec, C[idx0]);

for (size_t j = 0; j < lambda; ++j)
{
Expand All @@ -222,7 +222,7 @@ typename MatType::elem_type CMAES<SelectionPolicyType,
}

// Sort population.
idx = arma::sort_index(pObjective);
idx = sort_index(pObjective);

step = w(0) * pStep[idx(0)];
for (size_t j = 1; j < mu; ++j)
Expand Down Expand Up @@ -324,7 +324,7 @@ typename MatType::elem_type CMAES<SelectionPolicyType,
}
}

arma::eig_sym(eigval, eigvec, C[idx1]);
eig_sym(eigval, eigvec, C[idx1]);
const arma::uvec negativeEigval = arma::find(eigval < 0, 1);
if (!negativeEigval.is_empty())
{
Expand Down
2 changes: 1 addition & 1 deletion include/ensmallen_bits/cmaes/random_selection.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ class RandomSelection
typename MatType::elem_type objective = 0;
for (size_t f = 0; f < std::floor(numFunctions * fraction); f += batchSize)
{
const size_t selection = arma::as_scalar(arma::randi<arma::uvec>(
const size_t selection = as_scalar(arma::randi<arma::uvec>(
1, arma::distr_param(0, numFunctions - 1)));
const size_t effectiveBatchSize = std::min(batchSize,
numFunctions - selection);
Expand Down
6 changes: 3 additions & 3 deletions include/ensmallen_bits/cne/cne_impl.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -170,7 +170,7 @@ inline void CNE::Reproduce(std::vector<MatType>& population,
arma::uvec& index)
{
// Sort fitness values. Smaller fitness value means better performance.
index = arma::sort_index(fitnessValues);
index = sort_index(fitnessValues);

// First parent.
size_t mom;
Expand All @@ -181,10 +181,10 @@ inline void CNE::Reproduce(std::vector<MatType>& population,
for (size_t i = numElite; i < populationSize - 1; i++)
{
// Select 2 different parents from elite group randomly [0, numElite).
mom = arma::as_scalar(arma::randi<arma::uvec>(
mom = as_scalar(arma::randi<arma::uvec>(
1, arma::distr_param(0, numElite - 1)));

dad = arma::as_scalar(arma::randi<arma::uvec>(
dad = as_scalar(arma::randi<arma::uvec>(
1, arma::distr_param(0, numElite - 1)));

// Making sure both parents are not the same.
Expand Down
2 changes: 1 addition & 1 deletion include/ensmallen_bits/eve/eve_impl.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -148,7 +148,7 @@ Eve::Optimize(SeparableFunctionType& function,
lastObjective = objective;

iterate -= stepSize / dt * (m / biasCorrection1) /
(arma::sqrt(v / biasCorrection2) + epsilon);
(sqrt(v / biasCorrection2) + epsilon);

terminate |= Callback::StepTaken(*this, f, iterate, callbacks...);

Expand Down
2 changes: 1 addition & 1 deletion include/ensmallen_bits/ftml/ftml_update.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -117,7 +117,7 @@ class FTMLUpdate

MatType sigma = -parent.beta1 * d;
d = biasCorrection1 / stepSize *
(arma::sqrt(v / biasCorrection2) + parent.epsilon);
(sqrt(v / biasCorrection2) + parent.epsilon);
sigma += d;

z *= parent.beta1;
Expand Down
10 changes: 5 additions & 5 deletions include/ensmallen_bits/fw/constr_lpball.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ class ConstrLpBallSolver
if (p == std::numeric_limits<double>::infinity())
{
// l-inf ball.
s = -arma::sign(v);
s = -sign(v);
if (regFlag)
{
// Do element-wise division.
Expand All @@ -104,8 +104,8 @@ class ConstrLpBallSolver
s = v;

double q = 1 / (1.0 - 1.0 / p);
s = -arma::sign(v) % arma::pow(arma::abs(s), q - 1);
s = arma::normalise(s, p);
s = -sign(v) % pow(abs(s), q - 1);
s = normalise(s, p);

if (regFlag)
s = s / arma::conv_to<arma::Col<ElemType>>::from(lambda);
Expand All @@ -114,9 +114,9 @@ class ConstrLpBallSolver
{
// l1 ball, also used in OMP.
if (regFlag)
s = arma::abs(v / arma::conv_to<arma::Col<ElemType>>::from(lambda));
s = abs(v / arma::conv_to<arma::Col<ElemType>>::from(lambda));
else
s = arma::abs(v);
s = abs(v);

arma::uword k = 0;
s.max(k); // k is the linear index of the largest element.
Expand Down
2 changes: 1 addition & 1 deletion include/ensmallen_bits/fw/func_sq.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ class FuncSq
double Evaluate(const arma::mat& coords)
{
arma::vec r = A * coords - b;
return arma::dot(r, r) * 0.5;
return dot(r, r) * 0.5;
}

/**
Expand Down
2 changes: 1 addition & 1 deletion include/ensmallen_bits/fw/line_search/line_search_impl.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,7 @@ typename MatType::elem_type LineSearch::Derivative(FunctionType& function,
{
GradType gradient(x0.n_rows, x0.n_cols);
function.Gradient(x0 + gamma * deltaX, gradient);
return arma::dot(gradient, deltaX);
return dot(gradient, deltaX);
}

} // namespace ens
Expand Down
10 changes: 5 additions & 5 deletions include/ensmallen_bits/fw/proximal/proximal_impl.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -35,14 +35,14 @@ namespace ens {
template<typename MatType>
inline void Proximal::ProjectToL1Ball(MatType& v, double tau)
{
MatType simplexSol = arma::abs(v);
MatType simplexSol = abs(v);

// Already with L1 norm <= tau.
if (arma::accu(simplexSol) <= tau)
if (accu(simplexSol) <= tau)
return;

simplexSol = arma::sort(simplexSol, "descend");
MatType simplexSum = arma::cumsum(simplexSol);
simplexSol = sort(simplexSol, "descend");
MatType simplexSum = cumsum(simplexSol);

double nu = 0;
size_t rho = simplexSol.n_rows - 1;
Expand Down Expand Up @@ -72,7 +72,7 @@ inline void Proximal::ProjectToL1Ball(MatType& v, double tau)
template<typename MatType>
inline void Proximal::ProjectToL0Ball(MatType& v, int tau)
{
arma::uvec indices = arma::sort_index(arma::abs(v));
arma::uvec indices = sort_index(abs(v));
arma::uword numberToKill = v.n_elem - tau;

for (arma::uword i = 0; i < numberToKill; i++)
Expand Down
2 changes: 1 addition & 1 deletion include/ensmallen_bits/fw/update_full_correction.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ class UpdateFullCorrection
MatType v = tau * s - oldCoords;
MatType b = function.Vectorb();
MatType A = function.MatrixA();
typename MatType::elem_type gamma = arma::dot(b - A * oldCoords, A * v);
typename MatType::elem_type gamma = dot(b - A * oldCoords, A * v);
gamma = gamma / std::pow(arma::norm(A * v, "fro"), 2);
gamma = std::min(gamma, 1.0);
atoms.CurrentCoeffs() = (1.0 - gamma) * atoms.CurrentCoeffs();
Expand Down
Loading