
Commit

Minutiae.
bluescarni committed Nov 2, 2023
1 parent 7d474ec commit b93e68c
Showing 2 changed files with 17 additions and 17 deletions.
12 changes: 6 additions & 6 deletions include/heyoka/model/ffnn.hpp
@@ -40,7 +40,7 @@ auto ffnn_common_opts(const KwArgs &...kw_args)
static_assert(!p.has_unnamed_arguments(), "This function accepts only named arguments");

// Network inputs. Mandatory.
- // The kw::inputs must be a range of values from which
+ // The kw::inputs argument must be a range of values from which
// an expression can be constructed.
std::vector<expression> inputs;
if constexpr (p.has(kw::inputs)) {
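(The hunk is truncated here by the diff view.) For context, the named-argument handling visible above follows the pattern of the igor keyword-argument library (igor::parser, p.has(), p()). A hedged, self-contained sketch of that pattern with a hypothetical keyword named label; only the parser calls visible in the hunk above come from the diff, everything else is assumed:

#include <iostream>
#include <string>

#include <igor/igor.hpp>

// Hypothetical named argument, mirroring the kw::inputs pattern above.
IGOR_MAKE_NAMED_ARGUMENT(label);

template <typename... KwArgs>
void greet(const KwArgs &...kw_args)
{
    igor::parser p{kw_args...};
    static_assert(!p.has_unnamed_arguments(), "This function accepts only named arguments");

    // Fetch the argument if present, otherwise fall back to a default.
    std::string label_str = "world";
    if constexpr (p.has(label)) {
        label_str = p(label);
    }
    std::cout << "hello, " << label_str << '\n';
}

int main()
{
    greet(label = "ffnn");
}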
@@ -102,18 +102,18 @@ auto ffnn_common_opts(const KwArgs &...kw_args)
// overflows when manipulating indices and sizes.
using su32 = boost::safe_numerics::safe<std::uint32_t>;

- // Number of hidden layers (defined as all neuronal columns that are nor input nor output neurons)
+ // Number of hidden layers (defined as all neuronal columns that are nor input nor output neurons).
auto n_hidden_layers = su32(nn_hidden.size());
- // Number of neuronal layers (counting input and output)
+ // Number of neuronal layers (counting input and output).
auto n_layers = n_hidden_layers + 2;
- // Number of inputs
+ // Number of inputs.
auto n_in = su32(inputs.size());
- // Number of neurons per neuronal layer
+ // Number of neurons per neuronal layer.
std::vector<su32> n_neurons{n_in};
n_neurons.insert(n_neurons.end(), nn_hidden.begin(), nn_hidden.end());
n_neurons.insert(n_neurons.end(), n_out);

- // Number of network parameters (wb: weights and biases, w: only weights)
+ // Number of network parameters (wb: weights and biases, w: only weights).
su32 n_wb = 0;
for (su32 i = 1; i < n_layers; ++i) {
n_wb += n_neurons[i - 1] * n_neurons[i];
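Aside: the su32 alias above wraps std::uint32_t in Boost.SafeNumerics so that overflow in index and size arithmetic throws instead of silently wrapping. A minimal standalone sketch of that behaviour; only the alias itself comes from the code above, the rest is illustrative and assumes the library's default exception policy:

#include <cstdint>
#include <iostream>

#include <boost/safe_numerics/safe_integer.hpp>

// Same alias as in ffnn.hpp.
using su32 = boost::safe_numerics::safe<std::uint32_t>;

int main()
{
    su32 a = 4294967295u; // largest std::uint32_t value.
    try {
        // With a plain std::uint32_t this would silently wrap to 0;
        // the safe wrapper detects the overflow and throws instead.
        a = a + 1u;
    } catch (const std::exception &e) {
        std::cout << "overflow caught: " << e.what() << '\n';
    }
}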
22 changes: 11 additions & 11 deletions src/model/ffnn.cpp
@@ -47,15 +47,15 @@ std::vector<expression> compute_layer(su32 layer_id, const std::vector<expressio
for (su32 i = 0; i < n_neurons_curr_layer; ++i) {
for (su32 j = 0; j < n_neurons_prev_layer; ++j) {

- // Add the weight and update the weight counter
+ // Add the weight and update the weight counter.
retval[i] += nn_wb[wcounter] * inputs[j];
++wcounter;
}

- // Add the bias and update the counter
+ // Add the bias and update the counter.
retval[i] += nn_wb[bcounter + n_net_w];
++bcounter;
- // Activation function
+ // Activation function.
retval[i] = activation(retval[i]);
}
return retval;
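For intuition, the loop above is the usual dense-layer formula out_i = activation(sum_j w_ij * in_j + b_i), with every weight of the network stored before every bias in the flat nn_wb vector (hence the bcounter + n_net_w offset). A plain-double sketch of the same indexing for a single layer in isolation; the function name and the tanh activation are illustrative, not part of the library:

#include <cmath>
#include <cstddef>
#include <vector>

// Illustrative only: one dense layer on doubles, with the flat
// weights-then-biases layout used above (biases start at offset n_w_total).
std::vector<double> dense_layer(const std::vector<double> &in, const std::vector<double> &wb,
                                std::size_t n_out, std::size_t n_w_total)
{
    std::vector<double> out(n_out, 0.);
    std::size_t wcounter = 0, bcounter = 0;
    for (std::size_t i = 0; i < n_out; ++i) {
        for (std::size_t j = 0; j < in.size(); ++j) {
            // Add the weight contribution and update the weight counter.
            out[i] += wb[wcounter] * in[j];
            ++wcounter;
        }
        // Add the bias, update the bias counter, apply the activation.
        out[i] = std::tanh(out[i] + wb[bcounter + n_w_total]);
        ++bcounter;
    }
    return out;
}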
@@ -68,7 +68,7 @@ std::vector<expression> ffnn_impl(const std::vector<expression> &in, const std::
const std::vector<std::function<expression(const expression &)>> &activations,
const std::vector<expression> &nn_wb)
{
- // Sanity checks
+ // Sanity checks.
if (activations.empty()) {
throw std::invalid_argument("Cannot create a FFNN with an empty list of activation functions");
}
@@ -96,32 +96,32 @@ std::vector<expression> ffnn_impl(const std::vector<expression> &in, const std::
// indices and sizes.
using detail::su32;

- // Number of hidden layers (defined as all neuronal columns that are nor input nor output neurons)
+ // Number of hidden layers (defined as all neuronal columns that are nor input nor output neurons).
auto n_hidden_layers = su32(nn_hidden.size());
- // Number of neuronal layers (counting input and output)
+ // Number of neuronal layers (counting input and output).
auto n_layers = n_hidden_layers + 2;
- // Number of inputs
+ // Number of inputs.
auto n_in = su32(in.size());
- // Number of neurons per neuronal layer
+ // Number of neurons per neuronal layer.
std::vector<su32> n_neurons{n_in};
n_neurons.insert(n_neurons.end(), nn_hidden.begin(), nn_hidden.end());
n_neurons.insert(n_neurons.end(), n_out);
- // Number of network parameters (wb: weights and biases, w: only weights)
+ // Number of network parameters (wb: weights and biases, w: only weights).
su32 n_net_wb = 0, n_net_w = 0;
for (su32 i = 1; i < n_layers; ++i) {
n_net_wb += n_neurons[i - 1u] * n_neurons[i];
n_net_w += n_neurons[i - 1u] * n_neurons[i];
n_net_wb += n_neurons[i];
}
- // Sanity check
+ // Sanity check.
if (nn_wb.size() != n_net_wb) {
throw std::invalid_argument(fmt::format(
"The number of network parameters, detected from its structure to be {}, does not match the size of "
"the corresponding expressions: {}.",
static_cast<std::uint32_t>(n_net_wb), nn_wb.size()));
}

- // Now we build the expressions recursively transvering from layer to layer (L = f(Wx+b)))
+ // Now we build the expressions recursively transvering from layer to layer (L = f(Wx+b))).
std::vector<expression> retval = in;
su32 wcounter = 0, bcounter = 0;
for (su32 i = 1; i < n_layers; ++i) {
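To make the parameter bookkeeping in the hunk above concrete: for a hypothetical network with 2 inputs, one hidden layer of 4 neurons and 1 output, n_neurons is {2, 4, 1}, so the loop accumulates n_net_w = 2*4 + 4*1 = 12 weights and n_net_wb = 12 + (4 + 1) = 17 weights plus biases; nn_wb must then contain exactly 17 expressions, or the sanity check throws.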

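Finally, a hedged sketch of how the resulting factory might be invoked. The kw::inputs, kw::nn_hidden, kw::n_out and kw::activations keyword names are taken from (or implied by) the code in this commit, but the exact public signature of model::ffnn is an assumption here, not something the diff confirms:

#include <iostream>

#include <heyoka/heyoka.hpp>

using namespace heyoka;

int main()
{
    auto [x, y] = make_vars("x", "y");

    // Assumed invocation: a 2-4-1 network with tanh activations on the
    // hidden and output layers (17 parameters, as counted above).
    auto net_out = model::ffnn(
        kw::inputs = {x, y}, kw::nn_hidden = {4u}, kw::n_out = 1u,
        kw::activations = {[](const expression &e) { return tanh(e); },
                           [](const expression &e) { return tanh(e); }});

    // Print the symbolic expression of the single output neuron.
    std::cout << net_out[0] << '\n';
}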