diff --git a/include/heyoka/model/ffnn.hpp b/include/heyoka/model/ffnn.hpp
index ea9560c26..e0141d763 100644
--- a/include/heyoka/model/ffnn.hpp
+++ b/include/heyoka/model/ffnn.hpp
@@ -40,7 +40,7 @@ auto ffnn_common_opts(const KwArgs &...kw_args)
     static_assert(!p.has_unnamed_arguments(), "This function accepts only named arguments");
 
     // Network inputs. Mandatory.
-    // The kw::inputs must be a range of values from which
+    // The kw::inputs argument must be a range of values from which
     // an expression can be constructed.
     std::vector<expression> inputs;
     if constexpr (p.has(kw::inputs)) {
@@ -102,18 +102,18 @@ auto ffnn_common_opts(const KwArgs &...kw_args)
     // overflows when manipulating indices and sizes.
     using su32 = boost::safe_numerics::safe<std::uint32_t>;
 
-    // Number of hidden layers (defined as all neuronal columns that are nor input nor output neurons)
+    // Number of hidden layers (defined as all neuronal columns that are neither input nor output neurons).
    auto n_hidden_layers = su32(nn_hidden.size());
-    // Number of neuronal layers (counting input and output)
+    // Number of neuronal layers (counting input and output).
     auto n_layers = n_hidden_layers + 2;
-    // Number of inputs
+    // Number of inputs.
     auto n_in = su32(inputs.size());
-    // Number of neurons per neuronal layer
+    // Number of neurons per neuronal layer.
     std::vector<su32> n_neurons{n_in};
     n_neurons.insert(n_neurons.end(), nn_hidden.begin(), nn_hidden.end());
     n_neurons.insert(n_neurons.end(), n_out);
 
-    // Number of network parameters (wb: weights and biases, w: only weights)
+    // Number of network parameters (wb: weights and biases, w: only weights).
     su32 n_wb = 0;
     for (su32 i = 1; i < n_layers; ++i) {
         n_wb += n_neurons[i - 1] * n_neurons[i];
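A side note on the parameter-count loop that closes the hunk above: each pair of adjacent layers contributes n_neurons[i - 1] * n_neurons[i] weights plus n_neurons[i] biases. Here is a minimal standalone sketch of that same arithmetic in plain C++ (the layer sizes are made up for illustration, and no heyoka or Boost.SafeNumerics types are used):

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

int main()
{
    // Hypothetical topology: 2 inputs, two hidden layers of 4 neurons, 1 output.
    const std::vector<std::uint32_t> n_neurons = {2, 4, 4, 1};

    std::uint32_t n_wb = 0; // weights and biases
    for (std::size_t i = 1; i < n_neurons.size(); ++i) {
        n_wb += n_neurons[i - 1] * n_neurons[i]; // weights feeding layer i
        n_wb += n_neurons[i];                    // biases of layer i
    }

    // (2*4 + 4) + (4*4 + 4) + (4*1 + 1) = 12 + 20 + 5 = 37.
    std::cout << "n_wb = " << n_wb << '\n';
}
```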
diff --git a/src/model/ffnn.cpp b/src/model/ffnn.cpp
index 88ce78d2b..4e5358021 100644
--- a/src/model/ffnn.cpp
+++ b/src/model/ffnn.cpp
@@ -47,15 +47,15 @@ std::vector<expression> compute_layer(su32 layer_id, const std::vector<expression>
 std::vector<expression> ffnn_impl(const std::vector<expression> &in, const std::vector<std::uint32_t> &nn_hidden,
                                   std::uint32_t n_out,
                                   const std::vector<std::function<expression(const expression &)>> &activations,
                                   const std::vector<expression> &nn_wb)
 {
-    // Sanity checks
+    // Sanity checks.
     if (activations.empty()) {
         throw std::invalid_argument("Cannot create a FFNN with an empty list of activation functions");
     }
@@ -96,24 +96,24 @@ std::vector<expression> ffnn_impl(const std::vector<expression> &in, const std::vector<std::uint32_t>
     // indices and sizes.
     using detail::su32;
-    // Number of hidden layers (defined as all neuronal columns that are nor input nor output neurons)
+    // Number of hidden layers (defined as all neuronal columns that are neither input nor output neurons).
     auto n_hidden_layers = su32(nn_hidden.size());
-    // Number of neuronal layers (counting input and output)
+    // Number of neuronal layers (counting input and output).
     auto n_layers = n_hidden_layers + 2;
-    // Number of inputs
+    // Number of inputs.
     auto n_in = su32(in.size());
-    // Number of neurons per neuronal layer
+    // Number of neurons per neuronal layer.
     std::vector<su32> n_neurons{n_in};
     n_neurons.insert(n_neurons.end(), nn_hidden.begin(), nn_hidden.end());
     n_neurons.insert(n_neurons.end(), n_out);
 
-    // Number of network parameters (wb: weights and biases, w: only weights)
+    // Number of network parameters (wb: weights and biases, w: only weights).
     su32 n_net_wb = 0, n_net_w = 0;
     for (su32 i = 1; i < n_layers; ++i) {
         n_net_wb += n_neurons[i - 1u] * n_neurons[i];
         n_net_w += n_neurons[i - 1u] * n_neurons[i];
         n_net_wb += n_neurons[i];
     }
-    // Sanity check
+    // Sanity check.
     if (nn_wb.size() != n_net_wb) {
         throw std::invalid_argument(fmt::format(
             "The number of network parameters, detected from its structure to be {}, does not match the size of "
@@ -121,7 +121,7 @@ std::vector<expression> ffnn_impl(const std::vector<expression> &in, const std::vector<std::uint32_t>
             static_cast<std::uint32_t>(n_net_wb), nn_wb.size()));
     }
 
-    // Now we build the expressions recursively transvering from layer to layer (L = f(Wx+b)))
+    // Now we build the expressions recursively, traversing from layer to layer (L = f(Wx+b)).
     std::vector<expression> retval = in;
     su32 wcounter = 0, bcounter = 0;
     for (su32 i = 1; i < n_layers; ++i) {
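For reviewers unfamiliar with the recursion documented in that last comment: each iteration of the loop consumes the next block of weights and biases from the flat parameter vector nn_wb and maps the previous layer through f(Wx + b). Below is a minimal numeric sketch of that bookkeeping. heyoka builds symbolic expressions rather than doubles, and the parameter layout assumed here (all weights first, then all biases, with tanh as the activation) is an illustrative assumption, not a statement of heyoka's actual storage order:

```cpp
#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
    // Hypothetical topology: 2 inputs, one hidden layer of 3 neurons, 1 output.
    const std::vector<std::size_t> n_neurons = {2, 3, 1};

    // Flat parameter vector: assumed layout is all weights (layer by layer),
    // then all biases. Weight count: 2*3 + 3*1 = 9; bias count: 3 + 1 = 4.
    const std::size_t n_net_w = 9;             // biases start at this offset
    const std::vector<double> nn_wb(13, 0.1);  // placeholder parameter values

    std::vector<double> retval = {1.0, -1.0};  // network inputs
    std::size_t wcounter = 0, bcounter = 0;

    for (std::size_t i = 1; i < n_neurons.size(); ++i) {
        std::vector<double> layer(n_neurons[i], 0.);
        for (std::size_t j = 0; j < n_neurons[i]; ++j) {
            for (std::size_t k = 0; k < n_neurons[i - 1]; ++k) {
                layer[j] += nn_wb[wcounter++] * retval[k]; // W * x
            }
            layer[j] += nn_wb[n_net_w + bcounter++]; // + b
            layer[j] = std::tanh(layer[j]);          // activation f
        }
        retval = std::move(layer); // feed this layer into the next one
    }

    std::cout << "output = " << retval[0] << '\n';
}
```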