diff --git a/src/model/ffnn.cpp b/src/model/ffnn.cpp
index 4ef1c60da..a0e2c6420 100644
--- a/src/model/ffnn.cpp
+++ b/src/model/ffnn.cpp
@@ -70,18 +70,14 @@ std::vector compute_layer(std::uint32_t layer_id, const std::vector<
     auto n_neurons_curr_layer = n_neurons[layer_id];
 
     std::vector retval(n_neurons_curr_layer, 0_dbl);
-    fmt::print("nneurons: {}", n_neurons_prev_layer);
-    std::cout << std::endl;
     for (std::uint32_t i = 0u; i < n_neurons_curr_layer; ++i) {
         for (std::uint32_t j = 0u; j < n_neurons_prev_layer; ++j) {
-            fmt::print("layer, i, j: {}, {}, {}", layer_id, i, j);
-            std::cout << std::endl;
             retval[i] += net_wb[flattenw(i, j, n_neurons, layer_id)] * inputs[j];
         }
         retval[i] += net_wb[flattenb(i, n_neurons, layer_id, n_net_w)];
         retval[i] = activation(retval[i]);
     }
 
     return retval;
 }
 } // namespace detail
diff --git a/test/model_ffnn.cpp b/test/model_ffnn.cpp
index b09c6a113..5ac59511e 100644
--- a/test/model_ffnn.cpp
+++ b/test/model_ffnn.cpp
@@ -20,8 +20,9 @@ using namespace heyoka;
 
 TEST_CASE("impl")
 {
+    auto linear = [](expression ret) -> expression { return ret; };
     auto [x] = make_vars("x");
-    auto my_net = model::ffnn_impl({x}, 2, {2, 2}, {heyoka::tanh, heyoka::tanh, [](auto ret) { return ret; }},
+    auto my_net = model::ffnn_impl({x}, 2, {2, 2}, {heyoka::tanh, heyoka::tanh, linear},
                                    {1_dbl, 2_dbl, 3_dbl, 4_dbl, 5_dbl, 6_dbl, 7_dbl, 8_dbl, 9_dbl, 0_dbl,
                                     1_dbl, 2_dbl, 3_dbl, 4_dbl, 5_dbl, 6_dbl});
-}
\ No newline at end of file
+}