Co-authored-by: Peter Röseler <[email protected]>
Co-authored-by: Steve Wood <[email protected]>
Co-authored-by: Elena Peña Tapia <[email protected]>
1 parent 23f3b85 · commit 6724b47
Showing 3 changed files with 99 additions and 2 deletions.
releasenotes/notes/fix_aqgd_max_grouped_evals-fbe108c005a9b7ac.yaml (9 additions, 0 deletions)
@@ -0,0 +1,9 @@
---
fixes:
  - |
    Fixed the AQGD optimizer, which grouped objective function calls by default, so that a single
    point is now passed to the objective function. For objective functions that can handle more than
    one gradient evaluation at a time, such as the VQE in the algorithms here, the number of grouped
    evaluations can be controlled via the max_grouped_evals parameter. Grouped evaluations allow a
    list of points to be handed over so that they can potentially be assessed more efficiently in a
    single job.
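
A minimal usage sketch, for illustration only and not part of this diff: it assumes, as the release note states, that AQGD accepts a max_grouped_evals argument (shown here as a constructor parameter) and that the objective can evaluate several points concatenated into a single flat array.

import numpy as np
from qiskit_algorithms.optimizers import AQGD

def objective(params):
    # Toy objective for the API illustration only; AQGD's gradient rule is really
    # aimed at quantum expectation values rather than this simple quadratic.
    # Accept a single 2-parameter point or several points concatenated together.
    points = np.reshape(params, (-1, 2))
    values = np.sum(points**2, axis=1)  # one value per point
    return values if values.size > 1 else float(values[0])

# A single point per objective call remains the default after this fix;
# max_grouped_evals (named in the release note) allows batched calls.
optimizer = AQGD(maxiter=50, max_grouped_evals=4)
result = optimizer.minimize(objective, x0=np.array([1.0, -0.5]))
print(result.x, result.fun)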
@@ -0,0 +1,86 @@
# This code is part of a Qiskit project.
#
# (C) Copyright IBM 2024.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.

"""Tests for the ADAM optimizer."""

from test import QiskitAlgorithmsTestCase

from ddt import ddt, data
import numpy as np

from qiskit_algorithms.optimizers import ADAM, Optimizer
from qiskit_algorithms.utils import algorithm_globals

@ddt
class TestADAM(QiskitAlgorithmsTestCase):
    """Tests for the ADAM optimizer."""

    def setUp(self):
        super().setUp()
        algorithm_globals.random_seed = 52
        # Feature vector
        self.x = np.array([1, 2, 3, 4])
        # Target value
        self.y = 5

    def objective(self, w):
        """Objective function computing the mean squared error of a linear model.

        Args:
            w: The weights (including bias) of the linear model, as a flat array.

        Returns:
            float: The mean squared error.
        """
        # Reshape the flat parameter array to (5, n): the first four rows hold the
        # weights and the last row the bias (n > 1 when evaluations are grouped).
        new_shape = (5, int(len(w) / 5))
        w = np.reshape(w, new_shape)

        weights = w[:-1, :]
        bias = w[-1, :]
        # Calculate the predicted values
        y_pred = np.dot(self.x, weights) + bias
        # Calculate the mean squared error
        mse = np.mean((self.y - y_pred) ** 2)
        return mse

    def run_optimizer(self, optimizer: Optimizer, weights: np.ndarray, max_nfev: int):
        """Run the optimizer and check convergence and the evaluation budget.

        Args:
            optimizer: The optimizer instance to test.
            weights: The initial weights to optimize.
            max_nfev: The maximum allowed number of function evaluations.
        """
        # Minimize
        res = optimizer.minimize(self.objective, np.array(weights), None)
        error = res.fun
        nfev = res.nfev

        self.assertAlmostEqual(error, 0, places=3)
        self.assertLessEqual(nfev, max_nfev)

    @data(1, 5)
    def test_adam_max_evals(self, max_evals_grouped):
        """Test ADAM with different numbers of grouped evaluations."""
        # Initialize weights (including bias)
        w = np.zeros(len(self.x) + 1)
        # Initialize optimizer
        optimizer = ADAM(maxiter=10000, tol=1e-06)
        # Set how many points may be grouped into a single objective call
        optimizer.set_max_evals_grouped(max_evals_grouped)
        self.run_optimizer(optimizer, w, max_nfev=10000)
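
For context, and not part of this diff: a small sketch of why the test objective above reshapes its input. When evaluations are grouped, the finite-difference gradient helper in the Optimizer base class passes several perturbed points concatenated into one flat array and expects one value per point. The exact Optimizer.gradient_num_diff signature used below is an assumption about that helper.

import numpy as np
from qiskit_algorithms.optimizers import Optimizer

def batched_objective(w):
    # One row per 2-parameter point; several points may arrive concatenated.
    points = np.reshape(w, (-1, 2))
    values = np.sum(points**2, axis=1)
    return values if values.size > 1 else float(values[0])

# Finite-difference gradient at (1, -2), evaluating up to 3 perturbed points
# per objective call (assumed argument order: x_center, f, epsilon, max_evals_grouped).
grad = Optimizer.gradient_num_diff(np.array([1.0, -2.0]), batched_objective, 1e-6, 3)
print(grad)  # approximately [2.0, -4.0]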