diff --git a/docs/_static/dataframe.css b/docs/_static/dataframe.css new file mode 100644 index 0000000000..760af7617f --- /dev/null +++ b/docs/_static/dataframe.css @@ -0,0 +1,35 @@ +/* Styling for pandas dataframes in documentation */ + +div.output table { + border: none; + border-collapse: collapse; + border-spacing: 0; + color: black; + font-size: 12px; + table-layout: fixed; + width: 100%; +} +div.output thead { + border-bottom: 1px solid black; + vertical-align: bottom; +} +div.output tr, +div.output th, +div.output td { + text-align: right; + vertical-align: middle; + padding: 0.5em 0.5em; + line-height: normal; + white-space: normal; + max-width: none; + border: none; +} +div.output th { + font-weight: bold; +} +div.output tbody tr:nth-child(odd) { + background: #f5f5f5; +} +div.output tbody tr:hover { + background: rgba(66, 165, 245, 0.2); +} \ No newline at end of file diff --git a/docs/conf.py b/docs/conf.py index 817a15ab67..bff413d70b 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -80,9 +80,7 @@ templates_path = ["_templates"] # Manually add the gallery CSS file for now # TODO: Figure out why the styling is not working by default -html_css_files = [ - "nbsphinx-gallery.css", -] +html_css_files = ["nbsphinx-gallery.css", "dataframe.css"] nbsphinx_timeout = 360 nbsphinx_execute = os.getenv("QISKIT_DOCS_BUILD_TUTORIALS", "never") @@ -171,6 +169,7 @@ "matplotlib": ("https://matplotlib.org/stable/", None), "qiskit": ("https://docs.quantum.ibm.com/api/qiskit/", None), "uncertainties": ("https://pythonhosted.org/uncertainties", None), + "pandas": ("http://pandas.pydata.org/docs/", None), "qiskit_aer": ("https://qiskit.org/ecosystem/aer", None), "qiskit_dynamics": ("https://qiskit.org/ecosystem/dynamics/", None), "qiskit_ibm_runtime": ("https://docs.quantum.ibm.com/api/qiskit-ibm-runtime/", None), @@ -236,6 +235,11 @@ def maybe_skip_member(app, what, name, obj, skip, options): "filter_kwargs", "fit_func", "signature", + "artifact_id", + "artifact_data", + "device_components", + "created_time", + "data", ] skip_members = [ ParameterRepr.repr, diff --git a/docs/howtos/artifacts.rst b/docs/howtos/artifacts.rst new file mode 100644 index 0000000000..57b4222f71 --- /dev/null +++ b/docs/howtos/artifacts.rst @@ -0,0 +1,148 @@ +Work with experiment artifacts +============================== + +Problem +------- + +You want to view, add, remove, and save artifacts associated with your :class:`.ExperimentData` instance. + +Solution +-------- + +Artifacts are used to store auxiliary data for an experiment that don't fit neatly in the +:class:`.AnalysisResult` model. Any data that can be serialized, such as fit data, can be added as +:class:`.ArtifactData` artifacts to :class:`.ExperimentData`. + +For example, after an experiment that uses :class:`.CurveAnalysis` is run, its :class:`.ExperimentData` +object is automatically populated with ``fit_summary`` and ``curve_data`` artifacts. The ``fit_summary`` +artifact has one or more :class:`.CurveFitResult` objects that contain parameters from the fit. The +``curve_data`` artifact has a :class:`.ScatterTable` object that contains raw and fitted data in a pandas +:class:`~pandas:pandas.DataFrame`. + +Viewing artifacts +~~~~~~~~~~~~~~~~~ + +Here we run a parallel experiment consisting of two :class:`.T1` experiments in parallel and then view the output +artifacts as a list of :class:`.ArtifactData` objects accessed by :meth:`.ExperimentData.artifacts`: + +.. 
jupyter-execute:: + + from qiskit_ibm_runtime.fake_provider import FakePerth + from qiskit_aer import AerSimulator + from qiskit_experiments.library import T1 + from qiskit_experiments.framework import ParallelExperiment + import numpy as np + + backend = AerSimulator.from_backend(FakePerth()) + exp1 = T1(physical_qubits=[0], delays=np.arange(1e-6, 6e-4, 5e-5)) + exp2 = T1(physical_qubits=[1], delays=np.arange(1e-6, 6e-4, 5e-5)) + data = ParallelExperiment([exp1, exp2], flatten_results=True).run(backend).block_for_results() + data.artifacts() + +Artifacts can be accessed using either the artifact ID, which has to be unique in each +:class:`.ExperimentData` object, or the artifact name, which does not have to be unique and will return +all artifacts with the same name: + +.. jupyter-execute:: + + print("Number of curve_data artifacts:", len(data.artifacts("curve_data"))) + # retrieve by name and index + curve_data_id = data.artifacts("curve_data")[0].artifact_id + # retrieve by ID + scatter_table = data.artifacts(curve_data_id).data + print("The first curve_data artifact:\n") + scatter_table.dataframe + +In composite experiments, artifacts behave like analysis results and figures in that if +``flatten_results`` isn't ``True``, they are accessible in the :meth:`.artifacts` method of each +:meth:`.child_data`. The artifacts in a large composite experiment with ``flatten_results=True`` can be +distinguished from each other using the :attr:`~.ArtifactData.experiment` and +:attr:`~.ArtifactData.device_components` +attributes. + +One useful pattern is to load raw or fitted data from ``curve_data`` for further data manipulation. You +can work with the dataframe using standard pandas dataframe methods or the built-in +:class:`.ScatterTable` methods: + +.. jupyter-execute:: + + import matplotlib.pyplot as plt + + exp_type = data.artifacts(curve_data_id).experiment + component = data.artifacts(curve_data_id).device_components[0] + + raw_data = scatter_table.filter(category="raw") + fitted_data = scatter_table.filter(category="fitted") + + # visualize the data + plt.figure() + plt.errorbar(raw_data.x, raw_data.y, yerr=raw_data.y_err, capsize=5, label="raw data") + plt.errorbar(fitted_data.x, fitted_data.y, yerr=fitted_data.y_err, capsize=5, label="fitted data") + plt.title(f"{exp_type} experiment on {component}") + plt.xlabel('x') + plt.ylabel('y') + plt.legend() + plt.show() + +Adding artifacts +~~~~~~~~~~~~~~~~ + +You can add arbitrary data as an artifact as long as it's serializable with :class:`.ExperimentEncoder`, +which extends Python's default JSON serialization with support for other data types commonly used with +Qiskit Experiments. + +.. jupyter-execute:: + + from qiskit_experiments.framework import ArtifactData + + new_artifact = ArtifactData(name="experiment_notes", data={"content": "Testing some new ideas."}) + data.add_artifacts(new_artifact) + data.artifacts("experiment_notes") + +.. jupyter-execute:: + + print(data.artifacts("experiment_notes").data) + +Saving and loading artifacts +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. note:: + This feature is only for those who have access to the cloud service. You can + check whether you do by logging into the IBM Quantum interface + and seeing if you can see the `database `__. + +Artifacts are saved and loaded to and from the cloud service along with the rest of the +:class:`ExperimentData` object. Artifacts are stored as ``.zip`` files in the cloud service grouped by +the artifact name. 
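+Each zip file bundles the JSON-serialized payload of every artifact sharing that name. As a rough
+sketch of the packing step (assuming only the standard ``zipfile`` and ``json`` modules; this
+mirrors the internal helper added in ``qiskit_experiments.database_service.utils``, and the
+service's actual upload format remains an implementation detail):
+
+.. code-block:: python
+
+    import io
+    import json
+    import zipfile
+
+    def pack_artifacts(filenames, payloads):
+        """Bundle JSON-serialized payloads into a single in-memory zip file."""
+        buffer = io.BytesIO()
+        with zipfile.ZipFile(buffer, "w", zipfile.ZIP_DEFLATED) as zip_file:
+            for filename, payload in zip(filenames, payloads):
+                zip_file.writestr(f"{filename}.json", json.dumps(payload))
+        buffer.seek(0)
+        return buffer
+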
+For example, the composite experiment above will generate two artifact files, ``fit_summary.zip`` and
+``curve_data.zip``. Each of these zip files will contain serialized artifact data in JSON format named
+by their unique artifact IDs:

+.. jupyter-execute::
+    :hide-code:
+
+    print("fit_summary.zip")
+    print(f"|- {data.artifacts('fit_summary')[0].artifact_id}.json")
+    print(f"|- {data.artifacts('fit_summary')[1].artifact_id}.json")
+    print("curve_data.zip")
+    print(f"|- {data.artifacts('curve_data')[0].artifact_id}.json")
+    print(f"|- {data.artifacts('curve_data')[1].artifact_id}.json")
+    print("experiment_notes.zip")
+    print(f"|- {data.artifacts('experiment_notes').artifact_id}.json")
+
+Note that for performance reasons, the auto save feature does not apply to artifacts. You must still
+call :meth:`.ExperimentData.save` once the experiment analysis has completed to upload artifacts to the
+cloud service.
+
+Note also that although individual artifacts can be deleted, artifact files currently cannot be removed
+from the cloud service. Instead, you can delete all artifacts of that name
+using :meth:`~.delete_artifact` and then call :meth:`.ExperimentData.save`.
+This will save an empty file to the service, and the loaded experiment data will not contain
+these artifacts.
+
+See Also
+--------
+
+* :ref:`Curve Analysis: Data management with scatter table ` tutorial
+* :class:`.ArtifactData` API documentation
+* :class:`.ScatterTable` API documentation
+* :class:`.CurveFitResult` API documentation
diff --git a/docs/manuals/measurement/readout_mitigation.rst b/docs/manuals/measurement/readout_mitigation.rst
index 418e7c7be4..3c1486be1b 100644
--- a/docs/manuals/measurement/readout_mitigation.rst
+++ b/docs/manuals/measurement/readout_mitigation.rst
@@ -78,7 +78,7 @@ circuits, one for all “0” and one for all “1” results.
 
     exp.analysis.set_options(plot=True)
     result = exp.run(backend)
-    mitigator = result.analysis_results(0).value
+    mitigator = result.analysis_results("Local Readout Mitigator").value
 
 The resulting measurement matrix can be illustrated by comparing it to the
 identity.
diff --git a/docs/tutorials/curve_analysis.rst b/docs/tutorials/curve_analysis.rst
index 4b243ef48d..368cf35f17 100644
--- a/docs/tutorials/curve_analysis.rst
+++ b/docs/tutorials/curve_analysis.rst
@@ -318,6 +318,8 @@ without an overhead of complex data management, as well as end-users with
 retrieving and reusing the intermediate data for their custom fitting workflow
 outside our curve fitting framework. Note that a :class:`ScatterTable` instance
 may be saved in the :class:`.ExperimentData` as an artifact.
+See the :doc:`Artifacts how-to ` for more information.
+
 
 .. _curve_analysis_workflow:
 
diff --git a/docs/tutorials/getting_started.rst b/docs/tutorials/getting_started.rst
index 7ca6e32567..7e0ade47ef 100644
--- a/docs/tutorials/getting_started.rst
+++ b/docs/tutorials/getting_started.rst
@@ -150,6 +150,9 @@ analysis, respectively:
 
     print(exp_data.job_status())
     print(exp_data.analysis_status())
 
+Figures
+-------
+
 Once the analysis is complete, figures are retrieved using the
 :meth:`~.ExperimentData.figure` method. See the
 :doc:`visualization module ` tutorial on how to customize
 figures for an experiment.
For our
@@ -160,15 +163,22 @@ exponential decay model of the :math:`T_1` experiment:
 
     display(exp_data.figure(0))
 
-The fit results and associated parameters are accessed with
-:meth:`~.ExperimentData.analysis_results`:
+Analysis Results
+----------------
+
+The analysis results from the fit are accessed with :meth:`~.ExperimentData.analysis_results`:
 
 .. jupyter-execute::
 
     for result in exp_data.analysis_results():
         print(result)
 
-Results can be indexed numerically (starting from 0) or using their name.
+Results can be indexed numerically (starting from 0) or using their name. Analysis results can also be
+retrieved in the pandas :class:`~pandas:pandas.DataFrame` format by passing ``dataframe=True``:
+
+.. jupyter-execute::
+
+    exp_data.analysis_results(dataframe=True)
 
 .. note::
     See the :meth:`~.ExperimentData.analysis_results` API documentation for more
@@ -186,6 +196,24 @@ value and standard deviation of each value can be accessed as follows:
 
 For further documentation on how to work with UFloats, consult the
 ``uncertainties`` :external+uncertainties:doc:`user_guide`.
 
+Artifacts
+---------
+
+The curve fit data itself is contained in :meth:`~.ExperimentData.artifacts`, which are accessed
+in an analogous manner. Artifacts for a standard experiment include both the curve fit data
+stored in ``artifacts("curve_data")`` and information on the fit stored in ``artifacts("fit_summary")``.
+Use the ``data`` attribute to access artifact data:
+
+.. jupyter-execute::
+
+    print(exp_data.artifacts("fit_summary").data)
+
+.. note::
+    See the :doc:`artifacts ` how-to for more information on using artifacts.
+
+Circuit data and metadata
+-------------------------
+
 Raw circuit output data and its associated metadata can be accessed with the
 :meth:`~.ExperimentData.data` property. Data is indexed by the circuit it
 corresponds to. Depending on the measurement level set in the experiment, the raw data will either
@@ -210,6 +238,9 @@ Experiments also have global associated metadata accessed by the
 
     print(exp_data.metadata)
 
+Job information
+---------------
+
 The actual backend jobs that were executed for the experiment can be accessed
 with the :meth:`~.ExperimentData.jobs` method.
 
@@ -406,8 +437,7 @@ into one level:
     )
 
     parallel_data = parallel_exp.run(backend, seed_simulator=101).block_for_results()
-    for result in parallel_data.analysis_results():
-        print(result)
+    parallel_data.analysis_results(dataframe=True)
 
 Broadcasting analysis options to child experiments
 --------------------------------------------------
diff --git a/qiskit_experiments/curve_analysis/base_curve_analysis.py b/qiskit_experiments/curve_analysis/base_curve_analysis.py
index e11cb63cc9..5aefcbe9f0 100644
--- a/qiskit_experiments/curve_analysis/base_curve_analysis.py
+++ b/qiskit_experiments/curve_analysis/base_curve_analysis.py
@@ -98,13 +98,6 @@ class BaseCurveAnalysis(BaseAnalysis, ABC):
     This method creates analysis results for important fit parameters
     that might be defined by analysis options ``result_parameters``.
 
-    .. rubric:: _create_curve_data
-
-    This method creates analysis results for the formatted dataset, i.e. data used for the fitting.
-    Entries are created when the analysis option ``return_data_points`` is ``True``.
-    If analysis consists of multiple series, analysis result is created for
-    each curve data in the series definitions.
-
     .. rubric:: _create_figures
 
     This method creates figures by consuming the scatter table data.
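    .. rubric:: Example

    A minimal subclassing sketch (the class and parameter names here are illustrative, following
    the pattern in the curve analysis tutorial; most subclasses only need to supply fit models):

    .. code-block:: python

        import lmfit

        from qiskit_experiments.curve_analysis import CurveAnalysis

        class SimpleDecayAnalysis(CurveAnalysis):
            """Hypothetical analysis fitting y = amp * exp(-x / tau) + base."""

            def __init__(self):
                super().__init__(
                    models=[
                        lmfit.models.ExpressionModel(
                            expr="amp * exp(-x / tau) + base",
                            name="simple_decay",
                        )
                    ]
                )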
@@ -162,9 +155,9 @@ def _default_options(cls) -> Options: dataset without formatting, on canvas. This is ``False`` by default. plot (bool): Set ``True`` to create figure for fit result or ``False`` to not create a figure. This overrides the behavior of ``generate_figures``. - return_fit_parameters (bool): Set ``True`` to return all fit model parameters - with details of the fit outcome. Default to ``True``. - return_data_points (bool): Set ``True`` to include in the analysis result + return_fit_parameters (bool): (Deprecated) Set ``True`` to return all fit model parameters + with details of the fit outcome. Default to ``False``. + return_data_points (bool): (Deprecated) Set ``True`` to include in the analysis result the formatted data points given to the fitter. Default to ``False``. data_processor (Callable): A callback function to format experiment data. This can be a :class:`.DataProcessor` @@ -237,49 +230,6 @@ def _default_options(cls) -> Options: return options - def set_options(self, **fields): - """Set the analysis options for :meth:`run` method. - - Args: - fields: The fields to update the options - - Raises: - KeyError: When removed option ``curve_fitter`` is set. - """ - # TODO remove this in Qiskit Experiments v0.5 - - if "curve_fitter_options" in fields: - warnings.warn( - "The option 'curve_fitter_options' is replaced with 'lmfit_options.' " - "This option will be removed in Qiskit Experiments 0.5.", - DeprecationWarning, - stacklevel=2, - ) - fields["lmfit_options"] = fields.pop("curve_fitter_options") - - # TODO remove this in Qiskit Experiments 0.6 - if "curve_drawer" in fields: - warnings.warn( - "The option 'curve_drawer' is replaced with 'plotter'. " - "This option will be removed in Qiskit Experiments 0.6.", - DeprecationWarning, - stacklevel=2, - ) - # Set the plotter drawer to `curve_drawer`. If `curve_drawer` is the right type, set it - # directly. If not, wrap it in a compatibility drawer. - if isinstance(fields["curve_drawer"], BaseDrawer): - plotter = self.options.plotter - plotter.drawer = fields.pop("curve_drawer") - fields["plotter"] = plotter - else: - drawer = fields["curve_drawer"] - compat_drawer = LegacyCurveCompatDrawer(drawer) - plotter = self.options.plotter - plotter.drawer = compat_drawer - fields["plotter"] = plotter - - super().set_options(**fields) - @abstractmethod def _run_data_processing( self, diff --git a/qiskit_experiments/curve_analysis/composite_curve_analysis.py b/qiskit_experiments/curve_analysis/composite_curve_analysis.py index 2a116f24f9..82aa3862e1 100644 --- a/qiskit_experiments/curve_analysis/composite_curve_analysis.py +++ b/qiskit_experiments/curve_analysis/composite_curve_analysis.py @@ -38,7 +38,8 @@ MplDrawer, ) -from .base_curve_analysis import PARAMS_ENTRY_PREFIX, BaseCurveAnalysis +from qiskit_experiments.framework.containers import FigureType, ArtifactData +from .base_curve_analysis import DATA_ENTRY_PREFIX, BaseCurveAnalysis, PARAMS_ENTRY_PREFIX from .curve_data import CurveFitResult from .scatter_table import ScatterTable from .utils import eval_with_uncertainties @@ -86,8 +87,6 @@ class CompositeCurveAnalysis(BaseAnalysis): The experimental circuits starting with different initial states must be distinguished by the circuit metadata ``{"init_state": 0}`` or ``{"init_state": 1}``, along with the "xval" in the same dictionary. - If you want to compute another quantity using two fitting outcomes, you can - override :meth:`CompositeCurveAnalysis._create_curve_data` in subclass. 
:class:`.CompositeCurveAnalysis` subclass may override following methods. @@ -273,9 +272,9 @@ def _default_options(cls) -> Options: the analysis result. plot (bool): Set ``True`` to create figure for fit result. This is ``True`` by default. - return_fit_parameters (bool): Set ``True`` to return all fit model parameters - with details of the fit outcome. Default to ``True``. - return_data_points (bool): Set ``True`` to include in the analysis result + return_fit_parameters (bool): (Deprecated) Set ``True`` to return all fit model parameters + with details of the fit outcome. Default to ``False``. + return_data_points (bool): (Deprecated) Set ``True`` to include in the analysis result the formatted data points given to the fitter. Default to ``False``. extra (Dict[str, Any]): A dictionary that is appended to all database entries as extra information. @@ -284,7 +283,7 @@ def _default_options(cls) -> Options: options.update_options( plotter=CurvePlotter(MplDrawer()), plot=True, - return_fit_parameters=True, + return_fit_parameters=False, return_data_points=False, extra={}, ) @@ -331,7 +330,10 @@ def set_options(self, **fields): def _run_analysis( self, experiment_data: ExperimentData, - ) -> Tuple[List[AnalysisResultData], List["matplotlib.figure.Figure"]]: + ) -> Tuple[List[Union[AnalysisResultData, ArtifactData]], List[FigureType]]: + result_data: List[Union[AnalysisResultData, ArtifactData]] = [] + figures: List[FigureType] = [] + artifacts: list[ArtifactData] = [] # Flag for plotting can be "always", "never", or "selective" # the analysis option overrides self._generate_figures if set @@ -342,9 +344,6 @@ def _run_analysis( else: plot = getattr(self, "_generate_figures", "always") - analysis_results = [] - figures = [] - fit_dataset = {} curve_data_set = [] for analysis in self._analyses: @@ -373,7 +372,7 @@ def _run_analysis( quality=quality, extra=metadata, ) - analysis_results.append(overview) + result_data.append(overview) if fit_data.success: # Add fit data to curve data table @@ -406,7 +405,7 @@ def _run_analysis( category="fitted", analysis=analysis.name, ) - analysis_results.extend( + result_data.extend( analysis._create_analysis_results( fit_data=fit_data, quality=quality, @@ -416,7 +415,13 @@ def _run_analysis( if self.options.return_data_points: # Add raw data points - analysis_results.extend( + warnings.warn( + f"{DATA_ENTRY_PREFIX + self.name} has been moved to experiment data artifacts. 
" + "Saving this result with 'return_data_points'=True will be disabled in " + "Qiskit Experiments 0.7.", + DeprecationWarning, + ) + result_data.extend( analysis._create_curve_data(curve_data=formatted_subset, **metadata) ) @@ -436,10 +441,23 @@ def _run_analysis( composite_results = self._create_analysis_results( fit_data=fit_dataset, quality=total_quality, **self.options.extra.copy() ) - analysis_results.extend(composite_results) + result_data.extend(composite_results) else: composite_results = [] + artifacts.append( + ArtifactData( + name="curve_data", + data=combined_curve_data, + ) + ) + artifacts.append( + ArtifactData( + name="fit_summary", + data=fit_dataset, + ) + ) + if plot_bool: self.plotter.set_supplementary_data( fit_red_chi={k: v.reduced_chisq for k, v in fit_dataset.items() if v.success}, @@ -447,4 +465,4 @@ def _run_analysis( ) figures.extend(self._create_figures(curve_data=combined_curve_data)) - return analysis_results, figures + return result_data + artifacts, figures diff --git a/qiskit_experiments/curve_analysis/curve_analysis.py b/qiskit_experiments/curve_analysis/curve_analysis.py index b08366ec0d..d1266e1615 100644 --- a/qiskit_experiments/curve_analysis/curve_analysis.py +++ b/qiskit_experiments/curve_analysis/curve_analysis.py @@ -13,6 +13,8 @@ """ Analysis class for curve fitting. """ +import warnings + # pylint: disable=invalid-name from typing import Dict, List, Tuple, Union, Optional @@ -23,10 +25,14 @@ import pandas as pd from uncertainties import unumpy as unp -from qiskit_experiments.framework import ExperimentData, AnalysisResultData +from qiskit_experiments.framework import ( + ExperimentData, + AnalysisResultData, +) +from qiskit_experiments.framework.containers import FigureType, ArtifactData from qiskit_experiments.data_processing.exceptions import DataProcessorError -from .base_curve_analysis import BaseCurveAnalysis, PARAMS_ENTRY_PREFIX +from .base_curve_analysis import BaseCurveAnalysis, DATA_ENTRY_PREFIX, PARAMS_ENTRY_PREFIX from .curve_data import FitOptions, CurveFitResult from .scatter_table import ScatterTable from .utils import ( @@ -85,14 +91,6 @@ class CurveAnalysis(BaseCurveAnalysis): This method creates analysis results for important fit parameters that might be defined by analysis options ``result_parameters``. - .. rubric:: _create_curve_data - - This method creates analysis results containing the formatted dataset, - i.e. data used for the fitting. - Entries are created when the analysis option ``return_data_points`` is ``True``. - If analysis consists of multiple series, an analysis result is created for - each series definition. - .. rubric:: _create_figures This method creates figures by consuming the scatter table data. 
@@ -456,9 +454,10 @@ def _create_figures( def _run_analysis( self, experiment_data: ExperimentData, - ) -> Tuple[List[AnalysisResultData], List["pyplot.Figure"]]: - analysis_results = [] - figures = [] + ) -> Tuple[List[Union[AnalysisResultData, ArtifactData]], List[FigureType]]: + figures: List[FigureType] = [] + result_data: List[Union[AnalysisResultData, ArtifactData]] = [] + artifacts: list[ArtifactData] = [] # Flag for plotting can be "always", "never", or "selective" # the analysis option overrides self._generate_figures if set @@ -494,7 +493,7 @@ def _run_analysis( quality=quality, extra=self.options.extra, ) - analysis_results.append(overview) + result_data.append(overview) if fit_data.success: # Add fit data to curve data table @@ -527,7 +526,7 @@ def _run_analysis( category="fitted", analysis=self.name, ) - analysis_results.extend( + result_data.extend( self._create_analysis_results( fit_data=fit_data, quality=quality, @@ -537,17 +536,36 @@ def _run_analysis( if self.options.return_data_points: # Add raw data points - analysis_results.extend(self._create_curve_data(curve_data=formatted_subset)) + warnings.warn( + f"{DATA_ENTRY_PREFIX + self.name} has been moved to experiment data artifacts. " + "Saving this result with 'return_data_points'=True will be disabled in " + "Qiskit Experiments 0.7.", + DeprecationWarning, + ) + result_data.extend(self._create_curve_data(curve_data=formatted_subset)) + + artifacts.append( + ArtifactData( + name="curve_data", + data=table, + ) + ) + artifacts.append( + ArtifactData( + name="fit_summary", + data=fit_data, + ) + ) if plot_bool: if fit_data.success: self.plotter.set_supplementary_data( fit_red_chi=fit_data.reduced_chisq, - primary_results=[r for r in analysis_results if not r.name.startswith("@")], + primary_results=[r for r in result_data if not r.name.startswith("@")], ) figures.extend(self._create_figures(curve_data=table)) - return analysis_results, figures + return result_data + artifacts, figures def __getstate__(self): state = self.__dict__.copy() diff --git a/qiskit_experiments/curve_analysis/curve_data.py b/qiskit_experiments/curve_analysis/curve_data.py index 4f5ee74380..8627b404ff 100644 --- a/qiskit_experiments/curve_analysis/curve_data.py +++ b/qiskit_experiments/curve_analysis/curve_data.py @@ -114,7 +114,7 @@ class CurveData: @deprecate_func( since="0.6", - additional_msg="CurveData is replaced with 'ScatterTable' with dataframe representation.", + additional_msg="CurveData is replaced by `ScatterTable`'s DataFrame representation.", removal_timeline="after 0.7", package_name="qiskit-experiments", ) diff --git a/qiskit_experiments/curve_analysis/utils.py b/qiskit_experiments/curve_analysis/utils.py index 3cd8496849..c2d4f47900 100644 --- a/qiskit_experiments/curve_analysis/utils.py +++ b/qiskit_experiments/curve_analysis/utils.py @@ -318,7 +318,7 @@ def sample_average( @deprecate_func( since="0.6", - additional_msg="The curve data representation is replaced with dataframe format.", + additional_msg="The curve data representation has been replaced by the `DataFrame` format.", package_name="qiskit-experiments", pending=True, ) @@ -351,7 +351,7 @@ def filter_data(data: List[Dict[str, any]], **filters) -> List[Dict[str, any]]: @deprecate_func( since="0.6", - additional_msg="The curve data representation is replaced with dataframe format.", + additional_msg="The curve data representation has been replaced by the `DataFrame` format.", package_name="qiskit-experiments", pending=True, ) @@ -477,7 +477,7 @@ def mean_xy_data( 
 @deprecate_func(
     since="0.6",
-    additional_msg="The curve data representation is replaced with dataframe format.",
+    additional_msg="The curve data representation has been replaced by the `DataFrame` format.",
     package_name="qiskit-experiments",
     pending=True,
 )
@@ -541,7 +541,7 @@ def multi_mean_xy_data(
 @deprecate_func(
     since="0.6",
-    additional_msg="The curve data representation is replaced with dataframe format.",
+    additional_msg="The curve data representation has been replaced by the `DataFrame` format.",
     package_name="qiskit-experiments",
     pending=True,
 )
diff --git a/qiskit_experiments/database_service/utils.py b/qiskit_experiments/database_service/utils.py
index 928d3ac43b..8cdbec1a20 100644
--- a/qiskit_experiments/database_service/utils.py
+++ b/qiskit_experiments/database_service/utils.py
@@ -14,13 +14,14 @@
 
 import importlib.metadata
 import io
+import zipfile
 import logging
 import threading
 import traceback
 from abc import ABC, abstractmethod
 from collections import OrderedDict
 from datetime import datetime, timezone
-from typing import Callable, Tuple, Dict, Any, Union, Type, Optional
+from typing import Callable, Tuple, Dict, Any, Union, Type, Optional, List, Iterator
 import json
 
 import dateutil.parser
@@ -74,6 +75,52 @@ def utc_to_local(utc_dt: datetime) -> datetime:
     return local_dt
 
 
+def objs_to_zip(
+    filenames: List[str], objects: List[Any], json_encoder: Optional[Type[json.JSONEncoder]] = None
+) -> io.BytesIO:
+    """Serialize a list of objects to JSON and pack into a zipped file buffer.
+
+    Args:
+        filenames: List of names for each serialized object inside the buffer. Names will have the
+            ``.json`` extension added.
+        objects: List of objects to be JSON serialized then zipped.
+        json_encoder: The JSON encoder to use.
+
+    Returns:
+        An in-memory buffer containing the zipped files.
+    """
+    zip_buffer = io.BytesIO()
+
+    with zipfile.ZipFile(zip_buffer, "w", zipfile.ZIP_DEFLATED, False) as zip_file:
+        for filename, data in zip(filenames, objects):
+            zip_file.writestr(f"{filename}.json", json.dumps(data, cls=json_encoder))
+
+    zip_buffer.seek(0)
+    return zip_buffer
+
+
+def zip_to_objs(
+    zip_bytes: bytes, json_decoder: Optional[Type[json.JSONDecoder]] = None
+) -> Iterator[Any]:
+    """Extract objects by deserializing JSON files in a zipped buffer.
+
+    Args:
+        zip_bytes: Bytes object representing the zipped file.
+        json_decoder: The JSON decoder to use.
+
+    Yields:
+        Objects deserialized from the JSON files in the zip file buffer.
+    """
+    if len(zip_bytes) == 0:  # artifact has been deleted, so there is nothing to yield
+        return
+
+    zip_buffer = io.BytesIO(zip_bytes)
+
+    with zipfile.ZipFile(zip_buffer, "r") as zip_file:
+        for file_name in zip_file.namelist():
+            with zip_file.open(file_name) as file:
+                json_data = file.read().decode("utf-8")
+                yield json.loads(json_data, cls=json_decoder)
+
+
 def plot_to_svg_bytes(figure: "pyplot.Figure") -> bytes:
     """Convert a pyplot Figure to SVG in bytes.
diff --git a/qiskit_experiments/framework/__init__.py b/qiskit_experiments/framework/__init__.py
index c6d9ccbae8..08263a72e6 100644
--- a/qiskit_experiments/framework/__init__.py
+++ b/qiskit_experiments/framework/__init__.py
@@ -91,6 +91,7 @@
     AnalysisConfig
     ExperimentEncoder
     ExperimentDecoder
+    ArtifactData
     FigureData
 
 ..
_composite-experiment: @@ -129,10 +130,15 @@ from qiskit.providers.options import Options from qiskit_experiments.framework.backend_data import BackendData from qiskit_experiments.framework.analysis_result import AnalysisResult -from qiskit_experiments.framework.experiment_data import ( +from qiskit_experiments.framework.status import ( ExperimentStatus, AnalysisStatus, + AnalysisCallback, +) +from qiskit_experiments.framework.containers import ( + ArtifactData, FigureData, + FigureType, ) from .base_analysis import BaseAnalysis from .base_experiment import BaseExperiment diff --git a/qiskit_experiments/framework/base_analysis.py b/qiskit_experiments/framework/base_analysis.py index acced5544d..d9ae7715da 100644 --- a/qiskit_experiments/framework/base_analysis.py +++ b/qiskit_experiments/framework/base_analysis.py @@ -23,8 +23,10 @@ from qiskit_experiments.database_service.device_component import Qubit from qiskit_experiments.framework import Options +from qiskit_experiments.framework.containers.artifact_data import ArtifactData from qiskit_experiments.framework.store_init_args import StoreInitArgs -from qiskit_experiments.framework.experiment_data import ExperimentData, FigureData +from qiskit_experiments.framework.experiment_data import ExperimentData +from qiskit_experiments.framework.containers import FigureData, FigureType from qiskit_experiments.framework.configs import AnalysisConfig from qiskit_experiments.framework.analysis_result_data import AnalysisResultData, as_table_element @@ -126,14 +128,14 @@ def run( Args: experiment_data: the experiment data to analyze. - replace_results: If True clear any existing analysis results and - figures in the experiment data and replace with + replace_results: If True clear any existing analysis results, figures, + and artifacts in the experiment data and replace with new results. See note for additional information. options: additional analysis options. See class documentation for supported options. Returns: - An experiment data object containing the analysis results and figures. + An experiment data object containing analysis results, figures, and artifacts. Raises: QiskitError: If experiment_data container is not valid for analysis. @@ -141,8 +143,8 @@ def run( .. note:: **Updating Results** - If analysis is run with ``replace_results=True`` then any analysis results - and figures in the experiment data will be cleared and replaced with the + If analysis is run with ``replace_results=True`` then any analysis results, + figures, and artifacts in the experiment data will be cleared and replaced with the new analysis results. Saving this experiment data will replace any previously saved data in a database service using the same experiment ID. @@ -175,28 +177,42 @@ def run_analysis(expdata: ExperimentData): if results: for result in results: - # Populate missing data fields - if not result.experiment_id: - result.experiment_id = expdata.experiment_id - if not result.experiment: - result.experiment = expdata.experiment_type - if not result.device_components: - result.device_components = self._get_experiment_components(expdata) - if not result.backend: - result.backend = expdata.backend_name - if not result.created_time: - result.created_time = datetime.now(tz.tzlocal()) - if not result.run_time: - result.run_time = expdata.running_time - - # To canonical kwargs to add to the analysis table. - table_format = as_table_element(result) - - # Remove result_id to make sure the id is unique in the scope of the container. 
- # This will let the container generate a unique id. - del table_format["result_id"] - - expdata.add_analysis_results(**table_format) + if isinstance(result, AnalysisResultData): + # Populate missing data fields + if not result.experiment_id: + result.experiment_id = expdata.experiment_id + if not result.experiment: + result.experiment = expdata.experiment_type + if not result.device_components: + result.device_components = self._get_experiment_components(expdata) + if not result.backend: + result.backend = expdata.backend_name + if not result.created_time: + result.created_time = datetime.now(tz.tzlocal()) + if not result.run_time: + result.run_time = expdata.running_time + + # To canonical kwargs to add to the analysis table. + table_format = as_table_element(result) + + # Remove result_id to make sure the id is unique in the scope of the container. + # This will let the container generate a unique id. + del table_format["result_id"] + + expdata.add_analysis_results(**table_format) + elif isinstance(result, ArtifactData): + if not result.experiment_id: + result.experiment_id = expdata.experiment_id + if not result.device_components: + result.device_components = self._get_experiment_components(expdata) + if not result.experiment: + result.experiment = expdata.experiment_type + expdata.add_artifacts(result) + else: + raise TypeError( + f"Invalid object type {result.__class__.__name__} for analysis results. " + "This data cannot be stored in the experiment data." + ) if figures: figure_to_add = [] @@ -232,7 +248,7 @@ def _get_experiment_components(self, experiment_data: ExperimentData): def _run_analysis( self, experiment_data: ExperimentData, - ) -> Tuple[List[AnalysisResultData], List["matplotlib.figure.Figure"]]: + ) -> Tuple[List[Union[AnalysisResultData, ArtifactData]], List[FigureType]]: """Run analysis on circuit data. Args: diff --git a/qiskit_experiments/framework/composite/composite_analysis.py b/qiskit_experiments/framework/composite/composite_analysis.py index 18c2c1f576..fb04ba50f9 100644 --- a/qiskit_experiments/framework/composite/composite_analysis.py +++ b/qiskit_experiments/framework/composite/composite_analysis.py @@ -406,5 +406,7 @@ def _combine_results( for _, series in analysis_table.iterrows(): data = AnalysisResultData.from_table_element(**series.to_dict()) analysis_results.append(data) + for artifact in sub_expdata.artifacts(): + analysis_results.append(artifact) return analysis_results, figures diff --git a/qiskit_experiments/framework/containers/__init__.py b/qiskit_experiments/framework/containers/__init__.py new file mode 100644 index 0000000000..80b9f98191 --- /dev/null +++ b/qiskit_experiments/framework/containers/__init__.py @@ -0,0 +1,16 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2023. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. 
+ +"""Container classes for storing data related to experiments.""" + +from .artifact_data import ArtifactData +from .figure_data import FigureData, FigureType diff --git a/qiskit_experiments/framework/containers/artifact_data.py b/qiskit_experiments/framework/containers/artifact_data.py new file mode 100644 index 0000000000..c08412196e --- /dev/null +++ b/qiskit_experiments/framework/containers/artifact_data.py @@ -0,0 +1,62 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2023. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. + +""" +Entry for artifact data. +""" + +from dataclasses import dataclass, field +from typing import Any, Optional, List +from datetime import datetime +import uuid + +from dateutil import tz + + +@dataclass +class ArtifactData: + """A dataclass for non-analysis result payloads in :class:`.ExperimentData` objects. + + This class can convert results generated by the analysis class into a payload + for saving and retrieving to and from the experiments service, which stores this + as artifacts. Types of objects that may be converted to artifacts include fitted and raw data, + fit status, and any other JSON-based data needed to serialize experiments and experiment data. + + Attributes: + name: The name of the artifact. When saved to the cloud service, this will be the name + of the zipfile this artifact object is stored in. + data: The artifact payload. + artifact_id: Artifact ID. Must be unique inside an :class:`ExperimentData` object. + experiment_id: Experiment ID that the artifact is associated with. + experiment: The name of the experiment. + device_components: The device components of the experiment. + created_time: Time when the artifact was created. + """ + + name: str + data: Any + artifact_id: Optional[str] = field(default_factory=lambda: str(uuid.uuid4())) + experiment_id: Optional[str] = None + experiment: Optional[str] = None + device_components: List = field(default_factory=list) + created_time: Optional[datetime] = field(default_factory=lambda: datetime.now(tz.tzlocal())) + + @property + def dtype(self): + """Data type of the payload.""" + return self.data.__class__.__name__ + + def __repr__(self): + return ( + f"ArtifactData(name={self.name}, dtype={self.dtype}, uid={self.artifact_id}, " + f"experiment={self.experiment}, device_components={self.device_components})" + ) diff --git a/qiskit_experiments/framework/containers/figure_data.py b/qiskit_experiments/framework/containers/figure_data.py new file mode 100644 index 0000000000..c71781407a --- /dev/null +++ b/qiskit_experiments/framework/containers/figure_data.py @@ -0,0 +1,102 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2023. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. 
+ +"""Container of experiment data components.""" + +from __future__ import annotations + +import copy +import io +from typing import Dict, Optional, Union, Any + +from matplotlib.figure import Figure as MatplotlibFigure + + +class FigureData: + """A plot data container. + + .. note:: + Raw figure data can be accessed through the :attr:`.FigureData.figure` attribute. + + """ + + def __init__( + self, + figure, + name: str | None = None, + metadata: dict[str, Any] | None = None, + ): + """Creates a new figure data object. + + Args: + figure: The raw figure itself. Can be SVG or matplotlib.Figure. + name: The name of the figure. + metadata: Any metadata to be stored with the figure. + """ + self.figure = figure + self._name = name + self.metadata = metadata or {} + + def __eq__(self, value): + """Test equality between two instances of FigureData.""" + return vars(self) == vars(value) + + # name is read only + @property + def name(self) -> str: + """The name of the figure""" + return self._name + + @property + def metadata(self) -> dict: + """The metadata dictionary stored with the figure""" + return self._metadata + + @metadata.setter + def metadata(self, new_metadata: dict): + """Set the metadata to new value; must be a dictionary""" + if not isinstance(new_metadata, dict): + raise ValueError("figure metadata must be a dictionary") + self._metadata = new_metadata + + def copy(self, new_name: Optional[str] = None): + """Creates a copy of the figure data""" + name = new_name or self.name + return FigureData(figure=self.figure, name=name, metadata=copy.deepcopy(self.metadata)) + + def __json_encode__(self) -> Dict[str, Any]: + """Return the json representation of the figure data""" + return {"figure": self.figure, "name": self.name, "metadata": self.metadata} + + @classmethod + def __json_decode__(cls, args: Dict[str, Any]) -> "FigureData": + """Initialize a figure data from the json representation""" + return cls(**args) + + def _repr_png_(self): + if isinstance(self.figure, MatplotlibFigure): + b = io.BytesIO() + self.figure.savefig(b, format="png", bbox_inches="tight") + png = b.getvalue() + return png + else: + return None + + def _repr_svg_(self): + if isinstance(self.figure, str): + return self.figure + if isinstance(self.figure, bytes): + return self.figure.decode("utf-8") + return None + + +FigureType = Union[str, bytes, MatplotlibFigure, FigureData] diff --git a/qiskit_experiments/framework/experiment_data.py b/qiskit_experiments/framework/experiment_data.py index 60396fe309..1806f1aeec 100644 --- a/qiskit_experiments/framework/experiment_data.py +++ b/qiskit_experiments/framework/experiment_data.py @@ -15,20 +15,16 @@ from __future__ import annotations import logging -import dataclasses import re from typing import Dict, Optional, List, Union, Any, Callable, Tuple, TYPE_CHECKING from datetime import datetime, timezone from concurrent import futures -from threading import Event from functools import wraps -from collections import deque +from collections import deque, defaultdict import contextlib import copy import uuid -import enum import time -import io import sys import json import traceback @@ -37,7 +33,6 @@ import pandas as pd from dateutil import tz from matplotlib import pyplot -from matplotlib.figure import Figure as MatplotlibFigure from qiskit.result import Result from qiskit.providers.jobstatus import JobStatus, JOB_FINAL_STATES from qiskit.exceptions import QiskitError @@ -62,12 +57,16 @@ from qiskit_experiments.framework.analysis_result_data import AnalysisResultData from 
qiskit_experiments.framework.analysis_result_table import AnalysisResultTable from qiskit_experiments.framework import BackendData +from qiskit_experiments.framework.containers import ArtifactData +from qiskit_experiments.framework import ExperimentStatus, AnalysisStatus, AnalysisCallback from qiskit_experiments.database_service.exceptions import ( ExperimentDataError, ExperimentEntryNotFound, ExperimentDataSaveFailed, ) +from qiskit_experiments.database_service.utils import objs_to_zip, zip_to_objs +from .containers.figure_data import FigureData, FigureType if TYPE_CHECKING: # There is a cyclical dependency here, but the name needs to exist for @@ -133,74 +132,6 @@ def parse_utc_datetime(dt_str: str) -> datetime: return dt_utc -class FigureData: - """Wrapper class for figures and figure metadata. The raw figure can be accessed with - the ``figure`` attribute.""" - - def __init__(self, figure, name=None, metadata=None): - """Creates a new figure data object. - - Args: - figure: the raw figure itself. Can be SVG or matplotlib.Figure. - name: Optional, the name of the figure. - metadata: Optional, any metadata to be stored with the figure. - """ - self.figure = figure - self._name = name - self.metadata = metadata or {} - - # name is read only - @property - def name(self) -> str: - """The name of the figure""" - return self._name - - @property - def metadata(self) -> dict: - """The metadata dictionary stored with the figure""" - return self._metadata - - @metadata.setter - def metadata(self, new_metadata: dict): - """Set the metadata to new value; must be a dictionary""" - if not isinstance(new_metadata, dict): - raise ValueError("figure metadata must be a dictionary") - self._metadata = new_metadata - - def copy(self, new_name: Optional[str] = None): - """Creates a copy of the figure data""" - name = new_name or self.name - return FigureData(figure=self.figure, name=name, metadata=copy.deepcopy(self.metadata)) - - def __json_encode__(self) -> Dict[str, Any]: - """Return the json representation of the figure data""" - return {"figure": self.figure, "name": self.name, "metadata": self.metadata} - - @classmethod - def __json_decode__(cls, args: Dict[str, Any]) -> "FigureData": - """Initialize a figure data from the json representation""" - return cls(**args) - - def _repr_png_(self): - if isinstance(self.figure, MatplotlibFigure): - b = io.BytesIO() - self.figure.savefig(b, format="png", bbox_inches="tight") - png = b.getvalue() - return png - else: - return None - - def _repr_svg_(self): - if isinstance(self.figure, str): - return self.figure - if isinstance(self.figure, bytes): - return self.figure.decode("utf-8") - return None - - -FigureType = Union[str, bytes, MatplotlibFigure, FigureData] - - class ExperimentData: """Experiment data container class. 
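    For orientation, a short sketch of the in-memory artifact operations this container gains
    (the ``notes`` name and payload are illustrative):

    .. code-block:: python

        from qiskit_experiments.framework import ArtifactData, ExperimentData

        exp_data = ExperimentData()
        exp_data.add_artifacts(ArtifactData(name="notes", data={"purpose": "calibration"}))
        print(exp_data.artifacts("notes").data)  # {'purpose': 'calibration'}
        exp_data.delete_artifact("notes")  # marks the "notes" artifact file for deletion on save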
@@ -355,9 +286,11 @@ def __init__( self._result_data = ThreadSafeList() self._figures = ThreadSafeOrderedDict(self._db_data.figure_names) self._analysis_results = AnalysisResultTable() + self._artifacts = ThreadSafeOrderedDict() self._deleted_figures = deque() self._deleted_analysis_results = deque() + self._deleted_artifacts = set() # for holding unique artifact names to be deleted # Child related # Add component data and set parent ID to current container @@ -689,9 +622,14 @@ def _clear_results(self): self._deleted_analysis_results.extend(list(self._analysis_results.result_ids)) self._analysis_results.clear() # Schedule existing figures for deletion next save call + # TODO: Fully delete artifacts from the service + # Current implementation uploads empty files instead + for artifact in self._artifacts.values(): + self._deleted_artifacts.add(artifact.name) for key in self._figures.keys(): self._deleted_figures.append(key) self._figures = ThreadSafeOrderedDict() + self._artifacts = ThreadSafeOrderedDict() @property def service(self) -> Optional[IBMExperimentService]: @@ -1289,10 +1227,7 @@ def delete_figure( Raises: ExperimentEntryNotFound: If the figure is not found. """ - if isinstance(figure_key, int): - figure_key = self._figures.keys()[figure_key] - elif figure_key not in self._figures: - raise ExperimentEntryNotFound(f"Figure {figure_key} not found.") + figure_key = self._find_figure_key(figure_key) del self._figures[figure_key] self._deleted_figures.append(figure_key) @@ -1304,6 +1239,26 @@ def delete_figure( return figure_key + def _find_figure_key( + self, + figure_key: int | str, + ) -> str: + """A helper method to find figure key.""" + if isinstance(figure_key, int): + if figure_key < 0 or figure_key >= len(self._figures): + raise ExperimentEntryNotFound(f"Figure index {figure_key} out of range.") + return self._figures.keys()[figure_key] + + # All figures must have '.svg' in their names when added, as the extension is added to the key + # name in the `add_figures()` method of this class. + if isinstance(figure_key, str): + if not figure_key.endswith(".svg"): + figure_key += ".svg" + + if figure_key not in self._figures: + raise ExperimentEntryNotFound(f"Figure key {figure_key} not found.") + return figure_key + def figure( self, figure_key: Union[str, int], @@ -1323,16 +1278,7 @@ def figure( Raises: ExperimentEntryNotFound: If the figure cannot be found. """ - if isinstance(figure_key, int): - if figure_key < 0 or figure_key >= len(self._figures.keys()): - raise ExperimentEntryNotFound(f"Figure {figure_key} not found.") - figure_key = self._figures.keys()[figure_key] - - # All figures must have '.svg' in their names when added, as the extension is added to the key - # name in the `add_figures()` method of this class. - if isinstance(figure_key, str): - if not figure_key.endswith(".svg"): - figure_key += ".svg" + figure_key = self._find_figure_key(figure_key) figure_data = self._figures.get(figure_key, None) if figure_data is None and self.service: @@ -1614,6 +1560,20 @@ def analysis_results( auto_save=self._auto_save, ) ) + if index == 0 and tmp_df.iloc[0]["name"].startswith("@"): + warnings.warn( + "Curve fit results have moved to experiment artifacts and will be removed " + "from analysis results in a future release. 
Use " + 'expdata.artifacts("fit_summary").data to access curve fit results.', + DeprecationWarning, + ) + elif isinstance(index, (int, slice)): + warnings.warn( + "Accessing analysis results via a numerical index is deprecated and will be " + "removed in a future release. Use the ID or name of the analysis result " + "instead.", + DeprecationWarning, + ) if len(service_results) == 1 and index is not None: return service_results[0] return service_results @@ -1624,7 +1584,7 @@ def save_metadata(self) -> None: """Save this experiments metadata to a database service. .. note:: - This method does not save analysis results nor figures. + This method does not save analysis results, figures, or artifacts. Use :meth:`save` for general saving of all experiment data. See :meth:`qiskit.providers.experiment.IBMExperimentService.create_experiment` @@ -1693,11 +1653,14 @@ def _metadata_too_large(self): total_metadata_size = sys.getsizeof(json.dumps(self.metadata, cls=self._json_encoder)) return total_metadata_size > 10000 + # Save and load from the database + def save( self, suppress_errors: bool = True, max_workers: int = 3, save_figures: bool = True, + save_artifacts: bool = True, save_children: bool = True, ) -> None: """Save the experiment data to a database service. @@ -1705,8 +1668,9 @@ def save( Args: suppress_errors: should the method catch exceptions (true) or pass them on, potentially aborting the experiment (false) - max_workers: Maximum number of concurrent worker threads (capped by 10) + max_workers: Maximum number of concurrent worker threads (default 3, maximum 10) save_figures: Whether to save figures in the database or not + save_artifacts: Whether to save artifacts in the database save_children: For composite experiments, whether to save children as well Raises: @@ -1739,7 +1703,15 @@ def save( self._max_workers_cap, ) max_workers = self._max_workers_cap + + if save_artifacts: + # populate the metadata entry for artifact file names + self.metadata["artifact_files"] = { + f"{artifact.name}.zip" for artifact in self._artifacts.values() + } + self._save_experiment_metadata(suppress_errors=suppress_errors) + if not self._created_in_db: LOG.warning("Could not save experiment metadata to DB, aborting experiment save") return @@ -1799,6 +1771,44 @@ def save( self._service.delete_figure(experiment_id=self.experiment_id, figure_name=name) self._deleted_figures.remove(name) + # save artifacts + if save_artifacts: + with self._artifacts.lock: + # make dictionary {artifact name: [artifact ids]} + artifact_list = defaultdict(list) + for artifact in self._artifacts.values(): + artifact_list[artifact.name].append(artifact.artifact_id) + try: + for artifact_name, artifact_ids in artifact_list.items(): + file_zipped = objs_to_zip( + artifact_ids, + [self._artifacts[artifact_id] for artifact_id in artifact_ids], + json_encoder=self._json_encoder, + ) + self.service.file_upload( + experiment_id=self.experiment_id, + file_name=f"{artifact_name}.zip", + file_data=file_zipped, + ) + except Exception: # pylint: disable=broad-except: + LOG.error("Unable to save artifacts: %s", traceback.format_exc()) + + # Upload a blank file if the whole file should be deleted + # TODO: replace with direct artifact deletion when available + for artifact_name in self._deleted_artifacts.copy(): + try: # Don't overwrite with a blank file if there's still artifacts with this name + self.artifacts(artifact_name) + except Exception: # pylint: disable=broad-except: + with service_exception_to_warning(): + self.service.file_upload( + 
experiment_id=self.experiment_id, + file_name=f"{artifact_name}.zip", + file_data=None, + ) + # Even if we didn't overwrite an artifact file, we don't need to keep this because + # an existing artifact(s) needs to be deleted to delete the artifact file in the future + self._deleted_artifacts.remove(artifact_name) + if not self.service.local and self.verbose: print( "You can view the experiment online at " @@ -1813,6 +1823,7 @@ def save( suppress_errors=suppress_errors, max_workers=max_workers, save_figures=save_figures, + save_artifacts=save_artifacts, ) data.verbose = original_verbose @@ -1820,7 +1831,10 @@ def jobs(self) -> List[Job]: """Return a list of jobs for the experiment""" return self._jobs.values() - def cancel_jobs(self, ids: Optional[Union[str, List[str]]] = None) -> bool: + def cancel_jobs( + self, + ids: str | list[str] | None = None, + ) -> bool: """Cancel any running jobs. Args: @@ -1854,7 +1868,10 @@ def cancel_jobs(self, ids: Optional[Union[str, List[str]]] = None) -> bool: return all_cancelled - def cancel_analysis(self, ids: Optional[Union[str, List[str]]] = None) -> bool: + def cancel_analysis( + self, + ids: str | list[str] | None = None, + ) -> bool: """Cancel any queued analysis callbacks. .. note:: @@ -2106,7 +2123,6 @@ def job_status(self) -> JobStatus: """ statuses = set() with self._jobs.lock: - # No jobs present if not self._jobs: return JobStatus.DONE @@ -2276,6 +2292,7 @@ def load( experiment_id, cls._metadata_filename, json_decoder=cls._json_decoder ) data.metadata.update(metadata) + expdata = cls(service=service, db_data=data, provider=provider) # Retrieve data and analysis results @@ -2284,6 +2301,17 @@ def load( expdata._retrieve_data() expdata._retrieve_analysis_results() + # Recreate artifacts + try: + if "artifact_files" in expdata.metadata: + for filename in expdata.metadata["artifact_files"]: + if service.experiment_has_file(experiment_id, filename): + artifact_file = service.file_download(experiment_id, filename) + for artifact in zip_to_objs(artifact_file, json_decoder=cls._json_decoder): + expdata.add_artifacts(artifact) + except Exception: # pylint: disable=broad-except: + LOG.error("Unable to load artifacts: %s", traceback.format_exc()) + # mark it as existing in the DB expdata._created_in_db = True @@ -2299,7 +2327,7 @@ def copy(self, copy_results: bool = True) -> "ExperimentData": """Make a copy of the experiment data with a new experiment ID. Args: - copy_results: If True copy the analysis results and figures + copy_results: If True copy the analysis results, figures, and artifacts into the returned container, along with the experiment data and metadata. If False only copy the experiment data and metadata. 
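    A sketch of the resulting save/load round trip for artifacts (assuming access to the cloud
    service; ``service`` is a placeholder for an ``IBMExperimentService`` instance):

    .. code-block:: python

        exp_data.save()  # uploads one <name>.zip per artifact name in metadata["artifact_files"]
        loaded = ExperimentData.load(exp_data.experiment_id, service)
        print(loaded.artifacts("fit_summary").data)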
@@ -2325,6 +2353,8 @@ def copy(self, copy_results: bool = True) -> "ExperimentData":
             verbose=self.verbose,
         )
         new_instance._db_data = self._db_data.copy()
+        # Figure names shouldn't be copied over
+        new_instance._db_data.figure_names = []
         new_instance._db_data.experiment_id = str(
             uuid.uuid4()
         )  # different id for copied experiment
@@ -2364,6 +2394,10 @@ def copy(self, copy_results: bool = True) -> "ExperimentData":
             new_instance._figures = ThreadSafeOrderedDict()
             new_instance.add_figures(self._figures.values())
 
+        with self._artifacts.lock:
+            new_instance._artifacts = ThreadSafeOrderedDict()
+            new_instance.add_artifacts(self._artifacts.values())
+
         # Recursively copy child data
         child_data = [data.copy(copy_results=copy_results) for data in self.child_data()]
         new_instance._set_child_data(child_data)
@@ -2493,6 +2527,7 @@ def __json_encode__(self):
             "_created_in_db": self._created_in_db,
             "_figures": self._safe_serialize_figures(),  # Convert figures to SVG
             "_jobs": self._safe_serialize_jobs(),  # Handle non-serializable objects
+            "_artifacts": self._artifacts,
             "_experiment": self._experiment,
             "_child_data": self._child_data,
             "_running_time": self._running_time,
@@ -2602,75 +2637,101 @@ def __str__(self):
         ret += f"\nData: {len(self._result_data)}"
         ret += f"\nAnalysis Results: {n_res}"
         ret += f"\nFigures: {len(self._figures)}"
+        ret += f"\nArtifacts: {len(self._artifacts)}"
         return ret
 
+    def add_artifacts(self, artifacts: ArtifactData | list[ArtifactData], overwrite: bool = False):
+        """Add artifacts to experiment. The artifact ID must be unique.
 
-@contextlib.contextmanager
-def service_exception_to_warning():
-    """Convert an exception raised by experiment service to a warning."""
-    try:
-        yield
-    except Exception:  # pylint: disable=broad-except
-        LOG.warning("Experiment service operation failed: %s", traceback.format_exc())
+        Args:
+            artifacts: Artifact or list of artifacts to be added.
+            overwrite: Whether to overwrite the existing artifact.
+
+        Raises:
+            ValueError: If an artifact with the same ID already exists and ``overwrite``
+                is ``False``.
+        """
+        if isinstance(artifacts, ArtifactData):
+            artifacts = [artifacts]
+
+        for artifact in artifacts:
+            if artifact.artifact_id in self._artifacts and not overwrite:
+                raise ValueError(
+                    f"An artifact with id {artifact.artifact_id} already exists. "
+                    "Set overwrite to True if you want to overwrite the existing "
+                    "artifact."
+                )
+            self._artifacts[artifact.artifact_id] = artifact
 
+    def delete_artifact(
+        self,
+        artifact_key: int | str,
+    ) -> str | list[str]:
+        """Delete specified artifact data.
 
-class ExperimentStatus(enum.Enum):
-    """Class for experiment status enumerated type."""
+        Args:
+            artifact_key: UID, name, or index of the artifact.
 
-    EMPTY = "experiment data is empty"
-    INITIALIZING = "experiment jobs are being initialized"
-    VALIDATING = "experiment jobs are validating"
-    QUEUED = "experiment jobs are queued"
-    RUNNING = "experiment jobs is actively running"
-    CANCELLED = "experiment jobs or analysis has been cancelled"
-    POST_PROCESSING = "experiment analysis is actively running"
-    DONE = "experiment jobs and analysis have successfully run"
-    ERROR = "experiment jobs or analysis incurred an error"
+        Returns:
+            Deleted artifact IDs.
+ """ + artifact_keys = self._find_artifact_keys(artifact_key) - def __json_encode__(self): - return self.name + for key in artifact_keys: + self._deleted_artifacts.add(self._artifacts[key].name) + del self._artifacts[key] - @classmethod - def __json_decode__(cls, value): - return cls.__members__[value] # pylint: disable=unsubscriptable-object + if len(artifact_keys) == 1: + return artifact_keys[0] + return artifact_keys + def artifacts( + self, + artifact_key: int | str = None, + ) -> ArtifactData | list[ArtifactData]: + """Return specified artifact data. -class AnalysisStatus(enum.Enum): - """Class for analysis callback status enumerated type.""" + Args: + artifact_key: UID, name or index of the figure. - QUEUED = "analysis callback is queued" - RUNNING = "analysis callback is actively running" - CANCELLED = "analysis callback has been cancelled" - DONE = "analysis callback has successfully run" - ERROR = "analysis callback incurred an error" + Returns: + A list of specified artifact data. + """ + if artifact_key is None: + return self._artifacts.values() - def __json_encode__(self): - return self.name + artifact_keys = self._find_artifact_keys(artifact_key) - @classmethod - def __json_decode__(cls, value): - return cls.__members__[value] # pylint: disable=unsubscriptable-object + out = [] + for key in artifact_keys: + artifact_data = self._artifacts[key] + out.append(artifact_data) + if len(out) == 1: + return out[0] + return out -@dataclasses.dataclass -class AnalysisCallback: - """Dataclass for analysis callback status""" + def _find_artifact_keys( + self, + artifact_key: int | str, + ) -> list[str]: + """A helper method to find artifact key.""" + if isinstance(artifact_key, int): + if artifact_key < 0 or artifact_key >= len(self._artifacts): + raise ExperimentEntryNotFound(f"Artifact index {artifact_key} out of range.") + return [self._artifacts.keys()[artifact_key]] - name: str = "" - callback_id: str = "" - status: AnalysisStatus = AnalysisStatus.QUEUED - error_msg: Optional[str] = None - event: Event = dataclasses.field(default_factory=Event) + if artifact_key not in self._artifacts: + name_matched = [k for k, d in self._artifacts.items() if d.name == artifact_key] + if len(name_matched) == 0: + raise ExperimentEntryNotFound(f"Artifact key {artifact_key} not found.") + return name_matched + return [artifact_key] - def __getstate__(self): - # We need to remove the Event object from state when pickling - # since events are not pickleable - state = self.__dict__ - state["event"] = None - return state - def __json_encode__(self): - return self.__getstate__() +@contextlib.contextmanager +def service_exception_to_warning(): + """Convert an exception raised by experiment service to a warning.""" + try: + yield + except Exception: # pylint: disable=broad-except + LOG.warning("Experiment service operation failed: %s", traceback.format_exc()) def _series_to_service_result( diff --git a/qiskit_experiments/framework/status.py b/qiskit_experiments/framework/status.py new file mode 100644 index 0000000000..e0d2eca26f --- /dev/null +++ b/qiskit_experiments/framework/status.py @@ -0,0 +1,78 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2023. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 
+#
+# Any modifications or derivative works of this code must retain this
+# copyright notice, and modified files need to carry a notice indicating
+# that they have been altered from the originals.
+"""Status of experiment execution."""
+
+from __future__ import annotations
+
+import dataclasses
+import enum
+from threading import Event
+from typing import Optional
+
+
+class ExperimentStatus(enum.Enum):
+    """Class for experiment status enumerated type."""
+
+    EMPTY = "experiment data is empty"
+    INITIALIZING = "experiment jobs are being initialized"
+    VALIDATING = "experiment jobs are validating"
+    QUEUED = "experiment jobs are queued"
+    RUNNING = "experiment jobs are actively running"
+    CANCELLED = "experiment jobs or analysis has been cancelled"
+    POST_PROCESSING = "experiment analysis is actively running"
+    DONE = "experiment jobs and analysis have successfully run"
+    ERROR = "experiment jobs or analysis incurred an error"
+
+    def __json_encode__(self):
+        return self.name
+
+    @classmethod
+    def __json_decode__(cls, value):
+        return cls.__members__[value]  # pylint: disable=unsubscriptable-object
+
+
+class AnalysisStatus(enum.Enum):
+    """Class for analysis callback status enumerated type."""
+
+    QUEUED = "analysis callback is queued"
+    RUNNING = "analysis callback is actively running"
+    CANCELLED = "analysis callback has been cancelled"
+    DONE = "analysis callback has successfully run"
+    ERROR = "analysis callback incurred an error"
+
+    def __json_encode__(self):
+        return self.name
+
+    @classmethod
+    def __json_decode__(cls, value):
+        return cls.__members__[value]  # pylint: disable=unsubscriptable-object
+
+
+@dataclasses.dataclass
+class AnalysisCallback:
+    """Dataclass for analysis callback status"""
+
+    name: str = ""
+    callback_id: str = ""
+    status: AnalysisStatus = AnalysisStatus.QUEUED
+    error_msg: Optional[str] = None
+    event: Event = dataclasses.field(default_factory=Event)
+
+    def __getstate__(self):
+        # We need to remove the Event object from state when pickling
+        # since events are not pickleable
+        state = self.__dict__
+        state["event"] = None
+        return state
+
+    def __json_encode__(self):
+        return self.__getstate__()
diff --git a/qiskit_experiments/library/tomography/mit_tomography_analysis.py b/qiskit_experiments/library/tomography/mit_tomography_analysis.py
index 25ef45ac8c..892afa41a9 100644
--- a/qiskit_experiments/library/tomography/mit_tomography_analysis.py
+++ b/qiskit_experiments/library/tomography/mit_tomography_analysis.py
@@ -99,7 +99,7 @@ def _run_analysis(self, experiment_data):
         roerror_analysis.run(roerror_data, replace_results=True).block_for_results()

         # Construct noisy measurement basis
-        mitigator = roerror_data.analysis_results(0).value
+        mitigator = roerror_data.analysis_results("Local Readout Mitigator").value

         # Run mitigated tomography analysis with noisy mitigated basis
         # Tomo analysis instance is internally copied by setting option with run.
diff --git a/releasenotes/notes/experiment-artifacts-c481f4e07226ce9e.yaml b/releasenotes/notes/experiment-artifacts-c481f4e07226ce9e.yaml
new file mode 100644
index 0000000000..c83fc92772
--- /dev/null
+++ b/releasenotes/notes/experiment-artifacts-c481f4e07226ce9e.yaml
@@ -0,0 +1,26 @@
+---
+features:
+  - |
+    An artifact class has been introduced to store long-form data generated by experiments.
+    The :class:`.CurveFitResult` and :class:`.ScatterTable` generated by experiments
+    are now stored in artifacts in the :class:`.ExperimentData` class.
:meth:`.add_artifacts`
+    and :meth:`.delete_artifact` have been added to manipulate the artifacts. These will be uploaded
+    to the cloud service in JSON form along with the rest of the :class:`.ExperimentData` object
+    when saved. For more information, see the :doc:`artifacts how-to </howtos/artifacts>`.
+deprecations:
+  - |
+    Setting the option ``return_data_points`` to ``True`` in curve analysis has been deprecated.
+    Data points are now automatically provided in :class:`.ExperimentData` objects via the ``curve_data``
+    artifact.
+  - |
+    Direct access to the curve fit summary in :class:`.ExperimentData` has moved from
+    :meth:`.analysis_results` to :meth:`.artifacts`, where values are stored in the
+    :attr:`~.ArtifactData.data` attribute of :class:`.ArtifactData` objects. For example, to access the
+    chi-squared of the fit, ``expdata.analysis_results(0).chisq`` is deprecated in favor of
+    ``expdata.artifacts("fit_summary").data.chisq``. In a future release, the curve fit summary
+    will be removed from :meth:`.analysis_results` and the option ``return_fit_parameters`` will be
+    removed. For more information on artifacts, see the :doc:`artifacts how-to </howtos/artifacts>`.
+  - |
+    Using numerical indices with :meth:`.ExperimentData.analysis_results`, including both integers and
+    slices, is now deprecated. Access analysis results by analysis result name or ID instead.
+
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index 54ea5ea51c..c296cd77b0 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,7 +1,7 @@
 numpy>=1.17
 scipy>=1.4
 qiskit>=0.45
-qiskit-ibm-experiment>=0.3.4
+qiskit-ibm-experiment>=0.4.6
 matplotlib>=3.4
 uncertainties
 lmfit
diff --git a/test/base.py b/test/base.py
index eda6c5d177..a9f3865fda 100644
--- a/test/base.py
+++ b/test/base.py
@@ -116,7 +116,7 @@ def setUpClass(cls):
             warnings.filterwarnings(
                 "default",
                 module="qiskit_experiments",
-                message=".*The curve data representation is replaced with dataframe format.*",
+                message=".*The curve data representation has been replaced by the `DataFrame` format.*",
                 category=PendingDeprecationWarning,
             )
diff --git a/test/calibration/test_update_library.py b/test/calibration/test_update_library.py
index 027b21b511..2edd851c22 100644
--- a/test/calibration/test_update_library.py
+++ b/test/calibration/test_update_library.py
@@ -49,7 +49,7 @@ def test_frequency(self):
         spec.set_run_options(meas_level=MeasLevel.CLASSIFIED)
         exp_data = spec.run(backend)
         self.assertExperimentDone(exp_data)
-        result = exp_data.analysis_results(1)
+        result = exp_data.analysis_results("f01")

         value = result.value.n
         self.assertTrue(freq01 + peak_offset - 2e6 < value < freq01 + peak_offset + 2e6)
diff --git a/test/curve_analysis/test_baseclass.py b/test/curve_analysis/test_baseclass.py
index 7025dbd60f..8c9626f870 100644
--- a/test/curve_analysis/test_baseclass.py
+++ b/test/curve_analysis/test_baseclass.py
@@ -43,7 +43,11 @@ def single_sampler(x, y, shots=10000, seed=123, **metadata):
     counts = rng.binomial(shots, y)

     circuit_results = [
-        {"counts": {"0": shots - count, "1": count}, "metadata": {"xval": xi, **metadata}}
+        {
+            "counts": {"0": shots - count, "1": count},
+            "metadata": {"xval": xi, **metadata},
+            "shots": 1024,
+        }
         for xi, count in zip(x, counts)
     ]
     expdata = ExperimentData(experiment=FakeExperiment())
@@ -209,10 +213,11 @@ class InvalidClass:

     def test_end_to_end_single_function(self):
         """Integration test for single function."""
+        init_params = {"amp": 0.5, "tau": 0.3}
         analysis = CurveAnalysis(models=[ExpressionModel(expr="amp * exp(-x/tau)",
name="test")]) analysis.set_options( data_processor=DataProcessor(input_key="counts", data_actions=[Probability("1")]), - p0={"amp": 0.5, "tau": 0.3}, + p0=init_params, result_parameters=["amp", "tau"], plot=False, ) @@ -226,12 +231,43 @@ def test_end_to_end_single_function(self): result = analysis.run(test_data) self.assertExperimentDone(result) + curve_data = result.artifacts("curve_data").data + np.testing.assert_array_equal(curve_data.series_name, "test") + np.testing.assert_array_equal(curve_data.analysis, "CurveAnalysis") + self.assertEqual(len(curve_data.filter(category="raw")), 100) + self.assertEqual(len(curve_data.filter(category="formatted")), 100) + self.assertEqual(len(curve_data.filter(category="fitted")), 100) + np.testing.assert_array_equal(curve_data.filter(category="raw").x, np.linspace(0, 1, 100)) + np.testing.assert_array_equal(curve_data.filter(category="raw").shots, 1024) + np.testing.assert_array_equal(curve_data.filter(category="formatted").shots, 1024) + self.assertTrue( + np.isnan(np.array(curve_data.filter(category="fitted").shots, dtype=float)).all() + ) + np.testing.assert_array_equal( + curve_data.filter(category="fitted").x, np.linspace(0, 1, 100) + ) + np.testing.assert_array_equal( + curve_data.filter(category="formatted").x, np.linspace(0, 1, 100) + ) + + fit_data = result.artifacts("fit_summary").data + self.assertEqual( + fit_data.model_repr, + {"test": "amp * exp(-x/tau)"}, + ) + self.assertEqual(fit_data.dof, 98) + self.assertEqual(fit_data.init_params, init_params) + self.assertEqual(fit_data.success, True) + self.assertAlmostEqual(fit_data.params["amp"], 0.5, delta=0.1) + self.assertAlmostEqual(fit_data.params["tau"], 0.3, delta=0.1) + self.assertAlmostEqual(result.analysis_results("amp").value.nominal_value, 0.5, delta=0.1) self.assertAlmostEqual(result.analysis_results("tau").value.nominal_value, 0.3, delta=0.1) self.assertEqual(len(result._figures), 0) def test_end_to_end_multi_objective(self): """Integration test for multi objective function.""" + init_params = {"amp": 0.5, "freq": 2.1, "phi": 0.3, "base": 0.1} analysis = CurveAnalysis( models=[ ExpressionModel( @@ -250,7 +286,7 @@ def test_end_to_end_multi_objective(self): "m1": {"series": "cos"}, "m2": {"series": "sin"}, }, - p0={"amp": 0.5, "freq": 2.1, "phi": 0.3, "base": 0.1}, + p0=init_params, result_parameters=["amp", "freq", "phi", "base"], plot=False, ) @@ -274,6 +310,22 @@ def test_end_to_end_multi_objective(self): result = analysis.run(expdata) self.assertExperimentDone(result) + fit_data = result.artifacts("fit_summary").data + self.assertEqual( + fit_data.model_repr, + { + "m1": "amp * cos(2 * pi * freq * x + phi) + base", + "m2": "amp * sin(2 * pi * freq * x + phi) + base", + }, + ) + self.assertEqual(fit_data.dof, 196) + self.assertEqual(fit_data.init_params, init_params) + self.assertEqual(fit_data.success, True) + self.assertAlmostEqual(fit_data.params["amp"], amp, delta=0.1) + self.assertAlmostEqual(fit_data.params["freq"], freq, delta=0.1) + self.assertAlmostEqual(fit_data.params["phi"], phi, delta=0.1) + self.assertAlmostEqual(fit_data.params["base"], base, delta=0.1) + self.assertAlmostEqual(result.analysis_results("amp").value.nominal_value, amp, delta=0.1) self.assertAlmostEqual(result.analysis_results("freq").value.nominal_value, freq, delta=0.1) self.assertAlmostEqual(result.analysis_results("phi").value.nominal_value, phi, delta=0.1) @@ -299,6 +351,11 @@ def test_end_to_end_single_function_with_fixed_parameter(self): result = analysis.run(test_data) 
self.assertExperimentDone(result) + fit_data = result.artifacts("fit_summary").data + self.assertEqual(fit_data.init_params, {"amp": 0.5, "tau": 0.3}) + self.assertEqual(fit_data.success, True) + self.assertEqual(fit_data.params["amp"], 0.5) + self.assertEqual(result.analysis_results("amp").value.nominal_value, 0.5) self.assertEqual(result.analysis_results("amp").value.std_dev, 0.0) self.assertAlmostEqual(result.analysis_results("tau").value.nominal_value, 0.3, delta=0.1) @@ -344,8 +401,8 @@ def _create_analysis_results(self, fit_data, quality, **metadata): # Use ufloat_params in @Parameters dataclass. # This dataclass stores UFloat values with correlation. - fit_amp = result.analysis_results(0).value.ufloat_params["amp"] - fit_tau = result.analysis_results(0).value.ufloat_params["tau"] + fit_amp = result.artifacts("fit_summary").data.ufloat_params["amp"] + fit_tau = result.artifacts("fit_summary").data.ufloat_params["tau"] self.assertEqual(new_value.n, fit_amp.n + fit_tau.n) @@ -404,10 +461,13 @@ def test_end_to_end_parallel_analysis(self, plot_flag, figure_flag, n_figures): """Integration test for running two curve analyses in parallel, including selective figure generation.""" + fit1_p0 = {"amp": 0.5, "tau": 0.3} + fit2_p0 = {"amp": 0.7, "tau": 0.5} + analysis1 = CurveAnalysis(models=[ExpressionModel(expr="amp * exp(-x/tau)", name="test")]) analysis1.set_options( data_processor=DataProcessor(input_key="counts", data_actions=[Probability("1")]), - p0={"amp": 0.5, "tau": 0.3}, + p0=fit1_p0, result_parameters=["amp", "tau"], plot=plot_flag, ) @@ -415,7 +475,7 @@ def test_end_to_end_parallel_analysis(self, plot_flag, figure_flag, n_figures): analysis2 = CurveAnalysis(models=[ExpressionModel(expr="amp * exp(-x/tau)", name="test")]) analysis2.set_options( data_processor=DataProcessor(input_key="counts", data_actions=[Probability("1")]), - p0={"amp": 0.7, "tau": 0.5}, + p0=fit2_p0, result_parameters=["amp", "tau"], plot=plot_flag, ) @@ -436,6 +496,36 @@ def test_end_to_end_parallel_analysis(self, plot_flag, figure_flag, n_figures): result = composite.run(test_data) self.assertExperimentDone(result) + self.assertEqual(len(result.artifacts()), 4) + fit1 = result.artifacts("fit_summary")[0].data + self.assertEqual(fit1.model_repr, {"test": "amp * exp(-x/tau)"}) + self.assertEqual(fit1.init_params, fit1_p0) + self.assertAlmostEqual(fit1.params["amp"], amp1, delta=0.1) + self.assertAlmostEqual(fit1.params["tau"], tau1, delta=0.1) + + fit2 = result.artifacts("fit_summary")[1].data + self.assertEqual(fit2.model_repr, {"test": "amp * exp(-x/tau)"}) + self.assertEqual(fit2.init_params, fit2_p0) + self.assertAlmostEqual(fit2.params["amp"], amp2, delta=0.1) + self.assertAlmostEqual(fit2.params["tau"], tau2, delta=0.1) + + data1 = result.artifacts("curve_data")[0].data + data2 = result.artifacts("curve_data")[1].data + + identical_cols = ["xval", "series_name", "series_id", "category", "shots", "analysis"] + self.assertTrue(data1.dataframe[identical_cols].equals(data2.dataframe[identical_cols])) + self.assertEqual(len(data1), 300) + + np.testing.assert_array_equal(data1.category[:100], "raw") + np.testing.assert_array_equal(data1.category[100:200], "formatted") + np.testing.assert_array_equal(data1.category[-100:], "fitted") + np.testing.assert_array_equal(data1.series_name, "test") + np.testing.assert_array_equal(data1.series_id, 0) + np.testing.assert_array_equal(data1.analysis, "CurveAnalysis") + np.testing.assert_array_equal(data1.x[:100], np.linspace(0, 1, 100)) + 
np.testing.assert_array_equal(data1.x[100:200], np.linspace(0, 1, 100)) + np.testing.assert_array_equal(data1.x[-100:], np.linspace(0, 1, 100)) + amps = result.analysis_results("amp") taus = result.analysis_results("tau") @@ -481,7 +571,7 @@ def test_selective_figure_generation(self): for res in result.child_data(): # only generate a figure if the quality is bad - if res.analysis_results(0).quality == "bad": + if res.analysis_results("amp").quality == "bad": self.assertEqual(len(res._figures), 1) else: self.assertEqual(len(res._figures), 0) @@ -527,6 +617,15 @@ def test_end_to_end_zero_yerr(self): result = analysis.run(expdata) self.assertExperimentDone(result) + for i in range(3): + self.assertEqual( + result.data(i), + {"counts": {"0": 10000, "1": 0}, "metadata": {"xval": i / 99}, "shots": 1024}, + ) + self.assertEqual( + result.artifacts("curve_data").data.y[i], 0.5 / 10001 + ) # from Beta distribution estimate + self.assertAlmostEqual(result.analysis_results("amp").value.nominal_value, amp, delta=0.1) def test_get_init_params(self): @@ -548,7 +647,7 @@ def test_get_init_params(self): result = analysis.run(test_data) self.assertExperimentDone(result) - overview = result.analysis_results(0).value + overview = result.artifacts("fit_summary").data self.assertDictEqual(overview.init_params, {"amp": 0.45, "tau": 0.25}) @@ -556,9 +655,14 @@ def test_get_init_params(self): y_reproduced = analysis.models[0].eval(x=x, **overview.init_params) np.testing.assert_array_almost_equal(y_ref, y_reproduced) - @data((False, "never", 0), (True, "never", 1), (None, "never", 0), (None, "always", 1)) + @data( + (False, "never", 0, "m1", "raw"), + (True, "never", 1, "m2", "raw"), + (None, "never", 0, 0, "fitted"), + (None, "always", 1, 1, "fitted"), + ) @unpack - def test_multi_composite_curve_analysis(self, plot, gen_figures, n_figures): + def test_multi_composite_curve_analysis(self, plot, gen_figures, n_figures, series, category): """Integration test for composite curve analysis. This analysis consists of two curve fittings for cos and sin series. 
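The assertions added in the next hunk rely on how :class:`.CompositeCurveAnalysis` packs its artifacts: a single ``fit_summary`` whose ``data`` is a dict of :class:`.CurveFitResult` objects keyed by group name, and a single ``curve_data`` table whose ``analysis`` column records the group. A minimal access sketch, assuming ``expdata`` holds the finished results of such an analysis:

    # Sketch: composite curve analysis keys its fit results by group name
    fit_a = expdata.artifacts("fit_summary").data["group_A"]  # CurveFitResult
    print(fit_a.params)
    table = expdata.artifacts("curve_data").data  # ScatterTable
    print(table.filter(analysis="group_A", category="raw").x)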
@@ -594,8 +698,10 @@ def test_multi_composite_curve_analysis(self, plot, gen_figures, n_figures): analyses.append(analysis) group_analysis = CompositeCurveAnalysis(analyses) - group_analysis.analyses("group_A").set_options(p0={"amp": 0.3, "freq": 2.1, "b": 0.5}) - group_analysis.analyses("group_B").set_options(p0={"amp": 0.5, "freq": 3.2, "b": 0.5}) + group_A_p0 = {"amp": 0.3, "freq": 2.1, "b": 0.5} + group_B_p0 = {"amp": 0.5, "freq": 3.2, "b": 0.5} + group_analysis.analyses("group_A").set_options(p0=group_A_p0) + group_analysis.analyses("group_B").set_options(p0=group_B_p0) group_analysis.set_options(plot=plot) group_analysis._generate_figures = gen_figures @@ -629,12 +735,52 @@ def test_multi_composite_curve_analysis(self, plot, gen_figures, n_figures): self.assertExperimentDone(result) amps = result.analysis_results("amp") + fit_A = expdata.artifacts("fit_summary").data["group_A"] + self.assertEqual( + fit_A.model_repr, + {"m1": "amp * cos(2 * pi * freq * x) + b", "m2": "amp * sin(2 * pi * freq * x) + b"}, + ) + self.assertEqual(fit_A.init_params, group_A_p0) + self.assertAlmostEqual(fit_A.params["amp"], amp1, delta=0.1) + self.assertAlmostEqual(fit_A.params["freq"], freq1, delta=0.1) + self.assertAlmostEqual(fit_A.params["b"], b1, delta=0.1) + + fit_B = expdata.artifacts("fit_summary").data["group_B"] + self.assertEqual( + fit_B.model_repr, + {"m1": "amp * cos(2 * pi * freq * x) + b", "m2": "amp * sin(2 * pi * freq * x) + b"}, + ) + self.assertEqual(fit_B.init_params, group_B_p0) + self.assertAlmostEqual(fit_B.params["amp"], amp2, delta=0.1) + self.assertAlmostEqual(fit_B.params["freq"], freq2, delta=0.1) + self.assertAlmostEqual(fit_B.params["b"], b2, delta=0.1) + + table_subset = expdata.artifacts("curve_data").data.filter(series=series, category=category) + self.assertEqual(len(table_subset), 200) + if isinstance(series, int): + np.testing.assert_array_equal(table_subset.series_id, series) + else: + np.testing.assert_array_equal(table_subset.series_name, series) + if category == "raw": + np.testing.assert_array_equal(table_subset.shots, 1024) + else: + self.assertTrue(np.isnan(np.array(table_subset.shots, dtype=float)).all()) + np.testing.assert_array_equal(table_subset.category, category) + np.testing.assert_array_equal(table_subset.analysis[:100], "group_A") + np.testing.assert_array_equal(table_subset.analysis[-100:], "group_B") + np.testing.assert_array_equal( + table_subset.filter(analysis="group_A").x, np.linspace(0, 1, 100) + ) + np.testing.assert_array_equal( + table_subset.filter(analysis="group_B").x, np.linspace(0, 1, 100) + ) + # two entries are generated for group A and group B self.assertEqual(len(amps), 2) self.assertEqual(amps[0].extra["group"], "group_A") self.assertEqual(amps[1].extra["group"], "group_B") - self.assertAlmostEqual(amps[0].value.n, 0.2, delta=0.1) - self.assertAlmostEqual(amps[1].value.n, 0.4, delta=0.1) + self.assertAlmostEqual(amps[0].value.n, amp1, delta=0.1) + self.assertAlmostEqual(amps[1].value.n, amp2, delta=0.1) self.assertEqual(len(result._figures), n_figures) diff --git a/test/data_processing/test_restless_experiment.py b/test/data_processing/test_restless_experiment.py index 7eb1844c45..7a02e3768e 100644 --- a/test/data_processing/test_restless_experiment.py +++ b/test/data_processing/test_restless_experiment.py @@ -63,14 +63,14 @@ def test_end_to_end_restless(self, pi_ratio): expdata = amp_exp.run(backend) self.assertExperimentDone(expdata) - result = expdata.analysis_results(1) + result = expdata.analysis_results("d_theta") d_theta = 
result.value.n self.assertAlmostEqual(d_theta, error, delta=0.01) self.assertEqual(result.quality, "good") # check that the fit amplitude is almost 1 as expected. - amp_fit = expdata.analysis_results(0).value.params["amp"] + amp_fit = expdata.artifacts("fit_summary").data.params["amp"] self.assertAlmostEqual(amp_fit, 1.0, delta=0.02) @data(-0.02, 0.04) @@ -89,11 +89,11 @@ def test_end_to_end_restless_standard_processor(self, pi_ratio): expdata = amp_exp.run(backend) self.assertExperimentDone(expdata) - result = expdata.analysis_results(1) + result = expdata.analysis_results("d_theta") d_theta = result.value.n self.assertTrue(abs(d_theta - error) > 0.01) # check that the fit amplitude is much smaller than 1. - amp_fit = expdata.analysis_results(0).value.params["amp"] + amp_fit = expdata.artifacts("fit_summary").data.params["amp"] self.assertTrue(amp_fit < 0.05) diff --git a/test/database_service/test_db_experiment_data.py b/test/database_service/test_db_experiment_data.py index 57958ae34b..020e871cc7 100644 --- a/test/database_service/test_db_experiment_data.py +++ b/test/database_service/test_db_experiment_data.py @@ -35,9 +35,8 @@ from qiskit.providers import JobStatus from qiskit.providers.backend import Backend from qiskit_ibm_experiment import IBMExperimentService -from qiskit_experiments.framework import ExperimentData -from qiskit_experiments.framework import AnalysisResult -from qiskit_experiments.framework import BackendData +from qiskit_experiments.framework import ExperimentData, AnalysisResult, BackendData, ArtifactData + from qiskit_experiments.database_service.exceptions import ( ExperimentDataError, ExperimentEntryNotFound, @@ -489,19 +488,20 @@ def test_add_get_analysis_result(self): exp_data.add_analysis_results(res) # We cannot compare results with exp_data.analysis_results() - # This test is too hacky since it tris to compare MagicMock with AnalysisResult. + # This test is too hacky since it tries to compare MagicMock with AnalysisResult. 
self.assertEqual(
            [res.result_id for res in exp_data.analysis_results()],
            result_ids,
        )
-        self.assertEqual(
-            exp_data.analysis_results(1).result_id,
-            result_ids[1],
-        )
-        self.assertEqual(
-            [res.result_id for res in exp_data.analysis_results(slice(2, 4))],
-            result_ids[2:4],
-        )
+        with self.assertWarns(DeprecationWarning):
+            self.assertEqual(
+                exp_data.analysis_results(1).result_id,
+                result_ids[1],
+            )
+            self.assertEqual(
+                [res.result_id for res in exp_data.analysis_results(slice(2, 4))],
+                result_ids[2:4],
+            )

     def test_add_get_analysis_results(self):
         """Test adding and getting a list of analysis results."""
@@ -1064,6 +1064,18 @@ def test_copy_metadata(self):
         self.assertFalse(copied.analysis_results())
         self.assertEqual(exp_data.provider, copied.provider)

+    def test_copy_figure_artifacts(self):
+        """Test copy expdata figures and artifacts."""
+        exp_data = FakeExperiment(experiment_type="qiskit_test").run(backend=FakeBackend())
+        exp_data.add_figures(str.encode("hello world"))
+        exp_data.add_artifacts(ArtifactData(name="test", data="foo"))
+        copied = exp_data.copy(copy_results=True)
+
+        self.assertEqual(exp_data.artifacts(), copied.artifacts())
+        self.assertEqual(exp_data.figure_names, copied.figure_names)
+        for i in exp_data.figure_names:
+            self.assertEqual(exp_data.figure(i), copied.figure(i))
+
     def test_copy_metadata_pending_job(self):
         """Test copy metadata with a pending job."""
         event = threading.Event()
@@ -1183,8 +1195,8 @@ def test_getters(self):
         self.assertEqual(data.source, "source_data")

     def test_metadata_too_large(self):
-        """Tests that ExperimentData can detect when the metadta
-        should be saved as a seperate file"""
+        """Tests that ExperimentData can detect when the metadata
+        should be saved as a separate file"""
         exp_data = ExperimentData()
         metadata_size = 100000
         exp_data.metadata["components"] = [
@@ -1201,3 +1213,90 @@ def test_hgp_setter(self):
         self.assertEqual("ibm-q-internal", exp_data.hub)
         self.assertEqual("deployed", exp_data.group)
         self.assertEqual("default", exp_data.project)
+
+    def test_add_delete_artifact(self):
+        """Tests adding an artifact and a list of artifacts. Tests deleting an artifact
+        by name, index, and ID.
Test the metadata is correctly tracking additions + and deletions.""" + exp_data = ExperimentData() + self.assertEqual(exp_data.artifacts(), []) + new_artifact = ArtifactData(name="test", data="foo") + exp_data.add_artifacts(new_artifact) + self.assertEqual(exp_data.artifacts(0), new_artifact) + self.assertEqual(exp_data.artifacts("test"), new_artifact) + + service = mock.create_autospec(IBMExperimentService, instance=True) + exp_data.service = service + exp_data.save() + + self.assertEqual(exp_data.metadata["artifact_files"], {"test.zip"}) + + # delete by name + exp_data.delete_artifact("test") + self.assertEqual(exp_data.artifacts(), []) + self.assertEqual(exp_data._deleted_artifacts, {"test"}) + with self.assertRaises(ExperimentEntryNotFound): + exp_data.artifacts(0) + + exp_data.save() + # after saving, artifact_files should be updated again + self.assertEqual(exp_data._deleted_artifacts, set()) + self.assertEqual(exp_data.metadata["artifact_files"], set()) + + new_artifact2 = ArtifactData(name="test", data="foo2") + new_artifact3 = ArtifactData(name="test2", data="foo2") + exp_data.add_artifacts([new_artifact, new_artifact2, new_artifact3]) + self.assertEqual(exp_data.artifacts(), [new_artifact, new_artifact2, new_artifact3]) + self.assertEqual(exp_data.artifacts("test"), [new_artifact, new_artifact2]) + + deleted_id = exp_data.artifacts(0).artifact_id + # delete by index + exp_data.delete_artifact(0) + + self.assertEqual(exp_data.artifacts(), [new_artifact2, new_artifact3]) + with self.assertRaises(ExperimentEntryNotFound): + exp_data.artifacts(deleted_id) + self.assertEqual(exp_data._deleted_artifacts, {"test"}) + + exp_data.save() + # after saving, deleted artifacts should be cleared again + self.assertEqual(exp_data._deleted_artifacts, set()) + self.assertEqual(exp_data.metadata["artifact_files"], {"test.zip", "test2.zip"}) + + # finish deleting artifacts named test + # delete by id + exp_data.delete_artifact(exp_data.artifacts(0).artifact_id) + self.assertEqual(exp_data.artifacts(), [new_artifact3]) + exp_data.save() + self.assertEqual(exp_data._deleted_artifacts, set()) + self.assertEqual(exp_data.metadata["artifact_files"], {"test2.zip"}) + + def test_add_duplicated_artifact(self): + """Tests behavior when adding an artifact with a duplicate ID.""" + exp_data = ExperimentData() + + new_artifact1 = ArtifactData(artifact_id="0", name="test", data="foo") + new_artifact2 = ArtifactData(artifact_id="0", name="test2", data="foo3") + + exp_data.add_artifacts(new_artifact1) + + # Adding an artifact with the same ID should fail + with self.assertRaises(ValueError): + exp_data.add_artifacts(new_artifact2) + + # Overwrite the artifact with a new one of the same ID + exp_data.add_artifacts(new_artifact2, overwrite=True) + self.assertEqual(exp_data.artifacts(), [new_artifact2]) + + def test_delete_nonexistent_artifact(self): + """Tests behavior when deleting a nonexistent artifact.""" + exp_data = ExperimentData() + + new_artifact1 = ArtifactData(artifact_id="0", name="test", data="foo") + exp_data.add_artifacts(new_artifact1) + + with self.assertRaises(ExperimentEntryNotFound): + exp_data.delete_artifact(2) + + with self.assertRaises(ExperimentEntryNotFound): + exp_data.delete_artifact("123") diff --git a/test/database_service/test_json.py b/test/database_service/test_json.py index 7c7e151bfa..d85b489f4a 100644 --- a/test/database_service/test_json.py +++ b/test/database_service/test_json.py @@ -16,9 +16,11 @@ from test.fake_experiment import FakeExperiment import ddt +import numpy as np 
from qiskit.circuit import Instruction
 from qiskit.circuit.library import QuantumVolume, SXGate, RZXGate, Barrier, Measure
 import qiskit.quantum_info as qi
+from qiskit_experiments.curve_analysis import CurveFitResult


 class CustomClass:
@@ -123,6 +125,19 @@ def test_roundtrip_function(self):
         obj = custom_function
         self.assertRoundTripSerializable(obj)

+    def test_roundtrip_curvefitresult(self):
+        """Test roundtrip serialization of the CurveFitResult class"""
+        obj = CurveFitResult(
+            method="some_method",
+            model_repr={"s1": "par0 * x + par1"},
+            success=True,
+            params={"par0": 0.3, "par1": 0.4},
+            var_names=["par0", "par1"],
+            covar=np.array([[2.19188077e-03, 2.19906808e-01], [2.19906808e-01, 2.62351788e01]]),
+            reduced_chisq=1.5,
+        )
+        self.assertRoundTripSerializable(obj)
+
     def test_roundtrip_class_type(self):
         """Test roundtrip serialization of custom class"""
         obj = CustomClass
diff --git a/test/extended_equality.py b/test/extended_equality.py
index 369d6f169d..d333e5a2f8 100644
--- a/test/extended_equality.py
+++ b/test/extended_equality.py
@@ -37,6 +37,7 @@
     BaseAnalysis,
     AnalysisResult,
     AnalysisResultTable,
+    ArtifactData,
 )
 from qiskit_experiments.visualization import BaseDrawer

@@ -263,6 +264,27 @@ def _check_service_analysis_results(
     )


+@_is_equivalent_dispatcher.register
+def _check_artifact_data(
+    data1: ArtifactData,
+    data2: ArtifactData,
+    **kwargs,
+):
+    """Check equality of the ArtifactData class."""
+    return _check_all_attributes(
+        attrs=[
+            "name",
+            "data",
+            "device_components",
+            "experiment_id",
+            "experiment",
+        ],
+        data1=data1,
+        data2=data2,
+        **kwargs,
+    )
+
+
 @_is_equivalent_dispatcher.register
 def _check_configurable_classes(
     data1: Union[BaseExperiment, BaseAnalysis, BaseDrawer],
@@ -365,7 +387,13 @@ def _check_experiment_data(
         data2.child_data(),
         **kwargs,
     )
-    return all([attributes_equiv, data_equiv, analysis_results_equiv, child_equiv])
+    artifact_equiv = is_equivalent(
+        data1.artifacts(),
+        data2.artifacts(),
+        **kwargs,
+    )
+
+    return all([attributes_equiv, data_equiv, analysis_results_equiv, child_equiv, artifact_equiv])


 def _check_all_attributes(
diff --git a/test/fake_experiment.py b/test/fake_experiment.py
index 52a18f2940..80236e8e74 100644
--- a/test/fake_experiment.py
+++ b/test/fake_experiment.py
@@ -13,9 +13,17 @@
 """A FakeExperiment for testing."""

 import numpy as np
+import pandas as pd
 from matplotlib.figure import Figure as MatplotlibFigure
 from qiskit import QuantumCircuit
-from qiskit_experiments.framework import BaseExperiment, BaseAnalysis, Options, AnalysisResultData
+from qiskit_experiments.framework import (
+    BaseExperiment,
+    BaseAnalysis,
+    Options,
+    AnalysisResultData,
+    ArtifactData,
+)
+from qiskit_experiments.curve_analysis import ScatterTable, CurveFitResult


 class FakeAnalysis(BaseAnalysis):
@@ -33,6 +41,18 @@ def _run_analysis(self, experiment_data):
         analysis_results = [
             AnalysisResultData(f"result_{i}", value) for i, value in enumerate(rng.random(3))
         ]
+        scatter_table = ScatterTable.from_dataframe(pd.DataFrame(columns=ScatterTable.COLUMNS))
+        fit_data = CurveFitResult(
+            method="some_method",
+            model_repr={"s1": "par0 * x + par1"},
+            success=True,
+            params={"par0": rng.random(), "par1": rng.random()},
+            var_names=["par0", "par1"],
+            covar=rng.random((2, 2)),
+            reduced_chisq=rng.random(),
+        )
+        analysis_results.append(ArtifactData(name="curve_data", data=scatter_table))
+        analysis_results.append(ArtifactData(name="fit_summary", data=fit_data))
         figures = None
         add_figures = self.options.get("add_figures", False)
         if add_figures:
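``FakeAnalysis`` now emits the same two artifacts as a real curve analysis, so the framework tests that follow can exercise artifact equality, copying, and serialization without running a fit. A sketch of what a test can assume after running a ``FakeExperiment``, where ``exp_data`` is the returned container:

    # Sketch: FakeAnalysis attaches an empty ScatterTable and a randomized CurveFitResult
    table = exp_data.artifacts("curve_data").data  # empty ScatterTable
    fit = exp_data.artifacts("fit_summary").data  # CurveFitResult with random params
    print(len(table), fit.params["par0"])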
diff --git a/test/framework/test_artifacts.py b/test/framework/test_artifacts.py new file mode 100644 index 0000000000..18046cba27 --- /dev/null +++ b/test/framework/test_artifacts.py @@ -0,0 +1,54 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2021. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. + +"""Test artifacts.""" + +from test.base import QiskitExperimentsTestCase +from datetime import datetime + +from qiskit_experiments.framework import ArtifactData + + +class TestArtifacts(QiskitExperimentsTestCase): + """Test cases for the ArtifactData class.""" + + def test_basic_artifact(self): + """Test artifact properties.""" + timestamp = datetime.now() + artifact = ArtifactData(artifact_id=0, name="test", data="foo", created_time=timestamp) + self.assertEqual(artifact.artifact_id, 0) + self.assertEqual(artifact.name, "test") + self.assertEqual(artifact.experiment, None) + self.assertEqual(artifact.device_components, []) + self.assertEqual(artifact.dtype, "str") + self.assertEqual(artifact.created_time, timestamp) + self.assertEqual( + str(artifact), + "ArtifactData(name=test, dtype=str, uid=0, experiment=None, device_components=[])", + ) + + def test_artifact_equality(self): + """Test artifact equality.""" + timestamp = datetime.now() + artifact1 = ArtifactData(name="test", data="foo") + artifact2 = ArtifactData(name="test", data="foo") + self.assertNotEqual(artifact1, artifact2) + artifact1 = ArtifactData(artifact_id=0, name="test", data="foo", created_time=timestamp) + artifact2 = ArtifactData(artifact_id=0, name="test", data="foo", created_time=timestamp) + self.assertEqual(artifact1, artifact2) + + def test_serialize_artifact(self): + """Test serializing the artifact.""" + obj = ArtifactData(name="test", data="foo") + self.assertRoundTripSerializable(obj) + obj2 = ArtifactData(name="test", data={"foo": 123, "blah": obj}) + self.assertRoundTripSerializable(obj2) diff --git a/test/framework/test_composite.py b/test/framework/test_composite.py index 1ca1e5a5b6..c1667a3f60 100644 --- a/test/framework/test_composite.py +++ b/test/framework/test_composite.py @@ -98,6 +98,7 @@ def test_flatten_results_nested(self): self.assertEqual(len(expdata.child_data()), 0) # Check right number of analysis results is returned self.assertEqual(len(expdata.analysis_results()), 30) + self.assertEqual(len(expdata.artifacts()), 20) def test_flatten_results_partial(self): """Test flattening results.""" @@ -117,6 +118,7 @@ def test_flatten_results_partial(self): # Check out experiment wasn't flattened self.assertEqual(len(expdata.child_data()), 2) self.assertEqual(len(expdata.analysis_results()), 0) + self.assertEqual(len(expdata.artifacts()), 0) # check inner experiments were flattened child0 = expdata.child_data(0) @@ -126,6 +128,8 @@ def test_flatten_results_partial(self): # Check right number of analysis results is returned self.assertEqual(len(child0.analysis_results()), 9) self.assertEqual(len(child1.analysis_results()), 6) + self.assertEqual(len(child0.artifacts()), 6) + self.assertEqual(len(child1.artifacts()), 4) def test_experiment_config(self): """Test converting to and from config works""" @@ -188,6 +192,7 @@ def 
setUp(self): self.rootdata = batch_exp.run(backend=self.backend) self.assertExperimentDone(self.rootdata) self.assertEqual(len(self.rootdata.child_data()), 2) + self.assertEqual(len(self.rootdata.artifacts()), 0) self.rootdata.share_level = self.share_level @@ -202,8 +207,19 @@ def check_attributes(self, expdata): for childdata in components: self.check_attributes(childdata) self.assertEqual(childdata.parent_id, expdata.experiment_id) - - def check_if_equal(self, expdata1, expdata2, is_a_copy): + if not hasattr(childdata, "child_data"): + self.assertEqual(len(childdata.artifacts()), 2) + self.assertEqual(childdata.artifacts("curve_data").experiment, "FakeExperiment") + self.assertEqual( + childdata.artifacts("curve_data").device_components, childdata.device_components + ) + self.assertEqual(childdata.artifacts("fit_summary").experiment, "FakeExperiment") + self.assertEqual( + childdata.artifacts("fit_summary").device_components, + childdata.device_components, + ) + + def check_if_equal(self, expdata1, expdata2, is_a_copy, check_artifact=False): """ Recursively traverse the tree and check equality of expdata1 and expdata2 """ @@ -223,6 +239,11 @@ def check_if_equal(self, expdata1, expdata2, is_a_copy): else: self.assertEqual(expdata1.experiment_id, expdata2.experiment_id) + if check_artifact: + self.assertEqual(len(expdata1.artifacts()), len(expdata2.artifacts())) + for artifact1, artifact2 in zip(expdata1.artifacts(), expdata2.artifacts()): + self.assertEqual(artifact1, artifact2, msg="artifacts not equal") + self.assertEqual(len(expdata1.child_data()), len(expdata2.child_data())) for childdata1, childdata2 in zip(expdata1.child_data(), expdata2.child_data()): self.check_if_equal(childdata1, childdata2, is_a_copy) @@ -242,7 +263,7 @@ def test_composite_save_load(self): self.rootdata.service = IBMExperimentService(local=True, local_save=False) self.rootdata.save() loaded_data = ExperimentData.load(self.rootdata.experiment_id, self.rootdata.service) - self.check_if_equal(loaded_data, self.rootdata, is_a_copy=False) + self.check_if_equal(loaded_data, self.rootdata, is_a_copy=False, check_artifact=True) def test_composite_save_metadata(self): """ @@ -251,7 +272,6 @@ def test_composite_save_metadata(self): self.rootdata.service = IBMExperimentService(local=True, local_save=False) self.rootdata.save_metadata() loaded_data = ExperimentData.load(self.rootdata.experiment_id, self.rootdata.service) - self.check_if_equal(loaded_data, self.rootdata, is_a_copy=False) def test_composite_copy(self): @@ -259,7 +279,7 @@ def test_composite_copy(self): Test composite ExperimentData.copy """ new_instance = self.rootdata.copy() - self.check_if_equal(new_instance, self.rootdata, is_a_copy=True) + self.check_if_equal(new_instance, self.rootdata, is_a_copy=True, check_artifact=True) self.check_attributes(new_instance) self.assertEqual(new_instance.parent_id, None) @@ -930,9 +950,13 @@ def test_batch_transpile_options_integrated(self): expdata = self.batch2.run(backend, noise_model=noise_model, shots=1000) self.assertExperimentDone(expdata) - self.assertEqual(expdata.child_data(0).analysis_results(0).value, 8) - self.assertEqual(expdata.child_data(1).child_data(0).analysis_results(0).value, 16) - self.assertEqual(expdata.child_data(1).child_data(1).analysis_results(0).value, 4) + self.assertEqual(expdata.child_data(0).analysis_results("non-zero counts").value, 8) + self.assertEqual( + expdata.child_data(1).child_data(0).analysis_results("non-zero counts").value, 16 + ) + self.assertEqual( + 
expdata.child_data(1).child_data(1).analysis_results("non-zero counts").value, 4 + ) def test_separate_jobs(self): """Test the separate_job experiment option""" diff --git a/test/library/calibration/test_drag.py b/test/library/calibration/test_drag.py index d870b83d3a..1c1f651028 100644 --- a/test/library/calibration/test_drag.py +++ b/test/library/calibration/test_drag.py @@ -72,7 +72,7 @@ def test_end_to_end(self, freq, betas, p0_opt): expdata = drag.run(backend) self.assertExperimentDone(expdata) - result = expdata.analysis_results(1) + result = expdata.analysis_results("beta") # pylint: disable=no-member self.assertTrue(abs(result.value.n - backend.experiment_helper.ideal_beta) < self.test_tol) diff --git a/test/library/calibration/test_fine_amplitude.py b/test/library/calibration/test_fine_amplitude.py index 7608f434b5..ea87c992b0 100644 --- a/test/library/calibration/test_fine_amplitude.py +++ b/test/library/calibration/test_fine_amplitude.py @@ -51,7 +51,7 @@ def test_end_to_end_under_rotation(self, pi_ratio): expdata = amp_exp.run(backend) self.assertExperimentDone(expdata) - result = expdata.analysis_results(1) + result = expdata.analysis_results("d_theta") d_theta = result.value.n tol = 0.04 @@ -71,7 +71,7 @@ def test_end_to_end_over_rotation(self, pi_ratio): backend.target.add_instruction(SXGate(), properties={(0,): None}) expdata = amp_exp.run(backend) self.assertExperimentDone(expdata) - result = expdata.analysis_results(1) + result = expdata.analysis_results("d_theta") d_theta = result.value.n tol = 0.04 @@ -101,7 +101,7 @@ def test_end_to_end(self, pi_ratio): expdata = amp_exp.run(backend) self.assertExperimentDone(expdata) - result = expdata.analysis_results(1) + result = expdata.analysis_results("d_theta") d_theta = result.value.n tol = 0.04 @@ -274,7 +274,7 @@ def test_run_x_cal(self): # run the calibration experiment. This should update the amp parameter of x which we test. exp_data = amp_cal.run() self.assertExperimentDone(exp_data) - d_theta = exp_data.analysis_results(1).value.n + d_theta = exp_data.analysis_results("d_theta").value.n new_amp = init_amp * np.pi / (np.pi + d_theta) circs = amp_cal._transpiled_circuits() @@ -310,7 +310,7 @@ def test_run_sx_cal(self): # run the calibration experiment. This should update the amp parameter of x which we test. 
exp_data = amp_cal.run() self.assertExperimentDone(exp_data) - d_theta = exp_data.analysis_results(1).value.n + d_theta = exp_data.analysis_results("d_theta").value.n new_amp = init_amp * (np.pi / 2) / (np.pi / 2 + d_theta) circs = amp_cal._transpiled_circuits() diff --git a/test/library/calibration/test_fine_drag.py b/test/library/calibration/test_fine_drag.py index f592008c17..50c495c3dd 100644 --- a/test/library/calibration/test_fine_drag.py +++ b/test/library/calibration/test_fine_drag.py @@ -57,14 +57,14 @@ def test_end_to_end(self): exp_data = drag.run(MockIQBackend(FineDragHelper())) self.assertExperimentDone(exp_data) - self.assertEqual(exp_data.analysis_results(0).quality, "good") + self.assertEqual(exp_data.analysis_results("d_theta").quality, "good") def test_end_to_end_no_schedule(self): """Test that we can run without a schedule.""" exp_data = FineXDrag([0]).run(MockIQBackend(FineDragHelper())) self.assertExperimentDone(exp_data) - self.assertEqual(exp_data.analysis_results(0).quality, "good") + self.assertEqual(exp_data.analysis_results("d_theta").quality, "good") def test_circuits_roundtrip_serializable(self): """Test circuits serialization of the experiment.""" @@ -122,7 +122,7 @@ def test_update_cals(self): # run the calibration experiment. This should update the beta parameter of x which we test. exp_data = drag_cal.run(self.backend) self.assertExperimentDone(exp_data) - d_theta = exp_data.analysis_results(1).value.n + d_theta = exp_data.analysis_results("d_theta").value.n sigma = 40 target_angle = np.pi new_beta = -np.sqrt(np.pi) * d_theta * sigma / target_angle**2 diff --git a/test/library/calibration/test_fine_frequency.py b/test/library/calibration/test_fine_frequency.py index e6d6f78d1d..fb8a21cad6 100644 --- a/test/library/calibration/test_fine_frequency.py +++ b/test/library/calibration/test_fine_frequency.py @@ -60,7 +60,7 @@ def test_end_to_end(self, freq_shift): expdata = freq_exp.run(shots=100) self.assertExperimentDone(expdata) - result = expdata.analysis_results(1) + result = expdata.analysis_results("d_theta") d_theta = result.value.n dt = BackendData(backend).dt d_freq = d_theta / (2 * np.pi * self.sx_duration * dt) diff --git a/test/library/calibration/test_rabi.py b/test/library/calibration/test_rabi.py index 36df7fce9b..510c818dd3 100644 --- a/test/library/calibration/test_rabi.py +++ b/test/library/calibration/test_rabi.py @@ -29,6 +29,7 @@ from qiskit_experiments.data_processing.nodes import Probability from qiskit_experiments.test.pulse_backend import SingleTransmonTestBackend from qiskit_experiments.framework.experiment_data import ExperimentStatus +from qiskit_experiments.curve_analysis import ParameterRepr class TestRabiEndToEnd(QiskitExperimentsTestCase): @@ -58,12 +59,14 @@ def test_rabi_end_to_end(self): rabi.set_experiment_options(amplitudes=np.linspace(-0.1, 0.1, 21)) expdata = rabi.run() self.assertExperimentDone(expdata) - result = expdata.analysis_results(0) + result = expdata.analysis_results("rabi_rate") self.assertEqual(result.quality, "good") # The comparison is made against the object that exists in the backend for accurate testing self.assertAlmostEqual( - result.value.params["freq"], self.backend.rabi_rate_01, delta=test_tol + expdata.artifacts("fit_summary").data.params["freq"], + self.backend.rabi_rate_01, + delta=test_tol, ) def test_wrong_processor(self): @@ -124,7 +127,7 @@ def test_ef_rabi_end_to_end(self): rabi.set_experiment_options(amplitudes=np.linspace(-0.1, 0.1, 11)) expdata = rabi.run() self.assertExperimentDone(expdata) 
- result = expdata.analysis_results(1) + result = expdata.analysis_results("rabi_rate_12") self.assertEqual(result.quality, "good") self.assertTrue(abs(result.value.n - self.backend.rabi_rate_12) < test_tol) @@ -261,12 +264,18 @@ def test_good_analysis(self): data_processor = DataProcessor("counts", [Probability(outcome="1")]) - experiment_data = OscillationAnalysis().run( - experiment_data, data_processor=data_processor, plot=False + analysis = OscillationAnalysis() + analysis.set_options( + result_parameters=[ParameterRepr("freq", "rabi_rate")], ) - result = experiment_data.analysis_results(0) + + experiment_data = analysis.run( + experiment_data, data_processor=data_processor, plot=False + ).block_for_results() + + result = experiment_data.analysis_results("rabi_rate") self.assertEqual(result.quality, "good") - self.assertAlmostEqual(result.value.params["freq"], expected_rate, delta=test_tol) + self.assertAlmostEqual(result.value, expected_rate, delta=test_tol) def test_bad_analysis(self): """Test the Rabi analysis.""" @@ -282,12 +291,19 @@ def test_bad_analysis(self): data_processor = DataProcessor("counts", [Probability(outcome="1")]) - experiment_data = OscillationAnalysis().run( - experiment_data, data_processor=data_processor, plot=False + analysis = OscillationAnalysis() + analysis.set_options( + result_parameters=[ParameterRepr("freq", "rabi_rate")], ) - result = experiment_data.analysis_results() + experiment_data = analysis.run( + experiment_data, + data_processor=data_processor, + plot=False, + ).block_for_results() + + result = experiment_data.analysis_results("rabi_rate") - self.assertEqual(result[0].quality, "bad") + self.assertEqual(result.quality, "bad") class TestCompositeExperiment(QiskitExperimentsTestCase): diff --git a/test/library/characterization/test_cross_resonance_hamiltonian.py b/test/library/characterization/test_cross_resonance_hamiltonian.py index 61176cca24..00b574430a 100644 --- a/test/library/characterization/test_cross_resonance_hamiltonian.py +++ b/test/library/characterization/test_cross_resonance_hamiltonian.py @@ -210,7 +210,7 @@ def test_integration(self, ix, iy, iz, zx, zy, zz): exp_data = expr.run() self.assertExperimentDone(exp_data, timeout=1000) - self.assertEqual(exp_data.analysis_results(0).quality, "good") + self.assertEqual(exp_data.analysis_results("omega_ix").quality, "good") # These values are computed from other analysis results in post hook. # Thus at least one of these values should be round-trip tested. 
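The test updates in this and the surrounding files all follow the same migration as the deprecation note above: positional lookups become name-based lookups. A before/after sketch using one of the result names from this file:

    # Deprecated: positional access depends on the order results were added
    result = exp_data.analysis_results(0)
    # Preferred: access by analysis result name (or by result ID)
    result = exp_data.analysis_results("omega_ix")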
@@ -265,7 +265,7 @@ def test_integration_backward_compat(self):
         exp_data = expr.run()
         self.assertExperimentDone(exp_data, timeout=1000)
 
-        self.assertEqual(exp_data.analysis_results(0).quality, "good")
+        self.assertEqual(exp_data.analysis_results("omega_ix").quality, "good")
         self.assertAlmostEqual(exp_data.analysis_results("omega_ix").value.n, ix, delta=delta)
         self.assertAlmostEqual(exp_data.analysis_results("omega_iy").value.n, iy, delta=delta)
 
diff --git a/test/library/characterization/test_half_angle.py b/test/library/characterization/test_half_angle.py
index d131fcae99..7e2359b292 100644
--- a/test/library/characterization/test_half_angle.py
+++ b/test/library/characterization/test_half_angle.py
@@ -39,7 +39,7 @@ def test_end_to_end(self):
             exp_data = hac.run(backend)
             self.assertExperimentDone(exp_data)
 
-            d_theta = exp_data.analysis_results(1).value.n
+            d_theta = exp_data.analysis_results("d_hac").value.n
 
             self.assertTrue(abs(d_theta - error) < tol)
 
diff --git a/test/library/characterization/test_qubit_spectroscopy.py b/test/library/characterization/test_qubit_spectroscopy.py
index 8dc52bfeb9..ae8fc8bde8 100644
--- a/test/library/characterization/test_qubit_spectroscopy.py
+++ b/test/library/characterization/test_qubit_spectroscopy.py
@@ -52,7 +52,7 @@ def test_spectroscopy_end2end_classified(self):
         spec.set_run_options(meas_level=MeasLevel.CLASSIFIED)
         expdata = spec.run(backend)
         self.assertExperimentDone(expdata)
-        result = expdata.analysis_results(1)
+        result = expdata.analysis_results("f01")
         self.assertRoundTripSerializable(result.value)
 
         self.assertAlmostEqual(result.value.n, freq01, delta=1e6)
@@ -65,7 +65,7 @@ def test_spectroscopy_end2end_classified(self):
         spec.set_run_options(meas_level=MeasLevel.CLASSIFIED)
         expdata = spec.run(backend)
         self.assertExperimentDone(expdata)
-        result = expdata.analysis_results(1)
+        result = expdata.analysis_results("f01")
         self.assertRoundTripSerializable(result.value)
 
         self.assertAlmostEqual(result.value.n, freq01 + 5e6, delta=1e6)
@@ -91,7 +91,7 @@ def test_spectroscopy_end2end_kerneled(self):
         spec = QubitSpectroscopy([qubit], frequencies)
         expdata = spec.run(backend)
         self.assertExperimentDone(expdata)
-        result = expdata.analysis_results(1)
+        result = expdata.analysis_results("f01")
         self.assertRoundTripSerializable(result.value)
 
         self.assertTrue(freq01 - 2e6 < result.value.n < freq01 + 2e6)
@@ -103,7 +103,7 @@ def test_spectroscopy_end2end_kerneled(self):
         spec = QubitSpectroscopy([qubit], frequencies)
         expdata = spec.run(backend)
         self.assertExperimentDone(expdata)
-        result = expdata.analysis_results(1)
+        result = expdata.analysis_results("f01")
         self.assertRoundTripSerializable(result.value)
 
         self.assertTrue(freq01 + 3e6 < result.value.n < freq01 + 8e6)
@@ -112,7 +112,7 @@ def test_spectroscopy_end2end_kerneled(self):
         spec.set_run_options(meas_return="avg")
         expdata = spec.run(backend)
         self.assertExperimentDone(expdata)
-        result = expdata.analysis_results(1)
+        result = expdata.analysis_results("f01")
         self.assertRoundTripSerializable(result.value)
 
         self.assertTrue(freq01 + 3e6 < result.value.n < freq01 + 8e6)
@@ -140,7 +140,7 @@ def test_spectroscopy12_end2end_classified(self):
         spec.set_run_options(meas_level=MeasLevel.CLASSIFIED)
         expdata = spec.run(backend)
         self.assertExperimentDone(expdata)
-        result = expdata.analysis_results(1)
+        result = expdata.analysis_results("f12")
         self.assertRoundTripSerializable(result.value)
 
         self.assertTrue(freq01 - 2e6 < result.value.n < freq01 + 2e6)
@@ -189,7 +189,7 @@ def test_expdata_serialization(self):
         self.assertRoundTripSerializable(expdata)
 
         # Checking serialization of the analysis
-        self.assertRoundTripSerializable(expdata.analysis_results(1))
+        self.assertRoundTripSerializable(expdata.analysis_results("f01"))
 
     def test_kerneled_expdata_serialization(self):
         """Test experiment data and analysis data JSON serialization"""
@@ -216,7 +216,7 @@ def test_kerneled_expdata_serialization(self):
         self.assertRoundTripSerializable(expdata)
 
         # Checking serialization of the analysis
-        self.assertRoundTripSerializable(expdata.analysis_results(1))
+        self.assertRoundTripSerializable(expdata.analysis_results("f01"))
 
     def test_parallel_experiment(self):
         """Test for parallel experiment"""
diff --git a/test/library/characterization/test_readout_angle.py b/test/library/characterization/test_readout_angle.py
index a26ffd4e2b..be9b4057bc 100644
--- a/test/library/characterization/test_readout_angle.py
+++ b/test/library/characterization/test_readout_angle.py
@@ -38,7 +38,8 @@ def test_readout_angle_end2end(self):
         exp = ReadoutAngle([0])
         expdata = exp.run(backend, shots=10000)
         self.assertExperimentDone(expdata)
-        res = expdata.analysis_results(0)
+
+        res = expdata.analysis_results("readout_angle")
         self.assertAlmostEqual(res.value % (2 * np.pi), np.pi / 2, places=2)
 
         backend = MockIQBackend(
@@ -47,7 +48,7 @@ def test_readout_angle_end2end(self):
         exp = ReadoutAngle([0])
         expdata = exp.run(backend, shots=10000)
         self.assertExperimentDone(expdata)
-        res = expdata.analysis_results(0)
+        res = expdata.analysis_results("readout_angle")
         self.assertAlmostEqual(res.value % (2 * np.pi), 15 * np.pi / 8, places=2)
 
     def test_kerneled_expdata_serialization(self):
@@ -69,4 +70,4 @@ def test_kerneled_expdata_serialization(self):
         self.assertRoundTripSerializable(expdata)
 
         # Checking serialization of the analysis
-        self.assertRoundTripSerializable(expdata.analysis_results(0))
+        self.assertRoundTripSerializable(expdata.analysis_results("readout_angle"))
diff --git a/test/library/characterization/test_readout_error.py b/test/library/characterization/test_readout_error.py
index 0e32e75d0e..6bf871bcb6 100644
--- a/test/library/characterization/test_readout_error.py
+++ b/test/library/characterization/test_readout_error.py
@@ -39,7 +39,8 @@ def test_local_analysis_ideal(self):
         exp = LocalReadoutError(backend=backend)
         expdata = exp.run(backend)
         self.assertExperimentDone(expdata)
-        mitigator = expdata.analysis_results(0).value
+
+        mitigator = expdata.analysis_results("Local Readout Mitigator").value
 
         qubits = list(range(num_qubits))
         self.assertEqual(mitigator._num_qubits, num_qubits)
@@ -56,7 +57,7 @@ def test_correlated_analysis_ideal(self):
         exp = CorrelatedReadoutError(backend=backend)
         expdata = exp.run(backend)
         self.assertExperimentDone(expdata)
-        mitigator = expdata.analysis_results(0).value
+        mitigator = expdata.analysis_results("Correlated Readout Mitigator").value
 
         qubits = list(range(num_qubits))
         self.assertEqual(mitigator._num_qubits, num_qubits)
@@ -89,7 +90,7 @@ def test_local_analysis(self):
         expdata.metadata.update(run_meta)
         exp = LocalReadoutError(qubits)
         result = exp.analysis.run(expdata)
-        mitigator = result.analysis_results(0).value
+        mitigator = result.analysis_results("Local Readout Mitigator").value
 
         self.assertEqual(len(qubits), mitigator._num_qubits)
         self.assertEqual(qubits, mitigator._qubits)
@@ -159,7 +160,7 @@ def test_correlated_analysis(self):
         expdata.metadata.update(run_meta)
         exp = CorrelatedReadoutError(qubits)
         result = exp.analysis.run(expdata)
-        mitigator = result.analysis_results(0).value
+        mitigator = result.analysis_results("Correlated Readout Mitigator").value
 
         self.assertEqual(len(qubits), mitigator._num_qubits)
         self.assertEqual(qubits, mitigator._qubits)
@@ -182,8 +183,8 @@ def test_parallel_running(self):
         exp = ParallelExperiment([exp1, exp2], flatten_results=False)
         expdata = exp.run(backend=backend)
         self.assertExperimentDone(expdata)
-        mit1 = expdata.child_data(0).analysis_results(0).value
-        mit2 = expdata.child_data(1).analysis_results(0).value
+        mit1 = expdata.child_data(0).analysis_results("Correlated Readout Mitigator").value
+        mit2 = expdata.child_data(1).analysis_results("Correlated Readout Mitigator").value
         assignment_matrix1 = mit1.assignment_matrix()
         assignment_matrix2 = mit2.assignment_matrix()
         self.assertFalse(matrix_equal(assignment_matrix1, assignment_matrix2))
@@ -211,7 +212,7 @@ def test_json_serialization(self):
         exp = LocalReadoutError(qubits)
         exp_data = exp.run(backend)
         self.assertExperimentDone(exp_data)
-        mitigator = exp_data.analysis_results(0).value
+        mitigator = exp_data.analysis_results("Local Readout Mitigator").value
         serialized = json.dumps(mitigator, cls=ExperimentEncoder)
         loaded = json.loads(serialized, cls=ExperimentDecoder)
         self.assertTrue(matrix_equal(mitigator.assignment_matrix(), loaded.assignment_matrix()))
diff --git a/test/library/characterization/test_resonator_spectroscopy.py b/test/library/characterization/test_resonator_spectroscopy.py
index cbbb110292..38b53c82da 100644
--- a/test/library/characterization/test_resonator_spectroscopy.py
+++ b/test/library/characterization/test_resonator_spectroscopy.py
@@ -125,7 +125,7 @@ def test_end_to_end(self, freq_shift):
         expdata = spec.run(backend)
         self.assertExperimentDone(expdata)
 
-        result = expdata.analysis_results(1)
+        result = expdata.analysis_results("res_freq0")
         self.assertRoundTripSerializable(result.value)
 
         self.assertAlmostEqual(result.value.n, res_freq + freq_shift, delta=0.1e6)
@@ -189,7 +189,7 @@ def test_kerneled_expdata_serialization(self, freq_shift):
         self.assertRoundTripSerializable(expdata)
 
         # Checking serialization of the analysis
-        self.assertRoundTripSerializable(expdata.analysis_results(1))
+        self.assertRoundTripSerializable(expdata.analysis_results("res_freq0"))
 
     def test_parallel_experiment(self):
         """Test for parallel experiment"""
diff --git a/test/library/characterization/test_t1.py b/test/library/characterization/test_t1.py
index 475e1d82e9..7f2d46f6ec 100644
--- a/test/library/characterization/test_t1.py
+++ b/test/library/characterization/test_t1.py
@@ -205,8 +205,9 @@ def test_t1_analysis(self):
             }
         )
 
-        res, _ = T1Analysis()._run_analysis(data)
-        result = res[1]
+        experiment_data = T1Analysis().run(data, plot=False)
+        result = experiment_data.analysis_results("T1")
+
         self.assertEqual(result.quality, "good")
         self.assertAlmostEqual(result.value.nominal_value, 25e-9, delta=3)
 
@@ -241,8 +242,8 @@ def test_t1_low_quality(self):
             }
         )
 
-        res, _ = T1Analysis()._run_analysis(data)
-        result = res[1]
+        experiment_data = T1Analysis().run(data, plot=False)
+        result = experiment_data.analysis_results("T1")
         self.assertEqual(result.quality, "bad")
 
     def test_t1_parallel_exp_transpile(self):
diff --git a/test/library/characterization/test_t2hahn.py b/test/library/characterization/test_t2hahn.py
index 6c068a0e75..566a6fb8d8 100644
--- a/test/library/characterization/test_t2hahn.py
+++ b/test/library/characterization/test_t2hahn.py
@@ -201,7 +201,7 @@ def test_roundtrip_serializable(self):
         self.assertRoundTripSerializable(expdata)
 
         # Checking serialization of the analysis
-        self.assertRoundTripSerializable(expdata.analysis_results(1))
+        self.assertRoundTripSerializable(expdata.analysis_results("T2"))
 
     def test_circuit_roundtrip_serializable(self):
         """Test round trip JSON serialization"""
diff --git a/test/library/quantum_volume/test_qv.py b/test/library/quantum_volume/test_qv.py
index c3b81c4591..08e2009301 100644
--- a/test/library/quantum_volume/test_qv.py
+++ b/test/library/quantum_volume/test_qv.py
@@ -113,12 +113,13 @@ def test_qv_sigma_decreasing(self):
         qv_exp.set_experiment_options(trials=2)
         expdata1 = qv_exp.run(backend)
         self.assertExperimentDone(expdata1)
-        result_data1 = expdata1.analysis_results(0)
+
+        result_data1 = expdata1.analysis_results("mean_HOP")
         expdata2 = qv_exp.run(backend, analysis=None)
         self.assertExperimentDone(expdata2)
         expdata2.add_data(expdata1.data())
         qv_exp.analysis.run(expdata2)
-        result_data2 = expdata2.analysis_results(0)
+        result_data2 = expdata2.analysis_results("mean_HOP")
 
         self.assertTrue(result_data1.extra["trials"] == 2, "number of trials is incorrect")
         self.assertTrue(
@@ -151,7 +152,7 @@ def test_qv_failure_insufficient_trials(self):
         with self.assertWarns(UserWarning):
             qv_exp.analysis.run(exp_data)
 
-        qv_result = exp_data.analysis_results(1)
+        qv_result = exp_data.analysis_results("quantum_volume")
         self.assertTrue(
             qv_result.extra["success"] is False and qv_result.value == 1,
             "quantum volume is successful with less than 100 trials",
@@ -177,7 +178,7 @@ def test_qv_failure_insufficient_hop(self):
         exp_data.add_data(insufficient_hop_data)
         qv_exp.analysis.run(exp_data)
 
-        qv_result = exp_data.analysis_results(1)
+        qv_result = exp_data.analysis_results("quantum_volume")
         self.assertTrue(
             qv_result.extra["success"] is False and qv_result.value == 1,
             "quantum volume is successful with heavy output probability less than 2/3",
@@ -204,7 +205,7 @@ def test_qv_failure_insufficient_confidence(self):
         exp_data.add_data(insufficient_confidence_data)
         qv_exp.analysis.run(exp_data)
 
-        qv_result = exp_data.analysis_results(1)
+        qv_result = exp_data.analysis_results("quantum_volume")
         self.assertTrue(
             qv_result.extra["success"] is False and qv_result.value == 1,
             "quantum volume is successful with insufficient confidence",
diff --git a/test/library/randomized_benchmarking/test_standard_rb.py b/test/library/randomized_benchmarking/test_standard_rb.py
index 2ccd5a47df..b11f5b926a 100644
--- a/test/library/randomized_benchmarking/test_standard_rb.py
+++ b/test/library/randomized_benchmarking/test_standard_rb.py
@@ -392,7 +392,7 @@ def test_poor_experiment_result(self):
         expdata = exp.run()
         self.assertExperimentDone(expdata)
 
-        overview = expdata.analysis_results(0).value
+        overview = expdata.artifacts("fit_summary").data
         # This yields bad fit due to poor data points, but still fit is not completely off.
         self.assertLess(overview.reduced_chisq, 14)