diff --git a/docs/ftbbl.ipynb b/docs/ftbbl.ipynb
index 850f5194..5d7af0f5 100644
--- a/docs/ftbbl.ipynb
+++ b/docs/ftbbl.ipynb
@@ -907,6 +907,7 @@
"
n_repetitions | \n",
" success_probability | \n",
" processor_str | \n",
+ " job_finished_time | \n",
" \n",
" \n",
" \n",
@@ -922,6 +923,7 @@
" 1000 | \n",
" 0.954 | \n",
" rainbow-depol(5.000e-03) | \n",
+ " 1969-12-31 16:00:00 | \n",
" \n",
" \n",
" 1 | \n",
@@ -935,6 +937,7 @@
" 1000 | \n",
" 0.940 | \n",
" rainbow-depol(5.000e-03) | \n",
+ " 1969-12-31 16:00:00 | \n",
"
\n",
" \n",
" 2 | \n",
@@ -948,6 +951,7 @@
" 1000 | \n",
" 0.946 | \n",
" rainbow-depol(5.000e-03) | \n",
+ " 1969-12-31 16:00:00 | \n",
"
\n",
" \n",
" 3 | \n",
@@ -961,6 +965,7 @@
" 1000 | \n",
" 0.708 | \n",
" rainbow-depol(5.000e-03) | \n",
+ " 1969-12-31 16:00:00 | \n",
"
\n",
" \n",
" 4 | \n",
@@ -974,6 +979,7 @@
" 1000 | \n",
" 0.682 | \n",
" rainbow-depol(5.000e-03) | \n",
+ " 1969-12-31 16:00:00 | \n",
"
\n",
" \n",
"\n",
@@ -994,12 +1000,12 @@
"3 1 40 0 1000 0.708 \n",
"4 1 40 1 1000 0.682 \n",
"\n",
- " processor_str \n",
- "0 rainbow-depol(5.000e-03) \n",
- "1 rainbow-depol(5.000e-03) \n",
- "2 rainbow-depol(5.000e-03) \n",
- "3 rainbow-depol(5.000e-03) \n",
- "4 rainbow-depol(5.000e-03) "
+ " processor_str job_finished_time \n",
+ "0 rainbow-depol(5.000e-03) 1969-12-31 16:00:00 \n",
+ "1 rainbow-depol(5.000e-03) 1969-12-31 16:00:00 \n",
+ "2 rainbow-depol(5.000e-03) 1969-12-31 16:00:00 \n",
+ "3 rainbow-depol(5.000e-03) 1969-12-31 16:00:00 \n",
+ "4 rainbow-depol(5.000e-03) 1969-12-31 16:00:00 "
]
},
"execution_count": 15,
@@ -1043,7 +1049,7 @@
"\n",
"colors = plt.get_cmap('tab10')\n",
"\n",
- "for i, row in total_df.iterrows():\n",
+ "for i, row in total_df.reset_index().iterrows():\n",
" plt.errorbar(\n",
" x=row['macrocycle_depth'],\n",
" y=row['success_probability_mean'],\n",
diff --git a/recirq/otoc/loschmidt/tilted_square_lattice/analysis-walkthrough.ipynb b/recirq/otoc/loschmidt/tilted_square_lattice/analysis-walkthrough.ipynb
index 61dd2bf8..a5caf1f0 100644
--- a/recirq/otoc/loschmidt/tilted_square_lattice/analysis-walkthrough.ipynb
+++ b/recirq/otoc/loschmidt/tilted_square_lattice/analysis-walkthrough.ipynb
@@ -116,11 +116,22 @@
"metadata": {},
"outputs": [],
"source": [
- "means_df, means_gb_cols = analysis.groupby_all_except(\n",
- " df.drop(['n_qubits', 'q_area'], axis=1), \n",
- " y_cols=('instance_i', 'success_probability'), \n",
- " agg_func={'success_probability': ['mean', 'std']}\n",
- ")\n",
+ "from analysis import WHD_GB_COLS\n",
+ "WHD_GB_COLS"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "means_y_cols = {\n",
+ " 'success_probability_mean': ('success_probability', 'mean'),\n",
+ " 'success_probability_std': ('success_probability', 'std'),\n",
+ " 'job_finished_time': ('job_finished_time', 'last'),\n",
+ "}\n",
+ "means_df = df.groupby(WHD_GB_COLS).agg(**means_y_cols)\n",
"means_df"
]
},
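For reference, a minimal sketch (toy data, hypothetical values) of the pandas named-aggregation pattern used for means_df above: each keyword declares an output column as a (source_column, aggregation) pair.

import pandas as pd

toy = pd.DataFrame({
    'width': [2, 2, 2, 2],
    'macrocycle_depth': [0, 0, 1, 1],
    'success_probability': [0.95, 0.94, 0.70, 0.68],
})
means = toy.groupby(['width', 'macrocycle_depth']).agg(
    success_probability_mean=('success_probability', 'mean'),
    success_probability_std=('success_probability', 'std'),
)
print(means)  # one row per (width, macrocycle_depth) group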
@@ -137,11 +148,24 @@
"metadata": {},
"outputs": [],
"source": [
- "vs_depth_df, vs_depth_gb_cols = analysis.groupby_all_except(\n",
- " means_df, \n",
- " y_cols=('macrocycle_depth', 'success_probability_mean', 'success_probability_std'),\n",
- " agg_func=list\n",
- ")\n",
+ "from analysis import WH_GB_COLS\n",
+ "WH_GB_COLS"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "vs_depth_y_cols = {\n",
+ " 'macrocycle_depth': list,\n",
+ " 'success_probability_mean': list,\n",
+ " 'success_probability_std': list,\n",
+ " 'job_finished_time': 'last',\n",
+ "}\n",
+ "\n",
+ "vs_depth_df = means_df.reset_index(level='macrocycle_depth').groupby(WH_GB_COLS).agg(vs_depth_y_cols)\n",
"vs_depth_df"
]
},
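A minimal sketch (same toy data as above, hypothetical values) of the two-step pattern behind vs_depth_df: average per depth first, then move macrocycle_depth out of the index and collect the per-depth values into lists, one row per remaining group.

import pandas as pd

toy = pd.DataFrame({
    'width': [2, 2, 2, 2],
    'macrocycle_depth': [0, 0, 1, 1],
    'success_probability': [0.95, 0.94, 0.70, 0.68],
})
means = toy.groupby(['width', 'macrocycle_depth']).agg(
    success_probability_mean=('success_probability', 'mean'))
per_width = (
    means.reset_index(level='macrocycle_depth')   # depth becomes a column again
         .groupby('width')                        # group on the remaining index level
         .agg({'macrocycle_depth': list, 'success_probability_mean': list})
)
print(per_width)  # width 2 -> depths [0, 1], means [0.945, 0.69]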
@@ -151,18 +175,18 @@
"metadata": {},
"outputs": [],
"source": [
- "for i, row in vs_depth_df.iterrows():\n",
+ "for i, row in vs_depth_df.reset_index().iterrows():\n",
" plt.errorbar(\n",
" x=row['macrocycle_depth'],\n",
" y=row['success_probability_mean'],\n",
" yerr=row['success_probability_std'],\n",
- " label=', '.join(f'{row[col]}' for col in vs_depth_gb_cols),\n",
+ " label=', '.join(f'{row[col]}' for col in WH_GB_COLS),\n",
" capsize=5, ls='', marker='o',\n",
" )\n",
" \n",
"plt.xlabel('Macrocycle Depth')\n",
"plt.ylabel('Success Probability')\n",
- "plt.legend(title=','.join(vs_depth_gb_cols), loc='best')\n",
+ "plt.legend(title=','.join(WH_GB_COLS), loc='best')\n",
"plt.tight_layout()"
]
},
@@ -197,12 +221,14 @@
"metadata": {},
"outputs": [],
"source": [
- "agg_df3, gb_cols3 = analysis.groupby_all_except(\n",
- " df.drop('n_qubits', axis=1),\n",
- " y_cols=('instance_i', 'macrocycle_depth', 'q_area', 'success_probability'),\n",
- " agg_func=list,\n",
- ")\n",
- "agg_df3"
+ "vs_size_y_cols = {\n",
+ " 'macrocycle_depth': list,\n",
+ " 'success_probability': list,\n",
+ " 'job_finished_time': 'last',\n",
+ " 'n_qubits': analysis.assert_one_unique_val,\n",
+ "}\n",
+ "vs_size_df = df.groupby(WH_GB_COLS).agg(vs_size_y_cols)\n",
+ "vs_size_df"
]
},
{
@@ -230,17 +256,8 @@
"metadata": {},
"outputs": [],
"source": [
- "print(vs_depth_gb_cols)\n",
- "print(gb_cols3)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "total_df = pd.merge(vs_depth_df, fit_df, on=vs_depth_gb_cols)\n",
+ "duplicate_cols = ['job_finished_time']\n",
+ "total_df = fit_df.join(vs_depth_df.drop(duplicate_cols, axis=1))\n",
"total_df"
]
},
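A minimal sketch (toy data, hypothetical run ids) of the index-aligned join used for total_df above: both frames share the same groupby index, so join lines them up without naming key columns, after dropping the column that would otherwise be duplicated.

import pandas as pd

idx = pd.Index(['run-a', 'run-b'], name='run_id')
fit = pd.DataFrame({'f': [0.99, 0.98], 'job_finished_time': ['t1', 't2']}, index=idx)
vs_depth = pd.DataFrame({'macrocycle_depth': [[0, 1], [0, 1]],
                         'job_finished_time': ['t1', 't2']}, index=idx)

total = fit.join(vs_depth.drop(['job_finished_time'], axis=1))  # aligned on run_id
print(total)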
@@ -252,7 +269,7 @@
"source": [
"colors = plt.get_cmap('tab10')\n",
"\n",
- "for i, row in total_df.iterrows():\n",
+ "for i, row in total_df.reset_index().iterrows():\n",
" plt.errorbar(\n",
" x=row['macrocycle_depth'],\n",
" y=row['success_probability_mean'],\n",
@@ -281,7 +298,7 @@
"\n",
"In a local depolarizing model, we expect success to decay exponentially in circuit depth and the number of qubits. We define a quantity called quantum area (`q_area`) which is the circuit width (i.e. number of qubits) multiplied by its depth. This is the number of operations in the circuit (also including any idle operations).\n",
"\n",
- "By defining this new quantity, we can fit a curve of fidelity vs. quantum area.. The following cell shows the groupby operation used in `analysis.fit_vs_q_area`."
+ "By defining this new quantity, we can fit a curve of fidelity vs. quantum area. The following cell shows the groupby operation used in `analysis.fit_vs_q_area`."
]
},
{
@@ -290,12 +307,22 @@
"metadata": {},
"outputs": [],
"source": [
- "agg_df4, gb_cols4 = analysis.groupby_all_except(\n",
- " df.drop(['width', 'height'], axis=1),\n",
- " y_cols=('q_area', 'n_qubits', 'instance_i', 'macrocycle_depth', 'success_probability'),\n",
- " agg_func=list,\n",
- ")\n",
- "agg_df4"
+ "from analysis import BASE_GB_COLS\n",
+ "BASE_GB_COLS"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "vs_q_area_y_cols = {\n",
+ " 'q_area': list,\n",
+ " 'success_probability': list,\n",
+ " 'job_finished_time': 'last',\n",
+ "}\n",
+ "df.groupby(BASE_GB_COLS).agg(vs_q_area_y_cols)"
]
},
{
@@ -324,7 +351,9 @@
"outputs": [],
"source": [
"vs_q_area_df, vs_q_area_gb_cols = analysis.agg_vs_q_area(df)\n",
- "total_df2 = pd.merge(vs_q_area_df, fit_df2, on=vs_q_area_gb_cols)\n",
+ "\n",
+ "duplicate_cols = ['job_finished_time']\n",
+ "total_df2 = fit_df2.join(vs_q_area_df.drop(duplicate_cols, axis=1))\n",
"total_df2"
]
},
@@ -336,7 +365,7 @@
"source": [
"colors = plt.get_cmap('tab10')\n",
"\n",
- "for i, row in total_df2.iterrows():\n",
+ "for i, row in total_df2.reset_index().iterrows():\n",
" plt.errorbar(x=row['q_area'], \n",
" y=row['success_probability_mean'], \n",
" yerr=row['success_probability_std'],\n",
@@ -349,7 +378,7 @@
"\n",
"plt.legend(loc='best')\n",
"plt.xlabel('Quantum Area')\n",
- "plt.ylabel('Macrocycle Fidelity')\n",
+ "plt.ylabel('Success Probability')\n",
"plt.yscale('log')\n",
"plt.tight_layout()"
]
diff --git a/recirq/otoc/loschmidt/tilted_square_lattice/analysis.py b/recirq/otoc/loschmidt/tilted_square_lattice/analysis.py
index 68811cdd..a2ed235e 100644
--- a/recirq/otoc/loschmidt/tilted_square_lattice/analysis.py
+++ b/recirq/otoc/loschmidt/tilted_square_lattice/analysis.py
@@ -16,19 +16,31 @@
See also the notebooks in this directory demonstrating usage of these analysis routines.
"""
-
-from typing import Callable, Dict, cast, Sequence, Any, Tuple, List
+import datetime
+from typing import Callable, Dict, cast, Sequence, Any, Tuple, List, TypeVar, Iterable
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
import cirq
-from cirq_google.workflow import (
- ExecutableGroupResult, SharedRuntimeInfo,
- QuantumRuntimeConfiguration,
- ExecutableResult)
-from recirq.otoc.loschmidt.tilted_square_lattice import TiltedSquareLatticeLoschmidtSpec
+import cirq_google as cg
+
+try:
+ from cirq_google.workflow import (
+ ExecutableGroupResult, SharedRuntimeInfo,
+ QuantumRuntimeConfiguration,
+ ExecutableResult)
+ from recirq.otoc.loschmidt.tilted_square_lattice import TiltedSquareLatticeLoschmidtSpec
+
+ workflow = True
+except ImportError as e:
+ import os
+
+ if 'RECIRQ_IMPORT_FAILSAFE' in os.environ:
+ workflow = False
+ else:
+        raise ImportError(f"This functionality requires Cirq >= 0.14: {e}")
CYCLES_PER_MACROCYCLE = 4
"""Each macrocycle has 4 'cycles' for the four directions in the tilted square lattice."""
@@ -36,6 +48,18 @@
U_APPLICATION_COUNT = 2
"""In the echo, we apply the random circuit forwards and backwards, for two total applications."""
+BASE_GB_COLS = ['run_id', 'processor_str', 'n_repetitions']
+"""Basic grouping of runs."""
+
+WH_GB_COLS = BASE_GB_COLS + ['width', 'height']
+"""Additionally group by the width and height of the rectangle of qubits."""
+
+A_GB_COLS = BASE_GB_COLS + ['q_area']
+"""Additionally group by the quantum area (n_qubits*depth)."""
+
+WHD_GB_COLS = WH_GB_COLS + ['macrocycle_depth']
+"""Additionally group by width, height, and depth."""
+
def to_ground_state_prob(result: cirq.Result) -> float:
"""Compute the fraction of times we return to the state we started from.
@@ -49,6 +73,23 @@ def to_ground_state_prob(result: cirq.Result) -> float:
return np.mean(np.sum(result.measurements["z"], axis=1) == 0).item()
+T = TypeVar('T')
+
+
+def assert_one_unique_val(vals: Iterable[T]) -> T:
+ """Extract one unique value from a column.
+
+ Raises `AssertionError` if there is not exactly one unique value.
+
+ Can be used during groupby aggregation to preserve a column you expect to
+ have one consistent value in a given group.
+ """
+ vals = list(set(vals))
+ if len(vals) != 1:
+ raise AssertionError("Expected one unique value")
+ return vals[0]
+
+
def groupby_all_except(df: pd.DataFrame, *, y_cols: Sequence[Any], agg_func: Any) \
-> Tuple[pd.DataFrame, List[str]]:
"""Group by all columns except the named columns.
@@ -86,15 +127,15 @@ def groupby_all_except(df: pd.DataFrame, *, y_cols: Sequence[Any], agg_func: Any
def _results_to_dataframe(
- results: ExecutableGroupResult,
- func: Callable[[ExecutableResult, QuantumRuntimeConfiguration, SharedRuntimeInfo], Dict]
+ results: 'ExecutableGroupResult',
+ func: Callable[['ExecutableResult', 'QuantumRuntimeConfiguration', 'SharedRuntimeInfo'], Dict]
) -> pd.DataFrame:
"""Call a function on each result in an `ExecutableGroupResult` to construct a DataFrame."""
return pd.DataFrame([func(result, results.runtime_configuration, results.shared_runtime_info)
for result in results.executable_results])
-def loschmidt_results_to_dataframe(results: ExecutableGroupResult) -> pd.DataFrame:
+def loschmidt_results_to_dataframe(results: 'ExecutableGroupResult') -> pd.DataFrame:
"""Process an `ExecutableGroupResult`.
This function performs the data analysis using `to_ground_state_prob` and
@@ -109,13 +150,13 @@ def loschmidt_results_to_dataframe(results: ExecutableGroupResult) -> pd.DataFra
we do U and/or its inverse.
"""
- def _to_record(result: ExecutableResult,
- rt_config: QuantumRuntimeConfiguration,
- shared_rt_info: SharedRuntimeInfo) -> Dict:
+ def _to_record(result: 'ExecutableResult',
+ rt_config: 'QuantumRuntimeConfiguration',
+ shared_rt_info: 'SharedRuntimeInfo') -> Dict:
success_prob = to_ground_state_prob(result.raw_data)
- spec = cast(TiltedSquareLatticeLoschmidtSpec, result.spec)
+ spec = cast('TiltedSquareLatticeLoschmidtSpec', result.spec)
- return {
+ record = {
'run_id': shared_rt_info.run_id,
'width': spec.topology.width,
'height': spec.topology.height,
@@ -130,6 +171,13 @@ def _to_record(result: ExecutableResult,
'processor_str': str(rt_config.processor_record),
}
+ if isinstance(result.raw_data, cg.EngineResult):
+ record['job_finished_time'] = result.raw_data.job_finished_time
+ else:
+ record['job_finished_time'] = datetime.datetime.fromtimestamp(0)
+
+ return record
+
return _results_to_dataframe(results, _to_record)
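A side note on the placeholder above: datetime.datetime.fromtimestamp(0) is the Unix epoch rendered in local time, which is why the simulated rows in the notebooks display 1969-12-31 16:00:00 (the epoch as seen from US Pacific time). A short sketch:

import datetime

placeholder = datetime.datetime.fromtimestamp(0)
print(placeholder)  # e.g. 1969-12-31 16:00:00 on a machine set to US Pacific time
print(datetime.datetime.fromtimestamp(0, tz=datetime.timezone.utc))  # 1970-01-01 00:00:00+00:00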
@@ -152,17 +200,29 @@ def agg_vs_macrocycle_depth(df: pd.DataFrame) -> Tuple[pd.DataFrame, List[str]]:
vs_depth_df: A new, aggregated dataframe.
         vs_depth_gb_cols: The names of the columns used in the final groupby operation.
"""
- means_df, means_gb_cols = groupby_all_except(
- df.drop(['n_qubits', 'q_area'], axis=1),
- y_cols=('instance_i', 'success_probability'),
- agg_func={'success_probability': ['mean', 'std']}
- )
- vs_depth_df, vs_depth_gb_cols = groupby_all_except(
- means_df,
- y_cols=('macrocycle_depth', 'success_probability_mean', 'success_probability_std'),
- agg_func=list
- )
- return vs_depth_df, vs_depth_gb_cols
+
+ # 1. Average over random circuit instances.
+ means_y_cols = {
+ 'success_probability_mean': ('success_probability', 'mean'),
+ 'success_probability_std': ('success_probability', 'std'),
+ 'job_finished_time': ('job_finished_time', 'last'),
+ }
+ means_df = df.groupby(WHD_GB_COLS).agg(**means_y_cols)
+
+ # 2. Group these averaged quantities into lists.
+ # (a) first "ungroup" macrocycle_depth
+ means_df = means_df.reset_index('macrocycle_depth')
+
+ # (b) now do the list aggregation.
+ vs_depth_y_cols = {
+ 'macrocycle_depth': list,
+ 'success_probability_mean': list,
+ 'success_probability_std': list,
+ 'job_finished_time': 'last',
+ }
+
+ vs_depth_df = means_df.groupby(WH_GB_COLS).agg(vs_depth_y_cols)
+ return vs_depth_df, WH_GB_COLS
def fit_vs_macrocycle_depth(df):
@@ -172,7 +232,7 @@ def fit_vs_macrocycle_depth(df):
df: The dataframe from `loschmidt_results_to_dataframe`.
Returns:
- fitted_df: A new dataframe containing fit parameters.
+ fit_df: A new dataframe containing fit parameters.
exp_ansatz_vs_macrocycle_depth: The function used for the fit. This is
a * f^depth. The depth in this expression is the number of macrocycles
multiplied by four (to give the number of cycles) and multiplied by two
@@ -196,15 +256,17 @@ def _fit(row):
row['f_err'] = f_err
return row
- y_cols = ['instance_i', 'macrocycle_depth', 'q_area', 'success_probability']
- agged, _ = groupby_all_except(
- df,
- y_cols=y_cols,
- agg_func=list,
- )
- fitted_df = agged.apply(_fit, axis=1) \
- .drop(y_cols, axis=1)
- return fitted_df, exp_ansatz_vs_macrocycle_depth
+ vs_size_y_cols = {
+ 'macrocycle_depth': list,
+ 'success_probability': list,
+ 'job_finished_time': 'last',
+ 'n_qubits': assert_one_unique_val,
+ }
+ vs_size_df = df.groupby(WH_GB_COLS).agg(vs_size_y_cols)
+
+ fit_df_y_cols = ['job_finished_time', 'n_qubits', 'a', 'f', 'a_err', 'f_err']
+ fit_df = vs_size_df.apply(_fit, axis=1).loc[:, fit_df_y_cols]
+ return fit_df, exp_ansatz_vs_macrocycle_depth
def agg_vs_q_area(df: pd.DataFrame) -> Tuple[pd.DataFrame, List[str]]:
@@ -228,18 +290,22 @@ def agg_vs_q_area(df: pd.DataFrame) -> Tuple[pd.DataFrame, List[str]]:
vs_q_area_df: A new, aggregated dataframe.
         vs_q_area_gb_cols: The names of the columns used in the final groupby operation.
"""
- means_df, means_gb_cols = groupby_all_except(
- df.drop(['width', 'height', 'n_qubits'], axis=1),
- y_cols=('instance_i', 'success_probability'),
- agg_func={'success_probability': ['mean', 'std']}
- )
- vs_q_area_df, vs_q_area_gb_cols = groupby_all_except(
- means_df,
- y_cols=('q_area', 'macrocycle_depth',
- 'success_probability_mean', 'success_probability_std'),
- agg_func=list,
- )
- return vs_q_area_df, vs_q_area_gb_cols
+ means_y_cols = {
+ 'success_probability_mean': ('success_probability', 'mean'),
+ 'success_probability_std': ('success_probability', 'std'),
+ 'job_finished_time': ('job_finished_time', 'last'),
+ }
+ means_df = df.groupby(A_GB_COLS).agg(**means_y_cols)
+
+ means_df = means_df.reset_index('q_area')
+ vs_q_area_y_cols = {
+ 'q_area': list,
+ 'success_probability_mean': list,
+ 'success_probability_std': list,
+ 'job_finished_time': 'last',
+ }
+ vs_q_area_df = means_df.groupby(BASE_GB_COLS).agg(vs_q_area_y_cols)
+ return vs_q_area_df, BASE_GB_COLS
def fit_vs_q_area(df):
@@ -269,12 +335,13 @@ def _fit(row):
row['f_err'] = f_err
return row
- y_cols = ['q_area', 'n_qubits', 'instance_i', 'macrocycle_depth', 'success_probability']
- agged, _ = groupby_all_except(
- df.drop(['width', 'height'], axis=1),
- y_cols=y_cols,
- agg_func=list,
- )
- fit_df = agged.apply(_fit, axis=1).drop(y_cols, axis=1)
+ vs_q_area_y_cols = {
+ 'q_area': list,
+ 'success_probability': list,
+ 'job_finished_time': 'last',
+ }
+ vs_q_area_df = df.groupby(BASE_GB_COLS).agg(vs_q_area_y_cols)
+ fit_df_y_cols = ['job_finished_time', 'a', 'f', 'a_err', 'f_err']
+ fit_df = vs_q_area_df.apply(_fit, axis=1).loc[:, fit_df_y_cols]
return fit_df, exp_ansatz_vs_q_area
diff --git a/recirq/otoc/loschmidt/tilted_square_lattice/analysis_test.py b/recirq/otoc/loschmidt/tilted_square_lattice/analysis_test.py
new file mode 100644
index 00000000..09ae62ae
--- /dev/null
+++ b/recirq/otoc/loschmidt/tilted_square_lattice/analysis_test.py
@@ -0,0 +1,189 @@
+# Copyright 2022 Google
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pandas as pd
+import pytest
+
+import recirq.otoc.loschmidt.tilted_square_lattice.analysis as analysis
+
+if not analysis.workflow:
+ pytestmark = pytest.mark.skip('Requires Cirq >= 0.14.')
+
+
+@pytest.fixture
+def sample_df():
+ # steps to (re-)generate:
+ # 1. load an example `df` like in `analysis-walkthrough.ipynb`
+ # 2. use print(repr(df.to_dict()))
+ # 3. s/Timestamp/pd.Timestamp/g
+ # 4. add a `pd.DataFrame.from_dict()` around it
+
+ return pd.DataFrame.from_dict(
+ {'run_id': {0: 'simulated-1', 1: 'simulated-1', 2: 'simulated-1', 3: 'simulated-1',
+ 4: 'simulated-1', 5: 'simulated-1', 6: 'simulated-1', 7: 'simulated-1',
+ 8: 'simulated-1', 9: 'simulated-1', 10: 'simulated-1', 11: 'simulated-1',
+ 12: 'simulated-1', 13: 'simulated-1', 14: 'simulated-1', 15: 'simulated-1',
+ 16: 'simulated-1', 17: 'simulated-1', 18: 'simulated-1', 19: 'simulated-1',
+ 20: 'simulated-1', 21: 'simulated-1', 22: 'simulated-1', 23: 'simulated-1',
+ 24: 'simulated-1', 25: 'simulated-1', 26: 'simulated-1', 27: 'simulated-1',
+ 28: 'simulated-1', 29: 'simulated-1', 30: 'simulated-1', 31: 'simulated-1',
+ 32: 'simulated-1', 33: 'simulated-1', 34: 'simulated-1', 35: 'simulated-1',
+ 36: 'simulated-1', 37: 'simulated-1', 38: 'simulated-1', 39: 'simulated-1',
+ 40: 'simulated-1', 41: 'simulated-1', 42: 'simulated-1', 43: 'simulated-1',
+ 44: 'simulated-1'},
+ 'width': {0: 2, 1: 2, 2: 2, 3: 2, 4: 2, 5: 2, 6: 2, 7: 2, 8: 2, 9: 2, 10: 2, 11: 2,
+ 12: 2, 13: 2, 14: 2, 15: 2, 16: 2, 17: 2, 18: 2, 19: 2, 20: 2, 21: 2, 22: 2,
+ 23: 2, 24: 2, 25: 2, 26: 2, 27: 2, 28: 2, 29: 2, 30: 3, 31: 3, 32: 3, 33: 3,
+ 34: 3, 35: 3, 36: 3, 37: 3, 38: 3, 39: 3, 40: 3, 41: 3, 42: 3, 43: 3, 44: 3},
+ 'height': {0: 2, 1: 2, 2: 2, 3: 2, 4: 2, 5: 2, 6: 2, 7: 2, 8: 2, 9: 2, 10: 2, 11: 2,
+ 12: 2, 13: 2, 14: 2, 15: 3, 16: 3, 17: 3, 18: 3, 19: 3, 20: 3, 21: 3, 22: 3,
+ 23: 3, 24: 3, 25: 3, 26: 3, 27: 3, 28: 3, 29: 3, 30: 3, 31: 3, 32: 3, 33: 3,
+ 34: 3, 35: 3, 36: 3, 37: 3, 38: 3, 39: 3, 40: 3, 41: 3, 42: 3, 43: 3, 44: 3},
+ 'n_qubits': {0: 5, 1: 5, 2: 5, 3: 5, 4: 5, 5: 5, 6: 5, 7: 5, 8: 5, 9: 5, 10: 5, 11: 5,
+ 12: 5, 13: 5, 14: 5, 15: 6, 16: 6, 17: 6, 18: 6, 19: 6, 20: 6, 21: 6,
+ 22: 6, 23: 6, 24: 6, 25: 6, 26: 6, 27: 6, 28: 6, 29: 6, 30: 8, 31: 8,
+ 32: 8, 33: 8, 34: 8, 35: 8, 36: 8, 37: 8, 38: 8, 39: 8, 40: 8, 41: 8,
+ 42: 8, 43: 8, 44: 8},
+ 'macrocycle_depth': {0: 0, 1: 0, 2: 0, 3: 1, 4: 1, 5: 1, 6: 2, 7: 2, 8: 2, 9: 3, 10: 3,
+ 11: 3, 12: 4, 13: 4, 14: 4, 15: 0, 16: 0, 17: 0, 18: 1, 19: 1,
+ 20: 1, 21: 2, 22: 2, 23: 2, 24: 3, 25: 3, 26: 3, 27: 4, 28: 4,
+ 29: 4, 30: 0, 31: 0, 32: 0, 33: 1, 34: 1, 35: 1, 36: 2, 37: 2,
+ 38: 2, 39: 3, 40: 3, 41: 3, 42: 4, 43: 4, 44: 4},
+ 'q_area': {0: 0, 1: 0, 2: 0, 3: 40, 4: 40, 5: 40, 6: 80, 7: 80, 8: 80, 9: 120, 10: 120,
+ 11: 120, 12: 160, 13: 160, 14: 160, 15: 0, 16: 0, 17: 0, 18: 48, 19: 48,
+ 20: 48, 21: 96, 22: 96, 23: 96, 24: 144, 25: 144, 26: 144, 27: 192, 28: 192,
+ 29: 192, 30: 0, 31: 0, 32: 0, 33: 64, 34: 64, 35: 64, 36: 128, 37: 128,
+ 38: 128, 39: 192, 40: 192, 41: 192, 42: 256, 43: 256, 44: 256},
+ 'instance_i': {0: 0, 1: 1, 2: 2, 3: 0, 4: 1, 5: 2, 6: 0, 7: 1, 8: 2, 9: 0, 10: 1, 11: 2,
+ 12: 0, 13: 1, 14: 2, 15: 0, 16: 1, 17: 2, 18: 0, 19: 1, 20: 2, 21: 0,
+ 22: 1, 23: 2, 24: 0, 25: 1, 26: 2, 27: 0, 28: 1, 29: 2, 30: 0, 31: 1,
+ 32: 2, 33: 0, 34: 1, 35: 2, 36: 0, 37: 1, 38: 2, 39: 0, 40: 1, 41: 2,
+ 42: 0, 43: 1, 44: 2},
+ 'n_repetitions': {0: 1000, 1: 1000, 2: 1000, 3: 1000, 4: 1000, 5: 1000, 6: 1000,
+ 7: 1000, 8: 1000, 9: 1000, 10: 1000, 11: 1000, 12: 1000, 13: 1000,
+ 14: 1000, 15: 1000, 16: 1000, 17: 1000, 18: 1000, 19: 1000, 20: 1000,
+ 21: 1000, 22: 1000, 23: 1000, 24: 1000, 25: 1000, 26: 1000, 27: 1000,
+ 28: 1000, 29: 1000, 30: 1000, 31: 1000, 32: 1000, 33: 1000, 34: 1000,
+ 35: 1000, 36: 1000, 37: 1000, 38: 1000, 39: 1000, 40: 1000, 41: 1000,
+ 42: 1000, 43: 1000, 44: 1000},
+ 'success_probability': {0: 0.959, 1: 0.945, 2: 0.947, 3: 0.703, 4: 0.692, 5: 0.697,
+ 6: 0.488, 7: 0.508, 8: 0.493, 9: 0.368, 10: 0.367, 11: 0.387,
+ 12: 0.258, 13: 0.255, 14: 0.256, 15: 0.936, 16: 0.94, 17: 0.938,
+ 18: 0.657, 19: 0.624, 20: 0.641, 21: 0.454, 22: 0.45, 23: 0.452,
+ 24: 0.284, 25: 0.291, 26: 0.305, 27: 0.19, 28: 0.182, 29: 0.178,
+ 30: 0.924, 31: 0.929, 32: 0.92, 33: 0.564, 34: 0.529, 35: 0.555,
+ 36: 0.306, 37: 0.296, 38: 0.319, 39: 0.194, 40: 0.172,
+ 41: 0.192, 42: 0.098, 43: 0.097, 44: 0.105},
+ 'processor_str': {0: 'rainbow-depol(5.000e-03)', 1: 'rainbow-depol(5.000e-03)',
+ 2: 'rainbow-depol(5.000e-03)', 3: 'rainbow-depol(5.000e-03)',
+ 4: 'rainbow-depol(5.000e-03)', 5: 'rainbow-depol(5.000e-03)',
+ 6: 'rainbow-depol(5.000e-03)', 7: 'rainbow-depol(5.000e-03)',
+ 8: 'rainbow-depol(5.000e-03)', 9: 'rainbow-depol(5.000e-03)',
+ 10: 'rainbow-depol(5.000e-03)', 11: 'rainbow-depol(5.000e-03)',
+ 12: 'rainbow-depol(5.000e-03)', 13: 'rainbow-depol(5.000e-03)',
+ 14: 'rainbow-depol(5.000e-03)', 15: 'rainbow-depol(5.000e-03)',
+ 16: 'rainbow-depol(5.000e-03)', 17: 'rainbow-depol(5.000e-03)',
+ 18: 'rainbow-depol(5.000e-03)', 19: 'rainbow-depol(5.000e-03)',
+ 20: 'rainbow-depol(5.000e-03)', 21: 'rainbow-depol(5.000e-03)',
+ 22: 'rainbow-depol(5.000e-03)', 23: 'rainbow-depol(5.000e-03)',
+ 24: 'rainbow-depol(5.000e-03)', 25: 'rainbow-depol(5.000e-03)',
+ 26: 'rainbow-depol(5.000e-03)', 27: 'rainbow-depol(5.000e-03)',
+ 28: 'rainbow-depol(5.000e-03)', 29: 'rainbow-depol(5.000e-03)',
+ 30: 'rainbow-depol(5.000e-03)', 31: 'rainbow-depol(5.000e-03)',
+ 32: 'rainbow-depol(5.000e-03)', 33: 'rainbow-depol(5.000e-03)',
+ 34: 'rainbow-depol(5.000e-03)', 35: 'rainbow-depol(5.000e-03)',
+ 36: 'rainbow-depol(5.000e-03)', 37: 'rainbow-depol(5.000e-03)',
+ 38: 'rainbow-depol(5.000e-03)', 39: 'rainbow-depol(5.000e-03)',
+ 40: 'rainbow-depol(5.000e-03)', 41: 'rainbow-depol(5.000e-03)',
+ 42: 'rainbow-depol(5.000e-03)', 43: 'rainbow-depol(5.000e-03)',
+ 44: 'rainbow-depol(5.000e-03)'},
+ 'job_finished_time': {0: pd.Timestamp('1969-12-31 16:00:00'),
+ 1: pd.Timestamp('1969-12-31 16:00:00'),
+ 2: pd.Timestamp('1969-12-31 16:00:00'),
+ 3: pd.Timestamp('1969-12-31 16:00:00'),
+ 4: pd.Timestamp('1969-12-31 16:00:00'),
+ 5: pd.Timestamp('1969-12-31 16:00:00'),
+ 6: pd.Timestamp('1969-12-31 16:00:00'),
+ 7: pd.Timestamp('1969-12-31 16:00:00'),
+ 8: pd.Timestamp('1969-12-31 16:00:00'),
+ 9: pd.Timestamp('1969-12-31 16:00:00'),
+ 10: pd.Timestamp('1969-12-31 16:00:00'),
+ 11: pd.Timestamp('1969-12-31 16:00:00'),
+ 12: pd.Timestamp('1969-12-31 16:00:00'),
+ 13: pd.Timestamp('1969-12-31 16:00:00'),
+ 14: pd.Timestamp('1969-12-31 16:00:00'),
+ 15: pd.Timestamp('1969-12-31 16:00:00'),
+ 16: pd.Timestamp('1969-12-31 16:00:00'),
+ 17: pd.Timestamp('1969-12-31 16:00:00'),
+ 18: pd.Timestamp('1969-12-31 16:00:00'),
+ 19: pd.Timestamp('1969-12-31 16:00:00'),
+ 20: pd.Timestamp('1969-12-31 16:00:00'),
+ 21: pd.Timestamp('1969-12-31 16:00:00'),
+ 22: pd.Timestamp('1969-12-31 16:00:00'),
+ 23: pd.Timestamp('1969-12-31 16:00:00'),
+ 24: pd.Timestamp('1969-12-31 16:00:00'),
+ 25: pd.Timestamp('1969-12-31 16:00:00'),
+ 26: pd.Timestamp('1969-12-31 16:00:00'),
+ 27: pd.Timestamp('1969-12-31 16:00:00'),
+ 28: pd.Timestamp('1969-12-31 16:00:00'),
+ 29: pd.Timestamp('1969-12-31 16:00:00'),
+ 30: pd.Timestamp('1969-12-31 16:00:00'),
+ 31: pd.Timestamp('1969-12-31 16:00:00'),
+ 32: pd.Timestamp('1969-12-31 16:00:00'),
+ 33: pd.Timestamp('1969-12-31 16:00:00'),
+ 34: pd.Timestamp('1969-12-31 16:00:00'),
+ 35: pd.Timestamp('1969-12-31 16:00:00'),
+ 36: pd.Timestamp('1969-12-31 16:00:00'),
+ 37: pd.Timestamp('1969-12-31 16:00:00'),
+ 38: pd.Timestamp('1969-12-31 16:00:00'),
+ 39: pd.Timestamp('1969-12-31 16:00:00'),
+ 40: pd.Timestamp('1969-12-31 16:00:00'),
+ 41: pd.Timestamp('1969-12-31 16:00:00'),
+ 42: pd.Timestamp('1969-12-31 16:00:00'),
+ 43: pd.Timestamp('1969-12-31 16:00:00'),
+ 44: pd.Timestamp('1969-12-31 16:00:00')}})
+
+
+def test_agg_vs_macrocycle_depth(sample_df):
+ vs_depth_df, gb_cols = analysis.agg_vs_macrocycle_depth(sample_df)
+ assert 'width' in gb_cols
+ assert 'height' in gb_cols
+ assert 'macrocycle_depth' not in gb_cols
+ assert vs_depth_df.index.names == gb_cols
+ assert sorted(vs_depth_df.columns) == sorted(['macrocycle_depth', 'success_probability_mean',
+ 'success_probability_std', 'job_finished_time'])
+
+
+def test_agg_vs_q_area(sample_df):
+ vs_q_area_df, gb_cols = analysis.agg_vs_q_area(sample_df)
+ assert 'run_id' in gb_cols
+ assert 'q_area' not in gb_cols
+ assert vs_q_area_df.index.names == gb_cols
+ assert sorted(vs_q_area_df.columns) == sorted(['q_area', 'success_probability_mean',
+ 'success_probability_std', 'job_finished_time'])
+
+
+def test_fit_vs_macrocycle_depth(sample_df):
+ fit_df, exp_ansatz = analysis.fit_vs_macrocycle_depth(sample_df)
+ assert len(fit_df) == 3, '3 different topo shapes in sample df'
+ assert sorted(fit_df['n_qubits']) == [5, 6, 8]
+ for c in ['a', 'f', 'a_err', 'f_err']:
+ assert c in fit_df.columns, c
+
+
+def test_fit_vs_q_area(sample_df):
+ fit_df, exp_ansatz = analysis.fit_vs_q_area(sample_df)
+ assert len(fit_df) == 1, '1 processor/run'
+ for c in ['a', 'f', 'a_err', 'f_err']:
+ assert c in fit_df.columns, c
diff --git a/recirq/otoc/loschmidt/tilted_square_lattice/plots.ipynb b/recirq/otoc/loschmidt/tilted_square_lattice/plots.ipynb
index 7fa9ab33..e3480ca1 100644
--- a/recirq/otoc/loschmidt/tilted_square_lattice/plots.ipynb
+++ b/recirq/otoc/loschmidt/tilted_square_lattice/plots.ipynb
@@ -101,7 +101,7 @@
"source": [
"colors = plt.get_cmap('tab10')\n",
"\n",
- "for i, row in total_df.iterrows():\n",
+ "for i, row in total_df.reset_index().iterrows():\n",
" plt.errorbar(\n",
" x=row['macrocycle_depth'],\n",
" y=row['success_probability_mean'],\n",
@@ -150,7 +150,7 @@
"source": [
"colors = plt.get_cmap('tab10')\n",
"\n",
- "for i, row in total_df2.iterrows():\n",
+ "for i, row in total_df2.reset_index().iterrows():\n",
" plt.errorbar(x=row['q_area'], \n",
" y=row['success_probability_mean'], \n",
" yerr=row['success_probability_std'],\n",