From c347b2a277cb8d637ad6179065fee3de09491899 Mon Sep 17 00:00:00 2001
From: CalMacCQ <93673602+CalMacCQ@users.noreply.github.com>
Date: Fri, 25 Oct 2024 12:03:28 +0100
Subject: [PATCH] fix accidental changes to ibm.py

---
 pytket/extensions/qiskit/backends/ibm.py | 86 +++++++++++++-----------
 1 file changed, 45 insertions(+), 41 deletions(-)

diff --git a/pytket/extensions/qiskit/backends/ibm.py b/pytket/extensions/qiskit/backends/ibm.py
index e65b89f2..c21df162 100644
--- a/pytket/extensions/qiskit/backends/ibm.py
+++ b/pytket/extensions/qiskit/backends/ibm.py
@@ -15,7 +15,7 @@
 import itertools
 import json
 from ast import literal_eval
-from collections import Counter
+from collections import Counter, OrderedDict
 from collections.abc import Sequence
 from time import sleep
 from typing import (
@@ -75,14 +75,17 @@
 from pytket.utils import prepare_circuit
 from pytket.utils.outcomearray import OutcomeArray
 from pytket.utils.results import KwargTypes
-from qiskit.primitives import PrimitiveResult, SamplerResult  # type: ignore
+from qiskit.primitives import (  # type: ignore
+    BitArray,
+    DataBin,
+    PrimitiveResult,
+    SamplerPubResult,
+)
 
 # RuntimeJob has no queue_position attribute, which is referenced
 # via job_monitor see-> https://github.com/CQCL/pytket-qiskit/issues/48
 # therefore we can't use job_monitor until fixed
 # from qiskit.tools.monitor import job_monitor  # type: ignore
-from qiskit.result.distributions import QuasiDistribution  # type: ignore
-
 from .._metadata import __extension_version__
 from ..qiskit_convert import (
     _tk_gate_set,
@@ -99,14 +102,10 @@
 _DEBUG_HANDLE_PREFIX = "_MACHINE_DEBUG_"
 
 
-def _gen_debug_results(n_qubits: int, shots: int, index: int) -> SamplerResult:
-    debug_dist = {n: 0.0 for n in range(pow(2, n_qubits))}
-    debug_dist[0] = 1.0
-    qd = QuasiDistribution(debug_dist)
-    return SamplerResult(
-        quasi_dists=[qd] * (index + 1),
-        metadata=[{"header_metadata": {}, "shots": shots}] * (index + 1),
-    )
+def _gen_debug_results(n_bits: int, shots: int) -> PrimitiveResult:
+    n_u8s = (n_bits - 1) // 8 + 1
+    arr = np.array([[0] * n_u8s for _ in range(shots)], dtype=np.uint8)
+    return PrimitiveResult([SamplerPubResult(DataBin(c=BitArray(arr, n_bits)))])
 
 
 class NoIBMQCredentialsError(Exception):
@@ -205,7 +204,9 @@ def __init__(
         self._monitor = monitor
 
         # cache of results keyed by job id and circuit index
-        self._ibm_res_cache: dict[tuple[str, int], Counter] = dict()
+        self._ibm_res_cache: dict[
+            tuple[str, int], tuple[Counter, Optional[list[Bit]]]
+        ] = dict()
 
         if sampler_options is None:
             sampler_options = SamplerOptions()
@@ -396,7 +397,7 @@ def default_compilation_pass_offline(
         passlist = [DecomposeBoxes()]
         # If you make changes to the default_compilation_pass,
         # then please update this page accordingly
-        # https://docs.quantinuum.com/tket/extensions/pytket-qiskit/index.html#default-compilation
+        # https://tket.quantinuum.com/extensions/pytket-qiskit/index.html#default-compilation
         # Edit this docs source file -> pytket-qiskit/docs/intro.txt
         if optimisation_level == 0:
             if supports_rz:
@@ -456,7 +457,7 @@
 
     @property
     def _result_id_type(self) -> _ResultIdTuple:
-        # IBMQ job ID, index, number of measurements per shot, post-processing circuit
+        # IBMQ job ID, index, number of bits, post-processing circuit
         return (str, int, int, str)
 
     def rebase_pass(self) -> BasePass:
@@ -521,14 +522,11 @@ def process_circuits(
 
                 qcs, ppcirc_strs = [], []
                 for tkc in batch_chunk:
-                    tkc1 = tkc.copy()
-                    # Flatten bits to default register in lexicographic order:
-                    tkc1.rename_units({bit: Bit(i) for i, bit in enumerate(tkc1.bits)})
                     if postprocess:
-                        c0, ppcirc = prepare_circuit(tkc1, allow_classical=False)
+                        c0, ppcirc = prepare_circuit(tkc, allow_classical=False)
                         ppcirc_rep = ppcirc.to_dict()
                     else:
-                        c0, ppcirc_rep = tkc1, None
+                        c0, ppcirc_rep = tkc, None
                     if simplify_initial:
                         SimplifyInitial(
                             allow_classical=False, create_all_qubits=True
@@ -540,7 +538,7 @@
                         handle_list[ind] = ResultHandle(
                             _DEBUG_HANDLE_PREFIX + str((n_shots, batch_id)),
                             i,
-                            batch_chunk[i].n_qubits,
+                            batch_chunk[i].n_bits,
                             ppcirc_strs[i],
                         )
                 else:
@@ -549,7 +547,7 @@
                     job_id = job.job_id()
                     for i, ind in enumerate(indices_chunk):
                         handle_list[ind] = ResultHandle(
-                            job_id, i, qcs[i].count_ops()["measure"], ppcirc_strs[i]
+                            job_id, i, qcs[i].num_clbits, ppcirc_strs[i]
                         )
             batch_id += 1  # noqa: SIM113
         for handle in handle_list:
@@ -586,7 +584,7 @@ def get_result(self, handle: ResultHandle, **kwargs: KwargTypes) -> BackendResul
         cached_result = self._cache[handle]
         if "result" in cached_result:
             return cast(BackendResult, cached_result["result"])
-        jobid, index, n_meas, ppcirc_str = handle
+        jobid, index, n_bits, ppcirc_str = handle
         ppcirc_rep = json.loads(ppcirc_str)
         ppcirc = Circuit.from_dict(ppcirc_rep) if ppcirc_rep is not None else None
         cache_key = (jobid, index)
@@ -594,7 +592,7 @@ def get_result(self, handle: ResultHandle, **kwargs: KwargTypes) -> BackendResul
             if self._MACHINE_DEBUG or jobid.startswith(_DEBUG_HANDLE_PREFIX):
                 shots: int
                 shots, _ = literal_eval(jobid[len(_DEBUG_HANDLE_PREFIX) :])
-                res = _gen_debug_results(n_meas, shots, index)
+                res = _gen_debug_results(n_bits, shots)
             else:
                 try:
                     job = self._retrieve_job(jobid)
@@ -612,32 +610,38 @@ def get_result(self, handle: ResultHandle, **kwargs: KwargTypes) -> BackendResul
                        sleep(10)
 
                res = job.result(timeout=kwargs.get("timeout"))
-            if isinstance(res, SamplerResult):
-                # TODO Is this code still reachable?
-                for circ_index, (r, d) in enumerate(zip(res.quasi_dists, res.metadata)):
-                    self._ibm_res_cache[(jobid, circ_index)] = Counter(
-                        {n: int(0.5 + d["shots"] * p) for n, p in r.items()}
-                    )
-            else:
-                assert isinstance(res, PrimitiveResult)
-                for circ_index, pub_result in enumerate(res._pub_results):
-                    readouts = pub_result.data.c.array
-                    self._ibm_res_cache[(jobid, circ_index)] = Counter(
-                        _int_from_readout(readout) for readout in readouts
-                    )
-
-        counts = self._ibm_res_cache[cache_key]  # Counter[int]
+            assert isinstance(res, PrimitiveResult)
+            for circ_index, pub_result in enumerate(res._pub_results):
+                data = pub_result.data
+                c_regs = OrderedDict(
+                    (reg_name, data.__getattribute__(reg_name).num_bits)
+                    for reg_name in sorted(data.keys())
+                )
+                readouts = BitArray.concatenate_bits(
+                    [data.__getattribute__(reg_name) for reg_name in c_regs]
+                ).array
+                self._ibm_res_cache[(jobid, circ_index)] = (
+                    Counter(_int_from_readout(readout) for readout in readouts),
+                    list(
+                        itertools.chain.from_iterable(
+                            [Bit(reg_name, i) for i in range(reg_size)]
+                            for reg_name, reg_size in c_regs.items()
+                        )
+                    ),
+                )
+
+        counts, c_bits = self._ibm_res_cache[cache_key]  # Counter[int], list[Bit]
         # Convert to `OutcomeArray`:
         tket_counts: Counter = Counter()
         for outcome_key, sample_count in counts.items():
             array = OutcomeArray.from_ints(
                 ints=[outcome_key],
                 width=n_bits,
                 big_endian=False,
             )
             tket_counts[array] = sample_count
         # Convert to `BackendResult`:
-        result = BackendResult(counts=tket_counts, ppcirc=ppcirc)
+        result = BackendResult(c_bits=c_bits, counts=tket_counts, ppcirc=ppcirc)
         self._cache[handle] = {"result": result}
         return result
 
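
Illustrative sketch (not authoritative): the reworked get_result path above caches a
(Counter, list[Bit]) pair per pub result and joins all classical registers with
BitArray.concatenate_bits. The snippet below only exercises the qiskit.primitives
containers the patch relies on (BitArray, DataBin, SamplerPubResult, PrimitiveResult);
it assumes qiskit >= 1.1, and the sizes and register layout are examples, not taken
from the patch.

    import numpy as np
    from qiskit.primitives import BitArray, DataBin, PrimitiveResult, SamplerPubResult

    # Shape a result the way _gen_debug_results does: `shots` all-zero readouts
    # over `n_bits` classical bits, packed into uint8 rows.
    n_bits, shots = 5, 4                 # example sizes only
    n_u8s = (n_bits - 1) // 8 + 1        # bytes needed per shot
    arr = np.zeros((shots, n_u8s), dtype=np.uint8)
    res = PrimitiveResult([SamplerPubResult(DataBin(c=BitArray(arr, n_bits)))])

    # Read it back as get_result does: one BitArray field per classical register.
    pub = res[0]
    print(pub.data.c.num_bits, pub.data.c.array.shape)   # 5 (4, 1)

    # Register concatenation along the bit axis, as in the new cache-filling loop.
    combined = BitArray.concatenate_bits([pub.data.c, pub.data.c])
    print(combined.num_bits)                              # 10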