diff --git a/examples/ex_11_utility_scale.py b/examples/ex_11_utility_scale.py
new file mode 100644
index 00000000..d6c0a4b7
--- /dev/null
+++ b/examples/ex_11_utility_scale.py
@@ -0,0 +1,156 @@
+import datetime
+import json
+import os
+import typing
+import warnings
+from pathlib import Path
+from typing import Literal
+
+from benchq.algorithms.time_evolution import qsp_time_evolution_algorithm
+from benchq.compilation import get_ruby_slippers_compiler
+from benchq.data_structures import DETAILED_ION_TRAP_ARCHITECTURE_MODEL, DecoderModel
+from benchq.problem_ingestion.hamiltonian_generation import (
+    generate_cubic_hamiltonian,
+    generate_kitaev_hamiltonian,
+    generate_triangular_hamiltonian,
+)
+from benchq.resource_estimation.graph import (
+    ExtrapolationResourceEstimator,
+    create_big_graph_from_subcircuits,
+    remove_isolated_nodes,
+    run_custom_extrapolation_pipeline,
+    transpile_to_native_gates,
+)
+from benchq.resource_estimation.openfermion_re import get_physical_cost
+
+
+def get_resources(lattice_type: str, size: int, decoder_data_file: str):
+    print(f"Getting operator for size {size} {lattice_type} lattice...")
+    if lattice_type == "triangular":
+        operator = generate_triangular_hamiltonian(size)
+    elif lattice_type == "kitaev":
+        operator = generate_kitaev_hamiltonian(size)
+    elif lattice_type == "cubic":
+        operator = generate_cubic_hamiltonian(size)
+    else:
+        raise ValueError(f"Lattice type {lattice_type} not supported")
+
+    architecture_model = DETAILED_ION_TRAP_ARCHITECTURE_MODEL
+
+    print("Getting algorithm implementation...")
+    evolution_time = 1
+    failure_tolerance = 1e-4
+    algorithm_implementation = qsp_time_evolution_algorithm(
+        operator, evolution_time, failure_tolerance
+    )
+
+    print("Setting resource estimation parameters...")
+    decoder_model = DecoderModel.from_csv(decoder_data_file)
+    my_estimator = ExtrapolationResourceEstimator(
+        architecture_model,
+        [2, 4, 6, 8, 10],
+        n_measurement_steps_fit_type="logarithmic",
+        optimization="space",
+        decoder_model=decoder_model,
+    )
+
+    # select teleportation threshold to tune number of logical qubits
+    if lattice_type == "triangular":
+        gpm = get_ruby_slippers_compiler(teleportation_threshold=70)
+    elif lattice_type == "kitaev":
+        gpm = get_ruby_slippers_compiler(teleportation_threshold=60)
+    elif lattice_type == "cubic":
+        gpm = get_ruby_slippers_compiler(teleportation_threshold=70)
+    else:
+        raise ValueError(f"Lattice type {lattice_type} not supported")
+
+    print("Estimating resources via graph state compilation...")
+    gsc_resources = run_custom_extrapolation_pipeline(
+        algorithm_implementation,
+        my_estimator,
+        transformers=[
+            transpile_to_native_gates,
+            create_big_graph_from_subcircuits(gpm),
+            remove_isolated_nodes,
+        ],
+    )
+
+    total_t_gates = my_estimator.get_n_total_t_gates(
+        gsc_resources.extra.n_t_gates,
+        gsc_resources.extra.n_rotation_gates,
+        algorithm_implementation.error_budget.transpilation_failure_tolerance,
+    )
+
+    footprint_resources = get_physical_cost(
+        algorithm_implementation.program.num_data_qubits,
+        num_t=total_t_gates,
+        architecture_model=my_estimator.hw_model,
+        hardware_failure_tolerance=algorithm_implementation.error_budget.hardware_failure_tolerance,
+        decoder_model=decoder_model,
+    )
+    return gsc_resources, footprint_resources
+
+
+def save_to_file(gsc_resources, footprint_resources, lattice_type, path: str):
+    results_folder = path
+
+    with open(results_folder + lattice_type + "_gsc_re_data.json", "w") as outfile:
+        json.dump(gsc_resources, outfile, indent=4, sort_keys=True, default=str)
+    with open(
+        results_folder + lattice_type + "_footprint_re_data.json", "w"
+    ) as outfile:
+        json.dump(footprint_resources, outfile, indent=4, sort_keys=True, default=str)
+
+
+def main(
+    decoder_data_file: str,
+    save_results: bool,
+    lattice_type: Literal["triangular", "kitaev", "cubic"],
+    size: int,
+    path_to_save_results: typing.Optional[str] = None,
+):
+    gsc_estimates, footprint_estimates = get_resources(
+        lattice_type, size, decoder_data_file
+    )
+
+    if save_results:
+        if path_to_save_results is None:
+            warnings.warn("Path is required to save the results.")
+        else:
+            save_to_file(
+                gsc_estimates, footprint_estimates, lattice_type, path_to_save_results
+            )
+
+    return gsc_estimates, footprint_estimates
+
+
+if __name__ == "__main__":
+    warnings.warn(
+        "These utility scale estimates take a lot of time to calculate. "
+        "It can take up to a day for a single example to finish."
+    )
+
+    decoder_data = "data/sample_decoder_data.csv"
+    save_results = False
+    path_to_save_results = "."
+
+    utility_scale_problems: typing.Dict[
+        Literal["triangular", "kitaev", "cubic"], int
+    ] = {"triangular": 30, "kitaev": 22, "cubic": 10}
+
+    lattice_type: Literal["triangular", "kitaev", "cubic"]
+
+    lattice_type = "triangular"
+    # lattice_type = "kitaev"
+    # lattice_type = "cubic"
+
+    gsc_estimates, footprint_estimates = main(
+        decoder_data,
+        save_results,
+        lattice_type,
+        utility_scale_problems[lattice_type],
+        path_to_save_results,
+    )
+
+    print(gsc_estimates)
+    print(footprint_estimates)
diff --git a/examples/ex_4_extrapolation.py b/examples/ex_4_extrapolation.py
deleted file mode 100644
index caa76840..00000000
--- a/examples/ex_4_extrapolation.py
+++ /dev/null
@@ -1,80 +0,0 @@
-################################################################################
-# © Copyright 2022-2023 Zapata Computing Inc.
-################################################################################
-"""
-In this example we show how to deal with the case where the problem is too large to be
-compiled to a graph. We use the extrapolation technique to estimate resources
-for running time evolution for H2 molecule.
-Number of block encodings needed to run the algorithm is too high, so we
-estimate resources need for running similar circuit with 1, 2 and 3 block encodings
-and then we extrapolate the results to estimate resources for full problem.
-WARNING: This example requires the pyscf extra. run `pip install benchq[pyscf]`
-to install the extra.
-"""
-
-from pathlib import Path
-from pprint import pprint
-
-from benchq.algorithms.time_evolution import qsp_time_evolution_algorithm
-from benchq.data_structures import BASIC_SC_ARCHITECTURE_MODEL, DecoderModel
-from benchq.problem_ingestion import (
-    generate_jw_qubit_hamiltonian_from_mol_data,
-    get_vlasov_hamiltonian,
-)
-from benchq.problem_ingestion.molecule_instance_generation import (
-    generate_hydrogen_chain_instance,
-)
-from benchq.resource_estimation.graph import (
-    ExtrapolationResourceEstimator,
-    create_big_graph_from_subcircuits,
-    run_custom_extrapolation_pipeline,
-    transpile_to_native_gates,
-)
-from benchq.timing import measure_time
-
-
-def main(use_hydrogen=True):
-    evolution_time = 5.0
-    architecture_model = BASIC_SC_ARCHITECTURE_MODEL
-
-    steps_to_extrapolate_from = [1, 2, 3]
-
-    # Load some dummy decoder data for now. Replace with your own decoder data.
-    decoder_file_path = str(Path(__file__).parent / "data" / "sample_decoder_data.csv")
-    decoder_model = DecoderModel.from_csv(decoder_file_path)
-
-    with measure_time() as t_info:
-        N = 2
-        if use_hydrogen:
-            application_instance = generate_hydrogen_chain_instance(N)
-            operator = generate_jw_qubit_hamiltonian_from_mol_data(application_instance)
-        else:
-            operator = get_vlasov_hamiltonian(N=N, k=2.0, alpha=0.6, nu=0)
-
-    print("Operator generation time:", t_info.total)
-
-    with measure_time() as t_info:
-        algorithm = qsp_time_evolution_algorithm(operator, evolution_time, 1e-3)
-    print("Circuit generation time:", t_info.total)
-
-    with measure_time() as t_info:
-        extrapolated_resource_estimates = run_custom_extrapolation_pipeline(
-            algorithm,
-            estimator=ExtrapolationResourceEstimator(
-                architecture_model,
-                steps_to_extrapolate_from,
-                decoder_model=decoder_model,
-                n_measurement_steps_fit_type="linear",
-            ),
-            transformers=[
-                transpile_to_native_gates,
-                create_big_graph_from_subcircuits(),
-            ],
-        )
-
-    print("Resource estimation time with GSC:", t_info.total)
-    pprint(extrapolated_resource_estimates)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/examples/ex_4_fast_graph_estimates.py b/examples/ex_4_fast_graph_estimates.py
new file mode 100644
index 00000000..01a4760d
--- /dev/null
+++ b/examples/ex_4_fast_graph_estimates.py
@@ -0,0 +1,55 @@
+################################################################################
+# © Copyright 2022-2023 Zapata Computing Inc.
+################################################################################
+"""
+In this example we show how to quickly get rough resource estimates with the
+fast graph estimation pipeline. We generate a Hamiltonian describing time
+evolution on a small triangular lattice and estimate the resources needed to
+run it on a detailed ion trap architecture model.
+The script times each stage (operator generation, circuit generation, and
+resource estimation) and prints the resulting estimates.
+Unlike the extrapolation example that this file replaces, it does not require
+the pyscf extra, so no additional installation step is needed.
+""" + +from pathlib import Path +from pprint import pprint + +from benchq.algorithms.time_evolution import qsp_time_evolution_algorithm +from benchq.data_structures import DETAILED_ION_TRAP_ARCHITECTURE_MODEL, DecoderModel +from benchq.problem_ingestion.hamiltonian_generation import ( + generate_triangular_hamiltonian, +) +from benchq.resource_estimation.default_pipelines import run_fast_graph_estimate +from benchq.timing import measure_time + + +def main(): + architecture_model = DETAILED_ION_TRAP_ARCHITECTURE_MODEL + + with measure_time() as t_info: + lattice_size = 3 + operator = generate_triangular_hamiltonian(lattice_size) + + print("Operator generation time:", t_info.total) + + with measure_time() as t_info: + evolution_time: float = 1.0 + failure_tolerance: float = 1e-3 + algorithm = qsp_time_evolution_algorithm( + operator, + evolution_time, + failure_tolerance, + ) + + print("Circuit generation time:", t_info.total) + + with measure_time() as t_info: + fast_gsc_resources = run_fast_graph_estimate(algorithm, architecture_model) + + print("Resource estimation time with GSC:", t_info.total) + pprint(fast_gsc_resources) + + +if __name__ == "__main__": + main() diff --git a/tests/benchq/examples/test_examples.py b/tests/benchq/examples/test_examples.py index ded93d3e..ed65cfcc 100644 --- a/tests/benchq/examples/test_examples.py +++ b/tests/benchq/examples/test_examples.py @@ -32,7 +32,8 @@ from examples.ex_3_packages_comparison import ( # noqa: E402 main as packages_comparison_main, ) -from examples.ex_4_extrapolation import main as extrapolation_main # noqa: E402 +from examples.ex_4_fast_graph_estimates import main as fast_graph # noqa: E402 +from examples.ex_11_utility_scale import main as utility_scale # noqa: E402 SKIP_AZURE = pytest.mark.skipif( os.getenv("BENCHQ_TEST_AZURE") is None, @@ -78,7 +79,14 @@ def test_packages_comparison_example(): def test_extrapolation_example(): - extrapolation_main(use_hydrogen=False) + fast_graph() + + +def test_utility_scale_example(): + decoder_data = os.path.join("examples", "data", "sample_decoder_data.csv") + gsc, footprint = utility_scale(decoder_data, False, "triangular", 3) + assert gsc + assert footprint def test_toy_example_notebook():