diff --git a/.github/dependabot.yml b/.github/dependabot.yml index f384d36a..0d8ea0bf 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -5,7 +5,7 @@ updates: - package-ecosystem: "github-actions" directory: "/" schedule: - interval: "daily" + interval: "weekly" - package-ecosystem: pip directory: "/" schedule: diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 9de996d7..793507f4 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -152,4 +152,4 @@ jobs: steps: - name: Deploy to GitHub Pages id: deployment - uses: actions/deploy-pages@v4.0.3 + uses: actions/deploy-pages@v4.0.5 diff --git a/.github/workflows/check-examples.yml b/.github/workflows/check-examples.yml new file mode 100644 index 00000000..4a043c3c --- /dev/null +++ b/.github/workflows/check-examples.yml @@ -0,0 +1,52 @@ +name: check examples + +on: + pull_request: + branches: + - develop + - main + schedule: + # 04:00 every Saturday morning + - cron: '0 4 * * 6' + +jobs: + + changes: + runs-on: ubuntu-22.04 + outputs: + examples: ${{ steps.filter.outputs.examples }} + steps: + - uses: actions/checkout@v4 + - uses: dorny/paths-filter@v3 + id: filter + with: + base: ${{ github.ref }} + filters: | + examples: + - 'examples/**' + - '.github/**' + + check: + name: check examples + needs: changes + if: github.event_name == 'schedule' || needs.changes.outputs.examples == 'true' + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: '0' + - run: git fetch --depth=1 origin +refs/tags/*:refs/tags/* +refs/heads/*:refs/remotes/origin/* + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: 3.11 + - name: install python requirements for notebooks + run: | + python -m pip install --upgrade pip + python -m pip install . + cd examples + python -m pip install p2j + - name: test example notebooks + run: | + cd examples + ./check-examples diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 2356b943..27c568ff 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -25,10 +25,10 @@ jobs: - name: Update pip run: pip install --upgrade pip - name: Install black and pylint - run: pip install black~=22.3 pylint~=2.13,!=2.13.6 + run: pip install black~=22.3 pylint~=3.0 - name: Check files are formatted with black run: | black --check . - name: Run pylint run: | - pylint --recursive=y */ \ No newline at end of file + pylint --recursive=y --ignore=ttn_tutorial.py,mps_tutorial.py */ \ No newline at end of file diff --git a/_metadata.py b/_metadata.py index 92614389..cce7b976 100644 --- a/_metadata.py +++ b/_metadata.py @@ -1,2 +1,2 @@ -__extension_version__ = "0.5.4" +__extension_version__ = "0.6.0" __extension_name__ = "pytket-cutensornet" diff --git a/docs/api.rst b/docs/api.rst index 89fa9d19..f669c940 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -3,4 +3,4 @@ API documentation .. toctree:: modules/fullTN.rst - modules/mps.rst + modules/structured_state.rst diff --git a/docs/changelog.rst b/docs/changelog.rst index ef4f76f2..84f4984e 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -1,6 +1,19 @@ Changelog ~~~~~~~~~ +0.6.0 (April 2024) +------------------ + +* **New feature**: Tree Tensor Network (TTN) simulator, supporting both fixed ``chi`` and ``truncation_fidelity``. Calculation of single amplitudes is supported by ``get_amplitude`` and inner products by ``vdot``. Measurement and postselection are not yet supported. 
+* **New API**: both ``MPS`` and ``TTN`` share a common interface: ``StructuredState``. Import paths have changed and multiple classes have been renamed: ``ConfigMPS`` is now ``Config``, ``ContractionAlg`` is now ``SimulationAlgorithm``. Documentation has been updated accordingly.
+
+* Canonicalisation of MPS is now always applied before a two-qubit gate. We found that this tends to reduce runtime, since canonicalisation decreases the virtual bond dimension.
+* Two-qubit gates are now decomposed (via SVD) before being applied, removing null singular values (e.g. in ``XXPhase`` gates).
+* Fixed a bug when copying an ``MPS`` if ``truncation_fidelity`` was set.
+* Fixed a bug in ``CuTensorNetHandle`` that prevented it from working when the device was set to something other than the default one (``dev=0``) and ``cuTensorNet>=2.3.0`` was used.
+* Fixed a bug in ``TensorNetwork`` caused by the unsupported ``Create`` operation.
+* Updated pytket version requirement to 1.26.
+
 0.5.4 (January 2024)
 --------------------
diff --git a/docs/modules/mps.rst b/docs/modules/mps.rst
deleted file mode 100644
index d6fff984..00000000
--- a/docs/modules/mps.rst
+++ /dev/null
@@ -1,63 +0,0 @@
-Matrix Product State (MPS)
-==========================
-
-.. automodule:: pytket.extensions.cutensornet.mps
-
-
-Simulation
-~~~~~~~~~~
-
-.. autofunction:: pytket.extensions.cutensornet.mps.simulate
-
-.. autoenum:: pytket.extensions.cutensornet.mps.ContractionAlg()
-    :members:
-
-.. autoclass:: pytket.extensions.cutensornet.mps.ConfigMPS()
-
-    .. automethod:: __init__
-
-.. autoclass:: pytket.extensions.cutensornet.mps.CuTensorNetHandle
-
-
-Classes
-~~~~~~~
-
-.. autoclass:: pytket.extensions.cutensornet.mps.MPS()
-
-    .. automethod:: __init__
-    .. automethod:: apply_gate
-    .. automethod:: vdot
-    .. automethod:: canonicalise
-    .. automethod:: sample
-    .. automethod:: measure
-    .. automethod:: postselect
-    .. automethod:: expectation_value
-    .. automethod:: get_statevector
-    .. automethod:: get_amplitude
-    .. automethod:: get_qubits
-    .. automethod:: get_virtual_dimensions
-    .. automethod:: get_physical_dimension
-    .. automethod:: get_device_id
-    .. automethod:: is_valid
-    .. automethod:: update_libhandle
-    .. automethod:: copy
-    .. automethod:: __len__
-
-.. autoclass:: pytket.extensions.cutensornet.mps.MPSxGate()
-    :show-inheritance:
-
-    .. automethod:: __init__
-
-.. autoclass:: pytket.extensions.cutensornet.mps.MPSxMPO()
-    :show-inheritance:
-
-    .. automethod:: __init__
-
-
-Miscellaneous
-~~~~~~~~~~~~~
-
-.. autoenum:: pytket.extensions.cutensornet.mps.DirectionMPS()
-    :members:
-
-.. autofunction:: pytket.extensions.cutensornet.mps.prepare_circuit
diff --git a/docs/modules/structured_state.rst b/docs/modules/structured_state.rst
new file mode 100644
index 00000000..326a43ce
--- /dev/null
+++ b/docs/modules/structured_state.rst
@@ -0,0 +1,61 @@
+Structured state evolution
+==========================
+
+.. automodule:: pytket.extensions.cutensornet.structured_state
+
+
+Simulation
+~~~~~~~~~~
+
+.. autofunction:: pytket.extensions.cutensornet.structured_state.simulate
+
+.. autoenum:: pytket.extensions.cutensornet.structured_state.SimulationAlgorithm()
+    :members:
+
+.. autoclass:: pytket.extensions.cutensornet.structured_state.Config()
+
+    .. automethod:: __init__
+
+.. autoclass:: pytket.extensions.cutensornet.structured_state.CuTensorNetHandle
+
+
+Classes
+~~~~~~~
+
+.. autoclass:: pytket.extensions.cutensornet.structured_state.StructuredState()
+
+    .. automethod:: __init__
+    .. automethod:: is_valid
+    .. automethod:: apply_gate
+    .. automethod:: apply_scalar
+    .. automethod:: vdot
+    .. automethod:: sample
+    .. automethod:: measure
+    .. automethod:: postselect
+    .. automethod:: expectation_value
+    .. automethod:: get_fidelity
+    .. automethod:: get_statevector
+    .. automethod:: get_amplitude
+    .. automethod:: get_qubits
+    .. automethod:: get_byte_size
+    .. automethod:: get_device_id
+    .. automethod:: update_libhandle
+    .. automethod:: copy
+
+.. autoclass:: pytket.extensions.cutensornet.structured_state.TTNxGate()
+
+    .. automethod:: __init__
+
+.. autoclass:: pytket.extensions.cutensornet.structured_state.MPSxGate()
+
+    .. automethod:: __init__
+
+.. autoclass:: pytket.extensions.cutensornet.structured_state.MPSxMPO()
+
+    .. automethod:: __init__
+
+
+Miscellaneous
+~~~~~~~~~~~~~
+
+.. autofunction:: pytket.extensions.cutensornet.structured_state.prepare_circuit_mps
diff --git a/examples/README.md b/examples/README.md
new file mode 100644
index 00000000..fd569338
--- /dev/null
+++ b/examples/README.md
@@ -0,0 +1,10 @@
+# Contents
+
+Available tutorials for users:
+* `mps_tutorial.ipynb`: Use of MPS simulation and features.
+* `ttn_tutorial.ipynb`: Use of TTN simulation and features.
+* `mpi/`: Example of how to use MPS for embarrassingly parallel tasks with `mpi4py`; see the `mpi` folder.
+
+Developers:
+* `check-examples`: The script to check that the Jupyter notebooks are generated correctly from the files in `python/`. To generate the `.ipynb` files from these, run the `p2j` command in this script.
+* `python/`: The `.py` files that generate the `.ipynb` files. As a developer, you are expected to update these files instead of the `.ipynb` files. Remember to regenerate the latter using the `p2j` command before opening a pull request that changes these examples.
\ No newline at end of file
diff --git a/examples/check-examples b/examples/check-examples
new file mode 100755
index 00000000..a9e947cc
--- /dev/null
+++ b/examples/check-examples
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+set -e
+
+for name in `cat ci-tested-notebooks.txt`
+do
+    echo "Checking: ${name} ..."
+ # Check that notebook is generated from script: + p2j -o -t ${name}-gen.ipynb python/${name}.py + cmp ${name}.ipynb ${name}-gen.ipynb + rm ${name}-gen.ipynb + # TODO, add this when GPU is added to CI + # Run script: + # python python/${name}.py +done diff --git a/examples/ci-tested-notebooks.txt b/examples/ci-tested-notebooks.txt new file mode 100644 index 00000000..1cfc10e5 --- /dev/null +++ b/examples/ci-tested-notebooks.txt @@ -0,0 +1,2 @@ +mps_tutorial +ttn_tutorial \ No newline at end of file diff --git a/examples/images/mps.png b/examples/images/mps.png new file mode 100644 index 00000000..ec6bb8f2 Binary files /dev/null and b/examples/images/mps.png differ diff --git a/examples/mpi/mpi_overlap_bcast_mps.py b/examples/mpi/mpi_overlap_bcast_mps.py index a0d662d9..4d33eed6 100644 --- a/examples/mpi/mpi_overlap_bcast_mps.py +++ b/examples/mpi/mpi_overlap_bcast_mps.py @@ -42,10 +42,10 @@ from pytket.circuit import Circuit, fresh_symbol -from pytket.extensions.cutensornet.mps import ( +from pytket.extensions.cutensornet.structured_state import ( simulate, - ConfigMPS, - ContractionAlg, + Config, + SimulationAlgorithm, CuTensorNetHandle, ) @@ -109,7 +109,7 @@ this_proc_mps = [] with CuTensorNetHandle(device_id) as libhandle: # Different handle for each process for circ in this_proc_circs: - mps = simulate(libhandle, circ, ContractionAlg.MPSxGate, ConfigMPS()) + mps = simulate(libhandle, circ, SimulationAlgorithm.MPSxGate, Config()) this_proc_mps.append(mps) if rank == root: diff --git a/examples/mps_tutorial.ipynb b/examples/mps_tutorial.ipynb index ed8bf238..1bf78d5d 100644 --- a/examples/mps_tutorial.ipynb +++ b/examples/mps_tutorial.ipynb @@ -1,2136 +1 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "f831380d-4535-47e5-8843-2397da9381f5", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "from time import time\n", - "import matplotlib.pyplot as plt\n", - "from pytket import Circuit\n", - "from pytket.circuit.display import render_circuit_jupyter\n", - "\n", - "from pytket.extensions.cutensornet.mps import (\n", - " CuTensorNetHandle,\n", - " ConfigMPS,\n", - " ContractionAlg,\n", - " simulate, \n", - " prepare_circuit\n", - ")" - ] - }, - { - "attachments": { - "318cc014-ad93-4722-85e8-12f9b2e20ca3.png": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAdMAAABwCAYAAABFAZpZAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAAEnQAABJ0Ad5mH3gAACD+SURBVHhe7Z0HXNVHtsd/wd6xIiKggDQRsCHYe429Rk1ioonu27jJZmM2ydv31uya5G2y2ay6m8ReY2/YC/YGigVBpDcLoiAWLFjwzRmG/eTjZ7Mi9/7L9Z6vH5Q5f7wXDjPnnJk5c+aVpwIwDMMwDFNmHNS/DMMwDMOUEXamDMMwDGMh7EwZhmEYxkLYmTIMwzCMhbAzZRiGYRgLYWfKMAzDMBbCzpRhGIZhLISdKcMwDMNYCDtThmEYhrEQdqYMwzAMYyHsTBmGYRjGQtiZMgzDMIyFsDNlGIZhGAthZ8owDMMwFsLOlGEYhmEshJ0pwzAMw1gIO1OGYRiGsRB2pgzDMAxjIexMGYZhGMZC2JkyDMMwjIWwM2UYhmEYC2FnyjAMwzAWws6UYRiGYSyEnSnDMAzDWIjpnWlKehaePHmiWvZJeuYl3H9QqFr2yaUrV3H7ToFq2Sc513ORl39TteyTGzdv4eq1XNWyT+4U3MXFy9mqZZ88KCxEWuZF1TIHpnemv//zN8KI3lUt++TbHxYhNT1TteyTJas34djJM6pln2zdfQDhO/aqln0SGX0WC35ap1r2SbKwBX+ZPU+17JP8m7cxbfrXqmUOTO9Mnz5Vn9gxT8Ufe+cpdwSpg1deUQ07hXUgkDqwbyWQTTSbDsy/Z8pGlB2JoOhpkfrMfinuB3ZuRNmRsA4IMRTYmb4g7EgErAOpAo7GWQfFQ4F1wP3gKRwc2Jm+EOxHWAcSoQQ2IKQD1bBTipf3VMNO4QlGyVhgZ/pC8H4hDx5CDh71ud1iQgOiN2Y0onpjxv1CvSm2iexMXwh2JGq/kA0IGxAZUNi3Dii25n4gDLeD+dNdtES6UpP1A3amNgCpgA2I+MvuAwruB8UBhX0jdWDnSjCjDswf3rAv5cEjYB0U64AnppR4Yt+zMtER7H6FotgemEsH5p+Zij/2bkR58Jhz8OgN64B1QPCWh9BBkflsoumdaVFREXccHjwUT3BAQTrgfsDL/dwPlE1UDZNgA3um4i8ePHY/eGj4sCOhaNy+KZ6ZqoadQjqw945gxhUKG3CmbEDYgLAOiuH9wmJ7YN8dgVfrivuB2QyCjeyZcsex91C02JmyDuxdBwT3A7IGrAOz9QPbmJna++ARH2xA6G/WAfcDsgeqYbewDmQ/UJ+bBfOvGbEBYQMi4Kzu4n5g73BwzTogzLhiaSMzU9WwV3jwsAER8JZH8X4hr1BwPxBDwXQ6sA1nyoOHB48YPJx8w6s0NDe3ex3IBQq2iWbrBuZ3pvQXDx4OKDigkB2BVcA64C0P1Q9MZhN5mdcGoMFj575UDR77hgMK1gHBOlCYTAc24kx58Ni9DmRAYd86kOcL7T6qom5g7/aAdUCYTQfm34TiJU4ePALWgRwKrAMKLNXndosMrtXndozZdGADe6Y8KyMD4sA6sHsDQjrgiSnbA8LuVygEZusHvGdqE7ABKZ6RsA64H8glCtWyXzigEM7rFXO5r1dE56TVI9OQmJqOC0mpyLqUjbz8fEQcPIY+3TrBqX4duDV2gb+PF9wbN1Jf/XKSnnUJ8YkpyLx0Bbl5N7D/6Am0DQqAm2sjoYNG8GvmiWYe7uqrX04uX83B+YQUqYvcvDwcPXEGTVxd4NvMA40bNRT/esLf21N99ctJ7o18xMYnIS3zInKu5yE6Jg41q1VDUICv1AH1geAAP/XVLye37xQg5nwCUjNIB7mIu5CEB4UP0a5VIBo5O6FZ02IdVKhQXv2Pl4/Chw9xNi4BKWkZuHL1GpLTM3Ap+xq6hLVFo4YN4NnEDYH+Pqherar6Hy8np2PjkZSSLmzDNVy8fAUxQie9u3UUvqGe0IErWggd1HGspb5af0zhTLNzrmPT9gjs3H8YFSuUg6d7YzjVqwPHmjVQXrQLHzxE/q07uHotD0liUFWrWgV9u3fGkH494FirpnoV2+Z2wV1s3LYHO/YexP379+HV1A3ODeqijvj5KlQsj4eFj3BLGJac6zeQknkZT548kR1pSL+eckC9DNDPtEHoYNue/dJ5+Hi6o5FTPamDSpUq4vHjJ7gl9HRNPEu7mI1bok/07NoBA3t3g7dnE/Uqts/W3fuFDg4Io5kJX/FzNXZuIIxETVSpXAlPiopw585d5OTli2ArG1eEUe3asR0G9e2BoOa+6hVsn31HIrF1135En40TAbQn3IQO6tauhSpVKuOp0EHBvfu4LoKNzMtXkZSaic7CsQzq0x1hbVuqV7B9ok7FIHznXhwQwXRzbwoinVC/bi1Ur1JVzkwL7t0TE45byLqSgwvJ6WgjAu4BYiz07BymXsH2ORefiM1CB/uORMFN/PzujZ1Rv44jalQnHTjg/oMHyBU6IAebmJIpAswmGNCrKwb26aZeQT8MdaaUnfjD4pVYtXEregqD0DEkGK5CYc8jRTjUoydjcCjqDCaMGYaJ40aoJ7bJsrXhmL98LTq0DUJH8eEloqznkSWMyPFT57Dn8AkMG9Abk98cI42trULB1Nxlq+Hn1UToIBjNfTzUk1/mWu4NHIuOwb5jp9EhpJXQwWg0qFdXPbU99h46jjlLV6O+cBo0FloHPt855t++g8hTsTgYeVpE5+6YMmGMnKnYKlGnz2HOklXisyJ0btcSoS0Dnlus4979B4g6E4dDkWdQSwRe774x2qZn7PFJKUIHq0XQmIsuoa0Q1rrFc8c2BaJRIvA4ciIGDx89wTuvj0an0Nbqqe1x6cpV6RvOJySja1hrtG8TKCZX1dXTX+Z0bAIOC9+QnZMrdDAK/Xp0Vk+0xzBnGisijq9mzoWbixOG9etWKkU9y3URnW/YsV9EqDfxyfuT4e/tpZ7YBlmXs/HFdz+gkph9kw4aOdVXT0pPwd172LjzIGITU/DJ1HcR2iZYPbENaLb9pdABLeUO79+9VIHEs1BQtnHXAeGMTuCj9yahb/dO6ont8OXf54goPAEjRD8I9G+mpC/Gjv3HsHZrBH7zzhsYNbifktoO3y9agR1iRj7y1Z4IFQ6kLByMPIX12/dj+Kt9pDG1NX5avwXzlq3B6EE90aNDiJK+GNEx8Vi/4wDC2gTho19PUlLbgVZl/jJrHgb36YyBvcrmDOOT0rBh537hX1zw6QdT5Gqm1hjiTGkJ57Mv/oZ3xw6VEbilHDh+CovXbMWfP30fPTrZxhLH6XPxQgffol+39vLDUk6JiGzhqnAxMxkrDElvJTU3GVmX8cmMvyLQ1xMjBvRQ0rKTmJqJRau3yKUuW1mtuHnrttDBt6hTsxomjBqopGWHlrtoLAT4+WCaCCxshU/+/C3u3SvAW0IHlhq+W7cLsGTdNtSoUQNf/eF3Smp+vp49T85KJ44eCKf6lq2wUIC5eM0W3Cq4jy//+0PUNnAv8UVY8NM6udX19phB8HJvrKRlZ9Xm3UhIycQXn32IplZ4vf9EuekC9bkukC
Od8e33+GjyeLQOtM5STBPXRvDxcse3Py5BwwbFG/Jm5mzcBXzwhy8xYeSrcgnDGtDeYrC/N+av2CgzHgP8vNUTc0Kz8vf/8AW6hbUS0ad1ZpL16jgiJNgf67dFSKcS0jJQPTEntKrw/n9/AU/Xhhg7tK+SWkbN6tXk8ujeI1GIPnseXdq3VU/My+/++H+o4PAK3h03FBUrVFDSslO5UkW0axkg99u27D4g8yvMDq1MXM6+Iu2iNRKJaE+1ZYCvTFhavn4runcMFXox9zYQbXEcjYzG7yaPg3ODekpqGQE+nnj0+AlmL/hJ7qfX1jDHRldnSp2bnMhHU8bDr1lTJbUO9evUhreHO2Z8NxdtggOEU7XOL8PaXM7OkQb0zRED0K5VgJJah5o1qsFFONXFq8PhJH5+D/cXXzLVA8rGfP+zGejUNgi9u4QqqXWgRKWQIH+s3LRDROdP0dzHvEv/0z7/C1wb1seIV3sqifUICW6OvYcjkXkxG21blm3JVA9m/O17PH74QM5ErE1wc2/EJSTj2MkYEVSUbclUD35cvBKJKan4YOJrSmI9/IWdvX7jBsJ37kf/nl2U1Hys3bwTOyIOSkdq7axkT3cXue++RNjF/j27okJ5bTK/dTuoQ+ndM/72AyaPHwY/L+s60hKaNXXFpLGDRZT3Ix4/fqyk5oL2SHt3DrG6Iy3Bz9sDk14bjK+EDihL2ozQ99asaWP06arNkjw51HfGDsE/F/4kj5aYkdnzl6GcmI2NHGh9R1rCO2OHIuLgEbkaZEZWb9qOtMwsTHxtiJJYn7dGD0J6VpY01mYk4tBx7DlwBJPFrFwrhvfrLhyIgxwPZoSOPn2/6Ce8O26IZnubvTqFiMmWK76aOUdJrI9uzvQf85fDR/wwHdoEKYk20Os3a9JYGKvlSmIelq7ZBNGnMaBHRyXRBn/hUPt0CcPsuUuVxDzs2HsIqRmZeG1wHyXRBkrmGj+0L/4+d7GSmAc67rFr32G8Mby/kmhD1SqVMW5YP8ycswQPHz1SUnNAZ6j/PncJ3hDfn9a8Lt7jux8Xy1UhM3H//gPxfS3CuKH9NE+QGS90QEeuzsReUBLz8Pc5i8VY7Q8XjY/4kc1JTcvAnoNHlcS66OJMqQgDFV8YObCXkmgLJbNsjziA5LRMJTEeOoBPWXrDB2g3E/k5g/t0QaqI+o+eOK0k5mDu0tUY0b+7amlL59BWqFCunDx2YybmLVsts7fJ2WlNC18vNPduioUr1iuJOViwfK3sB3qckW7s7IQhfTrL5BYzsXDlerQK8EGAr/bFR6oLZz2sX1fMX75GScwBrRhUq1wZndrpcwphWP9uwgZpowNdnOnKDVvRt1sYKle0PLmgNJCR6tutvTy/ahao0/Ts1E4WYtAL0jnp3iys37ob7i4N5cxZL/p1by+PG5iFw5HRuHf/vlWy2EsLrYRQP6D3NQNJYnZA1ZzKeuyhLNB7HYk6JTPIzcCt23ewYv1W8bvpoCTa0yW0NW7k30TkqRglMZ4V6zeLMarfCQwKLhvUc8TmXfuUxHpo7kyv593AociT6NFR36zCHh3aYue+I7LTmoHwHRFWy9wtLbTkTWW3klLTlcRYSAeUaaonVASierXKog9GK4mxbNm1H1101kH9urXRJsgf2/YcVBJj2bb7gO5joVy5cuga2hJbdu9XEmPZFnFQjIVg3cvfUd/booEjKQt7Dx+XfbNZU31PX3Rp10raImujuTM9dDwa7YJbWCXl/UWg2SlVTzlw7ISSGAdFxLTUpOestIS2Qc1lKS6jSUnPwp2CAl2WtJ6FjsvsO3xMtYyDjsIcO3kaYW30P7LTLrg5Ig4ZrwNir/g+6OiK3lAhCDLgZoD6I41NvaG+RwlplFFvNPuFXWpjpeORLwJleedcy0XGReuuUmjuTKm+ZGlKw2kBGe7I6DOqZRyUcBLgrU0G8/MI9PdCVPRZ1TKO6LOxQgfGFKYP8vMROjinWsZB/SDI31uz1Pz/BBkQyl24U3BXSYwhITlNJtsYEVhSQFvOwcHwXAoq1JGWccmQwJLKEgb6NcPJM8aPhxOnYxAk+qURtBC6P3kmVrWsg+bOND4xWR6DMIJmHm6Iu5CsWsYReyFR3mpgBD4e7kgSxoOOJhlJXEISPJq4qJa+1KtTC5UqVZAZpEZyITlVFhgxCuoL8cKhGgk5dM8mxtgDwkv0QaoyZCSkA6+mxp0B93BzljVvjYT2rmtUq4raNWsoib7Q0jLdQGRNNHWmVHf1fmEh6tZ2VBJ9aVC3Nu4/KJQ3shgJGXGt075/CaqEQrdNZFp5SeNFybhIOnjx2sPWwkXMSozWQVrGRTQ2UAfOTvWEEbukWsZAY6Fh/TqqpT90ZCo901gdUPUva1X4KQuuzg3llXZGknnpsqG3XdFNTKkZWaplHTR1pnQXZ11HYxxpCXRtE13ZZRSUQfnkSRGqV9O+0PIvUU/ogK40M5LruXmiLxhXH7Ru7Zq4ei1XtYyBkvGMvG+RrnGjO0GNhMaikTqoJwL77JxrqmUMdNtR7VrGzMgIKruZc93Ygi50SYmjkToQEy36PVgTTQvdU/WZb7+fj8+mvqUk+jNj1kJ079Re3g9qBHTn5qwFSzD7Tx8rif78uHwDXF1cDC0r9/HnX2PBX/9HZlUaAd0udO/BI3kHrFH8ZdZcTJsy3rBSl4ejzuDshRSMHjJASfRn3tI1GNS7ozyiYAQXUtKxZus+/GqC9Uv3lRY6Jufv5YZu7dsoib5cz7uJL2YvwmcfTFYS/aHCCRXEVG6UTrUH/h1v/nY6jm9frVqWo6kzpTJRM39chE/em6Ak+vP5d/NRrVpVEY3VVhJ9efCgEDFxF/DD/32qJPozZ9lG5NzIh5uLs5Loz+4Dx7B89p/g8MorSqIv4bsOIvLMefhoVMqyNByPPosZ06bI4wBGcDQ6Bpt2HTK0XvHZuAS889pgw5ISk9Ky8M/Fa9Ey0F9J9Cc+MQX9u7fX/XhQCfk3b2PaF7PQsZ1x950mp2XIpLiRVrgtqqy89eGfcGjzcqsF+Jo600QRBX7+zSxM//BdJdGfL8TMlO70a+HvoyT6QudcR078Df4xw8CZ6bIN6NerK3p10e+A+LP0GPYGvpv+oWE3V6zdsgeubm54Y5R2dWCfx7gpv8PboweW6gJ8Ldh39CRuFjzAJ78xbjzSDTFtA33QKuD5F59rQWxCMg5GncOsL/+gJPrzNzHBqChmZda+5KG0ZOfk4R9L1mLtgplKoj9URCQ5JRVjBhtzXeTjx08w5dOvhDO1Xr1iTfdMaU389p17qmUMlARl5F1+tWrWwL37D+RN+EZxu6AAjjW1u3qoNNDv4OYt4wpo3Ll3H7VrGdcPCKkDA4uI3Ll7F3UMSgYsoW7t2sImGJcQeOs26cDYflDH0RG37xpnF28V3JE5BEZCY8HIxNCbt8kmWnfPVlNn6lS/nhzAhQYdECYHdi03H40aGjMTKIGy1rINTH7JuX4DLgbNhkpwcW6Iq+L7MApKNnBxNi57kHB1cRY6MC4R7FruTXnW0khcXRpaPfHjRcgR7
+3mYtzxJIIy+ykBxyiuXstD40bG6oD6If0ujIKSEV0bNVQt66CpMyV8PJsgzaB0/LTMy/AW7+/gYMw+XQl+Xh5IzzLmjCPtjzx6/BiNnIx1JL5eTZFu4NEU0n8zjyaqZQw+nk1x8YpxmaQZl7Lh1dRdtYyhmXj/zMtXVUt/sq5cNV4HwiZlXMxWLf3JupIDby9jx4KXGItpmZfwpKhISfSFjur5WrmIjObOtHVQAM4nZaiWvsQlpqJVoP4lu56lldBBgkFVV+KS0tCyhf4lu56lpfg9JKRa91xXaaEMziZujVGjejUlMYagAF/EJ6eplr5cy7uBu/ceyODSSIIC/OS4LHyo/5VwZLjjE9MRbNB+bQlNXF1QJL6XKwbdN5yQJHTQ3FibULlSRbTw80a8sE9GQPY4OMC6SWiaO9MO7VrjzPlE1dKXs/HJ6BRqTPr5z+kQ0grRZ+PlANKb03GJ6BwWolrGEdo6CJdERExGXW9OxSagS3vjddBUOPRa1avjQrL+Fw+cFP2vc5jxY4HK2VEW6cmz55VEP0gHlMVbs0Z1JTGOLh3aITpG/7tFqUZ2+YoVDA+qiE5hbcXY1N830GpdcvpFtA+x7oUTmjvTAN9mqF6tKs6et27ppucRl5AKh3LlTDErowPC7YQzOXDslJLow5VruUhKyTD0bOXPGdCrKw5G6lsr+YGYAR0W79nHJDroL3Rw5KT+V2AdjT6Hvt07qZax9OvRWX4/ekPvSe9tBqg/GqGDwydi5Dg0A727dsDhqNMouKvv1YAHxXsO6NlZ1mm2Jpo7U2Lk4P7Yc1jfm0t2i/cbNVj7W/xLy8hBfcX3pO8NNrv2H8PooQOs3mnKyvBXe2P3wUh5e4pe7Np/FD06h8HZybgyfj9nmNBBjAgsMy/pt2d2MPKULKMXHGB8YEl07xgqgxw9V6xiE1OQf+u2aYIqOutLKxX7jpxUEu25cvUaos7GYWh/4wol/Jz6devg1d7dsPOAfrcZUXnZ3YeixDjsoyTWQxcr219Ggw44FHm6WKAxdDi9sPARBvc17kDws9DesZ+3J8J3H1ISbaF9wnNidj5+5CAlMR7KZiWHun67PndKUsbehp0H8ObooUpiPJUqVsTb40Zg02597hYlpxW+6xAmvDZcSczB22NHyEIaekE6oPc0E2+NHS7twd17+szMNgp9vzVmuFwpNAs0NrfvO4rLwtHrAVVC69u9MzzcrX/RgG5TlqnvvI6V4bs1T4vPy7+JlZt24b1J45XEPPx64jjZcRJTtU1Gor3ZlZt24zfvvGFYkYRf4lcTxiIpPQtRZ+KURDtWhIt+8PY46cTNxJihA8Tv6BXsOqD93ZorNu4QM/P2pkjE+zk9OoXCq2kTrNmyR0m0Y932fXBxdjbNMncJlIBDy/4/bdqpJNoRcSQKd+8X4vVRg5XEHFBpzamTXhf9dJeSaMepcxcQcyEFv35rrJJYF92caaC/j4zI56/crGkizgLx+uNGDJIzQbNBx1Oo+szC1ZuRr+Hh/Xk/bUKbli1Ms6T1cypUKC90MBkLVm3W9Eq0lcJAOdZylH3BjHw8dRI27Togl3y1YtveI8jNv43fTjGunOd/Ytp7k4RxS5XL0Fpx9GSMTDz6+L2JSmIupoqgv+BuITZruFJB+SOrN0fg91ONq3z1nxg9pD8aNnTCsg3blcT6XMrOwYLV4dL+VqlSWUmtS7npAvW55pBDTc28JCLyowjV4KZ9qrnp7uaK9yaab1ZaAhXcp1vuV4tZU+tAX1SsUEE9sQ7L1m3DExEjTZ82VUnMBxWxoEo8Py5diwAfT6sfWdm0cz9Ss67im+kfo7wBF3GXhtq1asrzjl/Omo9mTV2tXq+X9uL2Ho3GN5//HjWrG5+9+u+gJW+anX3zz0WoV9dRXotlTWgmMn9VOP46/fdwdzXmLt3SENIqEHOXr0PRkyJ4ulv3rtfktIuYtXAVpn881XSrEz+nS1hbrAnfKfd1/b2tW7eZbsyavXA1Jo0fhV5d2iup9dHVmRLt27YUkVIytu45jBY+HqhUqaJ6UnYoG+wfwpE2cm6Izz6YoqTmhRJB8m7cxPL12+Dj4Wo1Z0Iz0kIxIMl40D2mZoYKzlcQgcQ/F61CE1dneS2UNaAl/tSsbOlIzepESqCLB9waN5IOtVGD+la773VrxGHsO3YKfxWOlM40mhlKQgnw88G3PyxG1cqV0dTNOpV5aLa7RASW3/zxY7RsYVxR+9JQrWpV6VDnLVsnk/N8rVRQ4XTsBcwUjvRTYRN7dApTUnPi4OCAzu1DsHbzLqRnXUagn3UuY6CjQDMXrsFrwwZixMC+SqoNujtTgs6Z0d7pHNF56H7BRk5lv5KKjtyQI+0Y2gYf/sq4q95elNZBzUFXDHzzwxLUqlEN7o3Lvq9HHYZm5W5ujTHj0w9M70hL8PfxkiUnv5q9QMwgy8nb78sKHYCftzIc5YWD/vqPYjZmgrOEpYESIWh2RkEF1SptbkFUTnWol67dhivXb4hgQszGhKO2BSjTOrR1MJau24KsS9ky0Cpfxps8qID5ik07cSo2CV//7zRZKMMWcKxVE906tMOmnftwRjhBLzFDpTO5ZWXjjv0I331Y2gMznLEuDVTIgZKD9h09gT2HIuHu4mTROKaTAz8sW4+P/msihvTrqaTaoemtMc/jSNQpzJq3FC7CmfbsFPJCxpSWiyMOn5ClyWgD21Y6zLOci0/EzLmLpfHo3aWdmK2XPiIjBxJx6ASiY+Px67dfx8A+3dQT2yLj4mXMnLNEJo/17hyCkODSL0fduHkLEUdOYMe+Y/ivt8dh3PCB6oltcSP/lugHS3A+MRl9u4Sic2gr9eT50EUKlGCyLeKoTG6a/OYY9cS2oFra34l+sPfQMfTv1h49OobIIKs0UHUjWtqmYxadwkLw/rtvyGVkW2TesjVYvi4cA3p0knaxetUq6snzOXLiLHYJJ0Ll+n47eYI8426LrN60Hd8vXCFm1CHoKfpBvTqlv5yAlvfp+Ev16tXxweQ34dlEn7usDXWmJazauA3rtuyUmaeBvh6yficteVFUQnV1KWHplojas6/mIjnjoixHRjdPjBjU12aN57Ns2bUfa8K3o7CwEEF+XvD2EDpwdkIdxxpyCYS4LWYe2dfyZCARn5yOi1euYuiAPkIHr8qlIltn35FIrBF9gS4FaNXCF95NXeV1ZXUca/3LqBbcuy+LxadnXkZ8SgbiElLkubnXhg2Qs1xbJ+r0OawWOqA7L1sH+cFX9APXxg1R17Hmv5wDnZXLyc2T9V0TUjMReSoWA3p1ETp4VZOUf72hbSCyCRRsU2Dl4+mOJo2d0UA4hpJtIbo84/qNfGReuip1EB0Tj3atgjBG9APKzbB1KMBcuWEbwndGIKxVC6kDDzcXNKhXB1VVAs3DR49EAHobWZezkZSahVNxCbL+9Jgh/RHW1rrVfYzget4N2Q82bt8DP6+m4qOJ6N8ucG5QT9i74gCDViLo/HDW5RzhG7LkSiXdXT1qcH/01HB/9N9hCmdaQvTZOGFMYhB3IVko54os+1Su
nIOIWIvkdW50xKGFnw9CROcKaRmo/tfLRdyFJGEcY+TF6pliQNHMi3gsona6MogcbHPfZmgT1MIU5eG0IDktE8ejzyBGGIe0zItyxkoBFc0+aoigwblhA/g280DbYNJBW1SsaN0kLjNwURjIYyfP4ExsPFLTs2QZRpq5FRU9lcbUSRgUKpxP2wW0xVHLRpa1X4TcvHwcjozG6djzsk9cE0FUSU1f+p2TYyHn0SrQX5bsfBmCqWe5IyYRpINTMeeRmJqOqznX5UoETTLoUmtKXKOZF9WZpfJ4trK0/yI8fvwYB4+flDqIT0yWSUp37t6TxWhookFX6nm4uSKwuS/atw2GtxgXRmAqZ/rvoMijtEs9LyvUmWjg2MpeqBaQDhwcykkjYq/IO3FFHzBLRSsjkDoQ0HiwVyiofCo+zJqprgcUXNOHmXRgemfKMAzDMGbHfkNchmEYhrES7EwZhmEYxkLYmTIMwzCMhbAzZRiGYRgLYWfKMAzDMBbCzpRhGIZhLISdKcMwDMNYCDtThmEYhrEQdqYMwzAMYyHsTBmGYRjGQtiZMgzDMIyFsDNlGIZhGAthZ8owDMMwFsLOlGEYhmEshJ0pwzAMw1gIO1OGYRiGsRB2pgzDMAxjIexMGYZhGMZC2JkyDMMwjIWwM2UYhmEYC2FnyjAMwzAWws6UYRiGYSyEnSnDMAzDWAg7U4ZhGIaxCOD/AemAaKCFVkO8AAAAAElFTkSuQmCC" - } - }, - "cell_type": "markdown", - "id": "889c9557-7372-4a3e-8178-1d2d3f7cecf7", - "metadata": {}, - "source": [ - "# Introduction\n", - "\n", - "This notebook provides examples of the usage of the MPS functionalities of `pytket_cutensornet`. For more information, see the docs at https://tket.quantinuum.com/extensions/pytket-cutensornet/api/index.html.\n", - "\n", - "A Matrix Product State (MPS) represents a state on `n` qubits as a list of `n` tensors connected in a line as show below:\n", - "\n", - "![image.png](attachment:318cc014-ad93-4722-85e8-12f9b2e20ca3.png)\n", - "\n", - "Each of these circles corresponds to a tensor. We refer to each leg of a tensor as a *bond* and the number of bonds a tensor has is its *rank*. In code, a tensor is just a multidimensional array:\n", - "```\n", - " tensor[i][j][k] = v\n", - "```\n", - "In the case above, we are assigning an entry value `v` of a rank-3 tensor (one `[ ]` coordinate per bond). Each bond allows a different number of values for its indices; for instance `0 <= i < 4` would mean that the first bond of our tensor can take up to four different indices; we refer to this as the *dimension* of the bond. We refer to the bonds connecting different tensors in the MPS as *virtual bonds*; the maximum allowed value for the dimension of virtual bonds is often denoted by the greek letter `chi`. The open bonds are known as *physical bonds* and, in our case, each will correspond to a qubit; hence, they have dimension `2` -- the dimension of the vector space of a single qubit. \n", - "\n", - "In essence, whenever we want to apply a gate to certain qubit we will connect a tensor (matrix) representing the gate to the corresponding physical bond and *contract* the network back to an MPS form (tensor contraction is a generalisation of matrix multiplication to multidimensional arrays). Whenever a two-qubit gate is applied, the entanglement information after contraction will be kept in the degrees of freedom of the virtual bonds. As such, the dimension of the virtual bonds will generally increase exponentially as we apply entangling gates, leading to large memory footprints of the tensors and, consequently, long runtime for tensor contraction. We provide functionalities to limit the growth of the dimension of the virtual bonds, keeping resource consumption in check. Read the *Approximate simulation* section on this notebook to learn more.\n", - "\n", - "**NOTE**: MPS methods can only be applied to circuits that only contain gates that act between nearest-neighbours in a line. 
If your circuit does not satisfy this constraint, you can use the `prepare_circuit` function (see the *Preparing the circuit* section); this will add multiple `SWAP` gates to the circuit that *need* to be simulated explicitly within the MPS, increasing the resources required considerably. In the future, we will support other tensor network state approaches that do not suffer so drastically from this restrictive connectivity.\n", - "\n", - "**References**: To read more about MPS we recommend the following papers.\n", - "* For an introduction to MPS and its canonical form: https://arxiv.org/abs/1901.05824.\n", - "* For a description of the `MPSxGate` algorithm we provide: https://arxiv.org/abs/2002.07730.\n", - "* For a description of the `MPSxMPO` algorithm we provide: https://arxiv.org/abs/2207.05612.\n", - "* For insights on the reationship between truncation error and the error model in a quantum computer: https://arxiv.org/abs/2004.02388" - ] - }, - { - "cell_type": "markdown", - "id": "8cc9379e-6dad-4c25-b735-e7a729799d90", - "metadata": {}, - "source": [ - "# Basic functionality and exact simulation\n", - "\n", - "Here we show an example of the basic use of our MPS methods. We first generate a simple `pytket` circuit to be simulated." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "f0c46b6a-b40c-4502-94de-1946d0da4b3b", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "
\n", - " \n", - "
\n", - "\n" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "my_circ = Circuit(5)\n", - "my_circ.CX(3, 4)\n", - "my_circ.H(2)\n", - "my_circ.CZ(0, 1)\n", - "my_circ.ZZPhase(0.1, 4, 3)\n", - "my_circ.TK2(0.3, 0.5, 0.7, 2, 1)\n", - "my_circ.Ry(0.2, 0)\n", - "\n", - "render_circuit_jupyter(my_circ)" - ] - }, - { - "cell_type": "markdown", - "id": "a9ede80b-32d0-4d43-b910-099dcc2a8a95", - "metadata": {}, - "source": [ - "For **exact** simulation, simply call the `simulate` function on the circuit and choose a contraction algorithm. To learn more about the contraction algorithms we provide see the *Contraction algorithms* section of this notebook. You will also need to provide a configuration, the default one is provided by `ConfigMPS()`. Custom settings of `ConfigMPS` are discussed in the *Approximate simulation* section.\n", - "\n", - "**NOTE**: whenever you wish to generate an `MPS` object or execute calculations on it you must do so within a `with CuTensorNetHandle() as libhandle:` block; this will initialise the cuTensorNetwork library for you, and destroy its handles at the end of the `with` block. You will need to pass the `libhandle` to the `MPS` object via the method that generates it (in the snippet below, `simulate`), or if already initialised, pass it via the `update_libhandle` method.\n", - "\n", - "Due to the nature of Jupyter notebooks, we will be starting most of these cells with a `with CuTensorNetHandle() as libhandle:`. However, in a standard script, all of these cells would be grouped together and a single `with CuTensorNetHandle() as libhandle:` statement would be necessary at the beginning of the script." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "80f72559-b5cf-4a7c-8622-5d40ca43eddd", - "metadata": {}, - "outputs": [], - "source": [ - "with CuTensorNetHandle() as libhandle:\n", - " my_mps = simulate(libhandle, my_circ, ContractionAlg.MPSxGate, ConfigMPS())" - ] - }, - { - "cell_type": "markdown", - "id": "2dc38a08-8dae-47c1-a6ce-d1e5fc3e197c", - "metadata": {}, - "source": [ - "Notice that `my_circ` uses a rich gateset -- in fact, every single-qubit and two-qubit gate supported by `pytket` can be used in our MPS approaches. Gates acting on more than two qubits are not currently supported." - ] - }, - { - "cell_type": "markdown", - "id": "d9e75086-d2c4-4dfa-b7be-c40ec3818267", - "metadata": {}, - "source": [ - "The output of `simulate` is an `MPS` object encoding the output state of the circuit. Currently we support two basic operations on `MPS` objects: obtaining amplitudes of computational states and calculating inner products with other `MPS` objects. More functionality (including sampling from the measurement distribution of the state) will come in due course.\n", - "\n", - "### Obtain an amplitude from an MPS\n", - "\n", - "Let's first see how to get the amplitude of the state `|10100>` from the output of the previous circuit." 
- ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "19425188-339d-4ab7-86ca-e49bec43c15f", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "(0.03968884089773739+0.05462700305610267j)\n" - ] - } - ], - "source": [ - "state = int('10100', 2)\n", - "with CuTensorNetHandle() as libhandle:\n", - " my_mps.update_libhandle(libhandle)\n", - " amplitude = my_mps.get_amplitude(state)\n", - "print(amplitude)" - ] - }, - { - "cell_type": "markdown", - "id": "35adb1f4-7615-4b03-b4a9-2e27d791ce6e", - "metadata": {}, - "source": [ - "Since this is a very small circuit, we can use `pytket`'s state vector simulator capabilities to verify that the state is correct by checking the amplitude of each of the computational states." - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "d22f53f4-760b-4c70-b300-88134555bb02", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Are all amplitudes correct?\n", - "True\n" - ] - } - ], - "source": [ - "state_vector = my_circ.get_statevector()\n", - "n_qubits = len(my_circ.qubits)\n", - "\n", - "correct_amplitude = [False] * (2**n_qubits)\n", - "with CuTensorNetHandle() as libhandle:\n", - " my_mps.update_libhandle(libhandle)\n", - " for i in range(2**n_qubits):\n", - " correct_amplitude[i] = np.isclose(state_vector[i], my_mps.get_amplitude(i))\n", - "\n", - "print(\"Are all amplitudes correct?\")\n", - "print(all(correct_amplitude))" - ] - }, - { - "cell_type": "markdown", - "id": "a4b00cb2-9871-4768-9b25-00cf25101828", - "metadata": {}, - "source": [ - "### Sampling from an MPS\n", - "\n", - "We can also sample from the output state of a circuit by calling `my_mps.sample`, where `my_mps` is the outcome of simulating the circuit." 
- ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "434e6b70-0ed0-4677-b97b-3c396db87b21", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAjIAAAGwCAYAAACzXI8XAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAAA8R0lEQVR4nO3deViVdf7/8dcRAZHlgKgsgaKZWy6lpTKVWZKi5Zg6LeZeo79My6WZSb8tZsvYcjWmTYvLJPm1sqmkMifLTKgpcsHMXHLMQUEFqdSDoIDB/fvjfD16BBQOy31ueD6u61x5Pvf2vhc9r+77c9+3zTAMQwAAABbUyOwCAAAAPEWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAltXY7AJqW2lpqQ4fPqzg4GDZbDazywEAAJVgGIZOnDih6OhoNWpU8XmXeh9kDh8+rNjYWLPLAAAAHsjKylJMTEyFw+t9kAkODpbk3BAhISEmVwMAACojLy9PsbGxrt/xitT7IHPmclJISAhBBgAAi7lYtxA6+wIAAMsiyAAAAMsiyAAAAMuq931kAADeq7S0VMXFxWaXARP4+vrKx8en2vMhyAAATFFcXKyMjAyVlpaaXQpMEhoaqsjIyGo9540gAwCoc4ZhKDs7Wz4+PoqNjb3gA89Q/xiGoZMnTyo3N1eSFBUV5fG8CDIAgDr322+/6eTJk4qOjlbTpk3NLgcmCAgIkCTl5uaqZcuWHl9mIgIDAOpcSUmJJMnPz8/kSmCmMyH29OnTHs+DIAMAMA3vwGvYamL/E2TQsBU6JMeh8oc5DjmHAwC8FkEGDVehQ1oxQkoaLDkOug9zHHS2rxhBmAEAL0aQQcNVlC8V/Cwd2y8l3Xw2zDgOOr8f2+8cXpRvZpUALCQlJUU2m03Hjx83u5QGgyCDhst+iTR+jRQWdzbMZG48G2LC4pzD7ZeYWyeACpWUGkrb96s+3HZIaft+VUmpUWvLstlsF/w8/vjjtbZsVIzbr9Gw2WOcYeVMeHl9gLPdFWJizKwOwAWs3ZGtuat3KdtR6GqLsjfRnCGdldjF8+eSVCQ7O9v153feeUePPfaY9uzZ42oLCgrSli1bany5lVFcXNxg7wDjjAxgj5GGLXZvG7aYEAN4sbU7sjV5xVa3ECNJOY5CTV6xVWt3ZFcwpeciIyNdH7vdLpvN5tYWFBTkGjc9PV1XXXWVmjZtqt/97ndugUeSPvzwQ/Xo0UNNmjRR27ZtNXfuXP3222+u4ZmZmRo6dKiCgoIUEhKi22+/XUeOHHENf/zxx3XFFVdo6dKlatOmjZo0aaLly5crPDxcRUVFbsu69dZbNWbMmBrfHt6CIAM4DkrJk9zbkieV7QAMwCuUlBqau3qXyruIdKZt7updtXqZ6WIefvhhvfDCC9qyZYsaN26su+++2zXsq6++0tixYzVt2jTt2rVLixYtUlJSkp5++mlJzvdPDR06VEePHlVqaqrWrVun//73v7rjjjvclvHTTz/p/fff16pVq7Rt2zbddtttKikp0UcffeQaJzc3V2vWrHFbfn1DkEHDdm7H3rA46e7P3PvMEGYAr7Mp42iZMzHnMiRlOwq1KeNo3RV1nqefflrXX3+9OnfurFmzZumbb75RYaGz5rlz52rWrFkaN26c2rZtq5tuuklPPvmkFi1aJElav369fvjhB7311lvq2bOnevfureXLlys1NVWbN292LaO4uFjLly/XlVdeqW7duikgIEB33XWXli1b5hpnxYoVatWqlfr161en61+XCDJouByHynbsbdW7bAfgip4zA8AUuScqDjGejFcbunXr5vrzmfcInXmv0Pfff68nnnhCQUFBrs/EiROVnZ2tkydPavfu3YqNjVVsbKxrHp07d1ZoaKh2797tamvdurVatGjhttyJEyfqs88+06FDzn+3kpKSNH78+Hr94EE6+3qgpNTQpoyjyj1RqJbBTdSrTTP5NKq/B0m95R8kBTr/ESgZ+7E2/RKg3IxDahkcoF5jP5bP8lucw/2DLjIjAHWpZXCTGh2vNvj6+rr+fCZEnHnLd35+vubOnavhw4eXma5Jk8rXHBgYWKbtyiuvVPfu3bV8+XINGDBAO3fu1Jo1a6pavqUQZKqornvJoxY1sUuj39eG7f/V/7z2nzL79K/9k3RDt7bO8QB4jV5tminK3kQ5jsJy+8nYJEXanf+T6Y169OihPXv2qF27duUO79Spk7KyspSVleU6K7Nr1y4dP35cnTt3vuj8//jHP+rFF1/UoUOHlJCQ4HZmpz7i0lIVmNFLHrVr7U8ndfeqw+Xu07tXHdban06aVBmAivg0smnOEOcP+vnnws98nzOks9eeKX/ssce0fPlyzZ07Vzt37tTu3bu1cuVKPfLII5KkhIQEde3aVaNGjdLWrVu1adMmjR07Vtdff72uuuqqi87/rrvu0sGDB7VkyZJ63cn3DIJMJVmhlzyqhn0KWFdilyi9OrqHIu3ul2Ii7U306ugeXn2GfODAgfr444/12Wef6eqrr1afPn00f/58tW7dWpLzUtSHH36osLAw9e3bVwkJCWrbtq3eeeedSs3fbrdrxIgRCgoK0q233lqLa+IdbIZh1Ot/pfPy8mS32+VwOBQSEuLxfNL2/aqRS7696HhvT+yj+EvDPV4O6g77FDBPYWGhMjIyXM9A8RR9FsvXv39/XX755Vq4cKHZpVzQhY6Dyv5+00emkqzQSx5Vwz4FrM+nkY3/0TjHsWPHlJKSopSUFL3yyitml1MnCDKVZIVe8qga9imA+ubKK6/UsWPH9Oyzz6pDhw5ml1MnCDKVZPVe8iiLfQqgvtm/f7/ZJdQ5r+ns+8wzz8hms2n69OmutsLCQk2ZMkXh4eEKCgrSiBEj3N41UZes3kseZbFPAcD6vCLIbN68WYsWLXJ7EqIkzZgxQ6tXr9a7776r1NRUHT58uNwHCNUVK/eSR/nYpwBgbaZfWsrPz9eoUaO0ZMkSPfXUU652h8Ohf/zjH3rrrbd04403SpKWLVumTp066dtvv1WfPn1MqTexS5Ru6hxJL/l6hH0KANZlepCZMmWKbr75ZiUkJLgFmfT0dJ0+fVoJCQmuto4dO6pVq1ZKS0urMMgUFRW5vcI8Ly+vxmuml3z9wz4FAGsyNcisXLlSW7dudXub5xk5OTny8/NTaGioW3tERIRycnIqnOe8efM0d+7cmi4VAAB4IdP6yGRlZWnatGl68803q/UwpPPNnj1bDofD9cnKyqqxeQMA4M3Gjx/fIJ7mey7Tgkx6erpyc3PVo0cPNW7cWI0bN1ZqaqoWLlyoxo0bKyIiQsXFxTp+/LjbdEeOHFFkZGSF8/X391
dISIjbBwCAmjJ+/HjZbLYyn8TERLNL04IFC5SUlGR2GZKcr1r44IMPan05pl1a6t+/v3744Qe3tgkTJqhjx4566KGHFBsbK19fX61fv14jRoyQJO3Zs0eZmZmKj483o2QAgLcodEhF+ZL9krLDHIck/6BafXN9YmKili1b5tbm7+9fa8u7mJKSEtlsNtnttbfO3sq0MzLBwcHq0qWL2ycwMFDh4eHq0qWL7Ha77rnnHs2cOVMbNmxQenq6JkyYoPj4eNPuWAIAeIFCh7RihJQ0WHIcdB/mOOhsXzHCOV4t8ff3V2RkpNsnLCxMKSkp8vPz01dffeUa97nnnlPLli1dz0Hr16+fpk6dqqlTp8put6t58+Z69NFHde6rD4uKivSnP/1Jl1xyiQIDA9W7d2+lpKS4hiclJSk0NFQfffSROnfuLH9/f2VmZpa5tNSvXz/df//9mj59usLCwhQREaElS5aooKBAEyZMUHBwsNq1a6dPPvnEbf127NihQYMGKSgoSBERERozZox++eUXt/k+8MAD+stf/qJmzZopMjJSjz/+uGt4XFycJGnYsGGy2Wyu77XBK54jU5H58+frlltu0YgRI9S3b19FRkZq1apVZpcFADBTUb5U8LN0bL+UdPPZMOM46Px+bL9zeFF+nZfWr18/TZ8+XWPGjJHD4dB3332nRx99VEuXLlVERIRrvDfeeEONGzfWpk2btGDBAv3tb3/T0qVLXcOnTp2qtLQ0rVy5Utu3b9dtt92mxMRE7d271zXOyZMn9eyzz2rp0qXauXOnWrZsWW5Nb7zxhpo3b65Nmzbp/vvv1+TJk3Xbbbfpd7/7nbZu3aoBAwZozJgxOnnypCTp+PHjuvHGG3XllVdqy5YtWrt2rY4cOaLbb7+9zHwDAwO1ceNGPffcc3riiSe0bt06SXLdxLNs2TJlZ2eXe1NPjTHqOYfDYUgyHA6H2aUAAP7PqVOnjF27dhmnTp3ybAbHswzjxW6GMSfE+d8D37p/P55VswWfY9y4cYaPj48RGBjo9nn66acNwzCMoqIi44orrjBuv/12o3PnzsbEiRPdpr/++uuNTp06GaWlpa62hx56yOjUqZNhGIZx4MABw8fHxzh06JDbdP379zdmz55tGIZhLFu2zJBkbNu2rUxtQ4cOdVvWtdde6/r+22+/GYGBgcaYMWNcbdnZ2YYkIy0tzTAMw3jyySeNAQMGuM03KyvLkGTs2bOn3PkahmFcffXVxkMPPeT6LslITk6uYCs6Xeg4qOzvt+nPkQEAoMrsMdL4NWfPwLw+wNkeFudst8fU6uJvuOEGvfrqq25tzZo538vm5+enN998U926dVPr1q01f/78MtP36dNHNtvZh27Gx8frhRdeUElJiX744QeVlJSoffv2btMUFRUpPPzs8678/PzKPBG/POeO4+Pjo/DwcHXt2tXVduZMUW5uriTp+++/14YNGxQUFFRmXvv27XPVdf6yo6KiXPOoSwQZAIA12WOkYYvPhhjJ+b2WQ4wkBQYGql27dhUO/+abbyRJR48e1dGjRxUYGFjpeefn58vHx0fp6eny8fFxG3ZuuAgICHALQxXx9fV1+26z2dzazsyjtLTUtfwhQ4bo2WefLTOvqKizr20pb75n5lGXCDIAAGtyHJSSJ7m3JU+qkzMyF7Jv3z7NmDFDS5Ys0TvvvKNx48bp888/V6NGZ7ulbty40W2ab7/9Vpdddpl8fHx05ZVXqqSkRLm5ubruuuvqunz16NFD77//vuLi4tS4secxwdfXVyUlJTVYWfm8urMvAADlOrdjb1icdPdnzv+e3wG4lhQVFSknJ8ft88svv6ikpESjR4/WwIEDNWHCBC1btkzbt2/XCy+84DZ9ZmamZs6cqT179ujtt9/WSy+9pGnTpkmS2rdvr1GjRmns2LFatWqVMjIytGnTJs2bN09r1qyp1fWSnK8OOnr0qEaOHKnNmzdr3759+vTTTzVhwoQqBZO4uDitX79eOTk5OnbsWK3VS5ABAFiL45B7iBm/RmrV2/lftzBzqNZKWLt2raKiotw+1157rZ5++mkdOHBAixYtkuS8FLN48WI98sgj+v77713Tjx07VqdOnVKvXr00ZcoUTZs2TZMmnT27tGzZMo0dO1YPPvigOnTooFtvvVWbN29Wq1atam2dzoiOjtbXX3+tkpISDRgwQF27dtX06dMVGhrqdlbpYl544QWtW7dOsbGxuvLKK2utXtv/9Syut/Ly8mS32+VwOHjKLwB4icLCQmVkZKhNmzZVf03NmefIFPxc9jLSmTM1gS2k0e/X6kPxPNWvXz9dccUVevHFF80uxXQXOg4q+/tNHxkAgLU0sTtDSnlP9rXHSOP/VetP9oX3IMgAAKynib3ioFLeawtQbxFkAACoQ+e+agDVR2dfAABgWQQZAIBp6vn9JriImtj/BBkAQJ0788Ta4uJikyuBmc68qPL8pwRXBX1kAAB1rnHjxmratKl+/vln+fr6Vun5JLA+wzB08uRJ5ebmKjQ0tMyrGKqCIAMAqHM2m01RUVHKyMjQgQMHzC4HJgkNDVVkZGS15kGQAQCYws/PT5dddhmXlxooX1/fap2JOYMgAwAwTaNGjar+ZF/gHFyUBAAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQqYpCh+Q4VP4wxyHncAAAUGcIMpVV6JBWjJCSBkuOg+7DHAed7StGEGYAAKhDBJnKKsqXCn6Wju2Xkm4+G2YcB53fj+13Di/KN7NKAAAaFIJMZdkvkcavkcLizoaZzI1nQ0xYnHO4/RJz6wQAoAEhyFSFPcY9zLw+4LwQE2NufQAANDAEmaqyx0jDFru3DVtMiAEAwAQEmapyHJSSJ7m3JU8q2wEYAADUOoJMVZzbsTcsTrr7M/c+M4QZAADqFEGmshyHynbsbdW7bAfgip4zAwAAahxBprL8g6TAFmU79p7bATiwhXM8AABQJxqbXYBlNLFLo993Pifm/Fus7THS+H85Q0wTuzn1AQDQABFkqqKJveKgwvNjAACoc1xaAgAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlkWQAQAAlmVqkHn11VfVrVs3hYSEKCQkRPHx8frkk09cwwsLCzVlyhSFh4crKChII0aM0JEjR0ysGAAAeBNTg0xMTIyeeeYZpaena8uWLbrxxhs1dOhQ7dy5U5I0Y8YMrV69Wu+++65SU1N1+PBhDR8+3MySAQCAF7EZhmGYXcS5mjVrpueff15/+MMf1KJFC7311lv6wx/+IEn68ccf1alTJ6WlpalPnz6Vm
l9eXp7sdrscDodCQkJqs3QAAFBDKvv77TV9ZEpKSrRy5UoVFBQoPj5e6enpOn36tBISElzjdOzYUa1atVJaWlqF8ykqKlJeXp7bBwAA1E+mB5kffvhBQUFB8vf317333qvk5GR17txZOTk58vPzU2hoqNv4ERERysnJqXB+8+bNk91ud31iY2NreQ0AAIBZTA8yHTp00LZt27Rx40ZNnjxZ48aN065duzye3+zZs+VwOFyfrKysGqwWAAB4k8ZmF+Dn56d27dpJknr27KnNmzdrwYIFuuOOO1RcXKzjx4+7nZU5cuSIIiMjK5yfv7+//P39a7tsAADgBUw/I3O+0tJSFRUVqWfPnvL19dX69etdw/bs2aPMzEzFx8ebWCEAAPAWpp6RmT17tgYNGqRWrVrpxIkTeuutt5SSkqJPP/1Udrtd99xzj2bOnKlmzZopJCRE999/v+Lj4yt9xxIAAKjfTA0yubm5Gjt2rLKzs2W329WtWzd9+umnuummmyRJ8+fPV6NGjTRixAgVFRVp4MCBeuWVV8wsGQAAeBGve45MTeM5MgAAWI/lniMDAABQVQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWR4FmaysLB08eND1fdOmTZo+fboWL15cY4UBAABcjEdB5q677tKGDRskSTk5Obrpppu0adMmPfzww3riiSdqtEAAAICKeBRkduzYoV69ekmS/vnPf6pLly765ptv9OabbyopKakm6wMAAKiQR0Hm9OnT8vf3lyR9/vnn+v3vfy9J6tixo7Kzs2uuOgAAgAvwKMhcfvnleu211/TVV19p3bp1SkxMlCQdPnxY4eHhNVogAABARTwKMs8++6wWLVqkfv36aeTIkerevbsk6aOPPnJdcgIAAKhtNsMwDE8mLCkpUV5ensLCwlxt+/fvV9OmTdWyZcsaK7C68vLyZLfb5XA4FBISYnY5AACgEir7++3xc2QMw1B6eroWLVqkEydOSJL8/PzUtGlTT2cJAABQJY09mejAgQNKTExUZmamioqKdNNNNyk4OFjPPvusioqK9Nprr9V0nQAAAGV4dEZm2rRpuuqqq3Ts2DEFBAS42ocNG6b169fXWHEAAAAX4tEZma+++krffPON/Pz83Nrj4uJ06NChGikMAADgYjw6I1NaWqqSkpIy7QcPHlRwcHC1iwIAAKgMj4LMgAED9OKLL7q+22w25efna86cORo8eHBN1QYAAHBBHt1+ffDgQQ0cOFCGYWjv3r266qqrtHfvXjVv3lxffvklt18DAIBqqezvt8fPkfntt9+0cuVKbd++Xfn5+erRo4dGjRrl1vnXGxBkAACwnsr+fnvU2VeSGjdurNGjR3s6OQAAQLVVOsh89NFHlZ7pmZdIAgAA1KZKB5lbb721UuPZbLZy72gCAACoaZUOMqWlpbVZBwAAQJV5/K4lAAAAs3kcZNavX69bbrlFl156qS699FLdcsst+vzzz2uyNgAAgAvyKMi88sorSkxMVHBwsKZNm6Zp06YpJCREgwcP1ssvv1zTNQIAAJTLo+fIxMTEaNasWZo6dapb+8svv6y//vWvXvW+JZ4jAwCA9VT299ujMzLHjx9XYmJimfYBAwbI4XB4MksAAIAq8yjI/P73v1dycnKZ9g8//FC33HJLtYsCAACoDI+e7Nu5c2c9/fTTSklJUXx8vCTp22+/1ddff60HH3xQCxcudI37wAMP1EylAAAA5/Goj0ybNm0qN3ObTf/973+rXFRNoo8MAADWU6vvWsrIyPC4MAAAgJrCA/EAAIBleXRGxjAMvffee9qwYYNyc3PLvL5g1apVNVIcAADAhXgUZKZPn65FixbphhtuUEREhGw2W03XBQAAcFEeBZn//d//1apVqzR48OBqLXzevHlatWqVfvzxRwUEBOh3v/udnn32WXXo0ME1TmFhoR588EGtXLlSRUVFGjhwoF555RVFRERUa9kAAMD6POojY7fb1bZt22ovPDU1VVOmTNG3336rdevW6fTp0xowYIAKCgpc48yYMUOrV6/Wu+++q9TUVB0+fFjDhw+v9rIBAID1eXT79RtvvKG1a9fq9ddfV0BAQI0V8/PPP6tly5ZKTU1V37595XA41KJFC7311lv6wx/+IEn68ccf1alTJ6WlpalPnz4XnSe3XwMAYD21evv17bffrrffflstW7ZUXFycfH193YZv3brVk9m6Xm/QrFkzSVJ6erpOnz6thIQE1zgdO3ZUq1atKgwyRUVFKioqcn3Py8vzqBYAAOD9PAoy48aNU3p6ukaPHl1jnX1LS0s1ffp0XXPNNerSpYskKScnR35+fgoNDXUbNyIiQjk5OeXOZ968eZo7d2616wEAAN7PoyCzZs0affrpp7r22mtrrJApU6Zox44d+ve//12t+cyePVszZ850fc/Ly1NsbGx1ywMAAF7IoyATGxtbo/1Npk6dqo8//lhffvmlYmJiXO2RkZEqLi7W8ePH3c7KHDlyRJGRkeXOy9/fX/7+/jVWGwAA8F4e3bX0wgsv6C9/+Yv2799frYUbhqGpU6cqOTlZX3zxRZl3OPXs2VO+vr5av369q23Pnj3KzMx0vawSAAA0XB7dtRQWFqaTJ0/qt99+U9OmTct09j169Gil5nPffffprbfe0ocffuj27Bi73e66G2ry5Mn617/+paSkJIWEhOj++++XJH3zzTeVWgZ3LQEAYD21etfSiy++6Gldbl599VVJUr9+/dzaly1bpvHjx0uS5s+fr0aNGmnEiBFuD8QDAADw6IyMlXBGBgAA66nVMzLnKiwsVHFxsVsbgQEAANQFjzr7FhQUaOrUqWrZsqUCAwMVFhbm9gEAAKgLHgWZv/zlL/riiy/06quvyt/fX0uXLtXcuXMVHR2t5cuX13SNAAAA5fLo0tLq1au1fPly9evXTxMmTNB1112ndu3aqXXr1nrzzTc1atSomq4TAACgDI/OyBw9etT19uuQkBDX7dbXXnutvvzyy5qrDgAA4AI8CjJt27ZVRkaGJOdLHP/5z39Kcp6pOf+9SAAAALXFoyAzYcIEff/995KkWbNm6eWXX1aTJk00Y8YM/fnPf67RAgEAACpSI8+ROXDggNLT09WuXTt169atJuqqMTxHBgAA66ns73eVzsikpaXp448/dms70+n33nvv1d///ncVFRV5VjEAAEAVVSnIPPHEE9q5c6fr+w8//KB77rlHCQkJmj17tlavXq158+bVeJEAAADlqVKQ2bZtm/r37+/6vnLlSvXu3VtLlizRjBkztHDh
QlfHXwAAgNpWpSBz7NgxRUREuL6npqZq0KBBru9XX321srKyaq46AACAC6hSkImIiHDddl1cXKytW7eqT58+ruEnTpyQr69vzVYIAABQgSoFmcGDB2vWrFn66quvNHv2bDVt2lTXXXeda/j27dt16aWX1niRAAAA5anSKwqefPJJDR8+XNdff72CgoL0xhtvyM/PzzX89ddf14ABA2q8SAAAgPJ49BwZh8OhoKAg+fj4uLUfPXpUQUFBbuHGbDxHBgAA66ns77dHL4202+3ltjdr1syT2QEAAHjEo1cUAAAAeAOCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDAAAsCyCDID6o9AhOQ6VP8xxyDkcQL1iapD58ssvNWTIEEVHR8tms+mDDz5wG24Yhh577DFFRUUpICBACQkJ2rt3rznFAvBuhQ5pxQgpabDkOOg+zHHQ2b5iBGEGqGdMDTIFBQXq3r27Xn755XKHP/fcc1q4cKFee+01bdy4UYGBgRo4cKAKCwvruFIAXq8oXyr4WTq2X0q6+WyYcRx0fj+23zm8KN/MKgHUMJthGIbZRUiSzWZTcnKybr31VknOszHR0dF68MEH9ac//UmS5HA4FBERoaSkJN15552Vmm9eXp7sdrscDodCQkJqq3wA3uDc0BIWJw1bLCVPOvt9/BrJHmNujQAqpbK/317bRyYjI0M5OTlKSEhwtdntdvXu3VtpaWkVTldUVKS8vDy3D4AGwh7jDCthcc7w8voAQgxQz3ltkMnJyZEkRUREuLVHRES4hpVn3rx5stvtrk9sbGyt1gnAy9hjnGdizjVsMSEGqKe8Nsh4avbs2XI4HK5PVlaW2SUBqEuOg87LSedKnlS2AzCAesFrg0xkZKQk6ciRI27tR44ccQ0rj7+/v0JCQtw+ABqI8/vI3P3Z2ctM53YABlBveG2QadOmjSIjI7V+/XpXW15enjZu3Kj4+HgTKwPglRyH3EPM+DVSq97ufWaSbq74OTMALKmxmQvPz8/XTz/95PqekZGhbdu2qVmzZmrVqpWmT5+up556SpdddpnatGmjRx99VNHR0a47mwDAxT9ICmzh/PO5HXvPdABOutk53D/IvBoB1DhTb79OSUnRDTfcUKZ93LhxSkpKkmEYmjNnjhYvXqzjx4/r2muv1SuvvKL27dtXehncfg00IIUO53Ni7JeUHeY45AwxTex1XxeAKqvs77fXPEemthBkAACwHss/RwYAAOBiCDIAAMCyCDIAAMCyCDIAAMCyCDIAAMCyCDIAAMCyCDIAAMCyCDIAAMCyCDIAAMCyCDIAAMCyCDIAAMCyCDIAAMCyCDIAAMCyCDIAAMCyCDIAAMCyCDIAAMCyCDIAAMCyCDIAAMCyCDIAAMCyCDIAAMCyCDIoX6FDchwqf5jjkHM4AAAmI8igrEKHtGKElDRYchx0H+Y46GxfMYIwAwAwHUEGZRXlSwU/S8f2S0k3nw0zjoPO78f2O4cX5ZtZJQAABBmUw36JNH6NFBZ3NsxkbjwbYsLinMPtl5hbJwCgwSPIoHz2GPcw8/qA80JMjLn1AQAgggwuxB4jDVvs3jZsMSEGAOA1CDKomOOglDzJvS15UtkOwAAAmIQgg/Kd27E3LE66+zP3PjOEGQCAFyDIoCzHobIde1v1LtsBuKLnzAAAUEcIMijLP0gKbFG2Y++5HYADWzjHAwDARI3NLgBeqIldGv2+8zkx599ibY+Rxv/LGWKa2M2pDwCA/0OQQfma2CsOKjw/BgDgJbi0BAAALIszMrigklJDmzKOKvdEoVoGN1GvNs3k08hmdlnwUEPZnw1lPRsS9ikqQpBBhdbuyNbc1buU7Sh0tUXZm2jOkM5K7BJlYmXwREPZnw1lPRsS9ikuxGYYhmF2EbUpLy9PdrtdDodDISEhZpdjGWt3ZGvyiq06/+A48/8/r47uwT8gFtJQ9mdDWc+GhH3acFX295s+MiijpNTQ3NW7yvzDIcnVNnf1LpWU1usMXG80lP3ZUNazIWGfojIIMihjU8ZRt1O45zMkZTsKtSnjaN0VBY81lP3ZUNazIWGfojIIMigj90TF/3B4Mh7M1VD2Z0NZz4aEfYrKIMigjJbBTWp0PJiroezPhrKeDQn7FJVBkEEZvdo0U5S9iSq6sdEm5x0Dvdo0q8uy4KGGsj8byno2JOxTVAZBBmX4NLJpzpDOklTmH5Az3+cM6cwzHCyioezPhrKeDQn7FJVBkEG5ErtE6dXRPRRpdz9lG2lvwu2OFtRQ9mdDWc+GhH2Ki+E5MrggnqZZvzSU/dlQ1rMhYZ82PJX9/SbIAA1BoaP8t5lLkuMQbzOH9+LYbbB4IB4Ap0KHtGKElDRYchx0H+Y46GxfMcI5HuBNOHZRCQQZoL4rypcKfpaO7ZeSbj77g+A46Px+bL9zeFG+mVUCZXHsohIIMkB9Z79EGr9GCos7+4OQufHsD0FYnHN4eafuATNx7KIS6CMDNBTn/l/sGa4fghizqgIujmO3QaKPDAB39hhp2GL3tmGL+SGA9+PYxQUQZICGwnFQSp7k3pY8qWwnSsDbcOziAggyQENw7qn5sDjp7s/c+x3wgwBvxbGLiyDIAPWd41DZzpGtepftROk4ZG6dwPk4dlEJBBmgvvMPkgJblO0caY85+4MQ2MI5HuBNOHZRCdy1BDQEPB0VVsWx22BV9ve7cR3WBMAsTewV/2PPMzjgzTh2cRFcWgIAAJZFkAEAAJZFkAEAAJZFkAEAAJZFkAEAAJZFkAEAAJZFkAEAAJZFkAEAAJbFA/EAwKJKSg1tyjiq3BOFahncRL3aNJNPI5vZZaEa2KdVR5ABAAtauyNbc1fvUraj0NUWZW+iOUM6K7FLlImVwVPsU89waQkALGbtjmxNXrHV7QdPknIchZq8YqvW7sg2qTJ4in3qOYIMAFhISamhuat3qby3/Z5pm7t6l0pK6/X7gOsV9mn1cGmprvzfG1xLgqPLXv88cfjCb3D1dNrqLJP1ZD3NrNdK+7SO13NTxlHlO44qUqeUo/Ays4zQr8p3BGhTxlHFX1p2uFXWs9os9He0WvvUQutZWywRZF5++WU9//zzysnJUffu3fXSSy+pV69eZpdVeYUOacUInTyWo5GnH9X3eUGuQd1D8vW275NqGhYpjX6/7M73dNrqLJP1ZD3NrNdK+9SE9Tx69Ge
94feMwpWnO4sfVfY5P3xR+lUr/Z7UrwpR9tGuUnk/ehZZz2qx2N9Rj/epxdaztnj9paV33nlHM2fO1Jw5c7R161Z1795dAwcOVG5urtmlVV5Rvk4ey1HTgiwtLHxEUfpVkvMAXVj4iJoWZOnksRypKL/mpq3OMllP1tPMeq20T01Yz0j/3xSuPLVulKuVfk+6TbfS70m1bpSrcOUp0v83S69ntVjs76jH+9Ri61lbvD7I/O1vf9PEiRM1YcIEde7cWa+99pqaNm2q119/3ezSKq0kOFojTz+qA6UtXQdqD9t/XAfogdKWGnn6UZUER9fYtNVZJuvJeppZr5X2qRnreUWXy/VAk6cuON0DTZ7SFV0ut/R6VofV/o56uk+ttp61xauDTHFxsdLT05WQkOBqa9SokRISEpSWllbuNEVFRcrLy3P7mG1TxlF9nxekO4vP7vxV/o+7dvqdxc7Tc5syjtbYtNVZJuvJeppZr5X2qRnr6dPIpsm/76uRFUw3svhRTf5933KfPWKl9awOq/0d9XSfWm09a4tXB5lffvlFJSUlioiIcGuPiIhQTk5OudPMmzdPdrvd9YmNja2LUi8o94TzdrpshWvG6fvchs04fZ/reuiZ8Wpi2uos01OsZ/1aT7PqtdI+NWs9E7tE6bHRN+lp/+lu7U/7T9djo2+q8JkjVltPT1nx76gn+9SK61kbvDrIeGL27NlyOByuT1ZWltklqWVwE0nOa4jzfV9xGzbf9xXXNcYz49XEtNVZpqdYz/q1nmbVa6V9auZ6JsaWaFHQYre2RUGLlRhbUsFaWnM9PWHVv6NV3adWXc+a5tVBpnnz5vLx8dGRI0fc2o8cOaLIyMhyp/H391dISIjbx2y92jRT95B8t2uIw4sed7vG2D0kX73aNKuxaauzTNaT9TSzXivtU9PW03FQSrpZtmP7pbA46e7PpLA45/ekm53D68N6esiSf0c92KeWXM9aYDMMw6ufsNO7d2/16tVLL730kiSptLRUrVq10tSpUzVr1qyLTp+Xlye73S6Hw2FeqHEc0snFA9W0IMt1DTFb4W490k8GxqrppE8l+yU1M211lsl6sp5m1mulfWrSsaCkwdKZH7zxayR7jOuH8Gz7v7zj+LPS/jRxG3m0T622nlVU2d9vrz4jI0kzZ87UkiVL9MYbb2j37t2aPHmyCgoKNGHCBLNLqzz/IDUNi9TJwFg90OQp1zXEbIXrgSZPOXd6WKTzIUI1NW11lsl6sp5m1mulfWrSsaDAFu4/eJLzv+PXONsDW3jP8Wel/WniNvJon1ptPWuJ15+RkaS///3vrgfiXXHFFVq4cKF69+5dqWm94oyM1HCevsh61q/1NKteK+1TE4+Fcv+P13HI+44/K+3P6kxbA8us8j612npWQWV/vy0RZKrDa4IMAACotHpzaQkAAKAiBBkAAGBZBBkAAGBZBBkAAGBZBBkAAGBZBBkAAGBZBBkAAGBZBBkAAGBZBBkAAGBZjc0uoLadeXBxXl6eyZUAAIDKOvO7fbEXENT7IHPixAlJUmxsrMmVAACAqjpx4oTs9orf3VTv37VUWlqqw4cPKzg4WDabrcbmm5eXp9jYWGVlZfEOpwqwjS6ObXRxbKMLY/tcHNvo4rxxGxmGoRMnTig6OlqNGlXcE6ben5Fp1KiRYmJiam3+ISEhXrPTvRXb6OLYRhfHNrowts/FsY0uztu20YXOxJxBZ18AAGBZBBkAAGBZBBkP+fv7a86cOfL39ze7FK/FNro4ttHFsY0ujO1zcWyji7PyNqr3nX0BAED9xRkZAABgWQQZAABgWQQZAABgWQQZAABgWQQZD7388suKi4tTkyZN1Lt3b23atMnskrzG448/LpvN5vbp2LGj2WWZ6ssvv9SQIUMUHR0tm82mDz74wG24YRh67LHHFBUVpYCAACUkJGjv3r3mFGuCi22f8ePHlzmmEhMTzSnWJPPmzdPVV1+t4OBgtWzZUrfeeqv27NnjNk5hYaGmTJmi8PBwBQUFacSIETpy5IhJFdetymyffv36lTmO7r33XpMqrnuvvvqqunXr5nroXXx8vD755BPXcKsePwQZD7zzzjuaOXOm5syZo61bt6p79+4aOHCgcnNzzS7Na1x++eXKzs52ff7973+bXZKpCgoK1L17d7388svlDn/uuee0cOFCvfbaa9q4caMCAwM1cOBAFRYW1nGl5rjY9pGkxMREt2Pq7bffrsMKzZeamqopU6bo22+/1bp163T69GkNGDBABQUFrnFmzJih1atX691331VqaqoOHz6s4cOHm1h13anM9pGkiRMnuh1Hzz33nEkV172YmBg988wzSk9P15YtW3TjjTdq6NCh2rlzpyQLHz8GqqxXr17GlClTXN9LSkqM6OhoY968eSZW5T3mzJljdO/e3ewyvJYkIzk52fW9tLTUiIyMNJ5//nlX2/Hjxw1/f3/j7bffNqFCc52/fQzDMMaNG2cMHTrUlHq8VW5uriHJSE1NNQzDecz4+voa7777rmuc3bt3G5KMtLQ0s8o0zfnbxzAM4/rrrzemTZtmXlFeKCwszFi6dKmljx/OyFRRcXGx0tPTlZCQ4Gpr1KiREhISlJaWZmJl3mXv3r2Kjo5W27ZtNWrUKGVmZppdktfKyMhQTk6O2zFlt9vVu3dvjqlzpKSkqGXLlurQoYMmT56sX3/91eySTOVwOCRJzZo1kySlp6fr9OnTbsdRx44d1apVqwZ5HJ2/fc5488031bx5c3Xp0kWzZ8/WyZMnzSjPdCUlJVq5cqUKCgoUHx9v6eOn3r80sqb98ssvKikpUUREhFt7RESEfvzxR5Oq8i69e/dWUlKSOnTooOzsbM2dO1fXXXedduzYoeDgYLPL8zo5OTmSVO4xdWZYQ5eYmKjhw4erTZs22rdvn/7nf/5HgwYNUlpamnx8fMwur86VlpZq+vTpuuaaa9SlSxdJzuPIz89PoaGhbuM2xOOovO0jSXfddZdat26t6Ohobd++XQ899JD27NmjVatWmVht3frhhx8UHx+vwsJCBQUFKTk5WZ07d9a2bdsse/wQZFDjBg0a5Ppzt27d1Lt3b7Vu3Vr//Oc/dc8995hYGazqzjvvdP25a9eu6tatmy699FKlpKSof//+JlZmjilTpmjHjh0Nvu9ZRSraPpMmTXL9uWvXroqKilL//v21b98+XXrppXVdpik6dOigbdu2yeFw6L333tO4ceOUmppqdlnVwqWlKmrevLl8fHzK9OQ+cuSIIiMjTarKu4WGhqp9+/b66aefzC7FK505bjimKq9t27Zq3rx5gzympk6dqo8//lgbNmxQTEyMqz0yMlLFxcU6fvy42/gN7TiqaPuUp3fv3pLUoI4jPz8/tWvXTj179tS8efPUvXt3LViwwNLHD0Gmivz8/NSzZ0+tX7/e1VZaWqr169crPj7exMq8V35+vvbt26eoqCizS/FKbdq0UWRkpNsxlZeXp40bN3JMVeDgwYP69ddfG9QxZRiGpk6dquTkZH3xxRdq06aN2/CePXvK19fX7Tjas2
ePMjMzG8RxdLHtU55t27ZJUoM6js5XWlqqoqIiax8/Zvc2tqKVK1ca/v7+RlJSkrFr1y5j0qRJRmhoqJGTk2N2aV7hwQcfNFJSUoyMjAzj66+/NhISEozmzZsbubm5ZpdmmhMnThjfffed8d133xmSjL/97W/Gd999Zxw4cMAwDMN45plnjNDQUOPDDz80tm/fbgwdOtRo06aNcerUKZMrrxsX2j4nTpww/vSnPxlpaWlGRkaG8fnnnxs9evQwLrvsMqOwsNDs0uvM5MmTDbvdbqSkpBjZ2dmuz8mTJ13j3HvvvUarVq2ML774wtiyZYsRHx9vxMfHm1h13bnY9vnpp5+MJ554wtiyZYuRkZFhfPjhh0bbtm2Nvn37mlx53Zk1a5aRmppqZGRkGNu3bzdmzZpl2Gw247PPPjMMw7rHD0HGQy+99JLRqlUrw8/Pz+jVq5fx7bffml2S17jjjjuMqKgow8/Pz7jkkkuMO+64w/jpp5/MLstUGzZsMCSV+YwbN84wDOct2I8++qgRERFh+Pv7G/379zf27NljbtF16ELb5+TJk8aAAQOMFi1aGL6+vkbr1q2NiRMnNrj/cShv+0gyli1b5hrn1KlTxn333WeEhYUZTZs2NYYNG2ZkZ2ebV3Qdutj2yczMNPr27Ws0a9bM8Pf3N9q1a2f8+c9/NhwOh7mF16G7777baN26teHn52e0aNHC6N+/vyvEGIZ1jx+bYRhG3Z3/AQAAqDn0kQEAAJZFkAEAAJZFkAEAAJZFkAEAAJZFkAEAAJZFkAEAAJZFkAEAAJZFkAEAAJZFkAFgqqSkJIWGhppdBgCLIsgAqND48eNls9lcn/DwcCUmJmr79u01tow77rhD//nPf2psfueKi4vTiy++WOXp+vXrp+nTp9d4PQBqHkEGwAUlJiYqOztb2dnZWr9+vRo3bqxbbrmlxuYfEBCgli1b1tj8ADQsBBkAF+Tv76/IyEhFRkbqiiuu0KxZs5SVlaWff/7ZNc5DDz2k9u3bq2nTpmrbtq0effRRnT592jX8+++/1w033KDg4GCFhISoZ8+e2rJli6Syl5YuNO75DMPQ448/rlatWsnf31/R0dF64IEHJDnPqhw4cEAzZsxwnVGSpF9//VUjR47UJZdcoqZNm6pr1656++23XfMcP368UlNTtWDBAtd0+/fvlyTt2LFDgwYNUlBQkCIiIjRmzBj98ssvrmnfe+89de3aVQEBAQoPD1dCQoIKCgqqtwMAXBBBBkCl5efna8WKFWrXrp3Cw8Nd7cHBwUpKStKuXbu0YMECLVmyRPPnz3cNHzVqlGJiYrR582alp6dr1qxZ8vX1LXcZVRn3/fff1/z587Vo0SLt3btXH3zwgbp27SpJWrVqlWJiYvTEE0+4zihJUmFhoXr27Kk1a9Zox44dmjRpksaMGaNNmzZJkhYsWKD4+HhNnDjRNV1sbKyOHz+uG2+8UVdeeaW2bNmitWvX6siRI7r99tslSdnZ2Ro5cqTuvvtu7d69WykpKRo+fLh4Ly9Qy8x9+TYAbzZu3DjDx8fHCAwMNAIDAw1JRlRUlJGenn7B6Z5//nmjZ8+eru/BwcFGUlJSueMuW7bMsNvtlRr3fC+88ILRvn17o7i4uNzhrVu3NubPn3/R+dx8883Ggw8+6Pp+/fXXG9OmTXMb58knnzQGDBjg1paVlWVIMvbs2WOkp6cbkoz9+/dXqnYANYMzMgAu6IYbbtC2bdu0bds2bdq0SQMHDtSgQYN04MAB1zjvvPOOrrnmGkVGRiooKEiPPPKIMjMzXcNnzpypP/7xj0pISNAzzzyjffv2Vbi8qox722236dSpU2rbtq0mTpyo5ORk/fbbbxdcn5KSEj355JPq2rWrmjVrpqCgIH366adu9Zbn+++/14YNGxQUFOT6dOzYUZK0b98+de/eXf3791fXrl112223acmSJTp27NgF5wmg+ggyAC4oMDBQ7dq1U7t27XT11Vdr6dKlKigo0JIlSyRJaWlpGjVqlAYPHqyPP/5Y3333nR5++GEVFxe75vH4449r586duvnmm/XFF1+oc+fOSk5OLnd5VRk3NjZWe/bs0SuvvKKAgADdd9996tu3r1v/nPM9//zzWrBggR566CFt2LBB27Zt08CBA93qLU9+fr6GDBniCnVnPnv37lXfvn3l4+OjdevW6ZNPPlHnzp310ksvqUOHDsrIyLjYJgZQDQQZAFVis9nUqFEjnTp1SpL0zTffqHXr1nr44Yd11VVX6bLLLnM7W3NG+/btNWPGDH322WcaPny4li1bVuEyqjJuQECAhgwZooULFyolJUVpaWn64YcfJEl+fn4qKSlxG//rr7/W0KFDNXr0aHXv3l1t27Ytc/t3edP16NFDO3fuVFxcnCvYnfkEBga6ts0111yjuXPn6rvvvpOfn1+FIQxAzSDIALigoqIi5eTkKCcnR7t379b999/vOjshSZdddpkyMzO1cuVK7du3TwsXLnT78T516pSmTp2qlJQUHThwQF9//bU2b96sTp06lVlWVcaVnHc8/eMf/9COHTv03//+VytWrFBAQIBat24tyfkcmS+//FKHDh1y3V102WWXad26dfrmm2+0e/du/b//9/905MgRt/nGxcVp48aN2r9/v3755ReVlpZqypQpOnr0qEaOHKnNmzdr3759+vTTTzVhwgSVlJRo48aN+utf/6otW7YoMzNTq1at0s8//1xh7QBqiNmddAB4r3HjxhmSXJ/g4GDj6quvNt577z238f785z8b4eHhRlBQkHHHHXcY8+fPd3XgLSoqMu68804jNjbW8PPzM6Kjo42pU6cap06dMgzDvbPvxcY9X3JystG7d28jJCTECAwMNPr06WN8/vnnruFpaWlGt27dDH9/f+PMP3e//vqrMXToUCMoKMho2bKl8cgjjxhjx441hg4d6ppuz549Rp8+fYyAgABDkpGRkWEYhmH85z//MYYNG2aEhoYaAQEBRseOHY3p06cbpaWlxq5du4yBAwcaLVq0MPz9/Y327dsbL730Ug3sBQAXYjMM7g0EAADWxKUlAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWQQZAABgWf8frP1e4F+3sjkAAAAASUVORK5CYII=", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "n_samples = 100\n", - "n_qubits = len(my_circ.qubits)\n", - "\n", - "# Initialise the sample counter\n", - "sample_count = [0 for _ in range(2**n_qubits)]\n", - "\n", - "with CuTensorNetHandle() as libhandle:\n", - " my_mps.update_libhandle(libhandle)\n", - " \n", - " for _ in range(n_samples):\n", - " # Draw a sample\n", - " qubit_outcomes = my_mps.sample()\n", - " # Convert qubit outcomes to bitstring\n", - " bitstring = \"\".join(str(qubit_outcomes[q]) for q in my_circ.qubits)\n", - " # Convert bitstring to int\n", - " outcome = int(bitstring, 2)\n", - " # Update the sample dictionary\n", - " sample_count[outcome] += 1\n", - " \n", - "# Calculate the theoretical number of samples per bitstring\n", - "expected_count = [n_samples*abs(state_vector[i])**2 for i in range(2**n_qubits)]\n", - " \n", - "# Plot a comparison of theory vs sampled\n", - "plt.scatter(range(2**n_qubits), expected_count, label=\"Theory\")\n", - "plt.scatter(range(2**n_qubits), sample_count, label=\"Experiment\", marker='x')\n", - "plt.xlabel(\"Basis states\")\n", - "plt.ylabel(\"Samples\")\n", - "plt.legend()\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "id": "58d7a2c7-a2e8-4bd9-b55e-a8701aea4ea6", - "metadata": {}, - "source": [ - "We also provide methods to apply mid-circuit measurements via `my_mps.measure(qubits)` and postselection via `my_mps.postselect(qubit_outcomes)`. Their use is similar to that of `my_mps.sample()` shown above.\n", - "\n", - "**Note:** whereas `my_mps.sample()` does *not* change the state of the MPS, `my_mps.measure(qubits)` and `my_mps.postselect(qubit_outcomes)` do change it, projecting the state to the resulting outcome and removing the measured qubits." - ] - }, - { - "cell_type": "markdown", - "id": "13ce2db5-0b63-4dcb-8e43-345969d904b1", - "metadata": {}, - "source": [ - "### Inner products\n", - "\n", - "Using `vdot` you can obtain the inner product of two states in MPS form. This method does not change the internal data of neither of the MPS. Moreover, it can be used on the same `MPS` object for both inputs, yielding the squared norm of the state." - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "e6edc6a9-0e31-4e45-b444-55c0790ca642", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "As expected, the squared norm of a state is 1\n", - "True\n" - ] - } - ], - "source": [ - "with CuTensorNetHandle() as libhandle:\n", - " my_mps.update_libhandle(libhandle)\n", - " norm_sq = my_mps.vdot(my_mps)\n", - "\n", - "print(\"As expected, the squared norm of a state is 1\")\n", - "print(np.isclose(norm_sq, 1))" - ] - }, - { - "cell_type": "markdown", - "id": "81167811-4ba7-4c97-9809-39e521d7ee3c", - "metadata": {}, - "source": [ - "Let's come up with another circuit on the same qubits and apply an inner product between the two `MPS` objects." 
- ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "b3db8b6d-424e-405d-94ce-deebc481dac0", - "metadata": {}, - "outputs": [], - "source": [ - "# Generate circuits\n", - "other_circ = Circuit(5)\n", - "other_circ.H(3)\n", - "other_circ.CZ(3, 4)\n", - "other_circ.XXPhase(0.3, 1, 2)\n", - "other_circ.Ry(0.7, 3)\n", - "\n", - "# Simulate them\n", - "with CuTensorNetHandle() as libhandle:\n", - " other_mps = simulate(libhandle, other_circ, ContractionAlg.MPSxGate, ConfigMPS())" - ] - }, - { - "cell_type": "markdown", - "id": "dc655ff8-cc67-4168-806d-112d28916d6c", - "metadata": {}, - "source": [ - "Let's calculate the inner product and check that it agrees with `pytket`'s state vector based computation." - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "3a079908-dd3a-4b75-b3ac-5d150b9513f7", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Is the inner product correct?\n", - "True\n" - ] - } - ], - "source": [ - "with CuTensorNetHandle() as libhandle:\n", - " my_mps.update_libhandle(libhandle)\n", - " inner_product = my_mps.vdot(other_mps)\n", - " \n", - "my_state = my_circ.get_statevector()\n", - "other_state = other_circ.get_statevector()\n", - "\n", - "print(\"Is the inner product correct?\")\n", - "print(np.isclose(np.vdot(my_state, other_state), inner_product))" - ] - }, - { - "cell_type": "markdown", - "id": "910227dc-90bf-4b40-859d-4f5f12d039cc", - "metadata": {}, - "source": [ - "### Preparing the circuit\n", - "\n", - "If the circuit to be simulated contains gates that do not act between nearest neighbour qubits, an error message will be raised." - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "349158fd-f88a-4262-9e46-741c1225939d", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "
\n", - " \n", - "
\n", - "\n" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "bad_circ = Circuit(5)\n", - "bad_circ.H(1)\n", - "bad_circ.ZZPhase(0.3, 2, 3)\n", - "bad_circ.CX(0, 1)\n", - "bad_circ.Ry(0.8, 4)\n", - "bad_circ.CZ(3, 4)\n", - "bad_circ.XXPhase(0.7, 1, 2)\n", - "bad_circ.TK2(0.1, 0.2, 0.4, 1, 4)\n", - "\n", - "render_circuit_jupyter(bad_circ)" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "181ea39a-d3c2-427c-91e7-7802a2eef94e", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "('Some two-qubit gate in the circuit is not acting between', 'nearest neighbour qubits. Consider using prepare_circuit().')\n" - ] - } - ], - "source": [ - "with CuTensorNetHandle() as libhandle:\n", - " try:\n", - " simulate(libhandle, bad_circ, ContractionAlg.MPSxGate, ConfigMPS())\n", - " except RuntimeError as e:\n", - " print(e)" - ] - }, - { - "cell_type": "markdown", - "id": "ba933c45-63a8-46ae-a578-47d6f59076e4", - "metadata": {}, - "source": [ - "As suggested by the error message, we can call `prepare_circuit` to use `pytket` routing capabilities to guarantee that the circuit can be run using our MPS approaches." - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "a3229854-049f-4307-a7f9-73d58d968c16", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "
\n", - " \n", - "
\n", - "\n" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{node[4]: q[0], node[1]: q[1], node[3]: q[2], node[2]: q[3], node[0]: q[4]}\n" - ] - } - ], - "source": [ - "prep_circ, qubit_map = prepare_circuit(bad_circ)\n", - "render_circuit_jupyter(prep_circ)\n", - "# Print the correspondence between qubit names in `prep_circuit` and the original qubits from `circuit` at the output\n", - "print(qubit_map)" - ] - }, - { - "cell_type": "markdown", - "id": "d668e317-a961-4c61-b5de-0e1faa73a46a", - "metadata": {}, - "source": [ - "The circuit can now be simulated as usual." - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "af2c2dc7-0c22-4805-8181-81d044b1033e", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Did simulation succeed?\n", - "True\n" - ] - } - ], - "source": [ - "with CuTensorNetHandle() as libhandle:\n", - " prep_mps = simulate(libhandle, prep_circ, ContractionAlg.MPSxGate, ConfigMPS())\n", - " print(\"Did simulation succeed?\")\n", - " print(prep_mps.is_valid())" - ] - }, - { - "cell_type": "markdown", - "id": "0508ecf8-c7bb-40bf-a0be-7c892de28452", - "metadata": {}, - "source": [ - "# Approximate simulation\n", - "\n", - "We provide two policies for approximate simulation; these are supported by both of our current MPS contraction algorithms:\n", - "\n", - "* Bound the maximum value of the virtual bond dimension `chi`. If a bond dimension would increase past that point, we *truncate* (i.e. discard) the degrees of freedom that contribute the least to the state description. We can keep track of a lower bound of the error that this truncation causes.\n", - "* Provide a value for acceptable two-qubit gate fidelity `truncation_fidelity`. After each two-qubit gate we truncate the dimension of virtual bonds as much as we can while guaranteeing the target gate fidelity. The more fidelity you require, the longer it will take to simulate. **Note**: this is *not* the final fidelity of the output state, but the fidelity per gate.\n", - "\n", - "Values for `chi` and `truncation_fidelity` can be set via `ConfigMPS`. To showcase approximate simulation, let's define a circuit where exact MPS contraction starts struggling." 
- ] - }, - { - "cell_type": "code", - "execution_count": 14, - "id": "4c536376-0259-406e-8a3a-98940fd227a8", - "metadata": {}, - "outputs": [], - "source": [ - "def random_line_circuit(n_qubits: int, layers: int) -> Circuit:\n", - " \"\"\"Random circuit with line connectivity.\"\"\"\n", - " c = Circuit(n_qubits)\n", - "\n", - " for i in range(layers):\n", - " # Layer of TK1 gates\n", - " for q in range(n_qubits):\n", - " c.TK1(np.random.rand(), np.random.rand(), np.random.rand(), q)\n", - "\n", - " # Layer of CX gates\n", - " offset = np.mod(i, 2) # Even layers connect (q0,q1), odd (q1,q2)\n", - " qubit_pairs = [\n", - " [c.qubits[i], c.qubits[i + 1]] for i in range(offset, n_qubits - 1, 2)\n", - " ]\n", - " # Direction of each CX gate is random\n", - " for pair in qubit_pairs:\n", - " np.random.shuffle(pair)\n", - "\n", - " for pair in qubit_pairs:\n", - " c.CX(pair[0], pair[1])\n", - "\n", - " return c" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "1e5c57c8-0089-4433-8d70-8e0b7975a0b5", - "metadata": {}, - "outputs": [], - "source": [ - "circuit = random_line_circuit(n_qubits = 20, layers = 20)" - ] - }, - { - "cell_type": "markdown", - "id": "3fbebcd4-5625-46f3-80a5-b4180872477f", - "metadata": {}, - "source": [ - "For exact contraction, `chi` must be allowed to be up to `2**(n_qubits // 2)`, meaning that if we set `n_qubits = 20` it would require `chi = 1024`; already too much for this particular circuit to be simulated in a gaming laptop using the current implementation. Instead, let's bound `chi` to a maximum of `16`. Doing so results in faster runtime, at the expense of losing output state fidelity." - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "id": "250a3ac6-0777-4d68-a573-96f4a98e9b28", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Time taken by approximate contraction with bound chi:\n", - "1.89 seconds\n", - "\n", - "Lower bound of the fidelity:\n", - "0.3742\n" - ] - } - ], - "source": [ - "start = time()\n", - "with CuTensorNetHandle() as libhandle:\n", - " config = ConfigMPS(chi=16)\n", - " bound_chi_mps = simulate(libhandle, circuit, ContractionAlg.MPSxGate, config)\n", - "end = time()\n", - "print(\"Time taken by approximate contraction with bound chi:\")\n", - "print(f\"{round(end-start,2)} seconds\")\n", - "print(\"\\nLower bound of the fidelity:\")\n", - "print(round(bound_chi_mps.fidelity, 4))" - ] - }, - { - "cell_type": "markdown", - "id": "c12bbcab-19e5-4877-82b5-557ab2dd4c97", - "metadata": {}, - "source": [ - "Alternatively, we can fix `truncation_fidelity` and let `chi` increase as necessary to satisfy it." 
- ] - }, - { - "cell_type": "code", - "execution_count": 17, - "id": "3803bdf2-77c0-4a60-8afe-4933ea3ab75c", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Time taken by approximate contraction with fixed truncation fidelity:\n", - "2.89 seconds\n", - "\n", - "Lower bound of the fidelity:\n", - "0.9298\n" - ] - } - ], - "source": [ - "start = time()\n", - "with CuTensorNetHandle() as libhandle:\n", - " config = ConfigMPS(truncation_fidelity=0.999)\n", - " fixed_fidelity_mps = simulate(libhandle, circuit, ContractionAlg.MPSxGate, config)\n", - "end = time()\n", - "print(\"Time taken by approximate contraction with fixed truncation fidelity:\")\n", - "print(f\"{round(end-start,2)} seconds\")\n", - "print(\"\\nLower bound of the fidelity:\")\n", - "print(round(fixed_fidelity_mps.fidelity, 4))" - ] - }, - { - "cell_type": "markdown", - "id": "48bdac80-8579-4fbc-b182-5da7e947518f", - "metadata": {}, - "source": [ - "# Contraction algorithms" - ] - }, - { - "cell_type": "markdown", - "id": "dafce26c-7baf-4471-97c9-ba405f143fc2", - "metadata": {}, - "source": [ - "We currently offer two MPS-based simulation algorithms:\n", - " \n", - "* **MPSxGate**: Apply gates one by one to the MPS, canonicalising the MPS and truncating when necessary. In particular, we implemented the algorithm from the following paper: https://arxiv.org/abs/2002.07730.\n", - "* **MPSxMPO**: Maintain two MPS copies of the state as it evolves, one updated eagerly using the **MPSxGate** method and the other updated in batches of up to `k` layers of two-qubit gates. Whenever the second MPS is updated, both copies are synchronised and an optimisation algorithm is applied to increase the fidelity of the state. This algorithm is often referred to as DMRG-like simulation. In particular, we implemented the algorithm from the following paper: https://arxiv.org/abs/2207.05612.\n", - "\n", - "The `MPSxGate` algorithm is the one we have been using for all of the examples above. In comparison, the `MPSxMPO` algorithm provides the user with two new parameters to tune:\n", - "\n", - "* **k**: The maximum number of layers the MPO is allowed to have before being contracted. Increasing this might increase fidelity, but it will also increase resource requirements exponentially. Default value is `4`.\n", - "* **optim_delta**: Stopping criteria for the optimisation when contracting the `k` layers of MPO. Stops when the increase of fidelity between iterations is smaller than `optim_delta`. Default value is `1e-5`.\n", - "\n", - "Both `k` and `optim_delta` can be set via `ConfigMPS`. Below we compare `MPSxGate` versus `MPSxMPO` with default parameters and `MPSxMPO` with more resource-hungry parameters. The circuit used is the same as in the previous section." 
- ] - }, - { - "cell_type": "code", - "execution_count": 18, - "id": "13f6cf39-2228-4165-9fae-1ba36afb54b4", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "MPSxGate\n", - "\tTime taken: 1.89 seconds\n", - "\tLower bound of the fidelity: 0.3712\n" - ] - } - ], - "source": [ - "start = time()\n", - "with CuTensorNetHandle() as libhandle:\n", - " config = ConfigMPS(chi=16)\n", - " fixed_fidelity_mps = simulate(libhandle, circuit, ContractionAlg.MPSxGate, config)\n", - "end = time()\n", - "print(\"MPSxGate\")\n", - "print(f\"\\tTime taken: {round(end-start,2)} seconds\")\n", - "print(f\"\\tLower bound of the fidelity: {round(fixed_fidelity_mps.fidelity, 4)}\")" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "id": "d11cb199-fb17-4b56-917f-b090ac25a886", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "MPSxMPO, default parameters\n", - "\tTime taken: 27.17 seconds\n", - "\tLower bound of the fidelity: 0.3956\n" - ] - } - ], - "source": [ - "start = time()\n", - "with CuTensorNetHandle() as libhandle:\n", - " config = ConfigMPS(chi=16)\n", - " fixed_fidelity_mps = simulate(libhandle, circuit, ContractionAlg.MPSxMPO, config)\n", - "end = time()\n", - "print(\"MPSxMPO, default parameters\")\n", - "print(f\"\\tTime taken: {round(end-start,2)} seconds\")\n", - "print(f\"\\tLower bound of the fidelity: {round(fixed_fidelity_mps.fidelity, 4)}\")" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "id": "7eb19f55-0a1d-48ef-896c-0f679cf441d1", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "MPSxMPO, custom parameters\n", - "\tTime taken: 26.99 seconds\n", - "\tLower bound of the fidelity: 0.4209\n" - ] - } - ], - "source": [ - "start = time()\n", - "with CuTensorNetHandle() as libhandle:\n", - " config = ConfigMPS(k=8, optim_delta=1e-15, chi=16)\n", - " fixed_fidelity_mps = simulate(libhandle, circuit, ContractionAlg.MPSxMPO, config)\n", - "end = time()\n", - "print(\"MPSxMPO, custom parameters\")\n", - "print(f\"\\tTime taken: {round(end-start,2)} seconds\")\n", - "print(f\"\\tLower bound of the fidelity: {round(fixed_fidelity_mps.fidelity, 4)}\")" - ] - }, - { - "cell_type": "markdown", - "id": "ca93d95d-5d7e-49c8-8a21-34aeaa7feded", - "metadata": {}, - "source": [ - "**Note**: `MPSxMPO` also admits truncation policy in terms of `truncation_fidelity` instead of `chi`." - ] - }, - { - "cell_type": "markdown", - "id": "d1e0091f-e258-472f-aef5-bec2ad215e56", - "metadata": {}, - "source": [ - "# Using the logger" - ] - }, - { - "cell_type": "markdown", - "id": "7607b5bd-f332-4d97-963b-2a163d3fb194", - "metadata": {}, - "source": [ - "You can request a verbose log to be produced during simulation, by assigning the `loglevel` argument when creating a `ConfigMPS` instance. Currently, two log levels are supported (other than default, which is silent): \n", - "- `logging.INFO` will print information about progress percent, memory currently occupied by the MPS and current fidelity. 
Additionally, some high level information of the current stage of the simulation is provided, such as when `MPSxMPO` is applying optimisation sweeps.\n", - "- `logging.DEBUG` provides all of the messages from the loglevel above plus detailed information of the current operation being carried out and the values of important variables.\n", - "\n", - "**Note**: Due to technical issues with the `logging` module and Jupyter notebooks we need to reload the `logging` module. When working with python scripts and command line, just doing `import logging` is enough." - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "id": "eb152bf3-a065-47bb-bc3e-2adb49277881", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 21, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from importlib import reload # Not needed in Python 2\n", - "import logging\n", - "reload(logging)" - ] - }, - { - "cell_type": "markdown", - "id": "fd8d8c7f-dcd2-40f9-940f-8dc8cfb31fac", - "metadata": {}, - "source": [ - "An example of the use of `logging.INFO` is provided below. " - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "id": "318073fc-2ef4-492e-8c5a-1ba1ba0b7733", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[15:41:45] Simulation (INFO) - Ordering the gates in the circuit to reduce canonicalisation overhead.\n", - "[15:41:45] Simulation (INFO) - Running simulation...\n", - "[15:41:45] Simulation (INFO) - Progress... 0%\n", - "[15:41:45] Simulation (INFO) - Progress... 0%\n", - "[15:41:45] Simulation (INFO) - Progress... 0%\n", - "[15:41:45] Simulation (INFO) - Progress... 0%\n", - "[15:41:45] Simulation (INFO) - Progress... 0%\n", - "[15:41:45] Simulation (INFO) - Progress... 0%\n", - "[15:41:45] Simulation (INFO) - Progress... 1%\n", - "[15:41:45] Simulation (INFO) - Progress... 1%\n", - "[15:41:45] Simulation (INFO) - Progress... 1%\n", - "[15:41:45] Simulation (INFO) - Progress... 1%\n", - "[15:41:45] Simulation (INFO) - Progress... 1%\n", - "[15:41:45] Simulation (INFO) - Progress... 1%\n", - "[15:41:45] Simulation (INFO) - Progress... 2%\n", - "[15:41:45] Simulation (INFO) - Progress... 2%\n", - "[15:41:45] Simulation (INFO) - Progress... 2%\n", - "[15:41:45] Simulation (INFO) - Progress... 2%\n", - "[15:41:45] Simulation (INFO) - Progress... 2%\n", - "[15:41:45] Simulation (INFO) - Progress... 2%\n", - "[15:41:45] Simulation (INFO) - Progress... 3%\n", - "[15:41:45] Simulation (INFO) - Progress... 3%\n", - "[15:41:45] MPS (INFO) - MPS size (MiB)=0.00067138671875\n", - "[15:41:45] MPS (INFO) - MPS fidelity=1.0\n", - "[15:41:45] Simulation (INFO) - Progress... 3%\n", - "[15:41:45] Simulation (INFO) - Progress... 3%\n", - "[15:41:45] Simulation (INFO) - Progress... 3%\n", - "[15:41:45] Simulation (INFO) - Progress... 3%\n", - "[15:41:45] MPS (INFO) - MPS size (MiB)=0.000732421875\n", - "[15:41:45] MPS (INFO) - MPS fidelity=1.0\n", - "[15:41:45] Simulation (INFO) - Progress... 4%\n", - "[15:41:45] Simulation (INFO) - Progress... 4%\n", - "[15:41:45] Simulation (INFO) - Progress... 4%\n", - "[15:41:45] MPS (INFO) - MPS size (MiB)=0.0008544921875\n", - "[15:41:45] MPS (INFO) - MPS fidelity=1.0\n", - "[15:41:45] Simulation (INFO) - Progress... 4%\n", - "[15:41:45] Simulation (INFO) - Progress... 4%\n", - "[15:41:45] Simulation (INFO) - Progress... 
4%\n", - "[15:41:45] MPS (INFO) - MPS size (MiB)=0.0008544921875\n", - "[15:41:45] MPS (INFO) - MPS fidelity=1.0\n", - "[15:41:45] Simulation (INFO) - Progress... 5%\n", - "[15:41:45] Simulation (INFO) - Progress... 5%\n", - "[15:41:45] Simulation (INFO) - Progress... 5%\n", - "[15:41:45] Simulation (INFO) - Progress... 5%\n", - "[15:41:45] MPS (INFO) - MPS size (MiB)=0.00091552734375\n", - "[15:41:45] MPS (INFO) - MPS fidelity=1.0\n", - "[15:41:45] Simulation (INFO) - Progress... 5%\n", - "[15:41:45] Simulation (INFO) - Progress... 5%\n", - "[15:41:45] Simulation (INFO) - Progress... 6%\n", - "[15:41:45] MPS (INFO) - MPS size (MiB)=0.00103759765625\n", - "[15:41:45] MPS (INFO) - MPS fidelity=1.0\n", - "[15:41:45] Simulation (INFO) - Progress... 6%\n", - "[15:41:45] Simulation (INFO) - Progress... 6%\n", - "[15:41:45] Simulation (INFO) - Progress... 6%\n", - "[15:41:45] MPS (INFO) - MPS size (MiB)=0.00128173828125\n", - "[15:41:45] MPS (INFO) - MPS fidelity=1.0000000000000002\n", - "[15:41:45] Simulation (INFO) - Progress... 6%\n", - "[15:41:45] Simulation (INFO) - Progress... 6%\n", - "[15:41:45] Simulation (INFO) - Progress... 7%\n", - "[15:41:45] MPS (INFO) - MPS size (MiB)=0.00164794921875\n", - "[15:41:45] MPS (INFO) - MPS fidelity=1.0000000000000002\n", - "[15:41:45] Simulation (INFO) - Progress... 7%\n", - "[15:41:45] Simulation (INFO) - Progress... 7%\n", - "[15:41:45] Simulation (INFO) - Progress... 7%\n", - "[15:41:45] MPS (INFO) - Applying variational optimisation.\n", - "[15:41:45] MPS (INFO) - Fidelity before optimisation=1.0000000000000002\n", - "[15:41:45] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:45] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.999999999999996\n", - "[15:41:45] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:46] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.9999999999999964\n", - "[15:41:46] MPS (INFO) - Final fidelity after optimisation=0.9999999999999964\n", - "[15:41:46] MPS (INFO) - MPS size (MiB)=0.00164794921875\n", - "[15:41:46] MPS (INFO) - MPS fidelity=0.9999999999999964\n", - "[15:41:46] Simulation (INFO) - Progress... 7%\n", - "[15:41:46] Simulation (INFO) - Progress... 7%\n", - "[15:41:46] Simulation (INFO) - Progress... 8%\n", - "[15:41:46] Simulation (INFO) - Progress... 8%\n", - "[15:41:46] MPS (INFO) - MPS size (MiB)=0.001708984375\n", - "[15:41:46] MPS (INFO) - MPS fidelity=0.9999999999999964\n", - "[15:41:46] Simulation (INFO) - Progress... 8%\n", - "[15:41:46] Simulation (INFO) - Progress... 8%\n", - "[15:41:46] Simulation (INFO) - Progress... 8%\n", - "[15:41:46] MPS (INFO) - MPS size (MiB)=0.0018310546875\n", - "[15:41:46] MPS (INFO) - MPS fidelity=0.9999999999999964\n", - "[15:41:46] Simulation (INFO) - Progress... 8%\n", - "[15:41:46] Simulation (INFO) - Progress... 9%\n", - "[15:41:46] Simulation (INFO) - Progress... 9%\n", - "[15:41:46] MPS (INFO) - MPS size (MiB)=0.0020751953125\n", - "[15:41:46] MPS (INFO) - MPS fidelity=0.9999999999999962\n", - "[15:41:46] Simulation (INFO) - Progress... 9%\n", - "[15:41:46] Simulation (INFO) - Progress... 9%\n", - "[15:41:46] Simulation (INFO) - Progress... 9%\n", - "[15:41:46] MPS (INFO) - MPS size (MiB)=0.0025634765625\n", - "[15:41:46] MPS (INFO) - MPS fidelity=0.9999999999999962\n", - "[15:41:46] Simulation (INFO) - Progress... 10%\n", - "[15:41:46] Simulation (INFO) - Progress... 10%\n", - "[15:41:46] Simulation (INFO) - Progress... 
10%\n", - "[15:41:46] MPS (INFO) - MPS size (MiB)=0.0030517578125\n", - "[15:41:46] MPS (INFO) - MPS fidelity=0.9997200110071941\n", - "[15:41:46] Simulation (INFO) - Progress... 10%\n", - "[15:41:46] Simulation (INFO) - Progress... 10%\n", - "[15:41:46] Simulation (INFO) - Progress... 10%\n", - "[15:41:46] MPS (INFO) - MPS size (MiB)=0.0030517578125\n", - "[15:41:46] MPS (INFO) - MPS fidelity=0.9997200110071941\n", - "[15:41:46] Simulation (INFO) - Progress... 11%\n", - "[15:41:46] Simulation (INFO) - Progress... 11%\n", - "[15:41:46] Simulation (INFO) - Progress... 11%\n", - "[15:41:46] MPS (INFO) - MPS size (MiB)=0.0030517578125\n", - "[15:41:46] MPS (INFO) - MPS fidelity=0.9997200110071941\n", - "[15:41:46] Simulation (INFO) - Progress... 11%\n", - "[15:41:46] Simulation (INFO) - Progress... 11%\n", - "[15:41:46] Simulation (INFO) - Progress... 11%\n", - "[15:41:46] Simulation (INFO) - Progress... 12%\n", - "[15:41:46] MPS (INFO) - MPS size (MiB)=0.00311279296875\n", - "[15:41:46] MPS (INFO) - MPS fidelity=0.9997200110071941\n", - "[15:41:46] Simulation (INFO) - Progress... 12%\n", - "[15:41:46] Simulation (INFO) - Progress... 12%\n", - "[15:41:46] Simulation (INFO) - Progress... 12%\n", - "[15:41:46] MPS (INFO) - MPS size (MiB)=0.00323486328125\n", - "[15:41:46] MPS (INFO) - MPS fidelity=0.9997200110071941\n", - "[15:41:46] Simulation (INFO) - Progress... 12%\n", - "[15:41:46] Simulation (INFO) - Progress... 12%\n", - "[15:41:46] Simulation (INFO) - Progress... 13%\n", - "[15:41:46] MPS (INFO) - MPS size (MiB)=0.00347900390625\n", - "[15:41:46] MPS (INFO) - MPS fidelity=0.9997200110071941\n", - "[15:41:46] Simulation (INFO) - Progress... 13%\n", - "[15:41:46] Simulation (INFO) - Progress... 13%\n", - "[15:41:46] Simulation (INFO) - Progress... 13%\n", - "[15:41:46] MPS (INFO) - MPS size (MiB)=0.00396728515625\n", - "[15:41:46] MPS (INFO) - MPS fidelity=0.9997200110071943\n", - "[15:41:46] Simulation (INFO) - Progress... 13%\n", - "[15:41:46] Simulation (INFO) - Progress... 13%\n", - "[15:41:46] Simulation (INFO) - Progress... 14%\n", - "[15:41:46] MPS (INFO) - MPS size (MiB)=0.00494384765625\n", - "[15:41:46] MPS (INFO) - MPS fidelity=0.9997200110071943\n", - "[15:41:46] Simulation (INFO) - Progress... 14%\n", - "[15:41:46] Simulation (INFO) - Progress... 14%\n", - "[15:41:46] Simulation (INFO) - Progress... 14%\n", - "[15:41:46] MPS (INFO) - MPS size (MiB)=0.0062255859375\n", - "[15:41:46] MPS (INFO) - MPS fidelity=0.9992820662866865\n", - "[15:41:46] Simulation (INFO) - Progress... 14%\n", - "[15:41:46] Simulation (INFO) - Progress... 14%\n", - "[15:41:46] Simulation (INFO) - Progress... 15%\n", - "[15:41:46] MPS (INFO) - MPS size (MiB)=0.006561279296875\n", - "[15:41:46] MPS (INFO) - MPS fidelity=0.9986887177546038\n", - "[15:41:46] Simulation (INFO) - Progress... 15%\n", - "[15:41:46] Simulation (INFO) - Progress... 15%\n", - "[15:41:46] Simulation (INFO) - Progress... 15%\n", - "[15:41:46] MPS (INFO) - MPS size (MiB)=0.006561279296875\n", - "[15:41:46] MPS (INFO) - MPS fidelity=0.9986887177546038\n", - "[15:41:46] Simulation (INFO) - Progress... 15%\n", - "[15:41:46] Simulation (INFO) - Progress... 15%\n", - "[15:41:46] Simulation (INFO) - Progress... 16%\n", - "[15:41:46] MPS (INFO) - Applying variational optimisation.\n", - "[15:41:46] MPS (INFO) - Fidelity before optimisation=0.9986887177546038\n", - "[15:41:46] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:46] MPS (INFO) - Optimisation sweep completed. 
Current fidelity=0.9992190736072919\n", - "[15:41:46] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:46] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.9992335371417129\n", - "[15:41:46] MPS (INFO) - Final fidelity after optimisation=0.9992335371417129\n", - "[15:41:46] MPS (INFO) - MPS size (MiB)=0.006561279296875\n", - "[15:41:46] MPS (INFO) - MPS fidelity=0.9992335371417129\n", - "[15:41:46] Simulation (INFO) - Progress... 16%\n", - "[15:41:46] Simulation (INFO) - Progress... 16%\n", - "[15:41:46] Simulation (INFO) - Progress... 16%\n", - "[15:41:46] Simulation (INFO) - Progress... 16%\n", - "[15:41:46] MPS (INFO) - MPS size (MiB)=0.006622314453125\n", - "[15:41:46] MPS (INFO) - MPS fidelity=0.9992335371417131\n", - "[15:41:46] Simulation (INFO) - Progress... 16%\n", - "[15:41:46] Simulation (INFO) - Progress... 17%\n", - "[15:41:46] Simulation (INFO) - Progress... 17%\n", - "[15:41:46] MPS (INFO) - MPS size (MiB)=0.006744384765625\n", - "[15:41:46] MPS (INFO) - MPS fidelity=0.9992335371417131\n", - "[15:41:46] Simulation (INFO) - Progress... 17%\n", - "[15:41:46] Simulation (INFO) - Progress... 17%\n", - "[15:41:46] Simulation (INFO) - Progress... 17%\n", - "[15:41:46] MPS (INFO) - MPS size (MiB)=0.006988525390625\n", - "[15:41:46] MPS (INFO) - MPS fidelity=0.9992335371417131\n", - "[15:41:46] Simulation (INFO) - Progress... 17%\n", - "[15:41:46] Simulation (INFO) - Progress... 18%\n", - "[15:41:46] Simulation (INFO) - Progress... 18%\n", - "[15:41:46] MPS (INFO) - MPS size (MiB)=0.007476806640625\n", - "[15:41:46] MPS (INFO) - MPS fidelity=0.9992335371417131\n", - "[15:41:46] Simulation (INFO) - Progress... 18%\n", - "[15:41:46] Simulation (INFO) - Progress... 18%\n", - "[15:41:46] Simulation (INFO) - Progress... 18%\n", - "[15:41:46] MPS (INFO) - MPS size (MiB)=0.007476806640625\n", - "[15:41:46] MPS (INFO) - MPS fidelity=0.9982345023466558\n", - "[15:41:46] Simulation (INFO) - Progress... 18%\n", - "[15:41:46] Simulation (INFO) - Progress... 19%\n", - "[15:41:46] Simulation (INFO) - Progress... 19%\n", - "[15:41:46] MPS (INFO) - MPS size (MiB)=0.008209228515625\n", - "[15:41:46] MPS (INFO) - MPS fidelity=0.9975515249441151\n", - "[15:41:46] Simulation (INFO) - Progress... 19%\n", - "[15:41:46] Simulation (INFO) - Progress... 19%\n", - "[15:41:46] Simulation (INFO) - Progress... 19%\n", - "[15:41:46] MPS (INFO) - MPS size (MiB)=0.00860595703125\n", - "[15:41:46] MPS (INFO) - MPS fidelity=0.9967787323351995\n", - "[15:41:46] Simulation (INFO) - Progress... 20%\n", - "[15:41:46] Simulation (INFO) - Progress... 20%\n", - "[15:41:46] Simulation (INFO) - Progress... 20%\n", - "[15:41:46] MPS (INFO) - MPS size (MiB)=0.00958251953125\n", - "[15:41:46] MPS (INFO) - MPS fidelity=0.996089833981607\n", - "[15:41:46] Simulation (INFO) - Progress... 20%\n", - "[15:41:46] Simulation (INFO) - Progress... 20%\n", - "[15:41:46] Simulation (INFO) - Progress... 20%\n", - "[15:41:46] MPS (INFO) - MPS size (MiB)=0.009979248046875\n", - "[15:41:46] MPS (INFO) - MPS fidelity=0.996089833981607\n", - "[15:41:46] Simulation (INFO) - Progress... 21%\n", - "[15:41:46] Simulation (INFO) - Progress... 21%\n", - "[15:41:46] Simulation (INFO) - Progress... 21%\n", - "[15:41:46] MPS (INFO) - MPS size (MiB)=0.009979248046875\n", - "[15:41:46] MPS (INFO) - MPS fidelity=0.9960898339816068\n", - "[15:41:46] Simulation (INFO) - Progress... 21%\n", - "[15:41:46] Simulation (INFO) - Progress... 21%\n", - "[15:41:46] Simulation (INFO) - Progress... 
21%\n", - "[15:41:46] MPS (INFO) - MPS size (MiB)=0.009979248046875\n", - "[15:41:46] MPS (INFO) - MPS fidelity=0.9960898339816068\n", - "[15:41:46] Simulation (INFO) - Progress... 22%\n", - "[15:41:46] Simulation (INFO) - Progress... 22%\n", - "[15:41:46] Simulation (INFO) - Progress... 22%\n", - "[15:41:46] Simulation (INFO) - Progress... 22%\n", - "[15:41:46] MPS (INFO) - MPS size (MiB)=0.009979248046875\n", - "[15:41:46] MPS (INFO) - MPS fidelity=0.9955765571586642\n", - "[15:41:46] Simulation (INFO) - Progress... 22%\n", - "[15:41:46] Simulation (INFO) - Progress... 22%\n", - "[15:41:46] Simulation (INFO) - Progress... 23%\n", - "[15:41:46] MPS (INFO) - MPS size (MiB)=0.01007080078125\n", - "[15:41:46] MPS (INFO) - MPS fidelity=0.9955765571586642\n", - "[15:41:46] Simulation (INFO) - Progress... 23%\n", - "[15:41:46] Simulation (INFO) - Progress... 23%\n", - "[15:41:46] Simulation (INFO) - Progress... 23%\n", - "[15:41:46] MPS (INFO) - MPS size (MiB)=0.01031494140625\n", - "[15:41:46] MPS (INFO) - MPS fidelity=0.9955765571586642\n", - "[15:41:46] Simulation (INFO) - Progress... 23%\n", - "[15:41:46] Simulation (INFO) - Progress... 23%\n", - "[15:41:46] Simulation (INFO) - Progress... 24%\n", - "[15:41:47] MPS (INFO) - MPS size (MiB)=0.01080322265625\n", - "[15:41:47] MPS (INFO) - MPS fidelity=0.9955765571586642\n", - "[15:41:47] Simulation (INFO) - Progress... 24%\n", - "[15:41:47] Simulation (INFO) - Progress... 24%\n", - "[15:41:47] Simulation (INFO) - Progress... 24%\n", - "[15:41:47] MPS (INFO) - MPS size (MiB)=0.01153564453125\n", - "[15:41:47] MPS (INFO) - MPS fidelity=0.9951866821980899\n", - "[15:41:47] Simulation (INFO) - Progress... 24%\n", - "[15:41:47] Simulation (INFO) - Progress... 24%\n", - "[15:41:47] Simulation (INFO) - Progress... 25%\n", - "[15:41:47] MPS (INFO) - MPS size (MiB)=0.012542724609375\n", - "[15:41:47] MPS (INFO) - MPS fidelity=0.9945684970193788\n", - "[15:41:47] Simulation (INFO) - Progress... 25%\n", - "[15:41:47] Simulation (INFO) - Progress... 25%\n", - "[15:41:47] Simulation (INFO) - Progress... 25%\n", - "[15:41:47] MPS (INFO) - MPS size (MiB)=0.013336181640625\n", - "[15:41:47] MPS (INFO) - MPS fidelity=0.9937800765622566\n", - "[15:41:47] Simulation (INFO) - Progress... 25%\n", - "[15:41:47] Simulation (INFO) - Progress... 25%\n", - "[15:41:47] Simulation (INFO) - Progress... 26%\n", - "[15:41:47] MPS (INFO) - MPS size (MiB)=0.015167236328125\n", - "[15:41:47] MPS (INFO) - MPS fidelity=0.9933657156418472\n", - "[15:41:47] Simulation (INFO) - Progress... 26%\n", - "[15:41:47] Simulation (INFO) - Progress... 26%\n", - "[15:41:47] Simulation (INFO) - Progress... 26%\n", - "[15:41:47] MPS (INFO) - MPS size (MiB)=0.016326904296875\n", - "[15:41:47] MPS (INFO) - MPS fidelity=0.9926168168757035\n", - "[15:41:47] Simulation (INFO) - Progress... 26%\n", - "[15:41:47] Simulation (INFO) - Progress... 26%\n", - "[15:41:47] Simulation (INFO) - Progress... 27%\n", - "[15:41:47] MPS (INFO) - MPS size (MiB)=0.01806640625\n", - "[15:41:47] MPS (INFO) - MPS fidelity=0.9919907474019105\n", - "[15:41:47] Simulation (INFO) - Progress... 27%\n", - "[15:41:47] Simulation (INFO) - Progress... 27%\n", - "[15:41:47] Simulation (INFO) - Progress... 27%\n", - "[15:41:47] MPS (INFO) - MPS size (MiB)=0.01806640625\n", - "[15:41:47] MPS (INFO) - MPS fidelity=0.9919907474019105\n", - "[15:41:47] Simulation (INFO) - Progress... 27%\n", - "[15:41:47] Simulation (INFO) - Progress... 27%\n", - "[15:41:47] Simulation (INFO) - Progress... 
28%\n", - "[15:41:47] MPS (INFO) - MPS size (MiB)=0.01806640625\n", - "[15:41:47] MPS (INFO) - MPS fidelity=0.9919907474019105\n", - "[15:41:47] Simulation (INFO) - Progress... 28%\n", - "[15:41:47] Simulation (INFO) - Progress... 28%\n", - "[15:41:47] Simulation (INFO) - Progress... 28%\n", - "[15:41:47] MPS (INFO) - Applying variational optimisation.\n", - "[15:41:47] MPS (INFO) - Fidelity before optimisation=0.9919907474019105\n", - "[15:41:47] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:47] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.9944986651228879\n", - "[15:41:47] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:47] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.9945622863823858\n", - "[15:41:47] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:47] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.9945744642325651\n", - "[15:41:47] MPS (INFO) - Final fidelity after optimisation=0.9945744642325651\n", - "[15:41:47] MPS (INFO) - MPS size (MiB)=0.01806640625\n", - "[15:41:47] MPS (INFO) - MPS fidelity=0.9945744642325651\n", - "[15:41:47] Simulation (INFO) - Progress... 28%\n", - "[15:41:47] Simulation (INFO) - Progress... 28%\n", - "[15:41:47] Simulation (INFO) - Progress... 29%\n", - "[15:41:47] Simulation (INFO) - Progress... 29%\n", - "[15:41:47] MPS (INFO) - MPS size (MiB)=0.01812744140625\n", - "[15:41:47] MPS (INFO) - MPS fidelity=0.9945744642325651\n", - "[15:41:47] Simulation (INFO) - Progress... 29%\n", - "[15:41:47] Simulation (INFO) - Progress... 29%\n", - "[15:41:47] Simulation (INFO) - Progress... 29%\n", - "[15:41:47] MPS (INFO) - MPS size (MiB)=0.018218994140625\n", - "[15:41:47] MPS (INFO) - MPS fidelity=0.9945744642325651\n", - "[15:41:47] Simulation (INFO) - Progress... 30%\n", - "[15:41:47] Simulation (INFO) - Progress... 30%\n", - "[15:41:47] Simulation (INFO) - Progress... 30%\n", - "[15:41:47] MPS (INFO) - MPS size (MiB)=0.018341064453125\n", - "[15:41:47] MPS (INFO) - MPS fidelity=0.9945744642325651\n", - "[15:41:47] Simulation (INFO) - Progress... 30%\n", - "[15:41:47] Simulation (INFO) - Progress... 30%\n", - "[15:41:47] Simulation (INFO) - Progress... 30%\n", - "[15:41:47] MPS (INFO) - MPS size (MiB)=0.018707275390625\n", - "[15:41:47] MPS (INFO) - MPS fidelity=0.9945744642325651\n", - "[15:41:47] Simulation (INFO) - Progress... 31%\n", - "[15:41:47] Simulation (INFO) - Progress... 31%\n", - "[15:41:47] Simulation (INFO) - Progress... 31%\n", - "[15:41:47] MPS (INFO) - MPS size (MiB)=0.019439697265625\n", - "[15:41:47] MPS (INFO) - MPS fidelity=0.9939252776724739\n", - "[15:41:47] Simulation (INFO) - Progress... 31%\n", - "[15:41:47] Simulation (INFO) - Progress... 31%\n", - "[15:41:47] Simulation (INFO) - Progress... 31%\n", - "[15:41:47] MPS (INFO) - MPS size (MiB)=0.021148681640625\n", - "[15:41:47] MPS (INFO) - MPS fidelity=0.9939252776724739\n", - "[15:41:47] Simulation (INFO) - Progress... 32%\n", - "[15:41:47] Simulation (INFO) - Progress... 32%\n", - "[15:41:47] Simulation (INFO) - Progress... 32%\n", - "[15:41:47] MPS (INFO) - MPS size (MiB)=0.02252197265625\n", - "[15:41:47] MPS (INFO) - MPS fidelity=0.9936102094018504\n", - "[15:41:47] Simulation (INFO) - Progress... 32%\n", - "[15:41:47] Simulation (INFO) - Progress... 32%\n", - "[15:41:47] Simulation (INFO) - Progress... 32%\n", - "[15:41:47] MPS (INFO) - MPS size (MiB)=0.02447509765625\n", - "[15:41:47] MPS (INFO) - MPS fidelity=0.9932716193018882\n", - "[15:41:47] Simulation (INFO) - Progress... 
33%\n", - "[15:41:47] Simulation (INFO) - Progress... 33%\n", - "[15:41:47] Simulation (INFO) - Progress... 33%\n", - "[15:41:47] MPS (INFO) - MPS size (MiB)=0.027679443359375\n", - "[15:41:47] MPS (INFO) - MPS fidelity=0.9926992945331796\n", - "[15:41:47] Simulation (INFO) - Progress... 33%\n", - "[15:41:47] Simulation (INFO) - Progress... 33%\n", - "[15:41:47] Simulation (INFO) - Progress... 33%\n", - "[15:41:47] MPS (INFO) - MPS size (MiB)=0.031036376953125\n", - "[15:41:47] MPS (INFO) - MPS fidelity=0.9917770056878091\n", - "[15:41:47] Simulation (INFO) - Progress... 34%\n", - "[15:41:47] Simulation (INFO) - Progress... 34%\n", - "[15:41:47] Simulation (INFO) - Progress... 34%\n", - "[15:41:47] MPS (INFO) - MPS size (MiB)=0.034332275390625\n", - "[15:41:47] MPS (INFO) - MPS fidelity=0.9910186053590768\n", - "[15:41:47] Simulation (INFO) - Progress... 34%\n", - "[15:41:47] Simulation (INFO) - Progress... 34%\n", - "[15:41:47] Simulation (INFO) - Progress... 34%\n", - "[15:41:47] MPS (INFO) - MPS size (MiB)=0.035736083984375\n", - "[15:41:47] MPS (INFO) - MPS fidelity=0.9903708169455654\n", - "[15:41:47] Simulation (INFO) - Progress... 35%\n", - "[15:41:47] Simulation (INFO) - Progress... 35%\n", - "[15:41:47] Simulation (INFO) - Progress... 35%\n", - "[15:41:47] MPS (INFO) - MPS size (MiB)=0.035736083984375\n", - "[15:41:47] MPS (INFO) - MPS fidelity=0.9903708169455654\n", - "[15:41:47] Simulation (INFO) - Progress... 35%\n", - "[15:41:47] Simulation (INFO) - Progress... 35%\n", - "[15:41:47] Simulation (INFO) - Progress... 35%\n", - "[15:41:48] MPS (INFO) - MPS size (MiB)=0.035736083984375\n", - "[15:41:48] MPS (INFO) - MPS fidelity=0.9903708169455654\n", - "[15:41:48] Simulation (INFO) - Progress... 36%\n", - "[15:41:48] Simulation (INFO) - Progress... 36%\n", - "[15:41:48] Simulation (INFO) - Progress... 36%\n", - "[15:41:48] MPS (INFO) - MPS size (MiB)=0.035736083984375\n", - "[15:41:48] MPS (INFO) - MPS fidelity=0.9903708169455652\n", - "[15:41:48] Simulation (INFO) - Progress... 36%\n", - "[15:41:48] Simulation (INFO) - Progress... 36%\n", - "[15:41:48] Simulation (INFO) - Progress... 36%\n", - "[15:41:48] Simulation (INFO) - Progress... 37%\n", - "[15:41:48] MPS (INFO) - MPS size (MiB)=0.035797119140625\n", - "[15:41:48] MPS (INFO) - MPS fidelity=0.9903708169455652\n", - "[15:41:48] Simulation (INFO) - Progress... 37%\n", - "[15:41:48] Simulation (INFO) - Progress... 37%\n", - "[15:41:48] Simulation (INFO) - Progress... 37%\n", - "[15:41:48] MPS (INFO) - MPS size (MiB)=0.035919189453125\n", - "[15:41:48] MPS (INFO) - MPS fidelity=0.9903708169455652\n", - "[15:41:48] Simulation (INFO) - Progress... 37%\n", - "[15:41:48] Simulation (INFO) - Progress... 37%\n", - "[15:41:48] Simulation (INFO) - Progress... 38%\n", - "[15:41:48] MPS (INFO) - MPS size (MiB)=0.035919189453125\n", - "[15:41:48] MPS (INFO) - MPS fidelity=0.9903532709139039\n", - "[15:41:48] Simulation (INFO) - Progress... 38%\n", - "[15:41:48] Simulation (INFO) - Progress... 38%\n", - "[15:41:48] Simulation (INFO) - Progress... 38%\n", - "[15:41:48] MPS (INFO) - MPS size (MiB)=0.036163330078125\n", - "[15:41:48] MPS (INFO) - MPS fidelity=0.9903532709139038\n", - "[15:41:48] Simulation (INFO) - Progress... 38%\n", - "[15:41:48] Simulation (INFO) - Progress... 38%\n", - "[15:41:48] Simulation (INFO) - Progress... 39%\n", - "[15:41:48] MPS (INFO) - MPS size (MiB)=0.036651611328125\n", - "[15:41:48] MPS (INFO) - MPS fidelity=0.9903532709139038\n", - "[15:41:48] Simulation (INFO) - Progress... 
39%\n", - "[15:41:48] Simulation (INFO) - Progress... 39%\n", - "[15:41:48] Simulation (INFO) - Progress... 39%\n", - "[15:41:48] MPS (INFO) - MPS size (MiB)=0.03765869140625\n", - "[15:41:48] MPS (INFO) - MPS fidelity=0.9899182044513357\n", - "[15:41:48] Simulation (INFO) - Progress... 39%\n", - "[15:41:48] Simulation (INFO) - Progress... 40%\n", - "[15:41:48] Simulation (INFO) - Progress... 40%\n", - "[15:41:48] MPS (INFO) - MPS size (MiB)=0.038116455078125\n", - "[15:41:48] MPS (INFO) - MPS fidelity=0.9892830604520672\n", - "[15:41:48] Simulation (INFO) - Progress... 40%\n", - "[15:41:48] Simulation (INFO) - Progress... 40%\n", - "[15:41:48] Simulation (INFO) - Progress... 40%\n", - "[15:41:48] MPS (INFO) - MPS size (MiB)=0.040313720703125\n", - "[15:41:48] MPS (INFO) - MPS fidelity=0.9886878009898008\n", - "[15:41:48] Simulation (INFO) - Progress... 40%\n", - "[15:41:48] Simulation (INFO) - Progress... 41%\n", - "[15:41:48] Simulation (INFO) - Progress... 41%\n", - "[15:41:48] MPS (INFO) - MPS size (MiB)=0.043121337890625\n", - "[15:41:48] MPS (INFO) - MPS fidelity=0.9877332864162025\n", - "[15:41:48] Simulation (INFO) - Progress... 41%\n", - "[15:41:48] Simulation (INFO) - Progress... 41%\n", - "[15:41:48] Simulation (INFO) - Progress... 41%\n", - "[15:41:48] MPS (INFO) - MPS size (MiB)=0.047698974609375\n", - "[15:41:48] MPS (INFO) - MPS fidelity=0.9870492609117136\n", - "[15:41:48] Simulation (INFO) - Progress... 41%\n", - "[15:41:48] Simulation (INFO) - Progress... 42%\n", - "[15:41:48] Simulation (INFO) - Progress... 42%\n", - "[15:41:48] MPS (INFO) - MPS size (MiB)=0.052581787109375\n", - "[15:41:48] MPS (INFO) - MPS fidelity=0.9864155906572986\n", - "[15:41:48] Simulation (INFO) - Progress... 42%\n", - "[15:41:48] Simulation (INFO) - Progress... 42%\n", - "[15:41:48] Simulation (INFO) - Progress... 42%\n", - "[15:41:48] MPS (INFO) - MPS size (MiB)=0.060150146484375\n", - "[15:41:48] MPS (INFO) - MPS fidelity=0.985553172457638\n", - "[15:41:48] Simulation (INFO) - Progress... 42%\n", - "[15:41:48] Simulation (INFO) - Progress... 43%\n", - "[15:41:48] Simulation (INFO) - Progress... 43%\n", - "[15:41:48] MPS (INFO) - MPS size (MiB)=0.066925048828125\n", - "[15:41:48] MPS (INFO) - MPS fidelity=0.9848524485301354\n", - "[15:41:48] Simulation (INFO) - Progress... 43%\n", - "[15:41:48] Simulation (INFO) - Progress... 43%\n", - "[15:41:48] Simulation (INFO) - Progress... 43%\n", - "[15:41:48] MPS (INFO) - MPS size (MiB)=0.068695068359375\n", - "[15:41:48] MPS (INFO) - MPS fidelity=0.9848524485301354\n", - "[15:41:48] Simulation (INFO) - Progress... 43%\n", - "[15:41:48] Simulation (INFO) - Progress... 44%\n", - "[15:41:48] Simulation (INFO) - Progress... 44%\n", - "[15:41:48] MPS (INFO) - MPS size (MiB)=0.068695068359375\n", - "[15:41:48] MPS (INFO) - MPS fidelity=0.9848524485301353\n", - "[15:41:48] Simulation (INFO) - Progress... 44%\n", - "[15:41:48] Simulation (INFO) - Progress... 44%\n", - "[15:41:48] Simulation (INFO) - Progress... 44%\n", - "[15:41:48] MPS (INFO) - MPS size (MiB)=0.068695068359375\n", - "[15:41:48] MPS (INFO) - MPS fidelity=0.9848524485301353\n", - "[15:41:48] Simulation (INFO) - Progress... 44%\n", - "[15:41:48] Simulation (INFO) - Progress... 45%\n", - "[15:41:48] Simulation (INFO) - Progress... 
45%\n", - "[15:41:48] MPS (INFO) - Applying variational optimisation.\n", - "[15:41:48] MPS (INFO) - Fidelity before optimisation=0.9848524485301353\n", - "[15:41:48] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:48] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.9873602516768857\n", - "[15:41:48] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:48] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.9874400577886869\n", - "[15:41:48] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:48] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.9874665951742544\n", - "[15:41:48] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:49] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.987479443857063\n", - "[15:41:49] MPS (INFO) - Final fidelity after optimisation=0.987479443857063\n", - "[15:41:49] MPS (INFO) - MPS size (MiB)=0.068695068359375\n", - "[15:41:49] MPS (INFO) - MPS fidelity=0.987479443857063\n", - "[15:41:49] Simulation (INFO) - Progress... 45%\n", - "[15:41:49] Simulation (INFO) - Progress... 45%\n", - "[15:41:49] Simulation (INFO) - Progress... 45%\n", - "[15:41:49] Simulation (INFO) - Progress... 45%\n", - "[15:41:49] MPS (INFO) - MPS size (MiB)=0.068756103515625\n", - "[15:41:49] MPS (INFO) - MPS fidelity=0.987479443857063\n", - "[15:41:49] Simulation (INFO) - Progress... 46%\n", - "[15:41:49] Simulation (INFO) - Progress... 46%\n", - "[15:41:49] Simulation (INFO) - Progress... 46%\n", - "[15:41:49] Simulation (INFO) - Progress... 46%\n", - "[15:41:49] MPS (INFO) - MPS size (MiB)=0.068878173828125\n", - "[15:41:49] MPS (INFO) - MPS fidelity=0.987479443857063\n", - "[15:41:49] Simulation (INFO) - Progress... 46%\n", - "[15:41:49] Simulation (INFO) - Progress... 46%\n", - "[15:41:49] Simulation (INFO) - Progress... 47%\n", - "[15:41:49] MPS (INFO) - MPS size (MiB)=0.068878173828125\n", - "[15:41:49] MPS (INFO) - MPS fidelity=0.987479443857063\n", - "[15:41:49] Simulation (INFO) - Progress... 47%\n", - "[15:41:49] Simulation (INFO) - Progress... 47%\n", - "[15:41:49] Simulation (INFO) - Progress... 47%\n", - "[15:41:49] Simulation (INFO) - Progress... 47%\n", - "[15:41:49] MPS (INFO) - MPS size (MiB)=0.069122314453125\n", - "[15:41:49] MPS (INFO) - MPS fidelity=0.987479443857063\n", - "[15:41:49] Simulation (INFO) - Progress... 47%\n", - "[15:41:49] Simulation (INFO) - Progress... 48%\n", - "[15:41:49] Simulation (INFO) - Progress... 48%\n", - "[15:41:49] MPS (INFO) - MPS size (MiB)=0.069488525390625\n", - "[15:41:49] MPS (INFO) - MPS fidelity=0.987479443857063\n", - "[15:41:49] Simulation (INFO) - Progress... 48%\n", - "[15:41:49] Simulation (INFO) - Progress... 48%\n", - "[15:41:49] Simulation (INFO) - Progress... 48%\n", - "[15:41:49] MPS (INFO) - Applying variational optimisation.\n", - "[15:41:49] MPS (INFO) - Fidelity before optimisation=0.987479443857063\n", - "[15:41:49] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:49] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.9874794438570587\n", - "[15:41:49] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:49] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.9874794438570632\n", - "[15:41:49] MPS (INFO) - Final fidelity after optimisation=0.9874794438570632\n", - "[15:41:49] MPS (INFO) - MPS size (MiB)=0.069488525390625\n", - "[15:41:49] MPS (INFO) - MPS fidelity=0.9874794438570632\n", - "[15:41:49] Simulation (INFO) - Progress... 
48%\n", - "[15:41:49] Simulation (INFO) - Progress... 49%\n", - "[15:41:49] Simulation (INFO) - Progress... 49%\n", - "[15:41:49] Simulation (INFO) - Progress... 49%\n", - "[15:41:49] MPS (INFO) - MPS size (MiB)=0.069854736328125\n", - "[15:41:49] MPS (INFO) - MPS fidelity=0.9874794438570632\n", - "[15:41:49] Simulation (INFO) - Progress... 49%\n", - "[15:41:49] Simulation (INFO) - Progress... 49%\n", - "[15:41:49] Simulation (INFO) - Progress... 50%\n", - "[15:41:49] MPS (INFO) - MPS size (MiB)=0.070831298828125\n", - "[15:41:49] MPS (INFO) - MPS fidelity=0.9874794438570634\n", - "[15:41:49] Simulation (INFO) - Progress... 50%\n", - "[15:41:49] Simulation (INFO) - Progress... 50%\n", - "[15:41:49] Simulation (INFO) - Progress... 50%\n", - "[15:41:49] MPS (INFO) - MPS size (MiB)=0.070831298828125\n", - "[15:41:49] MPS (INFO) - MPS fidelity=0.9874794438570634\n", - "[15:41:49] Simulation (INFO) - Progress... 50%\n", - "[15:41:49] Simulation (INFO) - Progress... 50%\n", - "[15:41:49] Simulation (INFO) - Progress... 51%\n", - "[15:41:49] MPS (INFO) - MPS size (MiB)=0.070831298828125\n", - "[15:41:49] MPS (INFO) - MPS fidelity=0.9874794438570637\n", - "[15:41:49] Simulation (INFO) - Progress... 51%\n", - "[15:41:49] Simulation (INFO) - Progress... 51%\n", - "[15:41:49] Simulation (INFO) - Progress... 51%\n", - "[15:41:49] Simulation (INFO) - Progress... 51%\n", - "[15:41:49] MPS (INFO) - MPS size (MiB)=0.071319580078125\n", - "[15:41:49] MPS (INFO) - MPS fidelity=0.9874794438570637\n", - "[15:41:49] Simulation (INFO) - Progress... 51%\n", - "[15:41:49] Simulation (INFO) - Progress... 52%\n", - "[15:41:49] Simulation (INFO) - Progress... 52%\n", - "[15:41:49] MPS (INFO) - MPS size (MiB)=0.072784423828125\n", - "[15:41:49] MPS (INFO) - MPS fidelity=0.9874794438570637\n", - "[15:41:49] Simulation (INFO) - Progress... 52%\n", - "[15:41:49] Simulation (INFO) - Progress... 52%\n", - "[15:41:49] Simulation (INFO) - Progress... 52%\n", - "[15:41:49] MPS (INFO) - MPS size (MiB)=0.072540283203125\n", - "[15:41:49] MPS (INFO) - MPS fidelity=0.9866330729559818\n", - "[15:41:49] Simulation (INFO) - Progress... 52%\n", - "[15:41:49] Simulation (INFO) - Progress... 53%\n", - "[15:41:49] Simulation (INFO) - Progress... 53%\n", - "[15:41:49] MPS (INFO) - MPS size (MiB)=0.073211669921875\n", - "[15:41:49] MPS (INFO) - MPS fidelity=0.9866330729559818\n", - "[15:41:49] Simulation (INFO) - Progress... 53%\n", - "[15:41:49] Simulation (INFO) - Progress... 53%\n", - "[15:41:49] Simulation (INFO) - Progress... 53%\n", - "[15:41:49] MPS (INFO) - MPS size (MiB)=0.073822021484375\n", - "[15:41:49] MPS (INFO) - MPS fidelity=0.9866330729559818\n", - "[15:41:49] Simulation (INFO) - Progress... 53%\n", - "[15:41:49] Simulation (INFO) - Progress... 54%\n", - "[15:41:49] Simulation (INFO) - Progress... 54%\n", - "[15:41:49] MPS (INFO) - MPS size (MiB)=0.074920654296875\n", - "[15:41:49] MPS (INFO) - MPS fidelity=0.9866330729559817\n", - "[15:41:49] Simulation (INFO) - Progress... 54%\n", - "[15:41:49] Simulation (INFO) - Progress... 54%\n", - "[15:41:49] Simulation (INFO) - Progress... 54%\n", - "[15:41:49] MPS (INFO) - MPS size (MiB)=0.074920654296875\n", - "[15:41:49] MPS (INFO) - MPS fidelity=0.985728058386732\n", - "[15:41:49] Simulation (INFO) - Progress... 54%\n", - "[15:41:49] Simulation (INFO) - Progress... 55%\n", - "[15:41:49] Simulation (INFO) - Progress... 
55%\n", - "[15:41:49] MPS (INFO) - MPS size (MiB)=0.076507568359375\n", - "[15:41:49] MPS (INFO) - MPS fidelity=0.9848905743017655\n", - "[15:41:49] Simulation (INFO) - Progress... 55%\n", - "[15:41:49] Simulation (INFO) - Progress... 55%\n", - "[15:41:49] Simulation (INFO) - Progress... 55%\n", - "[15:41:49] MPS (INFO) - MPS size (MiB)=0.0782470703125\n", - "[15:41:49] MPS (INFO) - MPS fidelity=0.984200933510651\n", - "[15:41:49] Simulation (INFO) - Progress... 55%\n", - "[15:41:49] Simulation (INFO) - Progress... 56%\n", - "[15:41:49] Simulation (INFO) - Progress... 56%\n", - "[15:41:49] MPS (INFO) - MPS size (MiB)=0.08209228515625\n", - "[15:41:49] MPS (INFO) - MPS fidelity=0.9833786711539604\n", - "[15:41:49] Simulation (INFO) - Progress... 56%\n", - "[15:41:49] Simulation (INFO) - Progress... 56%\n", - "[15:41:49] Simulation (INFO) - Progress... 56%\n", - "[15:41:49] MPS (INFO) - MPS size (MiB)=0.08514404296875\n", - "[15:41:49] MPS (INFO) - MPS fidelity=0.9829054162238393\n", - "[15:41:49] Simulation (INFO) - Progress... 56%\n", - "[15:41:49] Simulation (INFO) - Progress... 57%\n", - "[15:41:49] Simulation (INFO) - Progress... 57%\n", - "[15:41:49] MPS (INFO) - MPS size (MiB)=0.093994140625\n", - "[15:41:49] MPS (INFO) - MPS fidelity=0.9823548457232609\n", - "[15:41:49] Simulation (INFO) - Progress... 57%\n", - "[15:41:49] Simulation (INFO) - Progress... 57%\n", - "[15:41:49] Simulation (INFO) - Progress... 57%\n", - "[15:41:49] MPS (INFO) - MPS size (MiB)=0.10003662109375\n", - "[15:41:49] MPS (INFO) - MPS fidelity=0.9814914262501793\n", - "[15:41:49] Simulation (INFO) - Progress... 57%\n", - "[15:41:49] Simulation (INFO) - Progress... 58%\n", - "[15:41:49] Simulation (INFO) - Progress... 58%\n", - "[15:41:49] MPS (INFO) - MPS size (MiB)=0.116302490234375\n", - "[15:41:49] MPS (INFO) - MPS fidelity=0.9806647381641405\n", - "[15:41:49] Simulation (INFO) - Progress... 58%\n", - "[15:41:49] Simulation (INFO) - Progress... 58%\n", - "[15:41:49] Simulation (INFO) - Progress... 58%\n", - "[15:41:49] MPS (INFO) - MPS size (MiB)=0.118499755859375\n", - "[15:41:49] MPS (INFO) - MPS fidelity=0.9797474281526156\n", - "[15:41:50] Simulation (INFO) - Progress... 58%\n", - "[15:41:50] Simulation (INFO) - Progress... 59%\n", - "[15:41:50] Simulation (INFO) - Progress... 59%\n", - "[15:41:50] MPS (INFO) - MPS size (MiB)=0.137542724609375\n", - "[15:41:50] MPS (INFO) - MPS fidelity=0.9792041496059278\n", - "[15:41:50] Simulation (INFO) - Progress... 59%\n", - "[15:41:50] Simulation (INFO) - Progress... 59%\n", - "[15:41:50] Simulation (INFO) - Progress... 59%\n", - "[15:41:50] MPS (INFO) - MPS size (MiB)=0.150360107421875\n", - "[15:41:50] MPS (INFO) - MPS fidelity=0.9784485799686532\n", - "[15:41:50] Simulation (INFO) - Progress... 60%\n", - "[15:41:50] Simulation (INFO) - Progress... 60%\n", - "[15:41:50] Simulation (INFO) - Progress... 60%\n", - "[15:41:50] MPS (INFO) - MPS size (MiB)=0.175567626953125\n", - "[15:41:50] MPS (INFO) - MPS fidelity=0.977661825354294\n", - "[15:41:50] Simulation (INFO) - Progress... 60%\n", - "[15:41:50] Simulation (INFO) - Progress... 60%\n", - "[15:41:50] Simulation (INFO) - Progress... 60%\n", - "[15:41:50] MPS (INFO) - MPS size (MiB)=0.192779541015625\n", - "[15:41:50] MPS (INFO) - MPS fidelity=0.9768742140627227\n", - "[15:41:50] Simulation (INFO) - Progress... 61%\n", - "[15:41:50] Simulation (INFO) - Progress... 61%\n", - "[15:41:50] Simulation (INFO) - Progress... 
61%\n", - "[15:41:50] MPS (INFO) - MPS size (MiB)=0.236358642578125\n", - "[15:41:50] MPS (INFO) - MPS fidelity=0.9761008623122066\n", - "[15:41:50] Simulation (INFO) - Progress... 61%\n", - "[15:41:50] Simulation (INFO) - Progress... 61%\n", - "[15:41:50] Simulation (INFO) - Progress... 61%\n", - "[15:41:50] MPS (INFO) - MPS size (MiB)=0.24725341796875\n", - "[15:41:50] MPS (INFO) - MPS fidelity=0.9752497693902976\n", - "[15:41:50] Simulation (INFO) - Progress... 62%\n", - "[15:41:50] Simulation (INFO) - Progress... 62%\n", - "[15:41:50] Simulation (INFO) - Progress... 62%\n", - "[15:41:50] MPS (INFO) - MPS size (MiB)=0.277008056640625\n", - "[15:41:50] MPS (INFO) - MPS fidelity=0.9744546401208022\n", - "[15:41:50] Simulation (INFO) - Progress... 62%\n", - "[15:41:50] Simulation (INFO) - Progress... 62%\n", - "[15:41:50] Simulation (INFO) - Progress... 62%\n", - "[15:41:50] MPS (INFO) - MPS size (MiB)=0.277008056640625\n", - "[15:41:50] MPS (INFO) - MPS fidelity=0.9744546401208022\n", - "[15:41:50] Simulation (INFO) - Progress... 63%\n", - "[15:41:50] Simulation (INFO) - Progress... 63%\n", - "[15:41:50] Simulation (INFO) - Progress... 63%\n", - "[15:41:50] MPS (INFO) - MPS size (MiB)=0.284820556640625\n", - "[15:41:50] MPS (INFO) - MPS fidelity=0.9744546401208022\n", - "[15:41:50] Simulation (INFO) - Progress... 63%\n", - "[15:41:50] Simulation (INFO) - Progress... 63%\n", - "[15:41:50] Simulation (INFO) - Progress... 63%\n", - "[15:41:50] MPS (INFO) - MPS size (MiB)=0.284820556640625\n", - "[15:41:50] MPS (INFO) - MPS fidelity=0.9744546401208022\n", - "[15:41:50] Simulation (INFO) - Progress... 64%\n", - "[15:41:50] Simulation (INFO) - Progress... 64%\n", - "[15:41:50] Simulation (INFO) - Progress... 64%\n", - "[15:41:50] MPS (INFO) - MPS size (MiB)=0.284820556640625\n", - "[15:41:50] MPS (INFO) - MPS fidelity=0.9744546401208022\n", - "[15:41:50] Simulation (INFO) - Progress... 64%\n", - "[15:41:50] Simulation (INFO) - Progress... 64%\n", - "[15:41:50] Simulation (INFO) - Progress... 64%\n", - "[15:41:50] MPS (INFO) - MPS size (MiB)=0.284820556640625\n", - "[15:41:50] MPS (INFO) - MPS fidelity=0.9744546401208021\n", - "[15:41:50] Simulation (INFO) - Progress... 65%\n", - "[15:41:50] Simulation (INFO) - Progress... 65%\n", - "[15:41:50] Simulation (INFO) - Progress... 65%\n", - "[15:41:50] MPS (INFO) - MPS size (MiB)=0.284820556640625\n", - "[15:41:50] MPS (INFO) - MPS fidelity=0.9744546401208021\n", - "[15:41:50] Simulation (INFO) - Progress... 65%\n", - "[15:41:50] Simulation (INFO) - Progress... 65%\n", - "[15:41:50] Simulation (INFO) - Progress... 65%\n", - "[15:41:50] MPS (INFO) - MPS size (MiB)=0.284820556640625\n", - "[15:41:50] MPS (INFO) - MPS fidelity=0.9744546401208023\n", - "[15:41:50] Simulation (INFO) - Progress... 66%\n", - "[15:41:50] Simulation (INFO) - Progress... 66%\n", - "[15:41:50] Simulation (INFO) - Progress... 66%\n", - "[15:41:50] MPS (INFO) - MPS size (MiB)=0.284820556640625\n", - "[15:41:50] MPS (INFO) - MPS fidelity=0.9744546401208023\n", - "[15:41:50] Simulation (INFO) - Progress... 66%\n", - "[15:41:50] MPS (INFO) - MPS size (MiB)=0.284454345703125\n", - "[15:41:50] MPS (INFO) - MPS fidelity=0.9736774416720088\n", - "[15:41:50] Simulation (INFO) - Progress... 66%\n", - "[15:41:50] Simulation (INFO) - Progress... 66%\n", - "[15:41:50] Simulation (INFO) - Progress... 
67%\n", - "[15:41:50] MPS (INFO) - Applying variational optimisation.\n", - "[15:41:50] MPS (INFO) - Fidelity before optimisation=0.9736774416720088\n", - "[15:41:50] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:50] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.9822122042244481\n", - "[15:41:50] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:50] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.9827583003913757\n", - "[15:41:50] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:50] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.9829168903407746\n", - "[15:41:50] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:51] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.982979344248083\n", - "[15:41:51] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:51] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.9830095477136648\n", - "[15:41:51] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:51] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.9830260810282695\n", - "[15:41:51] MPS (INFO) - Final fidelity after optimisation=0.9830260810282695\n", - "[15:41:51] MPS (INFO) - MPS size (MiB)=0.284454345703125\n", - "[15:41:51] MPS (INFO) - MPS fidelity=0.9825960930899819\n", - "[15:41:51] Simulation (INFO) - Progress... 67%\n", - "[15:41:51] Simulation (INFO) - Progress... 67%\n", - "[15:41:51] Simulation (INFO) - Progress... 67%\n", - "[15:41:51] MPS (INFO) - MPS size (MiB)=0.285736083984375\n", - "[15:41:51] MPS (INFO) - MPS fidelity=0.9819799784936092\n", - "[15:41:51] Simulation (INFO) - Progress... 67%\n", - "[15:41:51] Simulation (INFO) - Progress... 67%\n", - "[15:41:51] Simulation (INFO) - Progress... 68%\n", - "[15:41:51] MPS (INFO) - MPS size (MiB)=0.288055419921875\n", - "[15:41:51] MPS (INFO) - MPS fidelity=0.9811857364040928\n", - "[15:41:51] Simulation (INFO) - Progress... 68%\n", - "[15:41:51] Simulation (INFO) - Progress... 68%\n", - "[15:41:51] Simulation (INFO) - Progress... 68%\n", - "[15:41:51] MPS (INFO) - MPS size (MiB)=0.2901611328125\n", - "[15:41:51] MPS (INFO) - MPS fidelity=0.9805589329022365\n", - "[15:41:51] Simulation (INFO) - Progress... 68%\n", - "[15:41:51] Simulation (INFO) - Progress... 68%\n", - "[15:41:51] Simulation (INFO) - Progress... 69%\n", - "[15:41:51] MPS (INFO) - MPS size (MiB)=0.29742431640625\n", - "[15:41:51] MPS (INFO) - MPS fidelity=0.979774777914149\n", - "[15:41:51] Simulation (INFO) - Progress... 69%\n", - "[15:41:51] Simulation (INFO) - Progress... 69%\n", - "[15:41:51] Simulation (INFO) - Progress... 69%\n", - "[15:41:51] MPS (INFO) - MPS size (MiB)=0.3004150390625\n", - "[15:41:51] MPS (INFO) - MPS fidelity=0.9790302714655779\n", - "[15:41:51] Simulation (INFO) - Progress... 69%\n", - "[15:41:51] Simulation (INFO) - Progress... 70%\n", - "[15:41:51] Simulation (INFO) - Progress... 70%\n", - "[15:41:51] MPS (INFO) - MPS size (MiB)=0.3072509765625\n", - "[15:41:51] MPS (INFO) - MPS fidelity=0.9781050017075105\n", - "[15:41:51] Simulation (INFO) - Progress... 70%\n", - "[15:41:51] Simulation (INFO) - Progress... 70%\n", - "[15:41:51] Simulation (INFO) - Progress... 70%\n", - "[15:41:51] MPS (INFO) - MPS size (MiB)=0.32537841796875\n", - "[15:41:51] MPS (INFO) - MPS fidelity=0.9772629361326794\n", - "[15:41:51] Simulation (INFO) - Progress... 70%\n", - "[15:41:51] Simulation (INFO) - Progress... 71%\n", - "[15:41:51] Simulation (INFO) - Progress... 
71%\n", - "[15:41:51] MPS (INFO) - MPS size (MiB)=0.349822998046875\n", - "[15:41:51] MPS (INFO) - MPS fidelity=0.9763363776760228\n", - "[15:41:51] Simulation (INFO) - Progress... 71%\n", - "[15:41:51] Simulation (INFO) - Progress... 71%\n", - "[15:41:51] Simulation (INFO) - Progress... 71%\n", - "[15:41:51] MPS (INFO) - MPS size (MiB)=0.358062744140625\n", - "[15:41:51] MPS (INFO) - MPS fidelity=0.9754556940251295\n", - "[15:41:51] Simulation (INFO) - Progress... 71%\n", - "[15:41:51] Simulation (INFO) - Progress... 72%\n", - "[15:41:51] Simulation (INFO) - Progress... 72%\n", - "[15:41:51] MPS (INFO) - MPS size (MiB)=0.365570068359375\n", - "[15:41:51] MPS (INFO) - MPS fidelity=0.9745025792245486\n", - "[15:41:51] Simulation (INFO) - Progress... 72%\n", - "[15:41:51] Simulation (INFO) - Progress... 72%\n", - "[15:41:51] Simulation (INFO) - Progress... 72%\n", - "[15:41:51] MPS (INFO) - MPS size (MiB)=0.365570068359375\n", - "[15:41:51] MPS (INFO) - MPS fidelity=0.9745025792245484\n", - "[15:41:51] Simulation (INFO) - Progress... 72%\n", - "[15:41:51] Simulation (INFO) - Progress... 73%\n", - "[15:41:51] Simulation (INFO) - Progress... 73%\n", - "[15:41:51] MPS (INFO) - MPS size (MiB)=0.365570068359375\n", - "[15:41:51] MPS (INFO) - MPS fidelity=0.9745025792245484\n", - "[15:41:51] Simulation (INFO) - Progress... 73%\n", - "[15:41:51] MPS (INFO) - MPS size (MiB)=0.365570068359375\n", - "[15:41:51] MPS (INFO) - MPS fidelity=0.9745025792245482\n", - "[15:41:51] Simulation (INFO) - Progress... 73%\n", - "[15:41:51] Simulation (INFO) - Progress... 73%\n", - "[15:41:51] Simulation (INFO) - Progress... 73%\n", - "[15:41:51] MPS (INFO) - MPS size (MiB)=0.365936279296875\n", - "[15:41:51] MPS (INFO) - MPS fidelity=0.9745025792245482\n", - "[15:41:51] Simulation (INFO) - Progress... 74%\n", - "[15:41:51] Simulation (INFO) - Progress... 74%\n", - "[15:41:51] Simulation (INFO) - Progress... 74%\n", - "[15:41:51] MPS (INFO) - MPS size (MiB)=0.366973876953125\n", - "[15:41:51] MPS (INFO) - MPS fidelity=0.9739737340007235\n", - "[15:41:51] Simulation (INFO) - Progress... 74%\n", - "[15:41:51] Simulation (INFO) - Progress... 74%\n", - "[15:41:51] Simulation (INFO) - Progress... 74%\n", - "[15:41:51] MPS (INFO) - MPS size (MiB)=0.369415283203125\n", - "[15:41:51] MPS (INFO) - MPS fidelity=0.9734939221919511\n", - "[15:41:51] Simulation (INFO) - Progress... 75%\n", - "[15:41:51] Simulation (INFO) - Progress... 75%\n", - "[15:41:51] Simulation (INFO) - Progress... 75%\n", - "[15:41:51] MPS (INFO) - MPS size (MiB)=0.374176025390625\n", - "[15:41:51] MPS (INFO) - MPS fidelity=0.9728701873772134\n", - "[15:41:51] Simulation (INFO) - Progress... 75%\n", - "[15:41:51] Simulation (INFO) - Progress... 75%\n", - "[15:41:51] Simulation (INFO) - Progress... 75%\n", - "[15:41:51] MPS (INFO) - MPS size (MiB)=0.378570556640625\n", - "[15:41:51] MPS (INFO) - MPS fidelity=0.9720376362143338\n", - "[15:41:51] Simulation (INFO) - Progress... 76%\n", - "[15:41:51] Simulation (INFO) - Progress... 76%\n", - "[15:41:51] Simulation (INFO) - Progress... 76%\n", - "[15:41:51] MPS (INFO) - MPS size (MiB)=0.383453369140625\n", - "[15:41:51] MPS (INFO) - MPS fidelity=0.971167337001675\n", - "[15:41:51] Simulation (INFO) - Progress... 76%\n", - "[15:41:51] Simulation (INFO) - Progress... 76%\n", - "[15:41:51] Simulation (INFO) - Progress... 76%\n", - "[15:41:51] MPS (INFO) - MPS size (MiB)=0.39910888671875\n", - "[15:41:51] MPS (INFO) - MPS fidelity=0.9703795628080001\n", - "[15:41:51] Simulation (INFO) - Progress... 
77%\n", - "[15:41:51] Simulation (INFO) - Progress... 77%\n", - "[15:41:51] Simulation (INFO) - Progress... 77%\n", - "[15:41:51] MPS (INFO) - MPS size (MiB)=0.43072509765625\n", - "[15:41:51] MPS (INFO) - MPS fidelity=0.9695212202086415\n", - "[15:41:51] Simulation (INFO) - Progress... 77%\n", - "[15:41:51] Simulation (INFO) - Progress... 77%\n", - "[15:41:51] Simulation (INFO) - Progress... 77%\n", - "[15:41:52] MPS (INFO) - MPS size (MiB)=0.490478515625\n", - "[15:41:52] MPS (INFO) - MPS fidelity=0.9686850339371813\n", - "[15:41:52] Simulation (INFO) - Progress... 78%\n", - "[15:41:52] Simulation (INFO) - Progress... 78%\n", - "[15:41:52] Simulation (INFO) - Progress... 78%\n", - "[15:41:52] MPS (INFO) - MPS size (MiB)=0.5670166015625\n", - "[15:41:52] MPS (INFO) - MPS fidelity=0.9678550687968107\n", - "[15:41:52] Simulation (INFO) - Progress... 78%\n", - "[15:41:52] Simulation (INFO) - Progress... 78%\n", - "[15:41:52] Simulation (INFO) - Progress... 78%\n", - "[15:41:52] MPS (INFO) - MPS size (MiB)=0.61614990234375\n", - "[15:41:52] MPS (INFO) - MPS fidelity=0.9669360842897095\n", - "[15:41:52] Simulation (INFO) - Progress... 79%\n", - "[15:41:52] Simulation (INFO) - Progress... 79%\n", - "[15:41:52] Simulation (INFO) - Progress... 79%\n", - "[15:41:52] MPS (INFO) - MPS size (MiB)=0.64251708984375\n", - "[15:41:52] MPS (INFO) - MPS fidelity=0.9660548082975919\n", - "[15:41:52] Simulation (INFO) - Progress... 79%\n", - "[15:41:52] MPS (INFO) - MPS size (MiB)=0.64251708984375\n", - "[15:41:52] MPS (INFO) - MPS fidelity=0.9660548082975919\n", - "[15:41:52] Simulation (INFO) - Progress... 79%\n", - "[15:41:52] Simulation (INFO) - Progress... 80%\n", - "[15:41:52] Simulation (INFO) - Progress... 80%\n", - "[15:41:52] Simulation (INFO) - Progress... 80%\n", - "[15:41:52] MPS (INFO) - MPS size (MiB)=0.64251708984375\n", - "[15:41:52] MPS (INFO) - MPS fidelity=0.9660548082975922\n", - "[15:41:52] Simulation (INFO) - Progress... 80%\n", - "[15:41:52] Simulation (INFO) - Progress... 80%\n", - "[15:41:52] Simulation (INFO) - Progress... 80%\n", - "[15:41:52] MPS (INFO) - MPS size (MiB)=0.64251708984375\n", - "[15:41:52] MPS (INFO) - MPS fidelity=0.9660548082975922\n", - "[15:41:52] Simulation (INFO) - Progress... 81%\n", - "[15:41:52] Simulation (INFO) - Progress... 81%\n", - "[15:41:52] Simulation (INFO) - Progress... 81%\n", - "[15:41:52] Simulation (INFO) - Progress... 81%\n", - "[15:41:52] MPS (INFO) - MPS size (MiB)=0.64251708984375\n", - "[15:41:52] MPS (INFO) - MPS fidelity=0.9660548082975922\n", - "[15:41:52] Simulation (INFO) - Progress... 81%\n", - "[15:41:52] Simulation (INFO) - Progress... 81%\n", - "[15:41:52] Simulation (INFO) - Progress... 82%\n", - "[15:41:52] MPS (INFO) - Applying variational optimisation.\n", - "[15:41:52] MPS (INFO) - Fidelity before optimisation=0.9660548082975922\n", - "[15:41:52] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:52] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.9714782872349863\n", - "[15:41:52] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:52] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.9716864883910468\n", - "[15:41:52] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:52] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.9717552987705841\n", - "[15:41:52] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:53] MPS (INFO) - Optimisation sweep completed. 
Current fidelity=0.9717903497055657\n", - "[15:41:53] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:53] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.9718115474633439\n", - "[15:41:53] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:53] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.9718256606609913\n", - "[15:41:53] MPS (INFO) - Final fidelity after optimisation=0.9718256606609913\n", - "[15:41:53] MPS (INFO) - MPS size (MiB)=0.64251708984375\n", - "[15:41:53] MPS (INFO) - MPS fidelity=0.9718256606609913\n", - "[15:41:53] Simulation (INFO) - Progress... 82%\n", - "[15:41:53] Simulation (INFO) - Progress... 82%\n", - "[15:41:53] Simulation (INFO) - Progress... 82%\n", - "[15:41:53] MPS (INFO) - MPS size (MiB)=0.64251708984375\n", - "[15:41:53] MPS (INFO) - MPS fidelity=0.9718256606609913\n", - "[15:41:53] Simulation (INFO) - Progress... 82%\n", - "[15:41:53] Simulation (INFO) - Progress... 82%\n", - "[15:41:53] Simulation (INFO) - Progress... 83%\n", - "[15:41:53] Simulation (INFO) - Progress... 83%\n", - "[15:41:53] MPS (INFO) - MPS size (MiB)=0.645721435546875\n", - "[15:41:53] MPS (INFO) - MPS fidelity=0.971304662959029\n", - "[15:41:53] Simulation (INFO) - Progress... 83%\n", - "[15:41:53] Simulation (INFO) - Progress... 83%\n", - "[15:41:53] Simulation (INFO) - Progress... 83%\n", - "[15:41:53] MPS (INFO) - MPS size (MiB)=0.645721435546875\n", - "[15:41:53] MPS (INFO) - MPS fidelity=0.971304662959029\n", - "[15:41:53] Simulation (INFO) - Progress... 83%\n", - "[15:41:53] Simulation (INFO) - Progress... 84%\n", - "[15:41:53] Simulation (INFO) - Progress... 84%\n", - "[15:41:53] MPS (INFO) - MPS size (MiB)=0.645721435546875\n", - "[15:41:53] MPS (INFO) - MPS fidelity=0.9713046629590292\n", - "[15:41:53] Simulation (INFO) - Progress... 84%\n", - "[15:41:53] Simulation (INFO) - Progress... 84%\n", - "[15:41:53] Simulation (INFO) - Progress... 84%\n", - "[15:41:53] MPS (INFO) - MPS size (MiB)=0.645721435546875\n", - "[15:41:53] MPS (INFO) - MPS fidelity=0.9713046629590292\n", - "[15:41:53] Simulation (INFO) - Progress... 84%\n", - "[15:41:53] Simulation (INFO) - Progress... 85%\n", - "[15:41:53] Simulation (INFO) - Progress... 85%\n", - "[15:41:53] Simulation (INFO) - Progress... 85%\n", - "[15:41:53] MPS (INFO) - MPS size (MiB)=0.65234375\n", - "[15:41:53] MPS (INFO) - MPS fidelity=0.9705519636179583\n", - "[15:41:53] Simulation (INFO) - Progress... 85%\n", - "[15:41:53] Simulation (INFO) - Progress... 85%\n", - "[15:41:53] Simulation (INFO) - Progress... 85%\n", - "[15:41:53] MPS (INFO) - MPS size (MiB)=0.6531982421875\n", - "[15:41:53] MPS (INFO) - MPS fidelity=0.9705519636179583\n", - "[15:41:53] Simulation (INFO) - Progress... 86%\n", - "[15:41:53] Simulation (INFO) - Progress... 86%\n", - "[15:41:53] Simulation (INFO) - Progress... 86%\n", - "[15:41:53] MPS (INFO) - MPS size (MiB)=0.6531982421875\n", - "[15:41:53] MPS (INFO) - MPS fidelity=0.9705519636179583\n", - "[15:41:53] Simulation (INFO) - Progress... 86%\n", - "[15:41:53] Simulation (INFO) - Progress... 86%\n", - "[15:41:53] Simulation (INFO) - Progress... 86%\n", - "[15:41:53] MPS (INFO) - Applying variational optimisation.\n", - "[15:41:53] MPS (INFO) - Fidelity before optimisation=0.9705519636179583\n", - "[15:41:53] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:53] MPS (INFO) - Optimisation sweep completed. 
Current fidelity=0.9710393809351289\n", - "[15:41:53] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:54] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.9710417093966089\n", - "[15:41:54] MPS (INFO) - Final fidelity after optimisation=0.9710417093966089\n", - "[15:41:54] MPS (INFO) - MPS size (MiB)=0.6531982421875\n", - "[15:41:54] MPS (INFO) - MPS fidelity=0.9710417093966089\n", - "[15:41:54] Simulation (INFO) - Progress... 87%\n", - "[15:41:54] Simulation (INFO) - Progress... 87%\n", - "[15:41:54] Simulation (INFO) - Progress... 87%\n", - "[15:41:54] MPS (INFO) - MPS size (MiB)=0.6531982421875\n", - "[15:41:54] MPS (INFO) - MPS fidelity=0.9710417093966089\n", - "[15:41:54] Simulation (INFO) - Progress... 87%\n", - "[15:41:54] Simulation (INFO) - Progress... 87%\n", - "[15:41:54] Simulation (INFO) - Progress... 87%\n", - "[15:41:54] Simulation (INFO) - Progress... 88%\n", - "[15:41:54] MPS (INFO) - MPS size (MiB)=0.663360595703125\n", - "[15:41:54] MPS (INFO) - MPS fidelity=0.9703316436673765\n", - "[15:41:54] Simulation (INFO) - Progress... 88%\n", - "[15:41:54] Simulation (INFO) - Progress... 88%\n", - "[15:41:54] Simulation (INFO) - Progress... 88%\n", - "[15:41:54] MPS (INFO) - MPS size (MiB)=0.6771240234375\n", - "[15:41:54] MPS (INFO) - MPS fidelity=0.9697826685947312\n", - "[15:41:54] Simulation (INFO) - Progress... 88%\n", - "[15:41:54] Simulation (INFO) - Progress... 88%\n", - "[15:41:54] Simulation (INFO) - Progress... 89%\n", - "[15:41:54] MPS (INFO) - MPS size (MiB)=0.6890869140625\n", - "[15:41:54] MPS (INFO) - MPS fidelity=0.9688822088585105\n", - "[15:41:54] Simulation (INFO) - Progress... 89%\n", - "[15:41:54] Simulation (INFO) - Progress... 89%\n", - "[15:41:54] Simulation (INFO) - Progress... 89%\n", - "[15:41:54] MPS (INFO) - MPS size (MiB)=0.7198486328125\n", - "[15:41:54] MPS (INFO) - MPS fidelity=0.9681016107658179\n", - "[15:41:54] Simulation (INFO) - Progress... 89%\n", - "[15:41:54] Simulation (INFO) - Progress... 90%\n", - "[15:41:54] Simulation (INFO) - Progress... 90%\n", - "[15:41:54] MPS (INFO) - MPS size (MiB)=0.732025146484375\n", - "[15:41:54] MPS (INFO) - MPS fidelity=0.967139989859211\n", - "[15:41:54] Simulation (INFO) - Progress... 90%\n", - "[15:41:54] Simulation (INFO) - Progress... 90%\n", - "[15:41:54] Simulation (INFO) - Progress... 90%\n", - "[15:41:54] MPS (INFO) - MPS size (MiB)=0.786224365234375\n", - "[15:41:54] MPS (INFO) - MPS fidelity=0.9667532538346312\n", - "[15:41:54] Simulation (INFO) - Progress... 90%\n", - "[15:41:54] Simulation (INFO) - Progress... 91%\n", - "[15:41:54] Simulation (INFO) - Progress... 91%\n", - "[15:41:54] MPS (INFO) - MPS size (MiB)=0.805267333984375\n", - "[15:41:54] MPS (INFO) - MPS fidelity=0.9657875697333652\n", - "[15:41:54] Simulation (INFO) - Progress... 91%\n", - "[15:41:54] Simulation (INFO) - Progress... 91%\n", - "[15:41:54] Simulation (INFO) - Progress... 91%\n", - "[15:41:54] MPS (INFO) - MPS size (MiB)=0.870452880859375\n", - "[15:41:54] MPS (INFO) - MPS fidelity=0.9649987228797965\n", - "[15:41:54] Simulation (INFO) - Progress... 91%\n", - "[15:41:54] Simulation (INFO) - Progress... 92%\n", - "[15:41:54] Simulation (INFO) - Progress... 92%\n", - "[15:41:54] MPS (INFO) - MPS size (MiB)=0.927581787109375\n", - "[15:41:54] MPS (INFO) - MPS fidelity=0.9641126521361515\n", - "[15:41:54] Simulation (INFO) - Progress... 92%\n", - "[15:41:54] Simulation (INFO) - Progress... 92%\n", - "[15:41:54] Simulation (INFO) - Progress... 
92%\n", - "[15:41:54] MPS (INFO) - MPS size (MiB)=1.066741943359375\n", - "[15:41:54] MPS (INFO) - MPS fidelity=0.9635105846805408\n", - "[15:41:54] Simulation (INFO) - Progress... 92%\n", - "[15:41:54] Simulation (INFO) - Progress... 93%\n", - "[15:41:54] Simulation (INFO) - Progress... 93%\n", - "[15:41:54] MPS (INFO) - MPS size (MiB)=1.15728759765625\n", - "[15:41:54] MPS (INFO) - MPS fidelity=0.962589075282592\n", - "[15:41:54] Simulation (INFO) - Progress... 93%\n", - "[15:41:54] Simulation (INFO) - Progress... 93%\n", - "[15:41:54] Simulation (INFO) - Progress... 93%\n", - "[15:41:54] MPS (INFO) - MPS size (MiB)=1.43927001953125\n", - "[15:41:54] MPS (INFO) - MPS fidelity=0.9617602212979602\n", - "[15:41:54] Simulation (INFO) - Progress... 93%\n", - "[15:41:54] MPS (INFO) - MPS size (MiB)=1.54986572265625\n", - "[15:41:54] MPS (INFO) - MPS fidelity=0.9608510935810075\n", - "[15:41:54] Simulation (INFO) - Progress... 94%\n", - "[15:41:54] MPS (INFO) - MPS size (MiB)=1.54986572265625\n", - "[15:41:54] MPS (INFO) - MPS fidelity=0.9608510935810075\n", - "[15:41:54] Simulation (INFO) - Progress... 94%\n", - "[15:41:54] Simulation (INFO) - Progress... 94%\n", - "[15:41:54] Simulation (INFO) - Progress... 94%\n", - "[15:41:54] MPS (INFO) - MPS size (MiB)=1.54986572265625\n", - "[15:41:54] MPS (INFO) - MPS fidelity=0.9600320773213169\n", - "[15:41:54] Simulation (INFO) - Progress... 94%\n", - "[15:41:54] Simulation (INFO) - Progress... 94%\n", - "[15:41:54] Simulation (INFO) - Progress... 95%\n", - "[15:41:54] MPS (INFO) - Applying variational optimisation.\n", - "[15:41:54] MPS (INFO) - Fidelity before optimisation=0.9600320773213169\n", - "[15:41:54] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:54] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.9659019975820374\n", - "[15:41:54] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:54] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.9661427864728673\n", - "[15:41:54] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:55] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.9662148782989015\n", - "[15:41:55] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:55] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.9662495844052902\n", - "[15:41:55] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:55] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.9662703863336176\n", - "[15:41:55] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:55] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.9662844523829522\n", - "[15:41:55] MPS (INFO) - Final fidelity after optimisation=0.9662844523829522\n", - "[15:41:55] MPS (INFO) - MPS size (MiB)=1.55718994140625\n", - "[15:41:55] MPS (INFO) - MPS fidelity=0.9653257816354918\n", - "[15:41:55] Simulation (INFO) - Progress... 95%\n", - "[15:41:55] Simulation (INFO) - Progress... 95%\n", - "[15:41:55] Simulation (INFO) - Progress... 95%\n", - "[15:41:55] MPS (INFO) - MPS size (MiB)=1.58184814453125\n", - "[15:41:55] MPS (INFO) - MPS fidelity=0.9644004614054085\n", - "[15:41:55] Simulation (INFO) - Progress... 95%\n", - "[15:41:55] Simulation (INFO) - Progress... 95%\n", - "[15:41:55] Simulation (INFO) - Progress... 96%\n", - "[15:41:55] MPS (INFO) - MPS size (MiB)=1.660125732421875\n", - "[15:41:55] MPS (INFO) - MPS fidelity=0.9634633842376111\n", - "[15:41:55] Simulation (INFO) - Progress... 
96%\n", - "[15:41:55] MPS (INFO) - MPS size (MiB)=1.660125732421875\n", - "[15:41:55] MPS (INFO) - MPS fidelity=0.9634633842376111\n", - "[15:41:55] Simulation (INFO) - Progress... 96%\n", - "[15:41:55] Simulation (INFO) - Progress... 96%\n", - "[15:41:55] Simulation (INFO) - Progress... 96%\n", - "[15:41:55] MPS (INFO) - MPS size (MiB)=1.660125732421875\n", - "[15:41:55] MPS (INFO) - MPS fidelity=0.9634633842376111\n", - "[15:41:55] Simulation (INFO) - Progress... 96%\n", - "[15:41:55] Simulation (INFO) - Progress... 97%\n", - "[15:41:55] Simulation (INFO) - Progress... 97%\n", - "[15:41:55] MPS (INFO) - MPS size (MiB)=1.662017822265625\n", - "[15:41:55] MPS (INFO) - MPS fidelity=0.9634633842376114\n", - "[15:41:55] Simulation (INFO) - Progress... 97%\n", - "[15:41:55] Simulation (INFO) - Progress... 97%\n", - "[15:41:55] Simulation (INFO) - Progress... 97%\n", - "[15:41:55] MPS (INFO) - MPS size (MiB)=1.700042724609375\n", - "[15:41:55] MPS (INFO) - MPS fidelity=0.9625939072465783\n", - "[15:41:55] Simulation (INFO) - Progress... 97%\n", - "[15:41:55] MPS (INFO) - MPS size (MiB)=1.700042724609375\n", - "[15:41:55] MPS (INFO) - MPS fidelity=0.9625939072465783\n", - "[15:41:55] Simulation (INFO) - Progress... 98%\n", - "[15:41:55] Simulation (INFO) - Progress... 98%\n", - "[15:41:55] Simulation (INFO) - Progress... 98%\n", - "[15:41:55] MPS (INFO) - MPS size (MiB)=1.700042724609375\n", - "[15:41:55] MPS (INFO) - MPS fidelity=0.9625939072465782\n", - "[15:41:55] Simulation (INFO) - Progress... 98%\n", - "[15:41:55] Simulation (INFO) - Progress... 98%\n", - "[15:41:55] Simulation (INFO) - Progress... 98%\n", - "[15:41:55] MPS (INFO) - MPS size (MiB)=1.700042724609375\n", - "[15:41:55] MPS (INFO) - MPS fidelity=0.9625939072465782\n", - "[15:41:55] Simulation (INFO) - Progress... 99%\n", - "[15:41:55] MPS (INFO) - MPS size (MiB)=1.700042724609375\n", - "[15:41:55] MPS (INFO) - MPS fidelity=0.9625939072465782\n", - "[15:41:55] Simulation (INFO) - Progress... 99%\n", - "[15:41:55] Simulation (INFO) - Progress... 99%\n", - "[15:41:55] Simulation (INFO) - Progress... 99%\n", - "[15:41:55] MPS (INFO) - MPS size (MiB)=1.700042724609375\n", - "[15:41:55] MPS (INFO) - MPS fidelity=0.9625939072465782\n", - "[15:41:55] Simulation (INFO) - Progress... 99%\n", - "[15:41:55] MPS (INFO) - Applying variational optimisation.\n", - "[15:41:55] MPS (INFO) - Fidelity before optimisation=0.9625939072465782\n", - "[15:41:56] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:56] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.9640884677171835\n", - "[15:41:56] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:56] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.9641174266253738\n", - "[15:41:56] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:56] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.9641252811032455\n", - "[15:41:56] MPS (INFO) - Final fidelity after optimisation=0.9641252811032455\n", - "[15:41:56] MPS (INFO) - Applying variational optimisation.\n", - "[15:41:56] MPS (INFO) - Fidelity before optimisation=0.9641252811032455\n", - "[15:41:56] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:56] MPS (INFO) - Optimisation sweep completed. Current fidelity=0.9641252811032449\n", - "[15:41:56] MPS (INFO) - Doing another optimisation sweep...\n", - "[15:41:56] MPS (INFO) - Optimisation sweep completed. 
Current fidelity=0.964125281103245\n", - "[15:41:56] MPS (INFO) - Final fidelity after optimisation=0.964125281103245\n", - "[15:41:56] Simulation (INFO) - Simulation completed.\n", - "[15:41:56] Simulation (INFO) - Final MPS size=1.700042724609375 MiB\n", - "[15:41:56] Simulation (INFO) - Final MPS fidelity=0.964125281103245\n" - ] - } - ], - "source": [ - "with CuTensorNetHandle() as libhandle:\n", - " config = ConfigMPS(truncation_fidelity=0.999, loglevel=logging.INFO)\n", - " simulate(libhandle, circuit, ContractionAlg.MPSxMPO, config)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1d4a14fe-2ea2-435b-836a-acf9587faad7", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "py-cuquantum-23.06.0-mypich-py3.9", - "language": "python", - "name": "py-cuquantum-23.06.0-mypich-py3.9" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.16" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} +{"cells": [{"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["import numpy as np\n", "from time import time\n", "import matplotlib.pyplot as plt\n", "from pytket import Circuit\n", "from pytket.circuit.display import render_circuit_jupyter"]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["from pytket.extensions.cutensornet.structured_state import (\n", " CuTensorNetHandle,\n", " Config,\n", " SimulationAlgorithm,\n", " simulate,\n", " prepare_circuit_mps,\n", ")"]}, {"cell_type": "markdown", "metadata": {}, "source": ["# Introduction
\n", "This notebook provides examples of the usage of the MPS functionalities of `pytket_cutensornet`. For more information, see the docs at https://tket.quantinuum.com/extensions/pytket-cutensornet/api/index.html.
\n", "A Matrix Product State (MPS) represents a state on `n` qubits as a list of `n` tensors connected in a line, as shown below:
\n", "![MPS](images/mps.png)
\n", "Each of these circles corresponds to a tensor. We refer to each leg of a tensor as a *bond* and the number of bonds a tensor has is its *rank*. In code, a tensor is just a multidimensional array:
\n", "
\n", "```tensor[i][j][k] = v```
\n", "
\n", "In the case above, we are assigning the value `v` to an entry of a rank-3 tensor (one `[ ]` coordinate per bond). Each bond allows a different number of values for its index; for instance, `0 <= i < 4` would mean that the index of the first bond of our tensor can take four different values; we refer to this as the *dimension* of the bond. We refer to the bonds connecting different tensors in the MPS as *virtual bonds*; the maximum allowed value for the dimension of virtual bonds is often denoted by the Greek letter `chi`. The open bonds are known as *physical bonds* and, in our case, each will correspond to a qubit; hence, they have dimension `2` -- the dimension of the vector space of a single qubit.
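\n", "To make the indexing concrete, here is a minimal sketch using plain `numpy` (our own illustration, not part of the `pytket-cutensornet` API): a rank-3 tensor whose three bonds have dimensions 4, 3 and 2.
\n", "
\n", "```python
\n", "import numpy as np
\n", "
\n", "# A rank-3 tensor with bond dimensions 4, 3 and 2; one index per bond.
\n", "tensor = np.zeros((4, 3, 2), dtype=complex)
\n", "
\n", "# Assign the value v to the entry with indices i=1, j=0, k=1.
\n", "v = 0.5 + 0.5j
\n", "tensor[1][0][1] = v
\n", "
\n", "print(tensor.shape)  # (4, 3, 2): the dimension of each bond
\n", "```
\n", "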
\n", "In essence, whenever we want to apply a gate to a certain qubit we will connect a tensor (matrix) representing the gate to the corresponding physical bond and *contract* the network back to an MPS form (tensor contraction is a generalisation of matrix multiplication to multidimensional arrays). Whenever a two-qubit gate is applied, the entanglement information after contraction will be kept in the degrees of freedom of the virtual bonds. As such, the dimension of the virtual bonds will generally increase exponentially as we apply entangling gates, leading to large memory footprints of the tensors and, consequently, long runtimes for tensor contraction. We provide functionalities to limit the growth of the dimension of the virtual bonds, keeping resource consumption in check. Read the *Approximate simulation* section of this notebook to learn more.
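\n", "Before moving on, here is a hand-written `numpy` sketch of what applying a single-qubit gate as a tensor contraction looks like. The index ordering is chosen only for this example and this is not what the library actually executes; it is meant purely as an illustration.
\n", "
\n", "```python
\n", "import numpy as np
\n", "
\n", "# One MPS tensor, with assumed index order (left virtual, right virtual, physical).
\n", "site = np.random.rand(4, 4, 2) + 1j * np.random.rand(4, 4, 2)
\n", "
\n", "# A single-qubit gate, e.g. the Hadamard, as a 2x2 matrix.
\n", "hadamard = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
\n", "
\n", "# Contract the gate's input leg with the physical bond of the site tensor.
\n", "new_site = np.einsum(\"lrp,gp->lrg\", site, hadamard)
\n", "print(new_site.shape)  # still (4, 4, 2): the result is again a rank-3 MPS tensor
\n", "```
\n", "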
\n", "**NOTE**: MPS methods can only be applied to circuits whose gates act between nearest-neighbour qubits in a line. If your circuit does not satisfy this constraint, you can use the `prepare_circuit_mps` function (see the *Preparing the circuit* section); this will add multiple `SWAP` gates to the circuit that *need* to be simulated explicitly within the MPS, increasing the resources required considerably. In the future, we will support other tensor network state approaches that do not suffer so drastically from this restrictive connectivity.
\n", "**References**: To read more about MPS we recommend the following papers.
\n", "* For an introduction to MPS and its canonical form: https://arxiv.org/abs/1901.05824.
\n", "* For a description of the `MPSxGate` algorithm we provide: https://arxiv.org/abs/2002.07730.
\n", "* For a description of the `MPSxMPO` algorithm we provide: https://arxiv.org/abs/2207.05612.
\n", "* For insights on the relationship between truncation error and the error model in a quantum computer: https://arxiv.org/abs/2004.02388"]}, {"cell_type": "markdown", "metadata": {}, "source": ["# Basic functionality and exact simulation
\n", "Here we show an example of the basic use of our MPS methods. We first generate a simple `pytket` circuit to be simulated."]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["my_circ = Circuit(5)\n", "my_circ.CX(3, 4)\n", "my_circ.H(2)\n", "my_circ.CZ(0, 1)\n", "my_circ.ZZPhase(0.1, 4, 3)\n", "my_circ.TK2(0.3, 0.5, 0.7, 2, 1)\n", "my_circ.Ry(0.2, 0)"]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["render_circuit_jupyter(my_circ)"]}, {"cell_type": "markdown", "metadata": {}, "source": ["For **exact** simulation, simply call the `simulate` function on the circuit and choose a contraction algorithm. To learn more about the contraction algorithms we provide see the *Contraction algorithms* section of this notebook. You will also need to provide a configuration, the default one is provided by `Config()`. Custom settings of `Config` are discussed in the *Approximate simulation* section.
\n", "**NOTE**: whenever you wish to generate an `MPS` object or execute calculations on it, you must do so within a `with CuTensorNetHandle() as libhandle:` block; this will initialise the cuTensorNet library for you and destroy its handles at the end of the `with` block. You will need to pass the `libhandle` to the `MPS` object via the method that generates it (in the snippet below, `simulate`) or, if the `MPS` is already initialised, pass it via the `update_libhandle` method.
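\n", "To make this concrete, here is the pattern condensed into a single illustrative snippet, using the `my_circ` defined above (the rest of this notebook does the same thing, spread across separate cells):
\n", "
\n", "```python
\n", "from pytket.extensions.cutensornet.structured_state import (
\n", "    CuTensorNetHandle,
\n", "    Config,
\n", "    SimulationAlgorithm,
\n", "    simulate,
\n", ")
\n", "
\n", "with CuTensorNetHandle() as libhandle:  # the library handle is created here
\n", "    mps = simulate(libhandle, my_circ, SimulationAlgorithm.MPSxGate, Config())
\n", "# The handle is destroyed when the block ends; attach a fresh one before reusing the MPS.
\n", "with CuTensorNetHandle() as libhandle:
\n", "    mps.update_libhandle(libhandle)
\n", "    print(mps.vdot(mps))  # for instance, compute the squared norm of the state
\n", "```
\n", "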
\n", "Due to the nature of Jupyter notebooks, we will be starting most of these cells with a `with CuTensorNetHandle() as libhandle:`. However, in a standard script, all of these cells would be grouped together and a single `with CuTensorNetHandle() as libhandle:` statement would be necessary at the beginning of the script."]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["with CuTensorNetHandle() as libhandle:\n", " my_mps = simulate(libhandle, my_circ, SimulationAlgorithm.MPSxGate, Config())"]}, {"cell_type": "markdown", "metadata": {}, "source": ["Notice that `my_circ` uses a rich gateset -- in fact, every single-qubit and two-qubit gate supported by `pytket` can be used in our MPS approaches. Gates acting on more than two qubits are not currently supported."]}, {"cell_type": "markdown", "metadata": {}, "source": ["The output of `simulate` is an `MPS` object encoding the output state of the circuit.
\n", "### Obtain an amplitude from an MPS
\n", "Let's first see how to get the amplitude of the state `|10100>` from the output of the previous circuit."]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["state = int(\"10100\", 2)\n", "with CuTensorNetHandle() as libhandle:\n", " my_mps.update_libhandle(libhandle)\n", " amplitude = my_mps.get_amplitude(state)\n", "print(amplitude)"]}, {"cell_type": "markdown", "metadata": {}, "source": ["Since this is a very small circuit, we can use `pytket`'s state vector simulator capabilities to verify that the state is correct by checking the amplitude of each of the computational states."]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["state_vector = my_circ.get_statevector()\n", "n_qubits = len(my_circ.qubits)"]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["correct_amplitude = [False] * (2**n_qubits)\n", "with CuTensorNetHandle() as libhandle:\n", " my_mps.update_libhandle(libhandle)\n", " for i in range(2**n_qubits):\n", " correct_amplitude[i] = np.isclose(state_vector[i], my_mps.get_amplitude(i))"]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["print(\"Are all amplitudes correct?\")\n", "print(all(correct_amplitude))"]}, {"cell_type": "markdown", "metadata": {}, "source": ["### Sampling from an MPS
\n", "We can also sample from the output state of a circuit by calling `my_mps.sample`, where `my_mps` is the outcome of simulating the circuit."]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["n_samples = 100\n", "n_qubits = len(my_circ.qubits)"]}, {"cell_type": "markdown", "metadata": {}, "source": ["Initialise the sample counter"]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["sample_count = [0 for _ in range(2**n_qubits)]"]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["with CuTensorNetHandle() as libhandle:\n", " my_mps.update_libhandle(libhandle)\n", " for _ in range(n_samples):\n", " # Draw a sample\n", " qubit_outcomes = my_mps.sample()\n", " # Convert qubit outcomes to bitstring\n", " bitstring = \"\".join(str(qubit_outcomes[q]) for q in my_circ.qubits)\n", " # Convert bitstring to int\n", " outcome = int(bitstring, 2)\n", " # Update the sample dictionary\n", " sample_count[outcome] += 1"]}, {"cell_type": "markdown", "metadata": {}, "source": ["Calculate the theoretical number of samples per bitstring"]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["expected_count = [n_samples * abs(state_vector[i]) ** 2 for i in range(2**n_qubits)]"]}, {"cell_type": "markdown", "metadata": {}, "source": ["Plot a comparison of theory vs sampled"]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["plt.scatter(range(2**n_qubits), expected_count, label=\"Theory\")\n", "plt.scatter(range(2**n_qubits), sample_count, label=\"Experiment\", marker=\"x\")\n", "plt.xlabel(\"Basis states\")\n", "plt.ylabel(\"Samples\")\n", "plt.legend()\n", "plt.show()"]}, {"cell_type": "markdown", "metadata": {}, "source": ["We also provide methods to apply mid-circuit measurements via `my_mps.measure(qubits)` and postselection via `my_mps.postselect(qubit_outcomes)`. Their use is similar to that of `my_mps.sample()` shown above.
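\n", "For illustration, a possible usage sketch is shown below. We assume here that `measure` takes a set of qubits and returns a dictionary of outcomes, and that `postselect` takes a dictionary mapping qubits to desired outcomes and returns the probability of obtaining them; please check the API documentation for the exact signatures.
\n", "
\n", "```python
\n", "from pytket.circuit import Qubit
\n", "
\n", "with CuTensorNetHandle() as libhandle:
\n", "    my_mps.update_libhandle(libhandle)
\n", "    # Work on a copy, since measure and postselect modify the state.
\n", "    mps_copy = my_mps.copy()
\n", "    # Measure qubit q[0]; the measured qubit is removed from the MPS.
\n", "    outcomes = mps_copy.measure({Qubit(0)})
\n", "    print(outcomes)
\n", "    # Postselect qubit q[1] to outcome 0.
\n", "    prob = mps_copy.postselect({Qubit(1): 0})
\n", "    print(prob)
\n", "```
\n", "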
\n", "**Note:** whereas `my_mps.sample()` does *not* change the state of the MPS, `my_mps.measure(qubits)` and `my_mps.postselect(qubit_outcomes)` do change it, projecting the state to the resulting outcome and removing the measured qubits."]}, {"cell_type": "markdown", "metadata": {}, "source": ["### Inner products
\n", "Using `vdot` you can obtain the inner product of two states in MPS form. This method does not change the internal data of neither of the MPS. Moreover, it can be used on the same `MPS` object for both inputs, yielding the squared norm of the state."]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["with CuTensorNetHandle() as libhandle:\n", " my_mps.update_libhandle(libhandle)\n", " norm_sq = my_mps.vdot(my_mps)"]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["print(\"As expected, the squared norm of a state is 1\")\n", "print(np.isclose(norm_sq, 1))"]}, {"cell_type": "markdown", "metadata": {}, "source": ["Let's come up with another circuit on the same qubits and apply an inner product between the two `MPS` objects."]}, {"cell_type": "markdown", "metadata": {}, "source": ["Generate circuits"]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["other_circ = Circuit(5)\n", "other_circ.H(3)\n", "other_circ.CZ(3, 4)\n", "other_circ.XXPhase(0.3, 1, 2)\n", "other_circ.Ry(0.7, 3)"]}, {"cell_type": "markdown", "metadata": {}, "source": ["Simulate them"]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["with CuTensorNetHandle() as libhandle:\n", " other_mps = simulate(libhandle, other_circ, SimulationAlgorithm.MPSxGate, Config())"]}, {"cell_type": "markdown", "metadata": {}, "source": ["Let's calculate the inner product and check that it agrees with `pytket`'s state vector based computation."]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["with CuTensorNetHandle() as libhandle:\n", " my_mps.update_libhandle(libhandle)\n", " inner_product = my_mps.vdot(other_mps)"]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["my_state = my_circ.get_statevector()\n", "other_state = other_circ.get_statevector()"]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["print(\"Is the inner product correct?\")\n", "print(np.isclose(np.vdot(my_state, other_state), inner_product))"]}, {"cell_type": "markdown", "metadata": {}, "source": ["### Preparing the circuit
\n", "If the circuit to be simulated contains gates that do not act between nearest neighbour qubits, an error message will be raised."]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["bad_circ = Circuit(5)\n", "bad_circ.H(1)\n", "bad_circ.ZZPhase(0.3, 2, 3)\n", "bad_circ.CX(0, 1)\n", "bad_circ.Ry(0.8, 4)\n", "bad_circ.CZ(3, 4)\n", "bad_circ.XXPhase(0.7, 1, 2)\n", "bad_circ.TK2(0.1, 0.2, 0.4, 1, 4)"]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["render_circuit_jupyter(bad_circ)"]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["with CuTensorNetHandle() as libhandle:\n", " try:\n", " simulate(libhandle, bad_circ, SimulationAlgorithm.MPSxGate, Config())\n", " except RuntimeError as e:\n", " print(e)"]}, {"cell_type": "markdown", "metadata": {}, "source": ["We can call `prepare_circuit_mps` to use `pytket` routing capabilities to guarantee that the circuit can be run using our MPS approaches."]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["prep_circ, qubit_map = prepare_circuit_mps(bad_circ)\n", "render_circuit_jupyter(prep_circ)\n", "# Print the correspondence between qubit names in `prep_circuit` and the original qubits from `circuit` at the output\n", "print(qubit_map)"]}, {"cell_type": "markdown", "metadata": {}, "source": ["The circuit can now be simulated as usual."]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["with CuTensorNetHandle() as libhandle:\n", " prep_mps = simulate(libhandle, prep_circ, SimulationAlgorithm.MPSxGate, Config())\n", " print(\"Did simulation succeed?\")\n", " print(prep_mps.is_valid())"]}, {"cell_type": "markdown", "metadata": {}, "source": ["# Approximate simulation
\n", "We provide two policies for approximate simulation; these are supported by both of our current MPS contraction algorithms:
\n", "* Bound the maximum value of the virtual bond dimension `chi`. If a bond dimension would increase past that point, we *truncate* (i.e. discard) the degrees of freedom that contribute the least to the state description. We can keep track of a lower bound of the error that this truncation causes. A schematic sketch of a single truncation step is shown right after this list.
\n", "* Provide a value for acceptable two-qubit gate fidelity `truncation_fidelity`. After each two-qubit gate we truncate the dimension of virtual bonds as much as we can while guaranteeing the target gate fidelity. The more fidelity you require, the longer it will take to simulate. **Note**: this is *not* the final fidelity of the output state, but the fidelity per gate.
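\n", "To give some intuition for the first policy, the snippet below is a schematic `numpy` sketch of a single truncation step (our own illustration, with our own index conventions; it is not the implementation used by the library): two neighbouring tensors are contracted, split again with an SVD, and only the `chi` largest singular values are kept.
\n", "
\n", "```python
\n", "import numpy as np
\n", "
\n", "# Two neighbouring MPS tensors sharing a virtual bond of dimension 8.
\n", "# Assumed index order: (left virtual, right virtual, physical).
\n", "A = np.random.rand(4, 8, 2)
\n", "B = np.random.rand(8, 4, 2)
\n", "
\n", "# Contract the shared virtual bond into a single two-site tensor...
\n", "theta = np.einsum(\"lrp,rsq->lpsq\", A, B)
\n", "# ...and split it again with an SVD, grouping (left, physical) vs (right, physical).
\n", "U, S, Vh = np.linalg.svd(theta.reshape(4 * 2, 4 * 2), full_matrices=False)
\n", "
\n", "# Truncate to chi=4: keep only the 4 largest singular values.
\n", "chi = 4
\n", "kept_weight = np.sum(S[:chi] ** 2) / np.sum(S**2)
\n", "print(kept_weight)  # rough estimate of the fidelity kept by this single truncation
\n", "```
\n", "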
\n", "Values for `chi` and `truncation_fidelity` can be set via `Config`. To showcase approximate simulation, let's define a circuit where exact MPS contraction starts struggling."]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["def random_line_circuit(n_qubits: int, layers: int) -> Circuit:\n", " \"\"\"Random circuit with line connectivity.\"\"\"\n", " c = Circuit(n_qubits)\n", " for i in range(layers):\n", " # Layer of TK1 gates\n", " for q in range(n_qubits):\n", " c.TK1(np.random.rand(), np.random.rand(), np.random.rand(), q)\n\n", " # Layer of CX gates\n", " offset = np.mod(i, 2) # Even layers connect (q0,q1), odd (q1,q2)\n", " qubit_pairs = [\n", " [c.qubits[i], c.qubits[i + 1]] for i in range(offset, n_qubits - 1, 2)\n", " ]\n", " # Direction of each CX gate is random\n", " for pair in qubit_pairs:\n", " np.random.shuffle(pair)\n", " for pair in qubit_pairs:\n", " c.CX(pair[0], pair[1])\n", " return c"]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["circuit = random_line_circuit(n_qubits=20, layers=20)"]}, {"cell_type": "markdown", "metadata": {}, "source": ["For exact contraction, `chi` must be allowed to be up to `2**(n_qubits // 2)`, meaning that if we set `n_qubits = 20` it would require `chi = 1024`; already too much for this particular circuit to be simulated in a gaming laptop using the current implementation. Instead, let's bound `chi` to a maximum of `16`. Doing so results in faster runtime, at the expense of losing output state fidelity."]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["start = time()\n", "with CuTensorNetHandle() as libhandle:\n", " config = Config(chi=16)\n", " bound_chi_mps = simulate(libhandle, circuit, SimulationAlgorithm.MPSxGate, config)\n", "end = time()\n", "print(\"Time taken by approximate contraction with bound chi:\")\n", "print(f\"{round(end-start,2)} seconds\")\n", "print(\"\\nLower bound of the fidelity:\")\n", "print(round(bound_chi_mps.fidelity, 4))"]}, {"cell_type": "markdown", "metadata": {}, "source": ["Alternatively, we can fix `truncation_fidelity` and let `chi` increase as necessary to satisfy it."]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["start = time()\n", "with CuTensorNetHandle() as libhandle:\n", " config = Config(truncation_fidelity=0.999)\n", " fixed_fidelity_mps = simulate(\n", " libhandle, circuit, SimulationAlgorithm.MPSxGate, config\n", " )\n", "end = time()\n", "print(\"Time taken by approximate contraction with fixed truncation fidelity:\")\n", "print(f\"{round(end-start,2)} seconds\")\n", "print(\"\\nLower bound of the fidelity:\")\n", "print(round(fixed_fidelity_mps.fidelity, 4))"]}, {"cell_type": "markdown", "metadata": {}, "source": ["# Contraction algorithms"]}, {"cell_type": "markdown", "metadata": {}, "source": ["We currently offer two MPS-based simulation algorithms:
\n", "* **MPSxGate**: Apply gates one by one to the MPS, canonicalising the MPS and truncating when necessary. In particular, we implemented the algorithm from the following paper: https://arxiv.org/abs/2002.07730.
\n", "* **MPSxMPO**: Maintain two MPS copies of the state as it evolves, one updated eagerly using the **MPSxGate** method and the other updated in batches of up to `k` layers of two-qubit gates. Whenever the second MPS is updated, both copies are synchronised and an optimisation algorithm is applied to increase the fidelity of the state. This algorithm is often referred to as DMRG-like simulation. In particular, we implemented the algorithm from the following paper: https://arxiv.org/abs/2207.05612.
\n", "The `MPSxGate` algorithm is the one we have been using for all of the examples above. In comparison, the `MPSxMPO` algorithm provides the user with two new parameters to tune:
\n", "* **k**: The maximum number of layers the MPO is allowed to have before being contracted. Increasing this might increase fidelity, but it will also increase resource requirements exponentially. Default value is `4`.
\n", "* **optim_delta**: Stopping criterion for the optimisation when contracting the `k` layers of MPO. The optimisation stops when the increase in fidelity between iterations is smaller than `optim_delta`. Default value is `1e-5`.
\n", "Both `k` and `optim_delta` can be set via `Config`. Below we compare `MPSxGate` versus `MPSxMPO` with default parameters and `MPSxMPO` with more resource-hungry parameters. The circuit used is the same as in the previous section."]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["start = time()\n", "with CuTensorNetHandle() as libhandle:\n", " config = Config(chi=16)\n", " fixed_fidelity_mps = simulate(\n", " libhandle, circuit, SimulationAlgorithm.MPSxGate, config\n", " )\n", "end = time()\n", "print(\"MPSxGate\")\n", "print(f\"\\tTime taken: {round(end-start,2)} seconds\")\n", "print(f\"\\tLower bound of the fidelity: {round(fixed_fidelity_mps.fidelity, 4)}\")"]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["start = time()\n", "with CuTensorNetHandle() as libhandle:\n", " config = Config(chi=16)\n", " fixed_fidelity_mps = simulate(\n", " libhandle, circuit, SimulationAlgorithm.MPSxMPO, config\n", " )\n", "end = time()\n", "print(\"MPSxMPO, default parameters\")\n", "print(f\"\\tTime taken: {round(end-start,2)} seconds\")\n", "print(f\"\\tLower bound of the fidelity: {round(fixed_fidelity_mps.fidelity, 4)}\")"]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["start = time()\n", "with CuTensorNetHandle() as libhandle:\n", " config = Config(k=8, optim_delta=1e-15, chi=16)\n", " fixed_fidelity_mps = simulate(\n", " libhandle, circuit, SimulationAlgorithm.MPSxMPO, config\n", " )\n", "end = time()\n", "print(\"MPSxMPO, custom parameters\")\n", "print(f\"\\tTime taken: {round(end-start,2)} seconds\")\n", "print(f\"\\tLower bound of the fidelity: {round(fixed_fidelity_mps.fidelity, 4)}\")"]}, {"cell_type": "markdown", "metadata": {}, "source": ["**Note**: `MPSxMPO` also admits truncation policy in terms of `truncation_fidelity` instead of `chi`."]}, {"cell_type": "markdown", "metadata": {}, "source": ["# Using the logger"]}, {"cell_type": "markdown", "metadata": {}, "source": ["You can request a verbose log to be produced during simulation, by assigning the `loglevel` argument when creating a `Config` instance. Currently, two log levels are supported (other than default, which is silent):
\n", "- `logging.INFO` will print information about the progress percentage, the memory currently occupied by the MPS and the current fidelity. Additionally, some high-level information about the current stage of the simulation is provided, such as when `MPSxMPO` is applying optimisation sweeps.
\n", "- `logging.DEBUG` provides all of the messages from the log level above, plus detailed information about the current operation being carried out and the values of important variables.
\n", "**Note**: Due to technical issues with the `logging` module and Jupyter notebooks we need to reload the `logging` module. When working with python scripts and command line, just doing `import logging` is enough."]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["from importlib import reload # Not needed in Python 2\n", "import logging"]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["reload(logging)"]}, {"cell_type": "markdown", "metadata": {}, "source": ["An example of the use of `logging.INFO` is provided below."]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["with CuTensorNetHandle() as libhandle:\n", " config = Config(truncation_fidelity=0.999, loglevel=logging.INFO)\n", " simulate(libhandle, circuit, SimulationAlgorithm.MPSxMPO, config)"]}], "metadata": {"kernelspec": {"display_name": "Python 3", "language": "python", "name": "python3"}, "language_info": {"codemirror_mode": {"name": "ipython", "version": 3}, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.4"}}, "nbformat": 4, "nbformat_minor": 2} \ No newline at end of file diff --git a/examples/python/mps_tutorial.py b/examples/python/mps_tutorial.py new file mode 100644 index 00000000..a60f40ac --- /dev/null +++ b/examples/python/mps_tutorial.py @@ -0,0 +1,303 @@ +import numpy as np +from time import time +import matplotlib.pyplot as plt +from pytket import Circuit +from pytket.circuit.display import render_circuit_jupyter + +from pytket.extensions.cutensornet.structured_state import ( + CuTensorNetHandle, + Config, + SimulationAlgorithm, + simulate, + prepare_circuit_mps, +) + +# # Introduction +# This notebook provides examples of the usage of the MPS functionalities of `pytket_cutensornet`. For more information, see the docs at https://tket.quantinuum.com/extensions/pytket-cutensornet/api/index.html. +# A Matrix Product State (MPS) represents a state on `n` qubits as a list of `n` tensors connected in a line as show below: +# ![MPS](images/mps.png) +# Each of these circles corresponds to a tensor. We refer to each leg of a tensor as a *bond* and the number of bonds a tensor has is its *rank*. In code, a tensor is just a multidimensional array: +# +# ```tensor[i][j][k] = v``` +# +# In the case above, we are assigning an entry value `v` of a rank-3 tensor (one `[ ]` coordinate per bond). Each bond allows a different number of values for its indices; for instance `0 <= i < 4` would mean that the first bond of our tensor can take up to four different indices; we refer to this as the *dimension* of the bond. We refer to the bonds connecting different tensors in the MPS as *virtual bonds*; the maximum allowed value for the dimension of virtual bonds is often denoted by the greek letter `chi`. The open bonds are known as *physical bonds* and, in our case, each will correspond to a qubit; hence, they have dimension `2` -- the dimension of the vector space of a single qubit. +# In essence, whenever we want to apply a gate to certain qubit we will connect a tensor (matrix) representing the gate to the corresponding physical bond and *contract* the network back to an MPS form (tensor contraction is a generalisation of matrix multiplication to multidimensional arrays). 
Whenever a two-qubit gate is applied, the entanglement information after contraction will be kept in the degrees of freedom of the virtual bonds. As such, the dimension of the virtual bonds will generally increase exponentially as we apply entangling gates, leading to large memory footprints of the tensors and, consequently, long runtime for tensor contraction. We provide functionalities to limit the growth of the dimension of the virtual bonds, keeping resource consumption in check. Read the *Approximate simulation* section on this notebook to learn more. +# **NOTE**: MPS methods can only be applied to circuits that only contain gates that act between nearest-neighbours in a line. If your circuit does not satisfy this constraint, you can use the `prepare_circuit_mps` function (see the *Preparing the circuit* section); this will add multiple `SWAP` gates to the circuit that *need* to be simulated explicitly within the MPS, increasing the resources required considerably. In the future, we will support other tensor network state approaches that do not suffer so drastically from this restrictive connectivity. +# **References**: To read more about MPS we recommend the following papers. +# * For an introduction to MPS and its canonical form: https://arxiv.org/abs/1901.05824. +# * For a description of the `MPSxGate` algorithm we provide: https://arxiv.org/abs/2002.07730. +# * For a description of the `MPSxMPO` algorithm we provide: https://arxiv.org/abs/2207.05612. +# * For insights on the reationship between truncation error and the error model in a quantum computer: https://arxiv.org/abs/2004.02388 + +# # Basic functionality and exact simulation +# Here we show an example of the basic use of our MPS methods. We first generate a simple `pytket` circuit to be simulated. + +my_circ = Circuit(5) +my_circ.CX(3, 4) +my_circ.H(2) +my_circ.CZ(0, 1) +my_circ.ZZPhase(0.1, 4, 3) +my_circ.TK2(0.3, 0.5, 0.7, 2, 1) +my_circ.Ry(0.2, 0) + +render_circuit_jupyter(my_circ) + +# For **exact** simulation, simply call the `simulate` function on the circuit and choose a contraction algorithm. To learn more about the contraction algorithms we provide see the *Contraction algorithms* section of this notebook. You will also need to provide a configuration, the default one is provided by `Config()`. Custom settings of `Config` are discussed in the *Approximate simulation* section. +# **NOTE**: whenever you wish to generate an `MPS` object or execute calculations on it you must do so within a `with CuTensorNetHandle() as libhandle:` block; this will initialise the cuTensorNetwork library for you, and destroy its handles at the end of the `with` block. You will need to pass the `libhandle` to the `MPS` object via the method that generates it (in the snippet below, `simulate`), or if already initialised, pass it via the `update_libhandle` method. +# Due to the nature of Jupyter notebooks, we will be starting most of these cells with a `with CuTensorNetHandle() as libhandle:`. However, in a standard script, all of these cells would be grouped together and a single `with CuTensorNetHandle() as libhandle:` statement would be necessary at the beginning of the script. + +with CuTensorNetHandle() as libhandle: + my_mps = simulate(libhandle, my_circ, SimulationAlgorithm.MPSxGate, Config()) + +# Notice that `my_circ` uses a rich gateset -- in fact, every single-qubit and two-qubit gate supported by `pytket` can be used in our MPS approaches. Gates acting on more than two qubits are not currently supported. 
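+# As a quick, informal illustration (this helper is our own, not part of the
+# pytket-cutensornet API), you can check that a circuit only contains gates acting
+# on at most two qubits before attempting MPS simulation:
+
+
+def uses_at_most_two_qubit_gates(circ: Circuit) -> bool:
+    """Return True if no command in the circuit acts on more than two qubits."""
+    return all(len(cmd.qubits) <= 2 for cmd in circ.get_commands())
+
+
+print(uses_at_most_two_qubit_gates(my_circ))  # expected to print True for the circuit above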
+ +# The output of `simulate` is an `MPS` object encoding the output state of the circuit. +# ### Obtain an amplitude from an MPS +# Let's first see how to get the amplitude of the state `|10100>` from the output of the previous circuit. + +state = int("10100", 2) +with CuTensorNetHandle() as libhandle: + my_mps.update_libhandle(libhandle) + amplitude = my_mps.get_amplitude(state) +print(amplitude) + +# Since this is a very small circuit, we can use `pytket`'s state vector simulator capabilities to verify that the state is correct by checking the amplitude of each of the computational states. + +state_vector = my_circ.get_statevector() +n_qubits = len(my_circ.qubits) + +correct_amplitude = [False] * (2**n_qubits) +with CuTensorNetHandle() as libhandle: + my_mps.update_libhandle(libhandle) + for i in range(2**n_qubits): + correct_amplitude[i] = np.isclose(state_vector[i], my_mps.get_amplitude(i)) + +print("Are all amplitudes correct?") +print(all(correct_amplitude)) + +# ### Sampling from an MPS +# We can also sample from the output state of a circuit by calling `my_mps.sample`, where `my_mps` is the outcome of simulating the circuit. + +n_samples = 100 +n_qubits = len(my_circ.qubits) + +# Initialise the sample counter +sample_count = [0 for _ in range(2**n_qubits)] + +with CuTensorNetHandle() as libhandle: + my_mps.update_libhandle(libhandle) + + for _ in range(n_samples): + # Draw a sample + qubit_outcomes = my_mps.sample() + # Convert qubit outcomes to bitstring + bitstring = "".join(str(qubit_outcomes[q]) for q in my_circ.qubits) + # Convert bitstring to int + outcome = int(bitstring, 2) + # Update the sample dictionary + sample_count[outcome] += 1 + +# Calculate the theoretical number of samples per bitstring +expected_count = [n_samples * abs(state_vector[i]) ** 2 for i in range(2**n_qubits)] + +# Plot a comparison of theory vs sampled +plt.scatter(range(2**n_qubits), expected_count, label="Theory") +plt.scatter(range(2**n_qubits), sample_count, label="Experiment", marker="x") +plt.xlabel("Basis states") +plt.ylabel("Samples") +plt.legend() +plt.show() + +# We also provide methods to apply mid-circuit measurements via `my_mps.measure(qubits)` and postselection via `my_mps.postselect(qubit_outcomes)`. Their use is similar to that of `my_mps.sample()` shown above. +# **Note:** whereas `my_mps.sample()` does *not* change the state of the MPS, `my_mps.measure(qubits)` and `my_mps.postselect(qubit_outcomes)` do change it, projecting the state to the resulting outcome and removing the measured qubits. + +# ### Inner products +# Using `vdot` you can obtain the inner product of two states in MPS form. This method does not change the internal data of neither of the MPS. Moreover, it can be used on the same `MPS` object for both inputs, yielding the squared norm of the state. + +with CuTensorNetHandle() as libhandle: + my_mps.update_libhandle(libhandle) + norm_sq = my_mps.vdot(my_mps) + +print("As expected, the squared norm of a state is 1") +print(np.isclose(norm_sq, 1)) + +# Let's come up with another circuit on the same qubits and apply an inner product between the two `MPS` objects. + +# Generate circuits +other_circ = Circuit(5) +other_circ.H(3) +other_circ.CZ(3, 4) +other_circ.XXPhase(0.3, 1, 2) +other_circ.Ry(0.7, 3) + +# Simulate them +with CuTensorNetHandle() as libhandle: + other_mps = simulate(libhandle, other_circ, SimulationAlgorithm.MPSxGate, Config()) + +# Let's calculate the inner product and check that it agrees with `pytket`'s state vector based computation. 
+ +with CuTensorNetHandle() as libhandle: + my_mps.update_libhandle(libhandle) + inner_product = my_mps.vdot(other_mps) + +my_state = my_circ.get_statevector() +other_state = other_circ.get_statevector() + +print("Is the inner product correct?") +print(np.isclose(np.vdot(my_state, other_state), inner_product)) + +# ### Preparing the circuit +# If the circuit to be simulated contains gates that do not act between nearest neighbour qubits, an error message will be raised. + +bad_circ = Circuit(5) +bad_circ.H(1) +bad_circ.ZZPhase(0.3, 2, 3) +bad_circ.CX(0, 1) +bad_circ.Ry(0.8, 4) +bad_circ.CZ(3, 4) +bad_circ.XXPhase(0.7, 1, 2) +bad_circ.TK2(0.1, 0.2, 0.4, 1, 4) + +render_circuit_jupyter(bad_circ) + +with CuTensorNetHandle() as libhandle: + try: + simulate(libhandle, bad_circ, SimulationAlgorithm.MPSxGate, Config()) + except RuntimeError as e: + print(e) + +# We can call `prepare_circuit_mps` to use `pytket` routing capabilities to guarantee that the circuit can be run using our MPS approaches. + +prep_circ, qubit_map = prepare_circuit_mps(bad_circ) +render_circuit_jupyter(prep_circ) +# Print the correspondence between qubit names in `prep_circuit` and the original qubits from `circuit` at the output +print(qubit_map) + +# The circuit can now be simulated as usual. + +with CuTensorNetHandle() as libhandle: + prep_mps = simulate(libhandle, prep_circ, SimulationAlgorithm.MPSxGate, Config()) + print("Did simulation succeed?") + print(prep_mps.is_valid()) + +# # Approximate simulation +# We provide two policies for approximate simulation; these are supported by both of our current MPS contraction algorithms: +# * Bound the maximum value of the virtual bond dimension `chi`. If a bond dimension would increase past that point, we *truncate* (i.e. discard) the degrees of freedom that contribute the least to the state description. We can keep track of a lower bound of the error that this truncation causes. +# * Provide a value for acceptable two-qubit gate fidelity `truncation_fidelity`. After each two-qubit gate we truncate the dimension of virtual bonds as much as we can while guaranteeing the target gate fidelity. The more fidelity you require, the longer it will take to simulate. **Note**: this is *not* the final fidelity of the output state, but the fidelity per gate. +# Values for `chi` and `truncation_fidelity` can be set via `Config`. To showcase approximate simulation, let's define a circuit where exact MPS contraction starts struggling. + + +def random_line_circuit(n_qubits: int, layers: int) -> Circuit: + """Random circuit with line connectivity.""" + c = Circuit(n_qubits) + + for i in range(layers): + # Layer of TK1 gates + for q in range(n_qubits): + c.TK1(np.random.rand(), np.random.rand(), np.random.rand(), q) + + # Layer of CX gates + offset = np.mod(i, 2) # Even layers connect (q0,q1), odd (q1,q2) + qubit_pairs = [ + [c.qubits[i], c.qubits[i + 1]] for i in range(offset, n_qubits - 1, 2) + ] + # Direction of each CX gate is random + for pair in qubit_pairs: + np.random.shuffle(pair) + + for pair in qubit_pairs: + c.CX(pair[0], pair[1]) + + return c + + +circuit = random_line_circuit(n_qubits=20, layers=20) + +# For exact contraction, `chi` must be allowed to be up to `2**(n_qubits // 2)`, meaning that if we set `n_qubits = 20` it would require `chi = 1024`; already too much for this particular circuit to be simulated in a gaming laptop using the current implementation. Instead, let's bound `chi` to a maximum of `16`. 
Doing so results in faster runtime, at the expense of losing output state fidelity. + +start = time() +with CuTensorNetHandle() as libhandle: + config = Config(chi=16) + bound_chi_mps = simulate(libhandle, circuit, SimulationAlgorithm.MPSxGate, config) +end = time() +print("Time taken by approximate contraction with bound chi:") +print(f"{round(end-start,2)} seconds") +print("\nLower bound of the fidelity:") +print(round(bound_chi_mps.fidelity, 4)) + +# Alternatively, we can fix `truncation_fidelity` and let `chi` increase as necessary to satisfy it. + +start = time() +with CuTensorNetHandle() as libhandle: + config = Config(truncation_fidelity=0.999) + fixed_fidelity_mps = simulate( + libhandle, circuit, SimulationAlgorithm.MPSxGate, config + ) +end = time() +print("Time taken by approximate contraction with fixed truncation fidelity:") +print(f"{round(end-start,2)} seconds") +print("\nLower bound of the fidelity:") +print(round(fixed_fidelity_mps.fidelity, 4)) + +# # Contraction algorithms + +# We currently offer two MPS-based simulation algorithms: +# * **MPSxGate**: Apply gates one by one to the MPS, canonicalising the MPS and truncating when necessary. In particular, we implemented the algorithm from the following paper: https://arxiv.org/abs/2002.07730. +# * **MPSxMPO**: Maintain two MPS copies of the state as it evolves, one updated eagerly using the **MPSxGate** method and the other updated in batches of up to `k` layers of two-qubit gates. Whenever the second MPS is updated, both copies are synchronised and an optimisation algorithm is applied to increase the fidelity of the state. This algorithm is often referred to as DMRG-like simulation. In particular, we implemented the algorithm from the following paper: https://arxiv.org/abs/2207.05612. +# The `MPSxGate` algorithm is the one we have been using for all of the examples above. In comparison, the `MPSxMPO` algorithm provides the user with two new parameters to tune: +# * **k**: The maximum number of layers the MPO is allowed to have before being contracted. Increasing this might increase fidelity, but it will also increase resource requirements exponentially. Default value is `4`. +# * **optim_delta**: Stopping criteria for the optimisation when contracting the `k` layers of MPO. Stops when the increase of fidelity between iterations is smaller than `optim_delta`. Default value is `1e-5`. +# Both `k` and `optim_delta` can be set via `Config`. Below we compare `MPSxGate` versus `MPSxMPO` with default parameters and `MPSxMPO` with more resource-hungry parameters. The circuit used is the same as in the previous section. 
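For quick reference, the two `MPSxMPO` runs below differ only in the `Config` object they receive; here is a minimal sketch of the two configurations side by side (the variable names are illustrative, not part of the tutorial code):

```python
from pytket.extensions.cutensornet.structured_state import Config

default_mpo_config = Config(chi=16)  # k=4, optim_delta=1e-5 (library defaults)
heavy_mpo_config = Config(chi=16, k=8, optim_delta=1e-15)  # larger MPO batches, stricter stop
```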
+ +start = time() +with CuTensorNetHandle() as libhandle: + config = Config(chi=16) + fixed_fidelity_mps = simulate( + libhandle, circuit, SimulationAlgorithm.MPSxGate, config + ) +end = time() +print("MPSxGate") +print(f"\tTime taken: {round(end-start,2)} seconds") +print(f"\tLower bound of the fidelity: {round(fixed_fidelity_mps.fidelity, 4)}") + +start = time() +with CuTensorNetHandle() as libhandle: + config = Config(chi=16) + fixed_fidelity_mps = simulate( + libhandle, circuit, SimulationAlgorithm.MPSxMPO, config + ) +end = time() +print("MPSxMPO, default parameters") +print(f"\tTime taken: {round(end-start,2)} seconds") +print(f"\tLower bound of the fidelity: {round(fixed_fidelity_mps.fidelity, 4)}") + +start = time() +with CuTensorNetHandle() as libhandle: + config = Config(k=8, optim_delta=1e-15, chi=16) + fixed_fidelity_mps = simulate( + libhandle, circuit, SimulationAlgorithm.MPSxMPO, config + ) +end = time() +print("MPSxMPO, custom parameters") +print(f"\tTime taken: {round(end-start,2)} seconds") +print(f"\tLower bound of the fidelity: {round(fixed_fidelity_mps.fidelity, 4)}") + +# **Note**: `MPSxMPO` also admits truncation policy in terms of `truncation_fidelity` instead of `chi`. + +# # Using the logger + +# You can request a verbose log to be produced during simulation, by assigning the `loglevel` argument when creating a `Config` instance. Currently, two log levels are supported (other than default, which is silent): +# - `logging.INFO` will print information about progress percent, memory currently occupied by the MPS and current fidelity. Additionally, some high level information of the current stage of the simulation is provided, such as when `MPSxMPO` is applying optimisation sweeps. +# - `logging.DEBUG` provides all of the messages from the loglevel above plus detailed information of the current operation being carried out and the values of important variables. +# **Note**: Due to technical issues with the `logging` module and Jupyter notebooks we need to reload the `logging` module. When working with python scripts and command line, just doing `import logging` is enough. + +from importlib import reload # Not needed in Python 2 +import logging + +reload(logging) + +# An example of the use of `logging.INFO` is provided below. + +with CuTensorNetHandle() as libhandle: + config = Config(truncation_fidelity=0.999, loglevel=logging.INFO) + simulate(libhandle, circuit, SimulationAlgorithm.MPSxMPO, config) diff --git a/examples/python/ttn_tutorial.py b/examples/python/ttn_tutorial.py new file mode 100644 index 00000000..daa2325e --- /dev/null +++ b/examples/python/ttn_tutorial.py @@ -0,0 +1,123 @@ +import numpy as np +from time import time +import matplotlib.pyplot as plt +import networkx as nx +from pytket import Circuit +from pytket.circuit.display import render_circuit_jupyter + +from pytket.extensions.cutensornet.structured_state import ( + CuTensorNetHandle, + Config, + SimulationAlgorithm, + simulate, +) + +# # Introduction +# This notebook provides examples of the usage of the TTN functionalities of `pytket_cutensornet`. For more information, see the docs at https://tket.quantinuum.com/extensions/pytket-cutensornet/api/index.html. 
+# Some good references to learn about Tree Tensor Network state simulation: +# - For an introduction into TTN based simulation of quantum circuits: https://arxiv.org/abs/2206.01000 +# - For an introduction on some of the optimisation concerns that are relevant to TTN: https://arxiv.org/abs/2209.03196 +# The implementation in pytket-cutensornet differs from previously published literature. I am still experimenting with the algorithm. I intend to write up a document detailing the approach, once I reach a stable version. +# The main advantage of TTN over MPS is that it can be used to efficiently simulate circuits with richer qubit connectivity. This does **not** mean that TTN has an easy time simulating all-to-all connectivity, but it is far more flexible than MPS. TTN's strength is in simulating circuit where certain subsets of qubits interact densely with each other, and there is not that many gates acting on qubits in different subsets. + +# # How to use +# The interface for TTN matches that of MPS. As such, you should be able to run any code that uses `SimulationAlgorithm.MPSxGate` by replacing it with `SimulationAlgorithm.TTNxGate`. Calling `prepare_circuit_mps` is no longer necessary, since `TTNxGate` can apply gates between non-neighbouring qubits. +# **NOTE**: If you are new to pytket-cutensornet, it is highly recommended to start reading the `mps_tutorial.ipynb` notebook instead. More details about the use of the library are discussed there (for instance, why and when to call `CuTensorNetHandle()`). + + +def random_graph_circuit(n_qubits: int, edge_prob: float, layers: int) -> Circuit: + """Random circuit with qubit connectivity determined by a random graph.""" + c = Circuit(n_qubits) + + for i in range(layers): + # Layer of TK1 gates + for q in range(n_qubits): + c.TK1(np.random.rand(), np.random.rand(), np.random.rand(), q) + + # Layer of CX gates + graph = nx.erdos_renyi_graph(n_qubits, edge_prob, directed=True) + qubit_pairs = list(graph.edges) + for pair in qubit_pairs: + c.CX(pair[0], pair[1]) + + return c + + +# For **exact** simulation, you can call `simulate` directly, providing the default `Config()`: + +simple_circ = random_graph_circuit(n_qubits=10, edge_prob=0.1, layers=1) + +with CuTensorNetHandle() as libhandle: + my_ttn = simulate(libhandle, simple_circ, SimulationAlgorithm.TTNxGate, Config()) + +# ## Obtain an amplitude from a TTN +# Let's first see how to get the amplitude of the state `|10100>` from the output of the previous circuit. + +state = int("10100", 2) +with CuTensorNetHandle() as libhandle: + my_ttn.update_libhandle(libhandle) + amplitude = my_ttn.get_amplitude(state) +print(amplitude) + +# Since this is a very small circuit, we can use `pytket`'s state vector simulator capabilities to verify that the state is correct by checking the amplitude of each of the computational states. + +state_vector = simple_circ.get_statevector() +n_qubits = len(simple_circ.qubits) + +correct_amplitude = [False] * (2**n_qubits) +with CuTensorNetHandle() as libhandle: + my_ttn.update_libhandle(libhandle) + for i in range(2**n_qubits): + correct_amplitude[i] = np.isclose(state_vector[i], my_ttn.get_amplitude(i)) + +print("Are all amplitudes correct?") +print(all(correct_amplitude)) + +# ## Sampling from a TTN +# Sampling and measurement from a TTN state is not currently supported. This will be added in an upcoming release. + +# # Approximate simulation +# We provide two policies for approximate simulation: +# * Bound the maximum value of the virtual bond dimension `chi`. 
If a bond dimension would increase past that point, we *truncate* (i.e. discard) the degrees of freedom that contribute the least to the state description. We can keep track of a lower bound of the error that this truncation causes. +# * Provide a value for acceptable two-qubit gate fidelity `truncation_fidelity`. After each two-qubit gate we truncate the dimension of virtual bonds as much as we can while guaranteeing the target gate fidelity. The more fidelity you require, the longer it will take to simulate. **Note**: this is *not* the final fidelity of the output state, but the fidelity per gate. +# Values for `chi` and `truncation_fidelity` can be set via `Config`. To showcase approximate simulation, let's define a circuit where exact TTN contraction would not be enough. + +circuit = random_graph_circuit(n_qubits=30, edge_prob=0.1, layers=1) + +# We can simulate it using bounded `chi` as follows: + +start = time() +with CuTensorNetHandle() as libhandle: + config = Config(chi=64, float_precision=np.float32) + bound_chi_ttn = simulate(libhandle, circuit, SimulationAlgorithm.TTNxGate, config) +end = time() +print("Time taken by approximate contraction with bounded chi:") +print(f"{round(end-start,2)} seconds") +print("\nLower bound of the fidelity:") +print(round(bound_chi_ttn.fidelity, 4)) + +# Alternatively, we can fix `truncation_fidelity` and let the bond dimension increase as necessary to satisfy it. + +start = time() +with CuTensorNetHandle() as libhandle: + config = Config(truncation_fidelity=0.99, float_precision=np.float32) + fixed_fidelity_ttn = simulate( + libhandle, circuit, SimulationAlgorithm.TTNxGate, config + ) +end = time() +print("Time taken by approximate contraction with fixed truncation fidelity:") +print(f"{round(end-start,2)} seconds") +print("\nLower bound of the fidelity:") +print(round(fixed_fidelity_ttn.fidelity, 4)) + +# # Contraction algorithms + +# We currently offer only one TTN-based simulation algorithm. +# * **TTNxGate**: Apply gates one by one to the TTN, canonicalising the TTN and truncating when necessary. + +# # Using the logger + +# You can request a verbose log to be produced during simulation, by assigning the `loglevel` argument when creating a `Config` instance. Currently, two log levels are supported (other than default, which is silent): +# - `logging.INFO` will print information about progress percent, memory currently occupied by the TTN and current fidelity. Additionally, some high level information of the current stage of the simulation is provided. +# - `logging.DEBUG` provides all of the messages from the loglevel above plus detailed information of the current operation being carried out and the values of important variables. +# **Note**: Due to technical issues with the `logging` module and Jupyter notebooks we need to reload the `logging` module. When working with python scripts and command line, just doing `import logging` is enough. 
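Unlike the MPS tutorial above, this file ends at the note and does not show the logger in action. Below is a minimal sketch of the equivalent TTN usage; it assumes the imports and the `circuit` defined earlier in this tutorial and simply mirrors the `logging.INFO` example from the MPS tutorial, so it should be read as an illustration rather than as part of the diff:

```python
from importlib import reload
import logging

reload(logging)  # Workaround for the logging module inside Jupyter notebooks

with CuTensorNetHandle() as libhandle:
    # Same approximate-simulation settings as above, now with INFO-level logging
    config = Config(
        truncation_fidelity=0.99, float_precision=np.float32, loglevel=logging.INFO
    )
    simulate(libhandle, circuit, SimulationAlgorithm.TTNxGate, config)
```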
diff --git a/examples/ttn_tutorial.ipynb b/examples/ttn_tutorial.ipynb new file mode 100644 index 00000000..d00ef37b --- /dev/null +++ b/examples/ttn_tutorial.ipynb @@ -0,0 +1 @@ +{"cells": [{"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["import numpy as np\n", "from time import time\n", "import matplotlib.pyplot as plt\n", "import networkx as nx\n", "from pytket import Circuit\n", "from pytket.circuit.display import render_circuit_jupyter"]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["from pytket.extensions.cutensornet.structured_state import (\n", " CuTensorNetHandle,\n", " Config,\n", " SimulationAlgorithm,\n", " simulate,\n", ")"]}, {"cell_type": "markdown", "metadata": {}, "source": ["# Introduction
\n", "This notebook provides examples of the usage of the TTN functionalities of `pytket_cutensornet`. For more information, see the docs at https://tket.quantinuum.com/extensions/pytket-cutensornet/api/index.html.
\n", "Some good references to learn about Tree Tensor Network state simulation:
\n", "- For an introduction into TTN based simulation of quantum circuits: https://arxiv.org/abs/2206.01000
\n", "- For an introduction on some of the optimisation concerns that are relevant to TTN: https://arxiv.org/abs/2209.03196
\n", "The implementation in pytket-cutensornet differs from previously published literature. I am still experimenting with the algorithm. I intend to write up a document detailing the approach, once I reach a stable version.
\n", "The main advantage of TTN over MPS is that it can be used to efficiently simulate circuits with richer qubit connectivity. This does **not** mean that TTN has an easy time simulating all-to-all connectivity, but it is far more flexible than MPS. TTN's strength is in simulating circuit where certain subsets of qubits interact densely with each other, and there is not that many gates acting on qubits in different subsets."]}, {"cell_type": "markdown", "metadata": {}, "source": ["# How to use
\n", "The interface for TTN matches that of MPS. As such, you should be able to run any code that uses `SimulationAlgorithm.MPSxGate` by replacing it with `SimulationAlgorithm.TTNxGate`. Calling `prepare_circuit_mps` is no longer necessary, since `TTNxGate` can apply gates between non-neighbouring qubits.
\n", "**NOTE**: If you are new to pytket-cutensornet, it is highly recommended to start reading the `mps_tutorial.ipynb` notebook instead. More details about the use of the library are discussed there (for instance, why and when to call `CuTensorNetHandle()`)."]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["def random_graph_circuit(n_qubits: int, edge_prob: float, layers: int) -> Circuit:\n", " \"\"\"Random circuit with qubit connectivity determined by a random graph.\"\"\"\n", " c = Circuit(n_qubits)\n", " for i in range(layers):\n", " # Layer of TK1 gates\n", " for q in range(n_qubits):\n", " c.TK1(np.random.rand(), np.random.rand(), np.random.rand(), q)\n\n", " # Layer of CX gates\n", " graph = nx.erdos_renyi_graph(n_qubits, edge_prob, directed=True)\n", " qubit_pairs = list(graph.edges)\n", " for pair in qubit_pairs:\n", " c.CX(pair[0], pair[1])\n", " return c"]}, {"cell_type": "markdown", "metadata": {}, "source": ["For **exact** simulation, you can call `simulate` directly, providing the default `Config()`:"]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["simple_circ = random_graph_circuit(n_qubits=10, edge_prob=0.1, layers=1)"]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["with CuTensorNetHandle() as libhandle:\n", " my_ttn = simulate(libhandle, simple_circ, SimulationAlgorithm.TTNxGate, Config())"]}, {"cell_type": "markdown", "metadata": {}, "source": ["## Obtain an amplitude from a TTN
\n", "Let's first see how to get the amplitude of the state `|10100>` from the output of the previous circuit."]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["state = int(\"10100\", 2)\n", "with CuTensorNetHandle() as libhandle:\n", " my_ttn.update_libhandle(libhandle)\n", " amplitude = my_ttn.get_amplitude(state)\n", "print(amplitude)"]}, {"cell_type": "markdown", "metadata": {}, "source": ["Since this is a very small circuit, we can use `pytket`'s state vector simulator capabilities to verify that the state is correct by checking the amplitude of each of the computational states."]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["state_vector = simple_circ.get_statevector()\n", "n_qubits = len(simple_circ.qubits)"]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["correct_amplitude = [False] * (2**n_qubits)\n", "with CuTensorNetHandle() as libhandle:\n", " my_ttn.update_libhandle(libhandle)\n", " for i in range(2**n_qubits):\n", " correct_amplitude[i] = np.isclose(state_vector[i], my_ttn.get_amplitude(i))"]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["print(\"Are all amplitudes correct?\")\n", "print(all(correct_amplitude))"]}, {"cell_type": "markdown", "metadata": {}, "source": ["## Sampling from a TTN
\n", "Sampling and measurement from a TTN state is not currently supported. This will be added in an upcoming release."]}, {"cell_type": "markdown", "metadata": {}, "source": ["# Approximate simulation
\n", "We provide two policies for approximate simulation:
\n", "* Bound the maximum value of the virtual bond dimension `chi`. If a bond dimension would increase past that point, we *truncate* (i.e. discard) the degrees of freedom that contribute the least to the state description. We can keep track of a lower bound of the error that this truncation causes.
\n", "* Provide a value for acceptable two-qubit gate fidelity `truncation_fidelity`. After each two-qubit gate we truncate the dimension of virtual bonds as much as we can while guaranteeing the target gate fidelity. The more fidelity you require, the longer it will take to simulate. **Note**: this is *not* the final fidelity of the output state, but the fidelity per gate.
\n", "Values for `chi` and `truncation_fidelity` can be set via `Config`. To showcase approximate simulation, let's define a circuit where exact TTN contraction would not be enough."]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["circuit = random_graph_circuit(n_qubits=30, edge_prob=0.1, layers=1)"]}, {"cell_type": "markdown", "metadata": {}, "source": ["We can simulate it using bounded `chi` as follows:"]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["start = time()\n", "with CuTensorNetHandle() as libhandle:\n", " config = Config(chi=64, float_precision=np.float32)\n", " bound_chi_ttn = simulate(libhandle, circuit, SimulationAlgorithm.TTNxGate, config)\n", "end = time()\n", "print(\"Time taken by approximate contraction with bounded chi:\")\n", "print(f\"{round(end-start,2)} seconds\")\n", "print(\"\\nLower bound of the fidelity:\")\n", "print(round(bound_chi_ttn.fidelity, 4))"]}, {"cell_type": "markdown", "metadata": {}, "source": ["Alternatively, we can fix `truncation_fidelity` and let the bond dimension increase as necessary to satisfy it."]}, {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": ["start = time()\n", "with CuTensorNetHandle() as libhandle:\n", " config = Config(truncation_fidelity=0.99, float_precision=np.float32)\n", " fixed_fidelity_ttn = simulate(\n", " libhandle, circuit, SimulationAlgorithm.TTNxGate, config\n", " )\n", "end = time()\n", "print(\"Time taken by approximate contraction with fixed truncation fidelity:\")\n", "print(f\"{round(end-start,2)} seconds\")\n", "print(\"\\nLower bound of the fidelity:\")\n", "print(round(fixed_fidelity_ttn.fidelity, 4))"]}, {"cell_type": "markdown", "metadata": {}, "source": ["# Contraction algorithms"]}, {"cell_type": "markdown", "metadata": {}, "source": ["We currently offer only one TTN-based simulation algorithm.
\n", "* **TTNxGate**: Apply gates one by one to the TTN, canonicalising the TTN and truncating when necessary."]}, {"cell_type": "markdown", "metadata": {}, "source": ["# Using the logger"]}, {"cell_type": "markdown", "metadata": {}, "source": ["You can request a verbose log to be produced during simulation, by assigning the `loglevel` argument when creating a `Config` instance. Currently, two log levels are supported (other than default, which is silent):
\n", "- `logging.INFO` will print information about progress percent, memory currently occupied by the TTN and current fidelity. Additionally, some high level information of the current stage of the simulation is provided.
\n", "- `logging.DEBUG` provides all of the messages from the loglevel above plus detailed information of the current operation being carried out and the values of important variables.
\n", "**Note**: Due to technical issues with the `logging` module and Jupyter notebooks we need to reload the `logging` module. When working with python scripts and command line, just doing `import logging` is enough."]}], "metadata": {"kernelspec": {"display_name": "Python 3", "language": "python", "name": "python3"}, "language_info": {"codemirror_mode": {"name": "ipython", "version": 3}, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.4"}}, "nbformat": 4, "nbformat_minor": 2} \ No newline at end of file diff --git a/lint-requirements.txt b/lint-requirements.txt index 3d9b6a8e..8788b2ea 100644 --- a/lint-requirements.txt +++ b/lint-requirements.txt @@ -1,2 +1,2 @@ -black~=23.12 -pylint~=3.0 \ No newline at end of file +black~=24.3 +pylint~=3.1 \ No newline at end of file diff --git a/pytket/extensions/cutensornet/mps/simulation.py b/pytket/extensions/cutensornet/mps/simulation.py deleted file mode 100644 index 79604570..00000000 --- a/pytket/extensions/cutensornet/mps/simulation.py +++ /dev/null @@ -1,246 +0,0 @@ -# Copyright 2019-2024 Quantinuum -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -## -# http://www.apache.org/licenses/LICENSE-2.0 -## -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from enum import Enum - -from random import choice # type: ignore -from collections import defaultdict # type: ignore -import numpy as np # type: ignore - -from pytket.circuit import Circuit, Command, Qubit -from pytket.transform import Transform -from pytket.architecture import Architecture -from pytket.passes import DefaultMappingPass -from pytket.predicates import CompilationUnit - -from pytket.extensions.cutensornet.general import set_logger -from .mps import CuTensorNetHandle, ConfigMPS, MPS -from .mps_gate import MPSxGate -from .mps_mpo import MPSxMPO - - -class ContractionAlg(Enum): - """An enum to refer to the MPS contraction algorithm. - - Each enum value corresponds to the class with the same name; see its docs for - information of the algorithm. - """ - - MPSxGate = 0 - MPSxMPO = 1 - - -def simulate( - libhandle: CuTensorNetHandle, - circuit: Circuit, - algorithm: ContractionAlg, - config: ConfigMPS, -) -> MPS: - """Simulate the given circuit and return the ``MPS`` representing the final state. - - Note: - A ``libhandle`` should be created via a ``with CuTensorNet() as libhandle:`` - statement. The device where the MPS is stored will match the one specified - by the library handle. - - The input ``circuit`` must be composed of one-qubit and two-qubit gates only. - Any gateset supported by ``pytket`` can be used. - - Two-qubit gates must act between adjacent qubits, i.e. on ``circuit.qubits[i]`` - and ``circuit.qubits[i+1]`` for any ``i``. If this is not satisfied by your - circuit, consider using ``prepare_circuit()`` on it. - - Args: - libhandle: The cuTensorNet library handle that will be used to carry out - tensor operations on the MPS. - circuit: The pytket circuit to be simulated. - algorithm: Choose between the values of the ``ContractionAlg`` enum. 
- config: The configuration object for simulation. - - Returns: - An instance of ``MPS`` containing (an approximation of) the final state - of the circuit. - """ - logger = set_logger("Simulation", level=config.loglevel) - - if algorithm == ContractionAlg.MPSxGate: - mps = MPSxGate( # type: ignore - libhandle, - circuit.qubits, - config, - ) - - elif algorithm == ContractionAlg.MPSxMPO: - mps = MPSxMPO( # type: ignore - libhandle, - circuit.qubits, - config, - ) - - # Sort the gates so there isn't much overhead from canonicalising back and forth. - logger.info( - "Ordering the gates in the circuit to reduce canonicalisation overhead." - ) - sorted_gates = _get_sorted_gates(circuit) - - logger.info("Running simulation...") - # Apply the gates - for i, g in enumerate(sorted_gates): - mps.apply_gate(g) - logger.info(f"Progress... {(100*i) // len(sorted_gates)}%") - - # Apply the batched operations that are left (if any) - mps._flush() - - # Apply the batched operations that are left (if any) - mps._flush() - - # Apply the circuit's phase to the leftmost tensor (any would work) - mps.tensors[0] = mps.tensors[0] * np.exp(1j * np.pi * circuit.phase) - - logger.info("Simulation completed.") - logger.info(f"Final MPS size={mps.get_byte_size() / 2**20} MiB") - logger.info(f"Final MPS fidelity={mps.fidelity}") - return mps - - -def prepare_circuit(circuit: Circuit) -> tuple[Circuit, dict[Qubit, Qubit]]: - """Prepares a circuit in a specific, ``MPS``-friendly, manner. - - Returns an equivalent circuit with the appropriate structure to be simulated by - an ``MPS`` algorithm. - - Note: - The qubits in the output circuit will be renamed. Implicit SWAPs may be added - to the circuit, meaning that the logical qubit held at the ``node[i]`` qubit - at the beginning of the circuit may differ from the one it holds at the end. - - Args: - circuit: The circuit to be simulated. - - Returns: - A tuple with an equivalent circuit with the appropriate structure and a - map of qubit names at the end of the circuit to their corresponding - original names. - """ - - # Implement it in a line architecture - cu = CompilationUnit(circuit) - architecture = Architecture([(i, i + 1) for i in range(circuit.n_qubits - 1)]) - DefaultMappingPass(architecture).apply(cu) - prep_circ = cu.circuit - Transform.DecomposeBRIDGE().apply(prep_circ) - - qubit_map: dict[Qubit, Qubit] = {} - for orig_q, arch_q in cu.final_map.items(): - assert isinstance(orig_q, Qubit) - assert isinstance(arch_q, Qubit) - qubit_map[arch_q] = orig_q - - return (prep_circ, qubit_map) - - -def _get_sorted_gates(circuit: Circuit) -> list[Command]: - """Sorts the list of gates, placing 2-qubit gates close to each other first. - - Returns an equivalent list of commands fixing the order of parallel gates so that - 2-qubit gates that are close to each other first. This reduces the overhead of - canonicalisation of the MPS, since we try to apply as many gates as we can on one - end of the MPS before we go to the other end. - - Args: - circuit: The original circuit. - - Returns: - The same gates, ordered in a beneficial way. 
- """ - - all_gates = circuit.get_commands() - sorted_gates = [] - # Keep track of the qubit at the center of the canonical form; start arbitrarily - current_qubit = circuit.qubits[0] - # Entries from `all_gates` that are not yet in `sorted_gates` - remaining = set(range(len(all_gates))) - - # Create the list of indices of gates acting on each qubit - gate_indices: dict[Qubit, list[int]] = defaultdict(list) - for i, g in enumerate(all_gates): - for q in g.qubits: - gate_indices[q].append(i) - # Apply all 1-qubit gates at the beginning of the circuit - for q, indices in gate_indices.items(): - while indices and len(all_gates[indices[0]].qubits) == 1: - i = indices.pop(0) - sorted_gates.append(all_gates[i]) - remaining.remove(i) - # Decide which 2-qubit gate to apply next - while remaining: - q_index = circuit.qubits.index(current_qubit) - # Find distance from q_index to first qubit with an applicable 2-qubit gate - left_distance = None - prev_q = current_qubit - for i, q in enumerate(reversed(circuit.qubits[:q_index])): - if ( - gate_indices[prev_q] - and gate_indices[q] - and gate_indices[prev_q][0] == gate_indices[q][0] - ): - left_distance = i - break - prev_q = q - right_distance = None - prev_q = current_qubit - for i, q in enumerate(circuit.qubits[q_index + 1 :]): - if ( - gate_indices[prev_q] - and gate_indices[q] - and gate_indices[prev_q][0] == gate_indices[q][0] - ): - right_distance = i - break - prev_q = q - # Choose the shortest distance - if left_distance is None and right_distance is None: - raise RuntimeError( - "Some two-qubit gate in the circuit is not acting between", - "nearest neighbour qubits. Consider using prepare_circuit().", - ) - elif left_distance is None: - assert right_distance is not None - current_qubit = circuit.qubits[q_index + right_distance] - elif right_distance is None: - current_qubit = circuit.qubits[q_index - left_distance] - elif left_distance < right_distance: - current_qubit = circuit.qubits[q_index - left_distance] - elif left_distance > right_distance: - current_qubit = circuit.qubits[q_index + right_distance] - else: - current_qubit = circuit.qubits[ - q_index + choice([-left_distance, right_distance]) - ] - # Apply the gate - i = gate_indices[current_qubit][0] - next_gate = all_gates[i] - sorted_gates.append(next_gate) - remaining.remove(i) - # Apply all 1-qubit gates after this gate - for q in next_gate.qubits: - gate_indices[q].pop(0) # Remove the 2-qubit gate `next_gate` - indices = gate_indices[q] - while indices and len(all_gates[indices[0]].qubits) == 1: - i = indices.pop(0) - sorted_gates.append(all_gates[i]) - remaining.remove(i) - - assert len(all_gates) == len(sorted_gates) - return sorted_gates diff --git a/pytket/extensions/cutensornet/mps/__init__.py b/pytket/extensions/cutensornet/structured_state/__init__.py similarity index 52% rename from pytket/extensions/cutensornet/mps/__init__.py rename to pytket/extensions/cutensornet/structured_state/__init__.py index ccc950c9..be9dd9f5 100644 --- a/pytket/extensions/cutensornet/mps/__init__.py +++ b/pytket/extensions/cutensornet/structured_state/__init__.py @@ -11,27 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -"""Module for circuit simulation by state evolution, with states represented as -Matrix Product States (MPS). Approximate tensor network contraction is supported. 
-For an example of its use, see ``examples/mps_tutorial.ipynb`` in +"""Module for circuit simulation by state evolution, where the state is +represented by a tensor network with a predefined structure. +Approximate tensor network contraction is supported. Both ``MPS`` and ``TTN`` +methods are provided. +For an example of its use, see the ``examples/`` folder at https://github.com/CQCL/pytket-cutensornet. """ -from .mps import ( - CuTensorNetHandle, - DirectionMPS, - ConfigMPS, - Handle, - Tensor, - MPS, -) +from .general import CuTensorNetHandle, Config, StructuredState +from .simulation import SimulationAlgorithm, simulate, prepare_circuit_mps -from .mps_gate import ( - MPSxGate, -) +from .mps import DirMPS, MPS +from .mps_gate import MPSxGate +from .mps_mpo import MPSxMPO -from .mps_mpo import ( - MPSxMPO, -) - -from .simulation import ContractionAlg, simulate, prepare_circuit +from .ttn import TTN, DirTTN +from .ttn_gate import TTNxGate diff --git a/pytket/extensions/cutensornet/structured_state/cut_rKaHyPar_sea20.ini b/pytket/extensions/cutensornet/structured_state/cut_rKaHyPar_sea20.ini new file mode 100644 index 00000000..14d10f58 --- /dev/null +++ b/pytket/extensions/cutensornet/structured_state/cut_rKaHyPar_sea20.ini @@ -0,0 +1,64 @@ +# general +mode=recursive +objective=cut +seed=-1 +cmaxnet=-1 +vcycles=0 +# main -> preprocessing -> min hash sparsifier +p-use-sparsifier=true +p-sparsifier-min-median-he-size=28 +p-sparsifier-max-hyperedge-size=1200 +p-sparsifier-max-cluster-size=10 +p-sparsifier-min-cluster-size=2 +p-sparsifier-num-hash-func=5 +p-sparsifier-combined-num-hash-func=100 +# main -> preprocessing -> community detection +p-detect-communities=true +p-detect-communities-in-ip=false +p-reuse-communities=false +p-max-louvain-pass-iterations=100 +p-min-eps-improvement=0.0001 +p-louvain-edge-weight=hybrid +p-large-he-threshold=1000 +# main -> preprocessing -> large he removal +p-smallest-maxnet-threshold=50000 +p-maxnet-removal-factor=0.01 +# main -> coarsening +c-type=heavy_lazy +c-s=3.25 +c-t=160 +# main -> coarsening -> rating +c-rating-score=heavy_edge +c-rating-use-communities=true +c-rating-heavy_node_penalty=multiplicative +c-rating-acceptance-criterion=best +c-fixed-vertex-acceptance-criterion=free_vertex_only +# main -> initial partitioning +i-mode=direct +i-technique=flat +# initial partitioning -> initial partitioning +i-algo=pool +i-runs=20 +# initial partitioning -> bin packing +i-bp-algorithm=worst_fit +i-bp-heuristic-prepacking=false +i-bp-early-restart=true +i-bp-late-restart=true +# initial partitioning -> local search +i-r-type=twoway_fm +i-r-runs=-1 +i-r-fm-stop=simple +i-r-fm-stop-i=50 +# main -> local search +r-type=twoway_fm_hyperflow_cutter +r-runs=-1 +r-fm-stop=adaptive_opt +r-fm-stop-alpha=1 +r-fm-stop-i=350 +# local_search -> flow scheduling and heuristics +r-flow-execution-policy=exponential +# local_search -> hyperflowcutter configuration +r-hfc-size-constraint=mf-style +r-hfc-scaling=16 +r-hfc-distance-based-piercing=true +r-hfc-mbc=true diff --git a/pytket/extensions/cutensornet/structured_state/general.py b/pytket/extensions/cutensornet/structured_state/general.py new file mode 100644 index 00000000..0d07c10e --- /dev/null +++ b/pytket/extensions/cutensornet/structured_state/general.py @@ -0,0 +1,402 @@ +# Copyright 2019-2024 Quantinuum +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +## +# http://www.apache.org/licenses/LICENSE-2.0 +## +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations # type: ignore +from abc import ABC, abstractmethod +import warnings +import logging +from typing import Any, Optional, Type + +import numpy as np # type: ignore + +from pytket.circuit import Command, Qubit +from pytket.pauli import QubitPauliString + +try: + import cupy as cp # type: ignore +except ImportError: + warnings.warn("local settings failed to import cupy", ImportWarning) +try: + import cuquantum.cutensornet as cutn # type: ignore +except ImportError: + warnings.warn("local settings failed to import cutensornet", ImportWarning) + + +# An alias for the CuPy type used for tensors +try: + Tensor = cp.ndarray +except NameError: + Tensor = Any + + +class CuTensorNetHandle: + """Initialise the cuTensorNet library with automatic workspace memory + management. + + Note: + Always use as ``with CuTensorNetHandle() as libhandle:`` so that cuTensorNet + handles are automatically destroyed at the end of execution. + + Attributes: + handle (int): The cuTensorNet library handle created by this initialisation. + device_id (int): The ID of the device (GPU) where cuTensorNet is initialised. + If not provided, defaults to ``cp.cuda.Device()``. + """ + + def __init__(self, device_id: Optional[int] = None): + self._is_destroyed = False + + # Make sure CuPy uses the specified device + cp.cuda.Device(device_id).use() + + dev = cp.cuda.Device() + self.device_id = int(dev) + + self.handle = cutn.create() + + def __enter__(self) -> CuTensorNetHandle: + return self + + def __exit__(self, exc_type: Any, exc_value: Any, exc_tb: Any) -> None: + cutn.destroy(self.handle) + self._is_destroyed = True + + +class Config: + """Configuration class for simulation using ``StructuredState``.""" + + def __init__( + self, + chi: Optional[int] = None, + truncation_fidelity: Optional[float] = None, + float_precision: Type[Any] = np.float64, + value_of_zero: float = 1e-16, + leaf_size: int = 8, + k: int = 4, + optim_delta: float = 1e-5, + loglevel: int = logging.WARNING, + ): + """Instantiate a configuration object for ``StructuredState`` simulation. + + Note: + Providing both a custom ``chi`` and ``truncation_fidelity`` will raise an + exception. Choose one or the other (or neither, for exact simulation). + + Args: + chi: The maximum value allowed for the dimension of the virtual + bonds. Higher implies better approximation but more + computational resources. If not provided, ``chi`` will be unbounded. + truncation_fidelity: Every time a two-qubit gate is applied, the virtual + bond will be truncated to the minimum dimension that satisfies + ``||^2 >= trucantion_fidelity``, where ``|psi>`` and ``|phi>`` + are the states before and after truncation (both normalised). + If not provided, it will default to its maximum value 1. + float_precision: The floating point precision used in tensor calculations; + choose from ``numpy`` types: ``np.float64`` or ``np.float32``. + Complex numbers are represented using two of such + ``float`` numbers. Default is ``np.float64``. + value_of_zero: Any number below this value will be considered equal to zero. 
+ Even when no ``chi`` or ``truncation_fidelity`` is provided, singular + values below this number will be truncated. + We suggest to use a value slightly below what your chosen + ``float_precision`` can reasonably achieve. For instance, ``1e-16`` for + ``np.float64`` precision (default) and ``1e-7`` for ``np.float32``. + leaf_size: For ``TTN`` simulation only. Sets the maximum number of + qubits in a leaf node when using ``TTN``. Default is 8. + k: For ``MPSxMPO`` simulation only. Sets the maximum number of layers + the MPO is allowed to have before being contracted. Increasing this + might increase fidelity, but it will also increase resource requirements + exponentially. Default value is 4. + optim_delta: For ``MPSxMPO`` simulation only. Sets the stopping criteria for + the optimisation when contracting the ``k`` layers of MPO. Stops when + the increase of fidelity between iterations is smaller than this value. + Default value is ``1e-5``. + loglevel: Internal logger output level. Use 30 for warnings only, 20 for + verbose and 10 for debug mode. + + Raises: + ValueError: If both ``chi`` and ``truncation_fidelity`` are fixed. + ValueError: If the value of ``chi`` is set below 2. + ValueError: If the value of ``truncation_fidelity`` is not in [0,1]. + """ + _CHI_LIMIT = 2**60 + if ( + chi is not None + and chi < _CHI_LIMIT + and truncation_fidelity is not None + and truncation_fidelity != 1.0 + ): + raise ValueError("Cannot fix both chi and truncation_fidelity.") + if chi is None: + chi = _CHI_LIMIT # In practice, this is like having it be unbounded + if truncation_fidelity is None: + truncation_fidelity = 1 + + if chi < 2: + raise ValueError("The max virtual bond dim (chi) must be >= 2.") + if truncation_fidelity < 0 or truncation_fidelity > 1: + raise ValueError("Provide a value of truncation_fidelity in [0,1].") + + self.chi = chi + self.truncation_fidelity = truncation_fidelity + + if float_precision is None or float_precision == np.float64: # Double precision + self._real_t = np.float64 # type: ignore + self._complex_t = np.complex128 # type: ignore + self._atol = 1e-12 + elif float_precision == np.float32: # Single precision + self._real_t = np.float32 # type: ignore + self._complex_t = np.complex64 # type: ignore + self._atol = 1e-4 + else: + allowed_precisions = [np.float64, np.float32] + raise TypeError( + f"Value of float_precision must be in {allowed_precisions}." + ) + self.zero = value_of_zero + + if value_of_zero > self._atol / 1000: + warnings.warn( + "Your chosen value_of_zero is relatively large. " + "Faithfulness of final fidelity estimate is not guaranteed.", + UserWarning, + ) + + if leaf_size >= 65: # Imposed to avoid bond ID collisions + # More than 20 qubits is already unreasonable for a leaf anyway + raise ValueError("Maximum allowed leaf_size is 65.") + + self.leaf_size = leaf_size + self.k = k + self.optim_delta = 1e-5 + self.loglevel = loglevel + + def copy(self) -> Config: + """Standard copy of the contents.""" + return Config( + chi=self.chi, + truncation_fidelity=self.truncation_fidelity, + float_precision=self._real_t, # type: ignore + value_of_zero=self.zero, + leaf_size=self.leaf_size, + k=self.k, + optim_delta=self.optim_delta, + loglevel=self.loglevel, + ) + + +class StructuredState(ABC): + """Class representing a Tensor Network state.""" + + @abstractmethod + def is_valid(self) -> bool: + """Verify that the tensor network state is valid. + + Returns: + False if a violation was detected or True otherwise. 
+ """ + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def apply_gate(self, gate: Command) -> StructuredState: + """Applies the gate to the StructuredState. + + Args: + gate: The gate to be applied. + + Returns: + ``self``, to allow for method chaining. + + Raises: + RuntimeError: If the ``CuTensorNetHandle`` is out of scope. + RuntimeError: If gate is not supported. + """ + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def apply_scalar(self, scalar: complex) -> StructuredState: + """Multiplies the state by a complex number. + + Args: + scalar: The complex number to be multiplied. + + Returns: + ``self``, to allow for method chaining. + """ + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def vdot(self, other: StructuredState) -> complex: + """Obtain the inner product of the two states: ````. + + It can be used to compute the squared norm of a state ``state`` as + ``state.vdot(state)``. The tensors within the state are not modified. + + Note: + The state that is conjugated is ``self``. + + Args: + other: The other ``StructuredState``. + + Returns: + The resulting complex number. + + Raises: + RuntimeError: If the two states do not have the same qubits. + RuntimeError: If the ``CuTensorNetHandle`` is out of scope. + """ + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def sample(self) -> dict[Qubit, int]: + """Returns a sample from a Z measurement applied on every qubit. + + Notes: + The contents of ``self`` are not updated. This is equivalent to applying + ``state = self.copy()`` then ``state.measure(state.get_qubits())``. + + Returns: + A dictionary mapping each qubit in the state to its 0 or 1 outcome. + """ + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def measure(self, qubits: set[Qubit]) -> dict[Qubit, int]: + """Applies a Z measurement on ``qubits``, updates the state and returns outcome. + + Notes: + After applying this function, ``self`` will contain the projected + state over the non-measured qubits. + + The resulting state has been normalised. + + Args: + qubits: The subset of qubits to be measured. + + Returns: + A dictionary mapping the given ``qubits`` to their measurement outcome, + i.e. either ``0`` or ``1``. + + Raises: + ValueError: If an element in ``qubits`` is not a qubit in the state. + """ + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def postselect(self, qubit_outcomes: dict[Qubit, int]) -> float: + """Applies a postselection, updates the states and returns its probability. + + Notes: + After applying this function, ``self`` will contain the projected + state over the non-postselected qubits. + + The resulting state has been normalised. + + Args: + qubit_outcomes: A dictionary mapping a subset of qubits to their + desired outcome value (either ``0`` or ``1``). + + Returns: + The probability of this postselection to occur in a measurement. + + Raises: + ValueError: If a key in ``qubit_outcomes`` is not a qubit in the state. + ValueError: If a value in ``qubit_outcomes`` is other than ``0`` or ``1``. + ValueError: If all of the qubits in the state are being postselected. + Instead, you may wish to use ``get_amplitude()``. 
+ """ + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def expectation_value(self, pauli_string: QubitPauliString) -> float: + """Obtains the expectation value of the Pauli string observable. + + Args: + pauli_string: A pytket object representing a tensor product of Paulis. + + Returns: + The expectation value. + + Raises: + ValueError: If a key in ``pauli_string`` is not a qubit in the state. + """ + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def get_fidelity(self) -> float: + """Returns the current fidelity of the state.""" + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def get_statevector(self) -> np.ndarray: + """Returns the statevector with qubits in Increasing Lexicographic Order (ILO). + + Raises: + ValueError: If there are no qubits left in the state. + """ + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def get_amplitude(self, state: int) -> complex: + """Returns the amplitude of the chosen computational state. + + Notes: + The result is equivalent to ``state.get_statevector[b]``, but this method + is faster when querying a single amplitude (or just a few). + + Args: + state: The integer whose bitstring describes the computational state. + The qubits in the bitstring are in increasing lexicographic order. + + Returns: + The amplitude of the computational state in ``self``. + """ + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def get_qubits(self) -> set[Qubit]: + """Returns the set of qubits that ``self`` is defined on.""" + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def get_byte_size(self) -> int: + """Returns the number of bytes ``self`` currently occupies in GPU memory.""" + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def get_device_id(self) -> int: + """Returns the identifier of the device (GPU) where the tensors are stored.""" + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def update_libhandle(self, libhandle: CuTensorNetHandle) -> None: + """Update the ``CuTensorNetHandle`` used by ``self``. Multiple + objects may use the same handle. + + Args: + libhandle: The new cuTensorNet library handle. + + Raises: + RuntimeError: If the device (GPU) where ``libhandle`` was initialised + does not match the one where the tensors of ``self`` are stored. + """ + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def copy(self) -> StructuredState: + """Returns a deep copy of ``self`` on the same device.""" + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def _flush(self) -> None: + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") diff --git a/pytket/extensions/cutensornet/mps/mps.py b/pytket/extensions/cutensornet/structured_state/mps.py similarity index 79% rename from pytket/extensions/cutensornet/mps/mps.py rename to pytket/extensions/cutensornet/structured_state/mps.py index 6f072241..83329f25 100644 --- a/pytket/extensions/cutensornet/mps/mps.py +++ b/pytket/extensions/cutensornet/structured_state/mps.py @@ -13,8 +13,7 @@ # limitations under the License. 
from __future__ import annotations # type: ignore import warnings -import logging -from typing import Any, Optional, Union +from typing import Union from enum import Enum from random import random # type: ignore @@ -26,7 +25,6 @@ warnings.warn("local settings failed to import cupy", ImportWarning) try: import cuquantum as cq # type: ignore - import cuquantum.cutensornet as cutn # type: ignore from cuquantum.cutensornet import tensor # type: ignore except ImportError: warnings.warn("local settings failed to import cutensornet", ImportWarning) @@ -36,168 +34,17 @@ from pytket.extensions.cutensornet.general import set_logger -# An alias so that `intptr_t` from CuQuantum's API (which is not available in -# base python) has some meaningful type name. -Handle = int -# An alias for the CuPy type used for tensors -try: - Tensor = cp.ndarray -except NameError: - Tensor = Any +from .general import CuTensorNetHandle, Config, StructuredState, Tensor -class DirectionMPS(Enum): +class DirMPS(Enum): """An enum to refer to relative directions within the MPS.""" LEFT = 0 RIGHT = 1 -class CuTensorNetHandle: - """Initialise the cuTensorNet library with automatic workspace memory - management. - - Note: - Always use as ``with CuTensorNetHandle() as libhandle:`` so that cuTensorNet - handles are automatically destroyed at the end of execution. - - Attributes: - handle (int): The cuTensorNet library handle created by this initialisation. - device_id (int): The ID of the device (GPU) where cuTensorNet is initialised. - If not provided, defaults to ``cp.cuda.Device()``. - """ - - def __init__(self, device_id: Optional[int] = None): - self.handle = cutn.create() - self._is_destroyed = False - - # Make sure CuPy uses the specified device - cp.cuda.Device(device_id).use() - - dev = cp.cuda.Device() - self.device_id = int(dev) - - def __enter__(self) -> CuTensorNetHandle: - return self - - def __exit__(self, exc_type: Any, exc_value: Any, exc_tb: Any) -> None: - cutn.destroy(self.handle) - self._is_destroyed = True - - -class ConfigMPS: - """Configuration class for simulation using MPS.""" - - def __init__( - self, - chi: Optional[int] = None, - truncation_fidelity: Optional[float] = None, - k: int = 4, - optim_delta: float = 1e-5, - float_precision: Union[np.float32, np.float64] = np.float64, # type: ignore - value_of_zero: float = 1e-16, - loglevel: int = logging.WARNING, - ): - """Instantiate a configuration object for MPS simulation. - - Note: - Providing both a custom ``chi`` and ``truncation_fidelity`` will raise an - exception. Choose one or the other (or neither, for exact simulation). - - Args: - chi: The maximum value allowed for the dimension of the virtual - bonds. Higher implies better approximation but more - computational resources. If not provided, ``chi`` will be unbounded. - truncation_fidelity: Every time a two-qubit gate is applied, the virtual - bond will be truncated to the minimum dimension that satisfies - ``||^2 >= trucantion_fidelity``, where ``|psi>`` and ``|phi>`` - are the states before and after truncation (both normalised). - If not provided, it will default to its maximum value 1. - k: If using MPSxMPO, the maximum number of layers the MPO is allowed to - have before being contracted. Increasing this might increase fidelity, - but it will also increase resource requirements exponentially. - Ignored if not using MPSxMPO. Default value is 4. - optim_delta: If using MPSxMPO, stopping criteria for the optimisation when - contracting the ``k`` layers of MPO. 
Stops when the increase of fidelity - between iterations is smaller than ``optim_delta``. - Ignored if not using MPSxMPO. Default value is ``1e-5``. - float_precision: The floating point precision used in tensor calculations; - choose from ``numpy`` types: ``np.float64`` or ``np.float32``. - Complex numbers are represented using two of such - ``float`` numbers. Default is ``np.float64``. - value_of_zero: Any number below this value will be considered equal to zero. - Even when no ``chi`` or ``truncation_fidelity`` is provided, singular - values below this number will be truncated. - We suggest to use a value slightly below what your chosen - ``float_precision`` can reasonably achieve. For instance, ``1e-16`` for - ``np.float64`` precision (default) and ``1e-7`` for ``np.float32``. - loglevel: Internal logger output level. Use 30 for warnings only, 20 for - verbose and 10 for debug mode. - - Raises: - ValueError: If both ``chi`` and ``truncation_fidelity`` are fixed. - ValueError: If the value of ``chi`` is set below 2. - ValueError: If the value of ``truncation_fidelity`` is not in [0,1]. - """ - if ( - chi is not None - and truncation_fidelity is not None - and truncation_fidelity != 1.0 - ): - raise ValueError("Cannot fix both chi and truncation_fidelity.") - if chi is None: - chi = 2**60 # In practice, this is like having it be unbounded - if truncation_fidelity is None: - truncation_fidelity = 1 - - if chi < 2: - raise ValueError("The max virtual bond dim (chi) must be >= 2.") - if truncation_fidelity < 0 or truncation_fidelity > 1: - raise ValueError("Provide a value of truncation_fidelity in [0,1].") - - self.chi = chi - self.truncation_fidelity = truncation_fidelity - - if float_precision is None or float_precision == np.float64: # Double precision - self._real_t = np.float64 # type: ignore - self._complex_t = np.complex128 # type: ignore - self._atol = 1e-12 - elif float_precision == np.float32: # Single precision - self._real_t = np.float32 # type: ignore - self._complex_t = np.complex64 # type: ignore - self._atol = 1e-4 - else: - allowed_precisions = [np.float64, np.float32] - raise TypeError( - f"Value of float_precision must be in {allowed_precisions}." - ) - self.zero = value_of_zero - - if value_of_zero > self._atol / 1000: - warnings.warn( - "Your chosen value_of_zero is relatively large. " - "Faithfulness of final fidelity estimate is not guaranteed.", - UserWarning, - ) - - self.k = k - self.optim_delta = optim_delta - self.loglevel = loglevel - - def copy(self) -> ConfigMPS: - """Standard copy of the contents.""" - return ConfigMPS( - chi=self.chi, - truncation_fidelity=self.truncation_fidelity, - k=self.k, - optim_delta=self.optim_delta, - float_precision=self._real_t, # type: ignore - value_of_zero=self.zero, - loglevel=self.loglevel, - ) - - -class MPS: +class MPS(StructuredState): """Represents a state as a Matrix Product State. Attributes: @@ -206,7 +53,7 @@ class MPS: and ``tensors[i+1]`` are connected in the MPS via a bond. All of the tensors are rank three, with the dimensions listed in ``.shape`` matching the left, right and physical bonds, in that order. - canonical_form (dict[int, Optional[DirectionMPS]]): A dictionary mapping + canonical_form (dict[int, Optional[DirMPS]]): A dictionary mapping positions to the canonical form direction of the corresponding tensor, or ``None`` if it the tensor is not canonicalised. 
qubit_position (dict[pytket.circuit.Qubit, int]): A dictionary mapping circuit @@ -221,9 +68,9 @@ def __init__( self, libhandle: CuTensorNetHandle, qubits: list[Qubit], - config: ConfigMPS, + config: Config, ): - """Initialise an MPS on the computational state ``|0>``. + """Initialise an MPS on the computational state ``|0>`` Note: A ``libhandle`` should be created via a ``with CuTensorNet() as libhandle:`` @@ -246,24 +93,24 @@ def __init__( n_tensors = len(qubits) if n_tensors == 0: # There's no initialisation to be done - return None + pass elif n_tensors == 1: raise ValueError("Please, provide at least two qubits.") + else: + self.qubit_position = {q: i for i, q in enumerate(qubits)} - self.qubit_position = {q: i for i, q in enumerate(qubits)} - - # Create the list of tensors - self.tensors = [] - self.canonical_form = {i: None for i in range(n_tensors)} + # Create the list of tensors + self.tensors: list[Tensor] = [] + self.canonical_form = {i: None for i in range(n_tensors)} - # Append each of the tensors initialised in state |0> - m_shape = (1, 1, 2) # Two virtual bonds (dim=1) and one physical - for i in range(n_tensors): - m_tensor = cp.empty(m_shape, dtype=self._cfg._complex_t) - # Initialise the tensor to ket 0 - m_tensor[0][0][0] = 1 - m_tensor[0][0][1] = 0 - self.tensors.append(m_tensor) + # Append each of the tensors initialised in state |0> + m_shape = (1, 1, 2) # Two virtual bonds (dim=1) and one physical + for i in range(n_tensors): + m_tensor = cp.empty(m_shape, dtype=self._cfg._complex_t) + # Initialise the tensor to ket 0 + m_tensor[0][0][0] = 1 + m_tensor[0][0][1] = 0 + self.tensors.append(m_tensor) def is_valid(self) -> bool: """Verify that the MPS object is valid. @@ -358,6 +205,18 @@ def apply_gate(self, gate: Command) -> MPS: return self + def apply_scalar(self, scalar: complex) -> MPS: + """Multiplies the state by a complex number. + + Args: + scalar: The complex number to be multiplied. + + Returns: + ``self``, to allow for method chaining. + """ + self.tensors[0] *= scalar + return self + def canonicalise(self, l_pos: int, r_pos: int) -> None: """Canonicalises the MPS object. @@ -374,13 +233,13 @@ def canonicalise(self, l_pos: int, r_pos: int) -> None: self._logger.debug(f"Start canonicalisation... l_pos={l_pos}, r_pos={r_pos}") for pos in range(l_pos): - self.canonicalise_tensor(pos, form=DirectionMPS.LEFT) + self.canonicalise_tensor(pos, form=DirMPS.LEFT) for pos in reversed(range(r_pos + 1, len(self))): - self.canonicalise_tensor(pos, form=DirectionMPS.RIGHT) + self.canonicalise_tensor(pos, form=DirMPS.RIGHT) self._logger.debug(f"Finished canonicalisation.") - def canonicalise_tensor(self, pos: int, form: DirectionMPS) -> None: + def canonicalise_tensor(self, pos: int, form: DirMPS) -> None: """Canonicalises a tensor from an MPS object. Applies the necessary gauge transformations so that the tensor at @@ -393,7 +252,7 @@ def canonicalise_tensor(self, pos: int, form: DirectionMPS) -> None: connected to its left bond and physical bond. Similarly for RIGHT. Raises: - ValueError: If ``form`` is not a value in ``DirectionMPS``. + ValueError: If ``form`` is not a value in ``DirMPS``. RuntimeError: If the ``CuTensorNetHandle`` is out of scope. 
""" if form == self.canonical_form[pos]: @@ -419,7 +278,7 @@ def canonicalise_tensor(self, pos: int, form: DirectionMPS) -> None: T = self.tensors[pos] # Assign the bond IDs - if form == DirectionMPS.LEFT: + if form == DirMPS.LEFT: next_pos = pos + 1 Tnext = self.tensors[next_pos] T_bonds = "vsp" @@ -427,7 +286,7 @@ def canonicalise_tensor(self, pos: int, form: DirectionMPS) -> None: R_bonds = "as" Tnext_bonds = "sVP" result_bonds = "aVP" - elif form == DirectionMPS.RIGHT: + elif form == DirMPS.RIGHT: next_pos = pos - 1 Tnext = self.tensors[next_pos] T_bonds = "svp" @@ -436,7 +295,7 @@ def canonicalise_tensor(self, pos: int, form: DirectionMPS) -> None: Tnext_bonds = "VsP" result_bonds = "VaP" else: - raise ValueError("Argument form must be a value in DirectionMPS.") + raise ValueError("Argument form must be a value in DirMPS.") # Apply QR decomposition self._logger.debug(f"QR decompose a {T.nbytes / 2**20} MiB tensor.") @@ -465,7 +324,7 @@ def canonicalise_tensor(self, pos: int, form: DirectionMPS) -> None: self.tensors[next_pos] = result self.canonical_form[next_pos] = None - def vdot(self, other: MPS) -> complex: + def vdot(self, other: MPS) -> complex: # type: ignore """Obtain the inner product of the two MPS: ````. It can be used to compute the squared norm of an MPS ``mps`` as @@ -475,7 +334,7 @@ def vdot(self, other: MPS) -> complex: The state that is conjugated is ``self``. Args: - other: The other MPS to compare against. + other: The other MPS. Returns: The resulting complex number. @@ -820,6 +679,10 @@ def expectation_value(self, pauli_string: QubitPauliString) -> float: self._logger.debug(f"Expectation value is {value.real}.") return value.real + def get_fidelity(self) -> float: + """Returns the current fidelity of the state.""" + return self.fidelity + def get_statevector(self) -> np.ndarray: """Returns the statevector with qubits in Increasing Lexicographic Order (ILO). diff --git a/pytket/extensions/cutensornet/mps/mps_gate.py b/pytket/extensions/cutensornet/structured_state/mps_gate.py similarity index 87% rename from pytket/extensions/cutensornet/mps/mps_gate.py rename to pytket/extensions/cutensornet/structured_state/mps_gate.py index 06864530..8c57f868 100644 --- a/pytket/extensions/cutensornet/mps/mps_gate.py +++ b/pytket/extensions/cutensornet/structured_state/mps_gate.py @@ -96,27 +96,16 @@ def _apply_2q_gate(self, positions: tuple[int, int], gate: Op) -> MPSxGate: l_pos = min(positions) r_pos = max(positions) + # Always canonicalise. Even in the case of exact simulation (no truncation) + # canonicalisation may reduce the bond dimension (thanks to reduced QR). + self.canonicalise(l_pos, r_pos) + # Figure out the new dimension of the shared virtual bond new_dim = 2 * min( self.get_virtual_dimensions(l_pos)[0], self.get_virtual_dimensions(r_pos)[1], ) - # Canonicalisation may be required if `new_dim` is larger than `chi` - # or if set by `truncation_fidelity` - if new_dim > self._cfg.chi or self._cfg.truncation_fidelity < 1: - # If truncation required, convert to canonical form before - # contracting. Avoids the need to apply gauge transformations - # to the larger tensor resulting from the contraction. 
- self.canonicalise(l_pos, r_pos) - - # Since canonicalisation may change the dimension of the bonds, - # we need to recalculate the value of `new_dim` - new_dim = 2 * min( - self.get_virtual_dimensions(l_pos)[0], - self.get_virtual_dimensions(r_pos)[1], - ) - # Load the gate's unitary to the GPU memory gate_unitary = gate.get_unitary().astype(dtype=self._cfg._complex_t, copy=False) gate_tensor = cp.asarray(gate_unitary, dtype=self._cfg._complex_t) @@ -129,6 +118,7 @@ def _apply_2q_gate(self, positions: tuple[int, int], gate: Op) -> MPSxGate: # r -> physical bond of the right tensor in the MPS # L -> left bond of the outcome of the gate # R -> right bond of the outcome of the gate + # S -> shared bond of the gate tensor's SVD # a,b,c -> the virtual bonds of the tensors if l_pos == positions[0]: @@ -136,19 +126,27 @@ def _apply_2q_gate(self, positions: tuple[int, int], gate: Op) -> MPSxGate: else: # Implicit swap gate_bonds = "RLrl" - left_bonds = "abl" - right_bonds = "bcr" - result_bonds = "acLR" + # Apply SVD on the gate tensor to remove any zero singular values ASAP + svd_method = tensor.SVDMethod( + abs_cutoff=self._cfg.zero, + partition="U", # Contract S directly into U + ) + # Apply the SVD decomposition using the configuration defined above + U, S, V = tensor.decompose( + f"{gate_bonds}->SLl,SRr", gate_tensor, method=svd_method, options=options + ) + assert S is None # Due to "partition" option in SVDMethod # Contract self._logger.debug("Contracting the two-qubit gate with its site tensors...") T = cq.contract( - gate_bonds + "," + left_bonds + "," + right_bonds + "->" + result_bonds, - gate_tensor, + f"SLl,abl,SRr,bcr->acLR", + U, self.tensors[l_pos], + V, self.tensors[r_pos], options=options, - optimize={"path": [(0, 1), (0, 1)]}, + optimize={"path": [(0, 1), (0, 1), (0, 1)]}, ) self._logger.debug(f"Intermediate tensor of size (MiB)={T.nbytes / 2**20}") @@ -187,7 +185,7 @@ def _apply_2q_gate(self, positions: tuple[int, int], gate: Op) -> MPSxGate: # remove any singular values below ``self._cfg.zero``. self._logger.debug(f"Truncating singular values below={self._cfg.zero}.") if self._cfg.zero > self._cfg._atol / 1000: - self._logger.info( # This was raised as a warning in ConfigMPS already + self._logger.info( # This was raised as a warning in Config already "Your chosen value_of_zero is relatively large. " "Faithfulness of final fidelity estimate is not guaranteed." ) diff --git a/pytket/extensions/cutensornet/mps/mps_mpo.py b/pytket/extensions/cutensornet/structured_state/mps_mpo.py similarity index 89% rename from pytket/extensions/cutensornet/mps/mps_mpo.py rename to pytket/extensions/cutensornet/structured_state/mps_mpo.py index 6e1fa49a..192d2566 100644 --- a/pytket/extensions/cutensornet/mps/mps_mpo.py +++ b/pytket/extensions/cutensornet/structured_state/mps_mpo.py @@ -29,11 +29,9 @@ warnings.warn("local settings failed to import cutensornet", ImportWarning) from pytket.circuit import Op, Qubit +from .general import CuTensorNetHandle, Tensor, Config from .mps import ( - CuTensorNetHandle, - DirectionMPS, - ConfigMPS, - Tensor, + DirMPS, MPS, ) from .mps_gate import MPSxGate @@ -49,7 +47,7 @@ def __init__( self, libhandle: CuTensorNetHandle, qubits: list[Qubit], - config: ConfigMPS, + config: Config, ): """Initialise an MPS on the computational state ``|0>``. 
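The SVD introduced in the hunk above relies on many two-qubit gates having operator Schmidt rank below 4, so the shared bond ``S`` can be smaller than the naive dimension. A standalone numpy sketch (no cuTensorNet needed) illustrating this for an ``XXPhase``-like unitary, built by hand up to phase conventions:

    import numpy as np

    # XXPhase(alpha) ~ exp(-i*(pi*alpha/2) * X(x)X) = cos(t)*I - i*sin(t)*X(x)X
    alpha = 0.3
    t = np.pi * alpha / 2
    X = np.array([[0.0, 1.0], [1.0, 0.0]])
    U = np.cos(t) * np.eye(4) - 1j * np.sin(t) * np.kron(X, X)

    # Reshape to axes (L, R, l, r) = (out_q0, out_q1, in_q0, in_q1) and group
    # (L, l) against (R, r), i.e. the "LRlr -> SLl, SRr" split used above.
    M = U.reshape(2, 2, 2, 2).transpose(0, 2, 1, 3).reshape(4, 4)
    print(np.linalg.svd(M, compute_uv=False).round(6))
    # Two of the four singular values are (numerically) zero, so the shared
    # bond S only needs dimension 2; the abs_cutoff in SVDMethod discards them.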
@@ -281,7 +279,7 @@ def _get_physical_bond(self, position: int) -> int: else: return self._new_bond_id() - def _get_column_bonds(self, position: int, direction: DirectionMPS) -> list[int]: + def _get_column_bonds(self, position: int, direction: DirMPS) -> list[int]: """Returns the unique identifier of all the left (right) virtual bonds of MPO tensors at ``position`` if ``direction`` is ``LEFT`` (``RIGHT``). @@ -290,17 +288,17 @@ def _get_column_bonds(self, position: int, direction: DirectionMPS) -> list[int] Raises: RuntimeError: If ``position`` is out of bounds. - ValueError: If ``direction`` is not a value in ``DirectionMPS``. + ValueError: If ``direction`` is not a value in ``DirMPS``. """ if position < 0 or position >= len(self): raise RuntimeError(f"Position {position} is out of bounds.") - if direction == DirectionMPS.LEFT: + if direction == DirMPS.LEFT: index = 1 # By convention, left bond at index 1 - elif direction == DirectionMPS.RIGHT: + elif direction == DirMPS.RIGHT: index = 2 # By convention, right bond at index 2 else: - raise ValueError("Argument form must be a value in DirectionMPS.") + raise ValueError("Argument form must be a value in DirMPS.") return [b_ids[index] for b_ids in self._bond_ids[position]] @@ -316,9 +314,9 @@ def _flush(self) -> None: l_cached_tensors: list[Tensor] = [] r_cached_tensors: list[Tensor] = [] - def update_sweep_cache(pos: int, direction: DirectionMPS) -> None: + def update_sweep_cache(pos: int, direction: DirMPS) -> None: """Given a position in the MPS and a sweeping direction (see - ``DirectionMPS``), calculate the tensor of the partial contraction + ``DirMPS``), calculate the tensor of the partial contraction of all MPS-MPO-vMPS* columns from ``pos`` towards ``direction``. Update the cache accordingly. Applies canonicalisation on the vMPS tensor before contracting. 
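The caching idea described in the ``update_sweep_cache`` docstring above can be illustrated without a GPU. The toy sketch below uses plain numpy and random MPS tensors with the same ``(left, right, physical)`` bond ordering; it builds the right-moving cache of partial column contractions for an inner product, omitting the MPO and the variational update for brevity:

    import numpy as np

    rng = np.random.default_rng(0)
    n, chi, d = 5, 3, 2
    dims = [1] + [chi] * (n - 1) + [1]
    psi = [rng.normal(size=(dims[i], dims[i + 1], d)) for i in range(n)]
    phi = [rng.normal(size=(dims[i], dims[i + 1], d)) for i in range(n)]

    # r_cache[0] always holds the contraction of the columns already swept over;
    # sweeping right-to-left, each new entry reuses the previous one, which is
    # the same bookkeeping update_sweep_cache performs for MPS-MPO-vMPS* columns.
    r_cache = [np.ones((1, 1))]
    for k in reversed(range(n)):
        # a, b: left bonds; s, v: right bonds; p: physical bond
        r_cache.insert(
            0, np.einsum("asp,bvp,sv->ab", psi[k].conj(), phi[k], r_cache[0])
        )

    overlap = complex(r_cache[0][0, 0])  # <psi|phi> once all columns are swept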
@@ -326,10 +324,10 @@ def update_sweep_cache(pos: int, direction: DirectionMPS) -> None: self._logger.debug("Updating the sweep cache...") # Canonicalise the tensor at ``pos`` - if direction == DirectionMPS.LEFT: - self._aux_mps.canonicalise_tensor(pos, form=DirectionMPS.RIGHT) - elif direction == DirectionMPS.RIGHT: - self._aux_mps.canonicalise_tensor(pos, form=DirectionMPS.LEFT) + if direction == DirMPS.LEFT: + self._aux_mps.canonicalise_tensor(pos, form=DirMPS.RIGHT) + elif direction == DirMPS.RIGHT: + self._aux_mps.canonicalise_tensor(pos, form=DirMPS.LEFT) # Glossary of bond IDs # p -> the physical bond of the MPS tensor @@ -363,26 +361,26 @@ def update_sweep_cache(pos: int, direction: DirectionMPS) -> None: interleaved_rep.append(mpo_bonds) # Also contract the previous (cached) tensor during the sweep - if direction == DirectionMPS.LEFT: + if direction == DirMPS.LEFT: if pos != len(self) - 1: # Otherwise, there is nothing cached yet interleaved_rep.append(r_cached_tensors[-1]) - r_cached_bonds = self._get_column_bonds(pos + 1, DirectionMPS.LEFT) + r_cached_bonds = self._get_column_bonds(pos + 1, DirMPS.LEFT) interleaved_rep.append(["r", "R"] + r_cached_bonds) - elif direction == DirectionMPS.RIGHT: + elif direction == DirMPS.RIGHT: if pos != 0: # Otherwise, there is nothing cached yet interleaved_rep.append(l_cached_tensors[-1]) - l_cached_bonds = self._get_column_bonds(pos - 1, DirectionMPS.RIGHT) + l_cached_bonds = self._get_column_bonds(pos - 1, DirMPS.RIGHT) interleaved_rep.append(["l", "L"] + l_cached_bonds) # Figure out the ID of the bonds of the contracted tensor - if direction == DirectionMPS.LEFT: + if direction == DirMPS.LEFT: # Take the left bond of each of the MPO tensors - result_bonds = self._get_column_bonds(pos, DirectionMPS.LEFT) + result_bonds = self._get_column_bonds(pos, DirMPS.LEFT) # Take the left virtual bond of both of the MPS interleaved_rep.append(["l", "L"] + result_bonds) - elif direction == DirectionMPS.RIGHT: + elif direction == DirMPS.RIGHT: # Take the right bond of each of the MPO tensors - result_bonds = self._get_column_bonds(pos, DirectionMPS.RIGHT) + result_bonds = self._get_column_bonds(pos, DirMPS.RIGHT) # Take the right virtual bond of both of the MPS interleaved_rep.append(["r", "R"] + result_bonds) @@ -390,11 +388,11 @@ def update_sweep_cache(pos: int, direction: DirectionMPS) -> None: T = cq.contract( *interleaved_rep, options={"handle": self._lib.handle, "device_id": self._lib.device_id}, - optimize={"samples": 1}, + optimize={"samples": 0}, ) - if direction == DirectionMPS.LEFT: + if direction == DirMPS.LEFT: r_cached_tensors.append(T) - elif direction == DirectionMPS.RIGHT: + elif direction == DirMPS.RIGHT: l_cached_tensors.append(T) self._logger.debug("Completed update of the sweep cache.") @@ -436,12 +434,12 @@ def update_variational_tensor( if left_tensor is not None: interleaved_rep.append(left_tensor) - left_tensor_bonds = self._get_column_bonds(pos - 1, DirectionMPS.RIGHT) + left_tensor_bonds = self._get_column_bonds(pos - 1, DirMPS.RIGHT) interleaved_rep.append(["l", "L"] + left_tensor_bonds) result_bonds[0] = "L" if right_tensor is not None: interleaved_rep.append(right_tensor) - right_tensor_bonds = self._get_column_bonds(pos + 1, DirectionMPS.LEFT) + right_tensor_bonds = self._get_column_bonds(pos + 1, DirMPS.LEFT) interleaved_rep.append(["r", "R"] + right_tensor_bonds) result_bonds[1] = "R" @@ -452,7 +450,7 @@ def update_variational_tensor( F = cq.contract( *interleaved_rep, options={"handle": self._lib.handle, "device_id": 
self._lib.device_id}, - optimize={"samples": 1}, + optimize={"samples": 0}, ) # Get the fidelity @@ -483,22 +481,22 @@ def update_variational_tensor( # Begin by doing a sweep towards the left that does not update # the variational tensors, but simply loads up the ``r_cached_tensors`` for pos in reversed(range(1, len(self))): - update_sweep_cache(pos, direction=DirectionMPS.LEFT) + update_sweep_cache(pos, direction=DirMPS.LEFT) prev_fidelity = -1.0 # Dummy value sweep_fidelity = 0.0 # Dummy value # Repeat sweeps until the fidelity converges - sweep_direction = DirectionMPS.RIGHT + sweep_direction = DirMPS.RIGHT while not np.isclose(prev_fidelity, sweep_fidelity, atol=self._cfg.optim_delta): self._logger.info(f"Doing another optimisation sweep...") prev_fidelity = sweep_fidelity - if sweep_direction == DirectionMPS.RIGHT: + if sweep_direction == DirMPS.RIGHT: sweep_fidelity = update_variational_tensor( pos=0, left_tensor=None, right_tensor=r_cached_tensors.pop() ) - update_sweep_cache(pos=0, direction=DirectionMPS.RIGHT) + update_sweep_cache(pos=0, direction=DirMPS.RIGHT) for pos in range(1, len(self) - 1): sweep_fidelity = update_variational_tensor( @@ -506,19 +504,19 @@ def update_variational_tensor( left_tensor=l_cached_tensors[-1], right_tensor=r_cached_tensors.pop(), ) - update_sweep_cache(pos, direction=DirectionMPS.RIGHT) + update_sweep_cache(pos, direction=DirMPS.RIGHT) # The last variational tensor is not updated; # it'll be the first in the next sweep - sweep_direction = DirectionMPS.LEFT + sweep_direction = DirMPS.LEFT - elif sweep_direction == DirectionMPS.LEFT: + elif sweep_direction == DirMPS.LEFT: sweep_fidelity = update_variational_tensor( pos=len(self) - 1, left_tensor=l_cached_tensors.pop(), right_tensor=None, ) - update_sweep_cache(pos=len(self) - 1, direction=DirectionMPS.LEFT) + update_sweep_cache(pos=len(self) - 1, direction=DirMPS.LEFT) for pos in reversed(range(1, len(self) - 1)): sweep_fidelity = update_variational_tensor( @@ -526,11 +524,11 @@ def update_variational_tensor( left_tensor=l_cached_tensors.pop(), right_tensor=r_cached_tensors[-1], ) - update_sweep_cache(pos, direction=DirectionMPS.LEFT) + update_sweep_cache(pos, direction=DirMPS.LEFT) # The last variational tensor is not updated; # it'll be the first in the next sweep - sweep_direction = DirectionMPS.RIGHT + sweep_direction = DirMPS.RIGHT self._logger.info( "Optimisation sweep completed. " diff --git a/pytket/extensions/cutensornet/structured_state/simulation.py b/pytket/extensions/cutensornet/structured_state/simulation.py new file mode 100644 index 00000000..0df64f1d --- /dev/null +++ b/pytket/extensions/cutensornet/structured_state/simulation.py @@ -0,0 +1,408 @@ +# Copyright 2019-2024 Quantinuum +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +## +# http://www.apache.org/licenses/LICENSE-2.0 +## +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import Optional +import warnings +from enum import Enum + +from pathlib import Path +from collections import defaultdict # type: ignore +import numpy as np # type: ignore + +import networkx as nx # type: ignore + +try: + import kahypar # type: ignore +except ImportError: + warnings.warn("local settings failed to import kahypar", ImportWarning) + +from pytket.circuit import Circuit, Command, Qubit +from pytket.transform import Transform +from pytket.architecture import Architecture +from pytket.passes import DefaultMappingPass +from pytket.predicates import CompilationUnit + +from pytket.extensions.cutensornet.general import set_logger +from .general import CuTensorNetHandle, Config, StructuredState +from .mps_gate import MPSxGate +from .mps_mpo import MPSxMPO +from .ttn_gate import TTNxGate + + +class SimulationAlgorithm(Enum): + """An enum to refer to the StructuredState contraction algorithm. + + Each enum value corresponds to the class with the same name; see its docs for + information about the algorithm. + """ + + TTNxGate = 0 + MPSxGate = 1 + MPSxMPO = 2 + + +def simulate( + libhandle: CuTensorNetHandle, + circuit: Circuit, + algorithm: SimulationAlgorithm, + config: Config, +) -> StructuredState: + """Simulates the circuit and returns the ``StructuredState`` of the final state. + + Note: + A ``libhandle`` should be created via a ``with CuTensorNet() as libhandle:`` + statement. The device where the ``StructuredState`` is stored will match the one + specified by the library handle. + + The input ``circuit`` must be composed of one-qubit and two-qubit gates only. + Any gateset supported by ``pytket`` can be used. + + Args: + libhandle: The cuTensorNet library handle that will be used to carry out + tensor operations. + circuit: The pytket circuit to be simulated. + algorithm: Choose between the values of the ``SimulationAlgorithm`` enum. + config: The configuration object for simulation. + + Returns: + An instance of ``StructuredState`` for (an approximation of) the final state + of the circuit. The instance be of the class matching ``algorithm``. + """ + logger = set_logger("Simulation", level=config.loglevel) + + logger.info( + "Ordering the gates in the circuit to reduce canonicalisation overhead." + ) + if algorithm == SimulationAlgorithm.MPSxGate: + state = MPSxGate( # type: ignore + libhandle, + circuit.qubits, + config, + ) + sorted_gates = _get_sorted_gates(circuit, algorithm) + + elif algorithm == SimulationAlgorithm.MPSxMPO: + state = MPSxMPO( # type: ignore + libhandle, + circuit.qubits, + config, + ) + sorted_gates = _get_sorted_gates(circuit, algorithm) + + elif algorithm == SimulationAlgorithm.TTNxGate: + qubit_partition = _get_qubit_partition(circuit, config.leaf_size) + state = TTNxGate( # type: ignore + libhandle, + qubit_partition, + config, + ) + sorted_gates = _get_sorted_gates(circuit, algorithm, qubit_partition) + + logger.info("Running simulation...") + # Apply the gates + for i, g in enumerate(sorted_gates): + state.apply_gate(g) + logger.info(f"Progress... 
{(100*i) // len(sorted_gates)}%") + + # Apply the batched operations that are left (if any) + state._flush() + + # Apply the circuit's phase to the state + state.apply_scalar(np.exp(1j * np.pi * circuit.phase)) + + logger.info("Simulation completed.") + logger.info(f"Final StructuredState size={state.get_byte_size() / 2**20} MiB") + logger.info(f"Final StructuredState fidelity={state.fidelity}") + return state + + +def prepare_circuit_mps(circuit: Circuit) -> tuple[Circuit, dict[Qubit, Qubit]]: + """Transpiles the circuit for it to be ``MPS``-friendly. + + Returns an equivalent circuit with the appropriate structure to be simulated by + an ``MPS`` algorithm. + + Note: + The qubits in the output circuit will be renamed. Implicit SWAPs may be added + to the circuit, meaning that the logical qubit held at the ``node[i]`` qubit + at the beginning of the circuit may differ from the one it holds at the end. + + Args: + circuit: The circuit to be simulated. + + Returns: + A tuple with an equivalent circuit with the appropriate structure and a + map of qubit names at the end of the circuit to their corresponding + original names. + """ + + # Implement it in a line architecture + cu = CompilationUnit(circuit) + architecture = Architecture([(i, i + 1) for i in range(circuit.n_qubits - 1)]) + DefaultMappingPass(architecture).apply(cu) + prep_circ = cu.circuit + Transform.DecomposeBRIDGE().apply(prep_circ) + + qubit_map: dict[Qubit, Qubit] = {} + for orig_q, arch_q in cu.final_map.items(): + assert isinstance(orig_q, Qubit) + assert isinstance(arch_q, Qubit) + qubit_map[arch_q] = orig_q + + return (prep_circ, qubit_map) + + +def _get_qubit_partition( + circuit: Circuit, max_q_per_leaf: int +) -> dict[int, list[Qubit]]: + """Returns a qubit partition for a TTN. + + Proceeds by recursive bisection of the qubit connectivity graph, so that + qubits that interact with each other less are connected by a common ancestor + closer to the root. + + Args: + circuit: The circuit to be simulated. + max_q_per_leaf: The maximum allowed number of qubits per node leaf + + Returns: + A dictionary describing the partition in the format expected by TTN. + + Raises: + RuntimeError: If gate acts on more than 2 qubits. + """ + + # Scan the circuit and generate the edges of the connectivity graph + edge_weights: dict[tuple[Qubit, Qubit], int] = dict() + for cmd in circuit.get_commands(): + if cmd.op.is_gate(): + if cmd.op.n_qubits == 2: + edge = (min(cmd.qubits), max(cmd.qubits)) + + if edge in edge_weights: + edge_weights[edge] += 1 + else: + edge_weights[edge] = 1 + + elif cmd.op.n_qubits > 2: + raise RuntimeError( + "Gates must act on only 1 or 2 qubits! " + + f"This is not satisfied by {cmd}." 
+ ) + + # Create the connectivity graph in NetworkX + connectivity_graph = nx.Graph() + connectivity_graph.add_nodes_from(circuit.qubits) + for (u, v), weight in edge_weights.items(): + connectivity_graph.add_edge(u, v, weight=weight) + + # Apply balanced bisections until each qubit group is small enough + partition = {0: circuit.qubits} + stop_bisec = False # Do at least one bisection (TTN reqs >1 leaf nodes) + + while not stop_bisec: + old_partition = partition.copy() + for key, group in old_partition.items(): + # Apply the balanced bisection on this group + (groupA, groupB) = _apply_kahypar_bisection( + connectivity_graph.subgraph(group), + ) + # Groups A and B are on the same subtree (key separated by +1) + partition[2 * key] = groupA + partition[2 * key + 1] = groupB + + # Stop if all groups have less than ``max_q_per_leaf`` qubits in them + stop_bisec = all(len(group) <= max_q_per_leaf for group in partition.values()) + + qubit_partition = {key: list(leaf_qubits) for key, leaf_qubits in partition.items()} + return qubit_partition + + +def _apply_kahypar_bisection( + graph: nx.Graph, +) -> tuple[list[Qubit], list[Qubit]]: + """Use KaHyPar to obtain a bisection of the graph. + + Returns: + Two lists, each containing the vertices in either group of the bisection. + """ + vertices = list(graph.nodes) + edges = list(graph.edges) + weight_dict = nx.get_edge_attributes(graph, "weight") + qubit_dict = {q: i for i, q in enumerate(vertices)} + + num_vertices = len(vertices) + num_edges = len(edges) + k = 2 # Number of groups in the partition + epsilon = 0.03 # Imbalance tolerance + + # Special case where the graph has no edges; KaHyPar cannot deal with it + if num_edges == 0: + # Just split the list of vertices in half + return (vertices[: num_vertices // 2], vertices[num_vertices // 2 :]) + + # KaHyPar expects the list of edges to be provided as a continuous set of vertices + # ``edge_stream`` where ``edge_indices`` indicates where each new edge begins + # (the latter is necessary because KaHyPar can accept hyperedges) + edge_stream = [qubit_dict[vertex] for edge in edges for vertex in edge] + edge_indices = [0] + [2 * (i + 1) for i in range(num_edges)] + edge_weights = [weight_dict[edge] for edge in edges] + vertex_weights = [1 for _ in range(num_vertices)] + + hypergraph = kahypar.Hypergraph( + num_vertices, + num_edges, + edge_indices, + edge_stream, + k, + edge_weights, + vertex_weights, + ) + + # Set up the configuration for KaHyPar + context = kahypar.Context() + context.setK(k) + context.setEpsilon(epsilon) + context.suppressOutput(True) + + # Load the default configuration file provided by the KaHyPar devs + ini_file = str(Path(__file__).parent / "cut_rKaHyPar_sea20.ini") + context.loadINIconfiguration(ini_file) + + # Run the partitioner + kahypar.partition(hypergraph, context) + partition_dict = {i: hypergraph.blockID(i) for i in range(hypergraph.numNodes())} + + # Obtain the two groups of qubits from ``partition_dict`` + groupA = [vertices[i] for i, block in partition_dict.items() if block == 0] + groupB = [vertices[i] for i, block in partition_dict.items() if block == 1] + + return (groupA, groupB) + + +def _get_sorted_gates( + circuit: Circuit, + algorithm: SimulationAlgorithm, + qubit_partition: Optional[dict[int, list[Qubit]]] = None, +) -> list[Command]: + """Sorts the list of gates so that there's less canonicalisation during simulation. 
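Pulling the pieces of this new module together, the following is an illustrative usage sketch of the ``simulate`` entry point defined above. It assumes ``Config``, ``CuTensorNetHandle``, ``SimulationAlgorithm`` and ``simulate`` are re-exported by ``pytket.extensions.cutensornet.structured_state``, that ``CuTensorNetHandle`` is used as a context manager as the docstrings indicate, and that an NVIDIA GPU is available; treat it as a sketch rather than the package's documented example:

    from pytket.circuit import Circuit
    from pytket.extensions.cutensornet.structured_state import (
        Config,
        CuTensorNetHandle,
        SimulationAlgorithm,
        simulate,
    )

    circ = Circuit(4)
    circ.H(0)
    for q in range(3):
        circ.CX(q, q + 1)

    with CuTensorNetHandle() as libhandle:
        cfg = Config(truncation_fidelity=0.9999)
        state = simulate(libhandle, circ, SimulationAlgorithm.MPSxGate, cfg)
        print(state.get_fidelity())    # lower bound on the fidelity of the result
        print(state.get_amplitude(0))  # amplitude of |0000>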
+ + Returns an equivalent list of commands fixing the order of parallel gates so that + 2-qubit gates that are close together are applied one after the other. This reduces + the overhead of canonicalisation during simulation. + + Args: + circuit: The original circuit. + algorithm: The simulation algorithm that will be used on this circuit. + qubit_partition: For TTN simulation algorithms only. A partition of the + qubits in the circuit into disjoint groups, describing the hierarchical + structure of the TTN. + + Returns: + The same gates, ordered in a beneficial way for the given algorithm. + """ + all_gates = circuit.get_commands() + sorted_gates = [] + # Entries from `all_gates` that are not yet in `sorted_gates` + remaining = set(range(len(all_gates))) + + # Do some precomputation depending on the algorithm + if algorithm in [SimulationAlgorithm.TTNxGate]: + if qubit_partition is None: + raise RuntimeError("You must provide a qubit partition!") + + leaf_of_qubit: dict[Qubit, int] = dict() + for leaf, qubits in qubit_partition.items(): + for q in qubits: + leaf_of_qubit[q] = leaf + + elif algorithm in [SimulationAlgorithm.MPSxGate, SimulationAlgorithm.MPSxMPO]: + idx_of_qubit = {q: i for i, q in enumerate(circuit.qubits)} + + else: + raise RuntimeError(f"Sorting gates for {algorithm} not supported.") + + # Create the list of indices of gates acting on each qubit + gate_indices: dict[Qubit, list[int]] = defaultdict(list) + for i, g in enumerate(all_gates): + for q in g.qubits: + gate_indices[q].append(i) + # Schedule all 1-qubit gates at the beginning of the circuit + for q, indices in gate_indices.items(): + while indices and len(all_gates[indices[0]].qubits) == 1: + i = indices.pop(0) + sorted_gates.append(all_gates[i]) + remaining.remove(i) + + # Decide which 2-qubit gate to apply next + last_qubits = [circuit.qubits[0], circuit.qubits[0]] # Arbitrary choice at start + while remaining: + # Gather all gates that have nothing in front of them at one of its qubits + reachable_gates = [gates[0] for gates in gate_indices.values() if gates] + # Among them, find those that are available in both qubits + available_gates: list[int] = [] + for gate_idx in reachable_gates: + gate_qubits = all_gates[gate_idx].qubits + assert len(gate_qubits) == 2 # Sanity check: all of them are 2-qubit gates + # If the first gate in both qubits coincides, then this gate is available + if gate_indices[gate_qubits[0]][0] == gate_indices[gate_qubits[1]][0]: + assert gate_indices[gate_qubits[0]][0] == gate_idx + available_gates.append(gate_idx) + # Sanity check: there is at least one available 2-qubit gate + assert available_gates + + # Find distance from last_qubits to current applicable 2-qubit gates + gate_distance: dict[int, int] = dict() + for gate_idx in available_gates: + gate_qubits = all_gates[gate_idx].qubits + + # Criterion for distance depends on the simulation algorithm + if algorithm in [SimulationAlgorithm.TTNxGate]: + gate_distance[gate_idx] = max( # Max common ancestor distance + leaf_of_qubit[last_qubits[0]] ^ leaf_of_qubit[gate_qubits[0]], + leaf_of_qubit[last_qubits[0]] ^ leaf_of_qubit[gate_qubits[1]], + leaf_of_qubit[last_qubits[1]] ^ leaf_of_qubit[gate_qubits[0]], + leaf_of_qubit[last_qubits[1]] ^ leaf_of_qubit[gate_qubits[1]], + ) + elif algorithm in [ + SimulationAlgorithm.MPSxGate, + SimulationAlgorithm.MPSxMPO, + ]: + gate_distance[gate_idx] = max( # Max linear distance between qubits + abs(idx_of_qubit[last_qubits[0]] - idx_of_qubit[gate_qubits[0]]), + abs(idx_of_qubit[last_qubits[0]] - 
idx_of_qubit[gate_qubits[1]]), + abs(idx_of_qubit[last_qubits[1]] - idx_of_qubit[gate_qubits[0]]), + abs(idx_of_qubit[last_qubits[1]] - idx_of_qubit[gate_qubits[1]]), + ) + else: + raise RuntimeError(f"Sorting gates for {algorithm} not supported.") + + # Choose the gate with shortest distance + chosen_gate_idx = min(gate_distance, key=gate_distance.get) # type: ignore + chosen_gate = all_gates[chosen_gate_idx] + + # Schedule the gate + last_qubits = chosen_gate.qubits + sorted_gates.append(chosen_gate) + remaining.remove(chosen_gate_idx) + # Schedule all 1-qubit gates after this gate + for q in last_qubits: + gate_indices[q].pop(0) # Remove the 2-qubit `chosen_gate` + indices = gate_indices[q] + while indices and len(all_gates[indices[0]].qubits) == 1: + i = indices.pop(0) + sorted_gates.append(all_gates[i]) + remaining.remove(i) + + assert len(all_gates) == len(sorted_gates) + return sorted_gates diff --git a/pytket/extensions/cutensornet/structured_state/ttn.py b/pytket/extensions/cutensornet/structured_state/ttn.py new file mode 100644 index 00000000..47e9770f --- /dev/null +++ b/pytket/extensions/cutensornet/structured_state/ttn.py @@ -0,0 +1,847 @@ +# Copyright 2019-2024 Quantinuum +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +## +# http://www.apache.org/licenses/LICENSE-2.0 +## +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations # type: ignore +import warnings +from typing import Optional, Union +from enum import IntEnum + +import math # type: ignore +import numpy as np # type: ignore + +try: + import cupy as cp # type: ignore +except ImportError: + warnings.warn("local settings failed to import cupy", ImportWarning) +try: + import cuquantum as cq # type: ignore + from cuquantum.cutensornet import tensor # type: ignore +except ImportError: + warnings.warn("local settings failed to import cutensornet", ImportWarning) + +from pytket.circuit import Command, Op, Qubit +from pytket.pauli import QubitPauliString + +from pytket.extensions.cutensornet.general import set_logger + +from .general import CuTensorNetHandle, Config, StructuredState, Tensor + + +class DirTTN(IntEnum): + """An enum to refer to relative directions within the TTN.""" + + PARENT = -1 + LEFT = 0 + RIGHT = 1 + + +# An alias for the TTN path from root to a TreeNode +RootPath = tuple[DirTTN, ...] + + +class TreeNode: + """Represents a single tensor in the TTN. + + The shape of the tensor agrees with the convention set in ``DirTTN``, meaning + that ``tensor.shape[DirTTN.PARENT]`` corresponds to the dimension of the bond + connecting this tree node with its parent. Notice that, since DirTTN.PARENT is + -1, this is always the last entry. + + In the case the TreeNode is a leaf, it will contain only one virtual bond + (the parent) and as many physical bonds as qubits in the group it represents. + These qubits will correspond to bonds from ``0`` to ``len(tensor.shape)-2``. 
+ """ + + def __init__(self, tensor: Tensor, is_leaf: bool = False): + self.tensor = tensor + self.is_leaf = is_leaf + self.canonical_form: Optional[DirTTN] = None + + def copy(self) -> TreeNode: + new_node = TreeNode( + self.tensor.copy(), + is_leaf=self.is_leaf, + ) + new_node.canonical_form = self.canonical_form + return new_node + + +class TTN(StructuredState): + """Represents a state as a Tree Tensor Network. + + Attributes: + nodes (dict[RootPath, TreeNode]): A dictionary providing the tree node + of the given root path in the TTN. + qubit_position (dict[pytket.circuit.Qubit, tuple[RootPath, int]]): A dictionary + mapping circuit qubits to their address in the TTN. + fidelity (float): A lower bound of the fidelity, obtained by multiplying + the fidelities after each contraction. The fidelity of a contraction + corresponds to ``||^2`` where ``|psi>`` and ``|phi>`` are the + states before and after truncation (assuming both are normalised). + """ + + def __init__( + self, + libhandle: CuTensorNetHandle, + qubit_partition: dict[int, list[Qubit]], + config: Config, + ): + """Initialise a TTN on the computational state ``|0>``. + + Note: + A ``libhandle`` should be created via a ``with CuTensorNet() as libhandle:`` + statement. The device where the TTN is stored will match the one specified + by the library handle. + + The current implementation requires the keys of ``qubit_partition`` to be + integers from ``0`` to ``2^l - 1`` for some ``l``. + + Args: + libhandle: The cuTensorNet library handle that will be used to carry out + tensor operations on the TTN. + qubit_partition: A partition of the qubits in the circuit into disjoint + groups, describing the hierarchical structure of the TTN. Each key + identifies a leaf of the TTN, with its corresponding value indicating + the list of qubits represented by the leaf. The leaves are numbered + from left to right on a planar representation of the tree. Hence, the + smaller half of the keys correspond to leaves in the left subtree and + the rest are in the right subtree; providing recursive bipartitions. + config: The object describing the configuration for simulation. + + Raises: + ValueError: If the keys of ``qubit_partition`` do not range from ``0`` to + ``2^l - 1`` for some ``l``. + ValueError: If a ``Qubit`` is repeated in ``qubit_partition``. + ValueError: If there is only one entry in ``qubit_partition``. + """ + self._lib = libhandle + self._cfg = config + self._logger = set_logger("TTN", level=config.loglevel) + self.fidelity = 1.0 + self.nodes: dict[RootPath, TreeNode] = dict() + self.qubit_position: dict[Qubit, tuple[RootPath, int]] = dict() + + n_groups = len(qubit_partition) + if n_groups == 0: # There's no initialisation to be done + pass + elif n_groups == 1: + raise ValueError( + "Only one entry to qubit_partition provided." + "Introduce a finer partition of qubits." + ) + else: + n_levels = math.floor(math.log2(n_groups)) + if n_groups != 2**n_levels: + raise ValueError( + "The number of entries in qubit_partition must be a power of two." + ) + + # Create the TreeNodes of the different groups of qubits + for k, qubits in qubit_partition.items(): + if k < 0 or k >= n_groups: + raise ValueError( + f"Keys of qubit_partition must range from 0 to {n_groups-1}." 
+ ) + + # Calculate the root path of this group + path = [] + for l in reversed(range(n_levels)): + if k < 2**l: + path.append(DirTTN.LEFT) + else: + path.append(DirTTN.RIGHT) + k -= 2**l + + # Add each qubit to the qubit_position dictionary + for i, q in enumerate(qubits): + if q in self.qubit_position: + raise ValueError( + f"Qubit {q} appears more than once in qubit_partition." + ) + self.qubit_position[q] = (tuple(path), i) + + # This tensor has a physical bond per qubit and one virtual bond at the + # end for the parent (dim=1) + shape = tuple([2] * len(qubits) + [1]) + # Initialise the tensor of this group of qubits to |0> + tensor = cp.zeros(shape=shape, dtype=self._cfg._complex_t) + ket_zero_entry = tuple(0 for _ in shape) # Index 0 on all bonds + tensor[ket_zero_entry] = 1 # Amplitude of |0> set to 1 + + # Create the TreeNode + node = TreeNode(tensor, is_leaf=True) + self.nodes[tuple(path)] = node + + # Create the internal TreeNodes + paths: list[list[DirTTN]] = [[]] + for _ in range(n_levels): + # Create the TreeNode at this path + for p in paths: + tensor = cp.ones(shape=(1, 1, 1), dtype=self._cfg._complex_t) + self.nodes[tuple(p)] = TreeNode(tensor) + # Generate the paths for the next level + paths = [ + p + [direction] + for p in paths + for direction in [DirTTN.LEFT, DirTTN.RIGHT] + ] + self._logger.debug(f"qubit_position={self.qubit_position}") + self._logger.debug(f"All root paths: {list(self.nodes.keys())}") + + def is_valid(self) -> bool: + """Verify that the TTN object is valid. + + Specifically, verify that the TTN does not exceed the dimension limit ``chi`` + specified in the ``Config`` object, that physical bonds have dimension 2, + that all tensors except the leaves are rank three and that tensors have shapes + consistent with the bond dimensions. + + Returns: + False if a violation was detected or True otherwise. + """ + chi_ok = all( + self.get_dimension(path, DirTTN.PARENT) <= self._cfg.chi + for path in self.nodes.keys() + ) + phys_ok = all( + self.nodes[path].tensor.shape[bond] == 2 + for path, bond in self.qubit_position.values() + ) + rank_ok = all( + node.is_leaf or len(node.tensor.shape) == 3 for node in self.nodes.values() + ) + shape_ok = all( + self.get_dimension(path, DirTTN.PARENT) + == self.get_dimension(path[:-1], path[-1]) + for path in self.nodes.keys() + if len(path) != 0 + ) + shape_ok = shape_ok and self.get_dimension((), DirTTN.PARENT) == 1 + + # Debugger logging + self._logger.debug( + "Checking validity of TTN... " + f"chi_ok={chi_ok}, " + f"phys_ok={phys_ok}, " + f"rank_ok={rank_ok}, " + f"shape_ok={shape_ok}" + ) + return chi_ok and phys_ok and rank_ok and shape_ok + + def apply_gate(self, gate: Command) -> TTN: + """Apply the gate to the TTN. + + Note: + Only single-qubit gates and two-qubit gates are supported. + + Args: + gate: The gate to be applied. + + Returns: + ``self``, to allow for method chaining. + + Raises: + RuntimeError: If the ``CuTensorNetHandle`` is out of scope. + RuntimeError: If gate acts on more than 2 qubits. 
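For readers following the leaf-indexing loop in ``TTN.__init__`` above, the mapping from a leaf key ``k`` to its ``RootPath`` can be reproduced in isolation. A small self-contained sketch (duplicating the ``DirTTN`` enum locally rather than importing it) for a four-leaf tree:

    import math
    from enum import IntEnum

    class DirTTN(IntEnum):  # mirrors the enum defined in ttn.py above
        PARENT = -1
        LEFT = 0
        RIGHT = 1

    def leaf_path(k: int, n_groups: int) -> tuple[DirTTN, ...]:
        """Root path of the k-th leaf, mirroring the loop in TTN.__init__."""
        n_levels = math.floor(math.log2(n_groups))
        path = []
        for l in reversed(range(n_levels)):
            if k < 2**l:
                path.append(DirTTN.LEFT)
            else:
                path.append(DirTTN.RIGHT)
                k -= 2**l
        return tuple(path)

    print([leaf_path(k, 4) for k in range(4)])
    # keys 0..3 map to (LEFT, LEFT), (LEFT, RIGHT), (RIGHT, LEFT), (RIGHT, RIGHT)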
+ """ + if self._lib._is_destroyed: + raise RuntimeError( + "The cuTensorNet library handle is out of scope.", + "See the documentation of update_libhandle and CuTensorNetHandle.", + ) + + self._logger.debug(f"Applying gate {gate}") + + if len(gate.qubits) == 1: + self._apply_1q_gate(gate.qubits[0], gate.op) + + elif len(gate.qubits) == 2: + self._apply_2q_gate(gate.qubits[0], gate.qubits[1], gate.op) + + else: + # NOTE: This could be supported if gate acts on same group of qubits + raise RuntimeError( + "Gates must act on only 1 or 2 qubits! " + + f"This is not satisfied by {gate}." + ) + + return self + + def apply_scalar(self, scalar: complex) -> TTN: + """Multiplies the state by a complex number. + + Args: + scalar: The complex number to be multiplied. + + Returns: + ``self``, to allow for method chaining. + """ + self.nodes[()].tensor *= scalar + return self + + def canonicalise( + self, center: Union[RootPath, Qubit], unsafe: bool = False + ) -> Tensor: + """Canonicalise the TTN so that all tensors are isometries from ``center``. + + Args: + center: Identifies the bond that is to become the center of the canonical + form. If it is a ``RootPath`` it refers to the parent bond of + ``self.nodes[center]``. If it is a ``Qubit`` it refers to its physical + bond. + unsafe: If True, the final state will be different than the starting one. + Specifically, the information in the returned bond tensor at ``center`` + is removed from the TTN. It is expected that the caller will reintroduce + the bond tensor after some processing (e.g. after SVD truncation). + + Returns: + The bond tensor created at ``center`` when canonicalisation is complete. + Applying SVD to this tensor yields the global SVD of the TTN. + + Raises: + ValueError: If the ``center`` is ``tuple()``. 
+ """ + self._logger.debug(f"Canonicalising to {str(center)}") + options = {"handle": self._lib.handle, "device_id": self._lib.device_id} + + if isinstance(center, Qubit): + target_path = self.qubit_position[center][0] + assert not unsafe # Unsafe disallowed when ``center`` is a qubit + elif center == (): + raise ValueError("There is no bond at path ().") + else: + target_path = center + + # Separate nodes to be canonicalised towards children from those towards parent + towards_child = [] + towards_parent = [] + for path in self.nodes.keys(): + # Nodes towards children are closer to the root and coincide in the path + if len(path) < len(target_path) and all( + path[l] == target_path[l] for l in range(len(path)) + ): + towards_child.append(path) + # If the center is a physical bond (qubit), its node is skipped + elif path == target_path and isinstance(center, Qubit): + continue + # All other nodes are canonicalised towards their parent + else: + towards_parent.append(path) + # Sanity checks + assert len(towards_child) != 0 + assert len(towards_parent) != 0 + + # Glossary of bond IDs + # chr(x) -> bond of the x-th qubit in the node (if it is a leaf) + # l -> left child bond of the TTN node + # r -> right child bond of the TTN node + # p -> parent bond of the TTN node + # s -> bond between Q and R after decomposition + + # Canonicalise nodes towards parent, start from the furthest away from root + for path in sorted(towards_parent, key=len, reverse=True): + self._logger.debug(f"Canonicalising node at {path} towards parent.") + + # If already in desired canonical form, do nothing + if self.nodes[path].canonical_form == DirTTN.PARENT: + self._logger.debug("Skipping, already in canonical form.") + continue + + # Otherwise, apply QR decomposition + if self.nodes[path].is_leaf: + n_qbonds = len(self.nodes[path].tensor.shape) - 1 # Num of qubit bonds + q_bonds = "".join(chr(x) for x in range(n_qbonds)) + node_bonds = q_bonds + "p" + Q_bonds = q_bonds + "s" + else: + node_bonds = "lrp" + Q_bonds = "lrs" + R_bonds = "sp" + + Q, R = tensor.decompose( + node_bonds + "->" + Q_bonds + "," + R_bonds, + self.nodes[path].tensor, + method=tensor.QRMethod(), + options=options, + ) + + # Update the tensor + self.nodes[path].tensor = Q + self.nodes[path].canonical_form = DirTTN.PARENT + + # Contract R with the parent node + if path[-1] == DirTTN.LEFT: + R_bonds = "sl" + result_bonds = "srp" + else: + R_bonds = "sr" + result_bonds = "lsp" + node_bonds = "lrp" + + parent_node = self.nodes[path[:-1]] + parent_node.tensor = cq.contract( + R_bonds + "," + node_bonds + "->" + result_bonds, + R, + parent_node.tensor, + options=options, + optimize={"path": [(0, 1)]}, + ) + # The canonical form of the parent node is lost + parent_node.canonical_form = None + + self._logger.debug(f"Node canonicalised. Shape: {Q.shape}") + + # Canonicalise the rest of the nodes, from the root up to the center + for path in sorted(towards_child, key=len): + # Identify the direction of the canonicalisation + target_direction = target_path[len(path)] + # Sanity checks + assert not self.nodes[path].is_leaf + assert target_direction != DirTTN.PARENT + + self._logger.debug( + f"Canonicalising node at {path} towards {str(target_direction)}." 
+ ) + + # If already in the desired canonical form, do nothing + if self.nodes[path].canonical_form == target_direction: + self._logger.debug("Skipping, already in canonical form.") + continue + + # Otherwise, apply QR decomposition + if target_direction == DirTTN.LEFT: + Q_bonds = "srp" + R_bonds = "ls" + else: + Q_bonds = "lsp" + R_bonds = "rs" + node_bonds = "lrp" + + Q, R = tensor.decompose( + node_bonds + "->" + Q_bonds + "," + R_bonds, + self.nodes[path].tensor, + method=tensor.QRMethod(), + options=options, + ) + + # If the child bond is not the center yet, contract R with child node + child_path = tuple(list(path) + [target_direction]) + if child_path != target_path: + child_node = self.nodes[child_path] + + # Contract R with the child node + child_node.tensor = cq.contract( + "lrp,ps->lrs", + child_node.tensor, + R, + options=options, + optimize={"path": [(0, 1)]}, + ) + + # The canonical form of the child node is lost + child_node.canonical_form = None + # Update the tensor + self.nodes[path].tensor = Q + self.nodes[path].canonical_form = target_direction + + self._logger.debug(f"Node canonicalised. Shape: {Q.shape}") + + # If ``center`` is not a physical bond, we are done canonicalising and R is + # the tensor to return. Otherwise, we need to do a final contraction and QR + # decomposition on the leaf node corresponding to ``target_path``. + if isinstance(center, Qubit): + self._logger.debug( + f"Applying QR decomposition on leaf node at {target_path}." + ) + + leaf_node = self.nodes[target_path] + n_qbonds = len(leaf_node.tensor.shape) - 1 # Number of qubit bonds + q_bonds = "".join(chr(x) for x in range(n_qbonds)) + node_bonds = q_bonds + "p" + new_bonds = q_bonds + "s" + R_bonds = "ps" + + # Contract R with the leaf node + leaf_node.tensor = cq.contract( + node_bonds + "," + R_bonds + "->" + new_bonds, + leaf_node.tensor, + R, + options=options, + optimize={"path": [(0, 1)]}, + ) + + # The canonical form of the leaf node is lost + leaf_node.canonical_form = None + # Update the parent tensor + parent_path = target_path[:-1] + self.nodes[parent_path].tensor = Q + self.nodes[parent_path].canonical_form = target_path[-1] + self._logger.debug(f"Node canonicalised. Shape: {Q.shape}") + + # Finally, apply QR decomposition on the leaf_node to obtain the R + # tensor to be returned + target_bond = self.qubit_position[center][1] + Q_bonds = node_bonds[:target_bond] + "s" + node_bonds[target_bond + 1 :] + R_bonds = chr(target_bond) + "s" + + Q, R = tensor.decompose( + node_bonds + "->" + Q_bonds + "," + R_bonds, + leaf_node.tensor, + method=tensor.QRMethod(), + options=options, + ) + # Note: Since R is not contracted with any other tensor, we cannot update + # the leaf node to Q. That'd change the state represented by the TTN. + + # Otherwise, if ``unsafe`` is enabled, update the last tensor to Q + elif unsafe: + self.nodes[target_path[:-1]].tensor = Q + self.nodes[target_path[:-1]].canonical_form = target_path[-1] + + self._logger.debug(f"Node canonicalised (unsafe!). Shape: {Q.shape}") + + self._logger.debug( + f"Finished canonicalisation. Returning R tensor of shape {R.shape}" + ) + return R + + def vdot(self, other: TTN) -> complex: # type: ignore + """Obtain the inner product of the two TTN: ````. + + It can be used to compute the squared norm of a TTN ``ttn`` as + ``ttn.vdot(ttn)``. The tensors within the TTN are not modified. + + Note: + The state that is conjugated is ``self``. + + Args: + other: The other TTN. + + Returns: + The resulting complex number. 
+ + Raises: + RuntimeError: If the two TTNs do not have the same qubits. + RuntimeError: If the ``CuTensorNetHandle`` is out of scope. + """ + if self._lib._is_destroyed: + raise RuntimeError( + "The cuTensorNet library handle is out of scope.", + "See the documentation of update_libhandle and CuTensorNetHandle.", + ) + + if len(self.qubit_position) != len(other.qubit_position): + raise RuntimeError("Number of qubits do not match.") + if self.get_qubits() != other.get_qubits(): + raise RuntimeError( + "The sets of qubits are not the same." + "\n\tself has {self.get_qubits()}" + "\n\tother has {other.get_qubits()}" + ) + if len(self.qubit_position) == 0: + raise RuntimeError("There are no qubits in the TTN.") + + self._logger.debug("Applying vdot between two TTNs.") + + # We convert both TTNs to their interleaved representation and + # contract them using cuQuantum. A single sample is enough for + # contraction path optimisation, since there is little to optimise. + ttn1 = self.get_interleaved_representation(conj=True) + ttn2 = other.get_interleaved_representation(conj=False) + interleaved_rep = ttn1 + ttn2 + [[]] # Discards dim=1 bonds with [] + result = cq.contract( + *interleaved_rep, + options={"handle": self._lib.handle, "device_id": self._lib.device_id}, + optimize={"samples": 0}, # There is little to no optimisation to be done + ) + + self._logger.debug(f"Result from vdot={result}") + return complex(result) + + def sample(self) -> dict[Qubit, int]: + """Returns a sample from a Z measurement applied on every qubit. + + Notes: + The contents of ``self`` are not updated. This is equivalent to applying + ``state = self.copy()`` then ``state.measure(state.get_qubits())``. + + Returns: + A dictionary mapping each qubit in the state to its 0 or 1 outcome. + """ + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + def measure(self, qubits: set[Qubit]) -> dict[Qubit, int]: + """Applies a Z measurement on ``qubits``, updates the state and returns outcome. + + Notes: + After applying this function, ``self`` will contain the projected + state over the non-measured qubits. + + The resulting state has been normalised. + + Args: + qubits: The subset of qubits to be measured. + + Returns: + A dictionary mapping the given ``qubits`` to their measurement outcome, + i.e. either ``0`` or ``1``. + + Raises: + ValueError: If an element in ``qubits`` is not a qubit in the state. + """ + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + def postselect(self, qubit_outcomes: dict[Qubit, int]) -> float: + """Applies a postselection, updates the states and returns its probability. + + Notes: + After applying this function, ``self`` will contain the projected + state over the non-postselected qubits. + + The resulting state has been normalised. + + Args: + qubit_outcomes: A dictionary mapping a subset of qubits to their + desired outcome value (either ``0`` or ``1``). + + Returns: + The probability of this postselection to occur in a measurement. + + Raises: + ValueError: If a key in ``qubit_outcomes`` is not a qubit in the state. + ValueError: If a value in ``qubit_outcomes`` is other than ``0`` or ``1``. + ValueError: If all of the qubits in the state are being postselected. + Instead, you may wish to use ``get_amplitude()``. 
+ """ + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + def expectation_value(self, pauli_string: QubitPauliString) -> float: + """Obtains the expectation value of the Pauli string observable. + + Args: + pauli_string: A pytket object representing a tensor product of Paulis. + + Returns: + The expectation value. + + Raises: + ValueError: If a key in ``pauli_string`` is not a qubit in the state. + """ + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + def get_fidelity(self) -> float: + """Returns the current fidelity of the state.""" + return self.fidelity + + def get_statevector(self) -> np.ndarray: + """Returns the statevector represented by the TTN, with qubits ordered + in Increasing Lexicographic Order (ILO). + Raises: + ValueError: If there are no qubits left in the TTN. + """ + if len(self.get_qubits()) == 0: + raise ValueError("There are no qubits left in this TTN.") + + # Create the interleaved representation with all tensors + interleaved_rep = self.get_interleaved_representation() + + # Specify the output bond IDs in ILO order + output_bonds = [] + for q in sorted(self.get_qubits()): + output_bonds.append(str(q)) + interleaved_rep.append(output_bonds) + + # Contract + result_tensor = cq.contract( + *interleaved_rep, + options={"handle": self._lib.handle, "device_id": self._lib.device_id}, + optimize={"samples": 0}, # There is little to no optimisation to be done + ) + + # Convert to numpy vector and flatten + statevector: np.ndarray = cp.asnumpy(result_tensor).flatten() + return statevector + + def get_amplitude(self, state: int) -> complex: + """Returns the amplitude of the chosen computational state. + + Notes: + The result is equivalent to ``self.get_statevector[b]``, but this method + is faster when querying a single amplitude. + + Args: + state: The integer whose bitstring describes the computational state. + The qubits in the bitstring are in increasing lexicographic order. + + Returns: + The amplitude of the computational state in the TTN. + """ + + interleaved_rep = self.get_interleaved_representation() + ilo_qubits = sorted(self.get_qubits()) + + for i, q in enumerate(ilo_qubits): + # Create the tensors for each qubit in ``state`` + bitvalue = 1 if state & 2 ** (len(ilo_qubits) - i - 1) else 0 + tensor = cp.zeros(shape=(2,), dtype=self._cfg._complex_t) + tensor[bitvalue] = 1 + # Append it to the interleaved representation + interleaved_rep.append(tensor) + interleaved_rep.append([str(q)]) # The bond + # Ignore the dim=1 tensors in the output + interleaved_rep.append([]) + + # Contract + result = cq.contract( + *interleaved_rep, + options={"handle": self._lib.handle, "device_id": self._lib.device_id}, + optimize={"samples": 0}, # There is little to no optimisation to be done + ) + + self._logger.debug(f"Amplitude of state {state} is {result}.") + return complex(result) + + def get_qubits(self) -> set[Qubit]: + """Returns the set of qubits that this TTN is defined on.""" + return set(self.qubit_position.keys()) + + def get_interleaved_representation( + self, conj: bool = False + ) -> list[Union[Tensor, str]]: + """Returns the interleaved representation of the TTN used by cuQuantum. + + Args: + conj: If True, all tensors are conjugated and bonds IDs are prefixed + with * (except physical bonds). Defaults to False. 
+ """ + self._logger.debug("Creating interleaved representation...") + + # Auxiliar dictionary of physical bonds to qubit IDs + qubit_id = { + location: str(qubit) for qubit, location in self.qubit_position.items() + } + + interleaved_rep = [] + for path, node in self.nodes.items(): + # Append the tensor + if conj: + interleaved_rep.append(node.tensor.conj()) + else: + interleaved_rep.append(node.tensor) + + # Create the ID for the parent bond + parentID = "".join(str(int(d)) for d in path) + if conj: + parentID = "*" + parentID + + # Append the bonds + if node.is_leaf: + bonds = [] + for b in range(len(node.tensor.shape) - 1): + bonds.append(qubit_id[(path, b)]) + bonds.append(parentID) + else: + bonds = [parentID + "0", parentID + "1", parentID] + + interleaved_rep.append(bonds) + self._logger.debug(f"Bond IDs: {bonds}") + + return interleaved_rep + + def get_dimension(self, path: RootPath, direction: DirTTN) -> int: + """Returns the dimension of bond ``dir`` of the node at ``path``. + + Args: + path: The path to a node in the TTN. + direction: The direction of the bond. + + Returns: + The dimension of the specified bond. + + Raises: + ValueError: If ``path`` is not in the TTN. + """ + if path not in self.nodes: + raise ValueError(f"The path {path} is not in the TTN.") + + dim: int = self.nodes[path].tensor.shape[direction] + return dim + + def get_byte_size(self) -> int: + """ + Returns: + The number of bytes the TTN currently occupies in GPU memory. + """ + return sum(node.tensor.nbytes for node in self.nodes.values()) + + def get_device_id(self) -> int: + """ + Returns: + The identifier of the device (GPU) where the tensors are stored. + """ + return int(self.nodes[tuple()].tensor.device) + + def update_libhandle(self, libhandle: CuTensorNetHandle) -> None: + """Update the ``CuTensorNetHandle`` used by this ``TTN`` object. Multiple + objects may use the same handle. + + Args: + libhandle: The new cuTensorNet library handle. + + Raises: + RuntimeError: If the device (GPU) where ``libhandle`` was initialised + does not match the one where the tensors of the TTN are stored. + """ + if libhandle.device_id != self.get_device_id(): + raise RuntimeError( + "Device of libhandle is not the one where the TTN is stored.", + f"{libhandle.device_id} != {self.get_device_id()}", + ) + self._lib = libhandle + + def copy(self) -> TTN: + """ + Returns: + A deep copy of the TTN on the same device. + """ + + # Create a dummy object + new_ttn = TTN(self._lib, qubit_partition=dict(), config=self._cfg.copy()) + # Copy all data + new_ttn.fidelity = self.fidelity + new_ttn.nodes = {path: node.copy() for path, node in self.nodes.items()} + new_ttn.qubit_position = self.qubit_position.copy() + + self._logger.debug( + "Successfully copied a TTN " + f"of size {new_ttn.get_byte_size() / 2**20} MiB." + ) + return new_ttn + + def _apply_1q_gate(self, qubit: Qubit, gate: Op) -> TTN: + raise NotImplementedError( + "TTN is a base class with no contraction algorithm implemented." + + " You must use a subclass of TTN, such as TTNxGate." + ) + + def _apply_2q_gate(self, q0: Qubit, q1: Qubit, gate: Op) -> TTN: + raise NotImplementedError( + "TTN is a base class with no contraction algorithm implemented." + + " You must use a subclass of TTN, such as TTNxGate." + ) + + def _flush(self) -> None: + # Does nothing in the general MPS case; but children classes with batched + # gate contraction will redefine this method so that the last batch of + # gates is applied. 
+ return None diff --git a/pytket/extensions/cutensornet/structured_state/ttn_gate.py b/pytket/extensions/cutensornet/structured_state/ttn_gate.py new file mode 100644 index 00000000..d5183939 --- /dev/null +++ b/pytket/extensions/cutensornet/structured_state/ttn_gate.py @@ -0,0 +1,690 @@ +# Copyright 2019-2024 Quantinuum +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +## +# http://www.apache.org/licenses/LICENSE-2.0 +## +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations # type: ignore +import warnings + +try: + import cupy as cp # type: ignore +except ImportError: + warnings.warn("local settings failed to import cupy", ImportWarning) +try: + import cuquantum as cq # type: ignore + from cuquantum.cutensornet import tensor # type: ignore + from cuquantum.cutensornet.experimental import contract_decompose # type: ignore +except ImportError: + warnings.warn("local settings failed to import cutensornet", ImportWarning) + +from pytket.circuit import Op, Qubit +from .ttn import TTN, DirTTN, RootPath + + +class TTNxGate(TTN): + """Implements a gate-by-gate contraction algorithm to calculate the output state + of a circuit as a ``TTN``. + """ + + def _apply_1q_gate(self, qubit: Qubit, gate: Op) -> TTNxGate: + """Applies the 1-qubit gate to the TTN. + + This does not increase the dimension of any bond. + + Args: + qubit: The qubit that this gate is applied to. + gate: The gate to be applied. + + Returns: + ``self``, to allow for method chaining. + """ + + # Load the gate's unitary to the GPU memory + gate_unitary = gate.get_unitary().astype(dtype=self._cfg._complex_t, copy=False) + gate_tensor = cp.asarray(gate_unitary, dtype=self._cfg._complex_t) + + path, target = self.qubit_position[qubit] + node_tensor = self.nodes[path].tensor + n_qbonds = ( + len(node_tensor.shape) - 1 + ) # Total number of physical bonds in this node + + # Glossary of bond IDs + # qX -> where X is the X-th physical bond (qubit) in the TTN node + # p -> the parent bond of the TTN node + # i -> the input bond of the gate + # o -> the output bond of the gate + + node_bonds = [f"q{x}" for x in range(n_qbonds)] + ["p"] + result_bonds = node_bonds.copy() + node_bonds[target] = "i" # Target bond must match with the gate input bond + result_bonds[target] = "o" # After contraction it matches the output bond + + # Contract + new_tensor = cq.contract( + node_tensor, + node_bonds, + gate_tensor, + ["o", "i"], + result_bonds, + options={"handle": self._lib.handle, "device_id": self._lib.device_id}, + optimize={"path": [(0, 1)]}, + ) + + # Update ``self.nodes`` + # NOTE: Canonicalisation of the node does not change + self.nodes[path].tensor = new_tensor + return self + + def _apply_2q_gate(self, q0: Qubit, q1: Qubit, gate: Op) -> TTNxGate: + """Applies the 2-qubit gate to the TTN. + + Truncation is automatically applied according to the parameters + in the ``Config`` object passed to this ``TTN``. + The TTN is converted to canonical form before truncating. + + Args: + q0: The 0-th qubit the gate acts upon. + q1: The 1-st qubit the gate acts upon. + gate: The gate to be applied. 
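The contraction pattern used by ``_apply_1q_gate`` above (relabel the target physical bond as the gate's input bond and keep every other bond fixed) is easy to mimic in plain numpy. A toy sketch with a hypothetical three-qubit leaf tensor and a Hadamard gate:

    import numpy as np

    rng = np.random.default_rng(1)
    leaf = rng.normal(size=(2, 2, 2, 4))  # three qubit bonds plus a parent bond (dim 4)
    H = np.array([[1, 1], [1, -1]]) / np.sqrt(2)

    # Gate bonds are ("o", "i"); the target qubit bond of the node plays the role
    # of "i" and is replaced by "o" in the result, leaving all other bonds as-is.
    new_leaf = np.einsum("oi,aibp->aobp", H, leaf)
    assert new_leaf.shape == leaf.shape  # no bond dimension changes, as stated above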
+ + Returns: + ``self``, to allow for method chaining. + """ + options = {"handle": self._lib.handle, "device_id": self._lib.device_id} + + # Load the gate's unitary to the GPU memory + gate_unitary = gate.get_unitary().astype(dtype=self._cfg._complex_t, copy=False) + gate_tensor = cp.asarray(gate_unitary, dtype=self._cfg._complex_t) + # Reshape into a rank-4 tensor + gate_tensor = cp.reshape(gate_tensor, (2, 2, 2, 2)) + + (path_q0, bond_q0) = self.qubit_position[q0] + (path_q1, bond_q1) = self.qubit_position[q1] + + # Glossary of bond IDs + # a -> the input bond of the gate on q0 + # b -> the input bond of the gate on q1 + # A -> the output bond of the gate on q0 + # B -> the output bond of the gate on q1 + # S -> the shared bond of the gate tensor's SVD + # l -> left child bond of the TTN node + # r -> right child bond of the TTN node + # p -> the parent bond of the TTN node + # s -> the shared bond resulting from a decomposition + # chr(x) -> bond of the x-th qubit in a leaf node + gate_bonds = "ABab" + + # If the two qubits are in the same leaf node, contract the gate with it. + # No truncation is required. + if path_q0 == path_q1: + leaf_node = self.nodes[path_q0] + n_qbonds = len(leaf_node.tensor.shape) - 1 # Num of qubit bonds + aux_bonds = [chr(x) for x in range(n_qbonds)] + aux_bonds[bond_q0] = "a" + aux_bonds[bond_q1] = "b" + leaf_bonds = "".join(aux_bonds) + "p" + aux_bonds[bond_q0] = "A" + aux_bonds[bond_q1] = "B" + result_bonds = "".join(aux_bonds) + "p" + + self.nodes[path_q0].tensor = cq.contract( + f"{leaf_bonds},{gate_bonds}->{result_bonds}", + leaf_node.tensor, + gate_tensor, + options=options, + optimize={"path": [(0, 1)]}, + ) + + self._logger.debug( + "The qubits the gate acts on are on the same group. " + "Gate trivially applied, no dimensions changed." + ) + return self + + # Otherwise, we must include the gate in the common ancestor tensor and + # rewire the inputs and outputs. First, identify common path and direction + common_dirs = [] + for d0, d1 in zip(path_q0, path_q1): + if d0 == d1: + common_dirs.append(d0) + else: + break + common_path = tuple(common_dirs) + + # We begin by canonicalising to the left child bond of the common ancestor. + # This canonicalisation could be done later (just before truncation), but + # doing it now will prevent the need to recanonicalise the tensors that have + # grown (by a factor of x16) when introducing this gate. + # The choice of the left child bond is arbitrary, any bond in the TTN that + # is in the arc connecting qL to qR would have worked. + # + # NOTE: In fact, none of the tensors that are affected by the gate need to + # be canonicalised ahead of time, but I don't expect the saving would be + # particularly noticeable, and it'd require some non-trivial refactoring + # of `canonicalise()`. + self.canonicalise(center=(*common_path, DirTTN.LEFT)) + + # Apply SVD on the gate tensor to remove any zero singular values ASAP + svd_method = tensor.SVDMethod( + abs_cutoff=self._cfg.zero, + partition="U", # Contract S directly into U + ) + # Apply the SVD decomposition using the configuration defined above + U, S, V = tensor.decompose( + f"{gate_bonds}->SAa,SBb", gate_tensor, method=svd_method, options=options + ) + assert S is None # Due to "partition" option in SVDMethod + + # The overall strategy is to connect the `U` tensor above with the physical bond + # for `q0` in the TTN, so that its bond `A` becomes the new physical bond and + # the bond `S` is left dangling (open). 
We combine this `U` tensor with the
+        # leaf node of `q0` and QR-decompose the result, where the Q tensor will be the
+        # new (canonicalised) leaf node and R becomes our `msg_tensor`. The latter
+        # contains the open bond `S` and our objective is to "push" this `msg_tensor`
+        # through the TTN towards the leaf node of `q1`. Here, "push through" means
+        # contract with the next tensor, and apply QR decomposition, so that the
+        # `msg_tensor` carrying the open bond `S` ends up one bond closer to `q1`.
+        # Once `msg_tensor` is directly connected to the leaf node containing `q1`, we
+        # just need to apply the `V` tensor above to `q1` and connect its `S` bond with
+        # that of the `msg_tensor`.
+        bonds_to_q0 = [  # Bonds in the "arc" from the common ancestor to `q0`
+            path_q0[:i] for i in range(len(common_path) + 1, len(path_q0) + 1)
+        ]
+        # Sanity checks:
+        assert all(
+            len(bond_address) != len(common_path) for bond_address in bonds_to_q0
+        )
+        assert len(bonds_to_q0) == 1 or len(bonds_to_q0[0]) < len(bonds_to_q0[1])
+        assert len(bonds_to_q0[-1]) == len(path_q0)
+
+        bonds_to_q1 = [  # Bonds in the "arc" from the common ancestor to `q1`
+            path_q1[:i] for i in range(len(common_path) + 1, len(path_q1) + 1)
+        ]
+        # Sanity checks:
+        assert all(
+            len(bond_address) != len(common_path) for bond_address in bonds_to_q1
+        )
+        assert len(bonds_to_q1) == 1 or len(bonds_to_q1[0]) < len(bonds_to_q1[1])
+        assert len(bonds_to_q1[-1]) == len(path_q1)
+
+        # The `msg_tensor` has three bonds. Our convention will be that the first bond
+        # always corresponds to `S`, the second bond connects the `msg_tensor`
+        # to the TTN in the child direction and the third connects it to the TTN
+        # in the `DirTTN.PARENT` direction. If we label the second bond with `l`, then
+        # the third bond will be labelled `L` (and vice versa). Same for `r` and `p`.
+
+        # We begin applying the gate to the TTN by contracting `U` into the
+        # leaf node containing `q0`, with the `S` bond of the former left open.
+        # We immediately QR-decompose the resulting tensor, so that Q becomes the new
+        # (canonicalised) leaf node and R becomes the `msg_tensor` that we will be
+        # "pushing" through the rest of the arc towards `q1`.
+        leaf_node = self.nodes[path_q0]
+        n_qbonds = len(leaf_node.tensor.shape) - 1  # Num of qubit bonds
+        aux_bonds = [chr(x) for x in range(n_qbonds)]
+        aux_bonds[bond_q0] = "a"
+        leaf_bonds = "".join(aux_bonds) + "p"
+        aux_bonds[bond_q0] = "A"
+        Q_bonds = "".join(aux_bonds) + "s"
+        R_bonds = "Ssp"  # The `msg_tensor`
+        U_bonds = "SAa"
+
+        # Apply the contraction followed by a QR decomposition
+        leaf_node.tensor, msg_tensor = contract_decompose(
+            f"{leaf_bonds},{U_bonds}->{Q_bonds},{R_bonds}",
+            leaf_node.tensor,
+            U,
+            algorithm={"qr_method": tensor.QRMethod()},
+            options=options,
+            optimize={"path": [(0, 1)]},
+        )
+        # Update the canonical form of the leaf node
+        leaf_node.canonical_form = DirTTN.PARENT
+
+        # We must push the `msg_tensor` all the way to the common ancestor
+        # of `q0` and `q1`.
+        bond_addresses = list(reversed(bonds_to_q0))  # From `q0` to the ancestor
+
+        # For all of these nodes, push `msg_tensor` through to their parent bond
+        for child_bond in bond_addresses[:-1]:  # Doesn't do it on common ancestor!
+ child_dir = child_bond[-1] + parent_bond = child_bond[:-1] + node = self.nodes[parent_bond] + + node_bonds = "lrp" + msg_bonds = "SLl" if child_dir == DirTTN.LEFT else "SRr" + Q_bonds = "Lrs" if child_dir == DirTTN.LEFT else "lRs" + R_bonds = "Ssp" # The new `msg_tensor` + + self._logger.debug( + f"Pushing msg_tensor ({msg_tensor.nbytes // 2**20} MiB) through node " + f"({node.tensor.nbytes // 2**20} MiB) at {parent_bond}." + ) + + # Apply the contraction followed by a QR decomposition + node.tensor, msg_tensor = contract_decompose( + f"{node_bonds},{msg_bonds}->{Q_bonds},{R_bonds}", + node.tensor, + msg_tensor, + algorithm={"qr_method": tensor.QRMethod()}, + options=options, + optimize={"path": [(0, 1)]}, + ) + # Update the canonical form of the node + node.canonical_form = DirTTN.PARENT + + # The `msg_tensor` is now on a child bond of the common ancestor. + # We must push it through to the other child node. + child_bond = bond_addresses[-1] # This is where msg_tensor currently is + child_dir = child_bond[-1] + parent_bond = child_bond[:-1] + common_ancestor_node = self.nodes[parent_bond] + + node_bonds = "lrp" + msg_bonds = "SLl" if child_dir == DirTTN.LEFT else "SRr" + Q_bonds = "Lsp" if child_dir == DirTTN.LEFT else "sRp" + R_bonds = "Srs" if child_dir == DirTTN.LEFT else "Sls" # The new `msg_tensor` + + self._logger.debug( + f"Pushing msg_tensor ({msg_tensor.nbytes // 2**20} MiB) through node " + f"({common_ancestor_node.tensor.nbytes // 2**20} MiB) at {parent_bond}." + ) + + # Apply the contraction followed by a QR decomposition + common_ancestor_node.tensor, msg_tensor = contract_decompose( + f"{node_bonds},{msg_bonds}->{Q_bonds},{R_bonds}", + common_ancestor_node.tensor, + msg_tensor, + algorithm={"qr_method": tensor.QRMethod()}, + options=options, + optimize={"path": [(0, 1)]}, + ) + # Update the canonical form of the node + if child_dir == DirTTN.LEFT: + common_ancestor_node.canonical_form = DirTTN.RIGHT + else: + common_ancestor_node.canonical_form = DirTTN.LEFT + + # We must push the `msg_tensor` from the common ancestor to the leaf node + # containing `q1`. + bond_addresses = bonds_to_q1 # From ancestor to `q1` + + # For all of these nodes; push `msg_tensor` through to their child bond + for child_bond in bond_addresses[1:]: # Skip common ancestor: already pushed + child_dir = child_bond[-1] + parent_bond = child_bond[:-1] + node = self.nodes[parent_bond] + + node_bonds = "lrp" + msg_bonds = "SpP" + Q_bonds = "srP" if child_dir == DirTTN.LEFT else "lsP" + R_bonds = "Sls" if child_dir == DirTTN.LEFT else "Srs" # New `msg_tensor` + + self._logger.debug( + f"Pushing msg_tensor ({msg_tensor.nbytes // 2**20} MiB) through node " + f"({node.tensor.nbytes // 2**20} MiB) at {parent_bond}." + ) + + # Apply the contraction followed by a QR decomposition + node.tensor, msg_tensor = contract_decompose( + f"{node_bonds},{msg_bonds}->{Q_bonds},{R_bonds}", + node.tensor, + msg_tensor, + algorithm={"qr_method": tensor.QRMethod()}, + options=options, + optimize={"path": [(0, 1)]}, + ) + # Update the canonical form of the node + node.canonical_form = child_dir + + # Finally, the `msg_tensor` is in the parent bond of the leaf node of `q1`. + # All we need to do is contract the `msg_tensor` and `V` into the leaf. 
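+        # Bond bookkeeping for the contraction below: `S` is the bond shared by `V`
+        # and the `msg_tensor`, `p` matches the current parent bond of the leaf node,
+        # and `P` (the remaining bond of the `msg_tensor`) becomes the leaf's new
+        # parent bond after the contraction.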
+        leaf_node = self.nodes[path_q1]
+        n_qbonds = len(leaf_node.tensor.shape) - 1  # Num of qubit bonds
+        aux_bonds = [chr(x) for x in range(n_qbonds)]
+        aux_bonds[bond_q1] = "b"  # Connect `b` to `q1`
+        leaf_bonds = "".join(aux_bonds) + "p"
+        msg_bonds = "SpP"
+        V_bonds = "SBb"
+        aux_bonds[bond_q1] = "B"  # `B` becomes the new physical bond of `q1`
+        result_bonds = "".join(aux_bonds) + "P"
+
+        # Apply the contraction
+        leaf_node.tensor = cq.contract(
+            f"{leaf_bonds},{V_bonds},{msg_bonds}->{result_bonds}",
+            leaf_node.tensor,
+            V,
+            msg_tensor,
+            options=options,
+            optimize={"path": [(0, 1), (0, 1)]},
+        )
+        # The leaf node lost its canonical form
+        leaf_node.canonical_form = None
+
+        # Truncate (if needed) bonds along the arc from `q1` to `q0`.
+        # We truncate in this direction to take advantage of the canonicalisation
+        # of the TTN we achieved while pushing the `msg_tensor` from `q0` to `q1`.
+        if self._cfg.truncation_fidelity < 1:
+            # Truncate as much as possible before violating the truncation fidelity
+            self._fidelity_bound_sequential_weighted_truncation(
+                list(reversed(bonds_to_q1)), bonds_to_q0
+            )
+
+        else:
+            # Truncate so that all bonds have dimension less than or equal to chi
+            self._chi_sequential_truncation(list(reversed(bonds_to_q1)), bonds_to_q0)
+
+        return self
+
+    def _fidelity_bound_sequential_weighted_truncation(
+        self,
+        bonds_from_q1_to_ancestor: list[RootPath],
+        bonds_from_ancestor_to_q0: list[RootPath],
+    ) -> None:
+        """Truncate as much as possible up to the truncation fidelity.
+
+        Our target is to assign a local truncation fidelity `f_i` to each bond `i` in
+        the input lists so that the lower bound of the fidelity satisfies:
+
+            self.fidelity * prod(f_i) >= self.fidelity * truncation_fidelity    (A)
+
+        Let e_i = 1 - f_i, where we refer to `e_i` as the "truncation error at bond i".
+        We can use that when e_i is close to zero, the bound:
+
+            prod(1 - e_i) > 1 - sum(e_i)    (B)
+
+        is fairly tight, with an inaccuracy of an additive O(e_i^2) term. Hence, for
+        simplicity we take prod(f_i) ~ 1 - sum(e_i). Let
+
+            `admissible_error` = 1 - `truncation_fidelity`    (C)
+
+        and assign each e_i = w_i * `admissible_error` where 0 < w_i < 1 is a weight
+        factor such that sum(w_i) = 1. Thus, if each bond `i` is truncated to a fidelity
+
+            f_i = 1 - w_i * `admissible_error`    (D)
+
+        then the total fidelity factor on the LHS of equation (A) should approximate
+        `truncation_fidelity`. There is a risk of overshooting with truncation and
+        ending up with a new `self.fidelity` slightly lower than the target, but this
+        should be fine in practice, since `self.fidelity` is a lower bound anyway.
+        Each of the `w_i` weight factors is assigned depending on the bond dimension,
+        with larger bonds given higher weight, so they are truncated more aggressively.
+
+        Args:
+            bonds_from_q1_to_ancestor: A list of bonds (each as their RootPath address).
+                These bonds will be truncated. The list must be ordered in such a way
+                that consecutive bonds share a common tensor and such that the first
+                bond in the list corresponds to the leaf node that `q1` is assigned to
+                and the last bond in the list corresponds to the child bond of the
+                common ancestor between the leaves of `q0` and `q1`.
+            bonds_from_ancestor_to_q0: Same as above, but the list starts from the other
+                child bond of the common ancestor and ends at the leaf node that `q0`
+                is assigned to. Together, these two lists provide a path in the TTN
+                from the leaf node of `q1` to the leaf node of `q0`.
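+
+        Illustrative example (hypothetical numbers, assuming the current weighting
+        w_i = dim_i / sum(dim_i) used in the implementation below): with
+        `truncation_fidelity = 0.99` and three bonds of dimensions 4, 8 and 4, the
+        weights are w = (0.25, 0.5, 0.25), so the bonds are truncated to local
+        fidelities f = (0.9975, 0.995, 0.9975), whose product is roughly 0.99.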
+        """
+        self._logger.debug("Starting sequential weighted truncation (fidelity bound).")
+        initial_fidelity = self.fidelity
+
+        options = {"handle": self._lib.handle, "device_id": self._lib.device_id}
+        admissible_error = 1 - self._cfg.truncation_fidelity
+
+        # Combine the two lists of bonds, but remember at which entry the direction
+        # of the path is switched from going towards root to going towards leaves.
+        truncation_bonds = bonds_from_q1_to_ancestor + bonds_from_ancestor_to_q0
+        switch_direction_at = len(bonds_from_q1_to_ancestor)
+        towards_root = True  # First half of truncation_bonds is path towards ancestor
+
+        # Obtain the dimension of each bond
+        dimensions = [
+            self.get_dimension(bond, DirTTN.PARENT) for bond in truncation_bonds
+        ]
+        # Assign the weight `w_i` of each bond.
+        # NOTE: currently uses w_i = dim_i / sum(dim_i), for no other reason than
+        # that it is simple. Better weight functions may exist and research on this
+        # is desirable.
+        weights = [dim / sum(dimensions) for dim in dimensions]
+        # Assign a fidelity `f_i` to each bond.
+        bond_fidelities = [1 - w * admissible_error for w in weights]
+
+        # Truncate each bond as much as possible up to its assigned bond fidelity
+        for i, bond_address in enumerate(truncation_bonds):
+            dimension_before = self.get_dimension(bond_address, DirTTN.PARENT)
+
+            # Canonicalise to this bond (unsafely, so we must reintroduce bond_tensor)
+            bond_tensor = self.canonicalise(bond_address, unsafe=True)
+
+            # Flip ``towards_root`` if we have reached the common ancestor
+            # i.e. if the ``bond_tensor`` needs to go towards a child tensor rather
+            # than towards the parent
+            if switch_direction_at == i:
+                towards_root = False
+
+            # Apply SVD decomposition to truncate as much as possible before exceeding
+            # a `discarded_weight_cutoff` of `1 - f_i`. Contract S directly into U/V and
+            # normalise the singular values so that the sum of their squares is equal
+            # to one (i.e. the TTN is a normalised state after truncation).
+            self._logger.debug(
+                f"Truncating at {bond_address} to target fidelity={bond_fidelities[i]}"
+            )
+
+            svd_method = tensor.SVDMethod(
+                abs_cutoff=self._cfg.zero,
+                discarded_weight_cutoff=1 - bond_fidelities[i],
+                partition="V" if towards_root else "U",  # Contract S to parent or child
+                normalization="L2",  # Sum of squares singular values must equal 1
+            )
+
+            # Apply the SVD decomposition using the configuration defined above
+            U, S, V, svd_info = tensor.decompose(
+                "cp->cs,sp",
+                bond_tensor,
+                method=svd_method,
+                options=options,
+                return_info=True,
+            )
+            assert S is None  # Due to "partition" option in SVDMethod
+
+            # discarded_weight is calculated within cuTensorNet as:
+            #                             sum([s**2 for s in S'])
+            #     discarded_weight = 1 - -------------------------
+            #                             sum([s**2 for s in S])
+            # where S is the list of original singular values and S' is the set of
+            # singular values that remain after truncation (before normalisation).
+            # It can be shown that the fidelity |<psi|phi>|^2 (for |phi> and |psi>
+            # unit vectors before and after truncation) is equal to 1 - disc_weight.
+            #
+            # We multiply the fidelity of the current step into the overall fidelity
+            # to keep track of a lower bound for the fidelity.
+            this_fidelity = 1.0 - svd_info.discarded_weight
+            self.fidelity *= this_fidelity
+            dimension_after = V.shape[0]
+
+            # Contract U and V into the TTN. This reintroduces the data of bond_tensor
+            # back into the TTN, as required by ``canonicalise(.., unsafe=True)``.
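+            # (U is absorbed by the child node of the bond and V by its parent node;
+            # see `_contract_decomp_bond_tensor_into_ttn` below.)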
+            self._contract_decomp_bond_tensor_into_ttn(U, V, bond_address)
+
+            # The next node in the path towards `q0` loses its canonical form, since
+            # S was contracted to it (either via U or V)
+            if towards_root:
+                self.nodes[bond_address[:-1]].canonical_form = None
+            else:
+                self.nodes[bond_address].canonical_form = None
+
+            # Report to logger
+            self._logger.debug(f"Truncation done. Truncation fidelity={this_fidelity}")
+            self._logger.debug(
+                f"Reduced bond dimension from {dimension_before} to {dimension_after}."
+            )
+
+        self._logger.debug(
+            "Finished sequential weighted truncation (fidelity bound). "
+            f"Fidelity factor = {self.fidelity / initial_fidelity}"
+        )
+
+        # Sanity check: reached the common ancestor and changed direction
+        assert not towards_root
+
+    def _chi_sequential_truncation(
+        self,
+        bonds_from_q1_to_ancestor: list[RootPath],
+        bonds_from_ancestor_to_q0: list[RootPath],
+    ) -> None:
+        """Truncate all bonds in the input lists to have a dimension of chi or lower.
+
+        The lists of bonds are explored sequentially, truncating the bonds
+        one by one.
+
+        Args:
+            bonds_from_q1_to_ancestor: A list of bonds (each as their RootPath address).
+                These bonds will be truncated. The list must be ordered in such a way
+                that consecutive bonds share a common tensor and such that the first
+                bond in the list corresponds to the leaf node that `q1` is assigned to
+                and the last bond in the list corresponds to the child bond of the
+                common ancestor between the leaves of `q0` and `q1`.
+            bonds_from_ancestor_to_q0: Same as above, but the list starts from the other
+                child bond of the common ancestor and ends at the leaf node that `q0`
+                is assigned to. Together, these two lists provide a path in the TTN
+                from the leaf node of `q1` to the leaf node of `q0`.
+        """
+        self._logger.debug("Starting sequential truncation (chi bound).")
+        initial_fidelity = self.fidelity
+
+        options = {"handle": self._lib.handle, "device_id": self._lib.device_id}
+
+        # Combine the two lists of bonds, but remember at which entry the direction
+        # of the path is switched from going towards root to going towards leaves.
+        truncation_bonds = bonds_from_q1_to_ancestor + bonds_from_ancestor_to_q0
+        switch_direction_at = len(bonds_from_q1_to_ancestor)
+        towards_root = True  # First half of truncation_bonds is path towards ancestor
+
+        for i, bond_address in enumerate(truncation_bonds):
+            dimension_before = self.get_dimension(bond_address, DirTTN.PARENT)
+
+            # Canonicalise to this bond (unsafely, so we must reintroduce bond_tensor)
+            bond_tensor = self.canonicalise(bond_address, unsafe=True)
+
+            # Flip ``towards_root`` if we have reached the common ancestor
+            # i.e. if the ``bond_tensor`` needs to go towards a child tensor rather
+            # than towards the parent
+            if switch_direction_at == i:
+                towards_root = False
+
+            # Apply SVD decomposition on bond_tensor and truncate up to
+            # `self._cfg.chi`. Ask cuTensorNet to contract S directly into U/V and
+            # normalise the singular values so that the sum of their squares is equal
+            # to one (i.e. the TTN is a normalised state after truncation).
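+            # (For instance, with `chi = 16` a bond of dimension 64 would keep at most
+            # its 16 largest singular values; bonds already at or below `chi` are not
+            # reduced by this cap.)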
+            self._logger.debug(
+                f"Truncating at {bond_address} to (or below) chosen chi={self._cfg.chi}"
+            )
+
+            svd_method = tensor.SVDMethod(
+                abs_cutoff=self._cfg.zero,
+                max_extent=self._cfg.chi,
+                partition="V" if towards_root else "U",  # Contract S to parent or child
+                normalization="L2",  # Sum of squares equal 1
+            )
+
+            U, S, V, svd_info = tensor.decompose(
+                "cp->cs,sp",
+                bond_tensor,
+                method=svd_method,
+                options=options,
+                return_info=True,
+            )
+            assert S is None  # Due to "partition" option in SVDMethod
+
+            # discarded_weight is calculated within cuTensorNet as:
+            #                             sum([s**2 for s in S'])
+            #     discarded_weight = 1 - -------------------------
+            #                             sum([s**2 for s in S])
+            # where S is the list of original singular values and S' is the set of
+            # singular values that remain after truncation (before normalisation).
+            # It can be shown that the fidelity |<psi|phi>|^2 (for |phi> and |psi>
+            # unit vectors before and after truncation) is equal to 1 - disc_weight.
+            #
+            # We multiply the fidelity of the current step into the overall fidelity
+            # to keep track of a lower bound for the fidelity.
+            this_fidelity = 1.0 - svd_info.discarded_weight
+            self.fidelity *= this_fidelity
+            dimension_after = V.shape[0]
+
+            # Contract U and V into the TTN. This reintroduces the data of bond_tensor
+            # back into the TTN, as required by ``canonicalise(.., unsafe=True)``.
+            self._contract_decomp_bond_tensor_into_ttn(U, V, bond_address)
+
+            # The next node in the path towards `q0` loses its canonical form, since
+            # S was contracted to it (either via U or V)
+            if towards_root:
+                self.nodes[bond_address[:-1]].canonical_form = None
+            else:
+                self.nodes[bond_address].canonical_form = None
+
+            # Report to logger
+            self._logger.debug(f"Truncation done. Truncation fidelity={this_fidelity}")
+            self._logger.debug(
+                f"Reduced bond dimension from {dimension_before} to {dimension_after}."
+            )
+
+        self._logger.debug(
+            "Finished sequential truncation (chi bound). "
+            f"Fidelity factor = {self.fidelity / initial_fidelity}"
+        )
+
+        # Sanity check: reached the common ancestor and changed direction
+        assert not towards_root
+
+    def _contract_decomp_bond_tensor_into_ttn(
+        self, U: cp.ndarray, V: cp.ndarray, bond_address: RootPath
+    ) -> None:
+        """Contracts a decomposed bond_tensor back into the TTN.
+
+        Args:
+            U: The tensor of the decomposition adjacent to the child node of the bond.
+            V: The tensor of the decomposition adjacent to the parent node of the bond.
+            bond_address: The address to the bond that was decomposed; explicitly, the
+                DirTTN.PARENT bond of the corresponding child node.
+ """ + options = {"handle": self._lib.handle, "device_id": self._lib.device_id} + + # Contract V to the parent node of the bond + direction = bond_address[-1] + if direction == DirTTN.LEFT: + indices = "lrp,sl->srp" + else: + indices = "lrp,sr->lsp" + self.nodes[bond_address[:-1]].tensor = cq.contract( + indices, + self.nodes[bond_address[:-1]].tensor, + V, + options=options, + optimize={"path": [(0, 1)]}, + ) + + # Contract U to the child node of the bond + if self.nodes[bond_address].is_leaf: + n_qbonds = ( + len(self.nodes[bond_address].tensor.shape) - 1 + ) # Total number of physical bonds in this node + node_bonds = [f"q{x}" for x in range(n_qbonds)] + ["p"] + else: + node_bonds = ["l", "r", "p"] + result_bonds = node_bonds.copy() + result_bonds[-1] = "s" + + self.nodes[bond_address].tensor = cq.contract( + self.nodes[bond_address].tensor, + node_bonds, + U, + ["p", "s"], + result_bonds, + options=options, + optimize={"path": [(0, 1)]}, + ) diff --git a/pytket/extensions/cutensornet/tensor_network_convert.py b/pytket/extensions/cutensornet/tensor_network_convert.py index 62394edd..23a715a1 100644 --- a/pytket/extensions/cutensornet/tensor_network_convert.py +++ b/pytket/extensions/cutensornet/tensor_network_convert.py @@ -178,7 +178,7 @@ def _assign_node_tensors(self, adj: bool = False) -> List[Any]: self._input_nodes = [] self._output_nodes = [] for i, node in reversed(list(enumerate(self._network.nodes(data=True)))): - if node[1]["desc"] not in ("Input", "Output"): + if node[1]["desc"] not in ("Input", "Output", "Create"): n_out_edges = len(list(self._network.out_edges(node[0]))) if n_out_edges > 1: src_ports = [ @@ -213,7 +213,7 @@ def _assign_node_tensors(self, adj: bool = False) -> List[Any]: else: if node[1]["desc"] == "Output": self._output_nodes.append(i) - if node[1]["desc"] == "Input": + if node[1]["desc"] == "Input" or node[1]["desc"] == "Create": self._input_nodes.append(i) node_tensors.append(np.array([1, 0], dtype="complex128")) if adj: diff --git a/setup.py b/setup.py index abc65423..c7d90902 100644 --- a/setup.py +++ b/setup.py @@ -42,7 +42,7 @@ license="Apache 2", packages=find_namespace_packages(include=["pytket.*"]), include_package_data=True, - install_requires=["pytket ~= 1.24"], + install_requires=["pytket ~= 1.26"], classifiers=[ "Environment :: Console", "Programming Language :: Python :: 3.10", diff --git a/tests/conftest.py b/tests/conftest.py index 32ae62c2..69e6d961 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -50,6 +50,18 @@ def quantum_volume_circuit(n_qubits: int) -> Circuit: return c +@pytest.fixture +def q5_empty() -> Circuit: + circuit = Circuit(5) + return circuit + + +@pytest.fixture +def q8_empty() -> Circuit: + circuit = Circuit(8) + return circuit + + @pytest.fixture def q2_x0() -> Circuit: circuit = Circuit(2) @@ -174,8 +186,20 @@ def q4_multicontrols() -> Circuit: @pytest.fixture -def q5_empty() -> Circuit: - circuit = Circuit(5) +def q4_with_creates() -> Circuit: + circuit = Circuit(4) + circuit.qubit_create_all() + + circuit.S(1) + circuit.Rz(0.3, 0) + circuit.Ry(0.1, 2) + circuit.TK1(0.2, 0.9, 0.8, 3) + circuit.TK2(0.6, 0.5, 0.7, 1, 2) + circuit.X(0) + circuit.H(2) + circuit.V(1) + circuit.Z(3) + return circuit @@ -191,6 +215,16 @@ def q5_h0s1rz2ry3tk4tk13() -> Circuit: return circuit +@pytest.fixture +def q8_x0h2v5z6() -> Circuit: + circuit = Circuit(8) + circuit.X(0) + circuit.H(2) + circuit.V(5) + circuit.Z(6) + return circuit + + @pytest.fixture def q5_line_circ_30_layers() -> Circuit: np.random.seed(1) @@ -207,3 +241,15 @@ 
def q20_line_circ_20_layers() -> Circuit: def q6_qvol() -> Circuit: np.random.seed(1) return quantum_volume_circuit(n_qubits=6) + + +@pytest.fixture +def q8_qvol() -> Circuit: + np.random.seed(1) + return quantum_volume_circuit(n_qubits=8) + + +@pytest.fixture +def q15_qvol() -> Circuit: + np.random.seed(1) + return quantum_volume_circuit(n_qubits=15) diff --git a/tests/test_cutensornet_backend.py b/tests/test_cutensornet_backend.py index 726f8c7b..1bf20219 100644 --- a/tests/test_cutensornet_backend.py +++ b/tests/test_cutensornet_backend.py @@ -111,6 +111,7 @@ def test_expectation_value() -> None: pytest.lazy_fixture("q3_cx01cz12x1rx0"), # type: ignore pytest.lazy_fixture("q4_lcu1"), # type: ignore pytest.lazy_fixture("q4_multicontrols"), # type: ignore + pytest.lazy_fixture("q4_with_creates"), # type: ignore ], ) def test_compile_convert_statevec_overlap(circuit: Circuit) -> None: diff --git a/tests/test_mps.py b/tests/test_structured_state.py similarity index 52% rename from tests/test_mps.py rename to tests/test_structured_state.py index ab2c5b0d..5617ca44 100644 --- a/tests/test_mps.py +++ b/tests/test_structured_state.py @@ -1,4 +1,4 @@ -from typing import Any +from typing import Any, Union import random # type: ignore import pytest @@ -8,16 +8,19 @@ from pytket.circuit import Circuit, Qubit, OpType # type: ignore from pytket.pauli import Pauli, QubitPauliString # type: ignore -from pytket.extensions.cutensornet.mps import ( +from pytket.extensions.cutensornet.structured_state import ( CuTensorNetHandle, - ConfigMPS, + Config, MPS, MPSxGate, MPSxMPO, + TTNxGate, + DirTTN, simulate, - prepare_circuit, - ContractionAlg, + prepare_circuit_mps, + SimulationAlgorithm, ) +from pytket.extensions.cutensornet.structured_state.ttn import RootPath from pytket.extensions.cutensornet.utils import circuit_statevector_postselect @@ -26,8 +29,9 @@ def test_libhandle_manager() -> None: # Proper use of library handle with CuTensorNetHandle() as libhandle: - mps = MPS(libhandle, circ.qubits, ConfigMPS()) - assert np.isclose(mps.vdot(mps), 1, atol=mps._cfg._atol) + cfg = Config() + mps = MPS(libhandle, circ.qubits, cfg) + assert np.isclose(mps.vdot(mps), 1, atol=cfg._atol) # Catch exception due to library handle out of scope with pytest.raises(RuntimeError): @@ -35,25 +39,67 @@ def test_libhandle_manager() -> None: def test_init() -> None: - circ = Circuit(5) + circ = Circuit(8) + qubit_partition = {i: [q] for i, q in enumerate(circ.qubits)} with CuTensorNetHandle() as libhandle: - mps_gate = MPSxGate(libhandle, circ.qubits, ConfigMPS()) + mps_gate = MPSxGate(libhandle, circ.qubits, Config()) assert mps_gate.is_valid() - mps_mpo = MPSxMPO(libhandle, circ.qubits, ConfigMPS()) + mps_mpo = MPSxMPO(libhandle, circ.qubits, Config()) assert mps_mpo.is_valid() + ttn_gate = TTNxGate(libhandle, qubit_partition, Config()) + assert ttn_gate.is_valid() + + +@pytest.mark.parametrize( + "algorithm", + [ + SimulationAlgorithm.MPSxGate, + SimulationAlgorithm.MPSxMPO, + SimulationAlgorithm.TTNxGate, + ], +) +def test_copy(algorithm: SimulationAlgorithm) -> None: + simple_circ = Circuit(2).H(0).H(1).CX(0, 1) + with CuTensorNetHandle() as libhandle: -def test_canonicalise() -> None: + # Default config + cfg = Config() + state = simulate(libhandle, simple_circ, algorithm, cfg) + assert state.is_valid() + copy_state = state.copy() + assert copy_state.is_valid() + assert np.isclose(copy_state.vdot(state), 1.0, atol=cfg._atol) + + # Bounded chi + cfg = Config(chi=8) + state = simulate(libhandle, simple_circ, algorithm, cfg) 
+ assert state.is_valid() + copy_state = state.copy() + assert copy_state.is_valid() + assert np.isclose(copy_state.vdot(state), 1.0, atol=cfg._atol) + + # Bounded truncation_fidelity + cfg = Config(truncation_fidelity=0.9999) + state = simulate(libhandle, simple_circ, algorithm, cfg) + assert state.is_valid() + copy_state = state.copy() + assert copy_state.is_valid() + assert np.isclose(copy_state.vdot(state), 1.0, atol=cfg._atol) + + +def test_canonicalise_mps() -> None: cp.random.seed(1) circ = Circuit(5) with CuTensorNetHandle() as libhandle: - mps_gate = MPSxGate(libhandle, circ.qubits, ConfigMPS()) + cfg = Config() + mps_gate = MPSxGate(libhandle, circ.qubits, cfg) # Fill up the tensors with random entries # Leftmost tensor - T_d = cp.empty(shape=(1, 4, 2), dtype=mps_gate._cfg._complex_t) + T_d = cp.empty(shape=(1, 4, 2), dtype=cfg._complex_t) for i1 in range(T_d.shape[1]): for i2 in range(T_d.shape[2]): T_d[0][i1][i2] = cp.random.rand() + 1j * cp.random.rand() @@ -61,7 +107,7 @@ def test_canonicalise() -> None: # Middle tensors for pos in range(1, len(mps_gate) - 1): - T_d = cp.empty(shape=(4, 4, 2), dtype=mps_gate._cfg._complex_t) + T_d = cp.empty(shape=(4, 4, 2), dtype=cfg._complex_t) for i0 in range(T_d.shape[0]): for i1 in range(T_d.shape[1]): for i2 in range(T_d.shape[2]): @@ -69,7 +115,7 @@ def test_canonicalise() -> None: mps_gate.tensors[pos] = T_d # Rightmost tensor - T_d = cp.empty(shape=(4, 1, 2), dtype=mps_gate._cfg._complex_t) + T_d = cp.empty(shape=(4, 1, 2), dtype=cfg._complex_t) for i0 in range(T_d.shape[0]): for i2 in range(T_d.shape[2]): T_d[i0][0][i2] = cp.random.rand() + 1j * cp.random.rand() @@ -89,7 +135,7 @@ def test_canonicalise() -> None: # Check that canonicalisation did not change the vector overlap = mps_gate.vdot(mps_copy) - assert np.isclose(overlap, norm_sq, atol=mps_gate._cfg._atol) + assert np.isclose(overlap, norm_sq, atol=cfg._atol) # Check that the corresponding tensors are in orthogonal form for pos in range(len(mps_gate)): @@ -107,13 +153,106 @@ def test_canonicalise() -> None: assert cp.allclose(result, cp.eye(result.shape[0])) +@pytest.mark.parametrize( + "center", + [ + (DirTTN.RIGHT,), + (DirTTN.LEFT, DirTTN.RIGHT), + (DirTTN.LEFT, DirTTN.RIGHT, DirTTN.RIGHT), + Qubit("q", [2]), + ], +) +def test_canonicalise_ttn(center: Union[RootPath, Qubit]) -> None: + cp.random.seed(1) + n_levels = 3 + n_qubits = 2**n_levels + max_dim = 8 + + circ = Circuit(n_qubits) + qubit_partition = {i: [q] for i, q in enumerate(circ.qubits)} + + with CuTensorNetHandle() as libhandle: + ttn = TTNxGate(libhandle, qubit_partition, Config()) + + # Fill up the tensors with random entries + for path, node in ttn.nodes.items(): + if node.is_leaf: + T = cp.empty(shape=(2, max_dim), dtype=ttn._cfg._complex_t) + for i0 in range(T.shape[0]): + for i1 in range(T.shape[1]): + T[i0][i1] = cp.random.rand() + 1j * cp.random.rand() + else: + shape = (max_dim, max_dim, max_dim if len(path) != 0 else 1) + T = cp.empty(shape=shape, dtype=ttn._cfg._complex_t) + for i0 in range(shape[0]): + for i1 in range(shape[1]): + for i2 in range(shape[2]): + T[i0][i1][i2] = cp.random.rand() + 1j * cp.random.rand() + node.tensor = T + + assert ttn.is_valid() + + # Calculate the norm of the TTN + norm_sq = ttn.vdot(ttn) + + # Keep a copy of the non-canonicalised TTN + ttn_copy = ttn.copy() + + # Canonicalise at target path + R = ttn.canonicalise(center) + assert ttn.is_valid() + + # Check that canonicalisation did not change the vector + overlap = ttn.vdot(ttn_copy) + assert np.isclose(overlap / 
norm_sq, 1.0, atol=ttn._cfg._atol) + + # Check that the tensor R returned agrees with the norm + overlap_R = cq.contract("ud,ud->", R, R.conj()) + assert np.isclose(overlap_R / norm_sq, 1.0, atol=ttn._cfg._atol) + + # Check that the corresponding tensors are in orthogonal form + for path, node in ttn.nodes.items(): + # If it's the node just below the center of canonicalisation, it + # cannot be in orthogonal form + if isinstance(center, Qubit): + if path == ttn.qubit_position[center][0]: + assert node.canonical_form is None + continue + else: + if path == center[:-1]: + assert node.canonical_form is None + continue + # Otherwise, it should be in orthogonal form + assert node.canonical_form is not None + + T = node.tensor + + if node.is_leaf: + assert node.canonical_form == DirTTN.PARENT + result = cq.contract("qp,qP->pP", T, T.conj()) + + elif node.canonical_form == DirTTN.PARENT: + result = cq.contract("lrp,lrP->pP", T, T.conj()) + + elif node.canonical_form == DirTTN.LEFT: + result = cq.contract("lrp,Lrp->lL", T, T.conj()) + + elif node.canonical_form == DirTTN.RIGHT: + result = cq.contract("lrp,lRp->rR", T, T.conj()) + + # Check that the result is the identity + assert cp.allclose(result, cp.eye(result.shape[0])) + + @pytest.mark.parametrize( "circuit", [ pytest.lazy_fixture("q5_empty"), # type: ignore + pytest.lazy_fixture("q8_empty"), # type: ignore pytest.lazy_fixture("q2_x0"), # type: ignore pytest.lazy_fixture("q2_x1"), # type: ignore pytest.lazy_fixture("q2_v0"), # type: ignore + pytest.lazy_fixture("q8_x0h2v5z6"), # type: ignore pytest.lazy_fixture("q2_x0cx01"), # type: ignore pytest.lazy_fixture("q2_x1cx10x1"), # type: ignore pytest.lazy_fixture("q2_x0cx01cx10"), # type: ignore @@ -124,51 +263,58 @@ def test_canonicalise() -> None: pytest.lazy_fixture("q2_lcu3"), # type: ignore pytest.lazy_fixture("q3_v0cx02"), # type: ignore pytest.lazy_fixture("q3_cx01cz12x1rx0"), # type: ignore - # pytest.lazy_fixture("q4_lcu1"), # MPS doesn't support n-qubit gates with n>2 + pytest.lazy_fixture("q4_with_creates"), # type: ignore pytest.lazy_fixture("q5_h0s1rz2ry3tk4tk13"), # type: ignore pytest.lazy_fixture("q5_line_circ_30_layers"), # type: ignore pytest.lazy_fixture("q6_qvol"), # type: ignore + pytest.lazy_fixture("q8_qvol"), # type: ignore ], ) @pytest.mark.parametrize( "algorithm", [ - ContractionAlg.MPSxGate, - ContractionAlg.MPSxMPO, + SimulationAlgorithm.MPSxGate, + SimulationAlgorithm.MPSxMPO, + SimulationAlgorithm.TTNxGate, ], ) -def test_exact_circ_sim(circuit: Circuit, algorithm: ContractionAlg) -> None: - prep_circ, _ = prepare_circuit(circuit) +def test_exact_circ_sim(circuit: Circuit, algorithm: SimulationAlgorithm) -> None: + if algorithm in [SimulationAlgorithm.MPSxGate, SimulationAlgorithm.MPSxMPO]: + circuit, _ = prepare_circuit_mps(circuit) + n_qubits = len(circuit.qubits) - state = prep_circ.get_statevector() + state_vec = circuit.get_statevector() with CuTensorNetHandle() as libhandle: - mps = simulate(libhandle, prep_circ, algorithm, ConfigMPS()) - assert mps.is_valid() + cfg = Config(leaf_size=2) + state = simulate(libhandle, circuit, algorithm, cfg) + assert state.is_valid() # Check that there was no approximation - assert np.isclose(mps.fidelity, 1.0, atol=mps._cfg._atol) + assert np.isclose(state.get_fidelity(), 1.0, atol=cfg._atol) # Check that overlap is 1 - assert np.isclose(mps.vdot(mps), 1.0, atol=mps._cfg._atol) + assert np.isclose(state.vdot(state), 1.0, atol=cfg._atol) # Check that all of the amplitudes are correct for b in range(2**n_qubits): assert 
np.isclose( - mps.get_amplitude(b), - state[b], - atol=mps._cfg._atol, + state.get_amplitude(b), + state_vec[b], + atol=cfg._atol, ) # Check that the statevector is correct - assert np.allclose(mps.get_statevector(), state, atol=mps._cfg._atol) + assert np.allclose(state.get_statevector(), state_vec, atol=cfg._atol) @pytest.mark.parametrize( "circuit", [ pytest.lazy_fixture("q5_empty"), # type: ignore + pytest.lazy_fixture("q8_empty"), # type: ignore pytest.lazy_fixture("q2_x0"), # type: ignore pytest.lazy_fixture("q2_x1"), # type: ignore pytest.lazy_fixture("q2_v0"), # type: ignore + pytest.lazy_fixture("q8_x0h2v5z6"), # type: ignore pytest.lazy_fixture("q2_x0cx01"), # type: ignore pytest.lazy_fixture("q2_x1cx10x1"), # type: ignore pytest.lazy_fixture("q2_x0cx01cx10"), # type: ignore @@ -179,37 +325,44 @@ def test_exact_circ_sim(circuit: Circuit, algorithm: ContractionAlg) -> None: pytest.lazy_fixture("q2_lcu3"), # type: ignore pytest.lazy_fixture("q3_v0cx02"), # type: ignore pytest.lazy_fixture("q3_cx01cz12x1rx0"), # type: ignore - # pytest.lazy_fixture("q4_lcu1"), # MPS doesn't support n-qubit gates with n>2 + pytest.lazy_fixture("q4_with_creates"), # type: ignore pytest.lazy_fixture("q5_h0s1rz2ry3tk4tk13"), # type: ignore pytest.lazy_fixture("q5_line_circ_30_layers"), # type: ignore pytest.lazy_fixture("q6_qvol"), # type: ignore + pytest.lazy_fixture("q8_qvol"), # type: ignore ], ) @pytest.mark.parametrize( "algorithm", [ - ContractionAlg.MPSxGate, - ContractionAlg.MPSxMPO, + SimulationAlgorithm.MPSxGate, + SimulationAlgorithm.MPSxMPO, + SimulationAlgorithm.TTNxGate, ], ) -def test_approx_circ_sim_gate_fid(circuit: Circuit, algorithm: ContractionAlg) -> None: - prep_circ, _ = prepare_circuit(circuit) +def test_approx_circ_sim_gate_fid( + circuit: Circuit, algorithm: SimulationAlgorithm +) -> None: + if algorithm in [SimulationAlgorithm.MPSxGate, SimulationAlgorithm.MPSxMPO]: + circuit, _ = prepare_circuit_mps(circuit) + with CuTensorNetHandle() as libhandle: - mps = simulate( - libhandle, prep_circ, algorithm, ConfigMPS(truncation_fidelity=0.99) - ) - assert mps.is_valid() + cfg = Config(truncation_fidelity=0.99, leaf_size=2) + state = simulate(libhandle, circuit, algorithm, cfg) + assert state.is_valid() # Check that overlap is 1 - assert np.isclose(mps.vdot(mps), 1.0, atol=mps._cfg._atol) + assert np.isclose(state.vdot(state), 1.0, atol=cfg._atol) @pytest.mark.parametrize( "circuit", [ pytest.lazy_fixture("q5_empty"), # type: ignore + pytest.lazy_fixture("q8_empty"), # type: ignore pytest.lazy_fixture("q2_x0"), # type: ignore pytest.lazy_fixture("q2_x1"), # type: ignore pytest.lazy_fixture("q2_v0"), # type: ignore + pytest.lazy_fixture("q8_x0h2v5z6"), # type: ignore pytest.lazy_fixture("q2_x0cx01"), # type: ignore pytest.lazy_fixture("q2_x1cx10x1"), # type: ignore pytest.lazy_fixture("q2_x0cx01cx10"), # type: ignore @@ -220,26 +373,31 @@ def test_approx_circ_sim_gate_fid(circuit: Circuit, algorithm: ContractionAlg) - pytest.lazy_fixture("q2_lcu3"), # type: ignore pytest.lazy_fixture("q3_v0cx02"), # type: ignore pytest.lazy_fixture("q3_cx01cz12x1rx0"), # type: ignore - # pytest.lazy_fixture("q4_lcu1"), # MPS doesn't support n-qubit gates with n>2 + pytest.lazy_fixture("q4_with_creates"), # type: ignore pytest.lazy_fixture("q5_h0s1rz2ry3tk4tk13"), # type: ignore pytest.lazy_fixture("q5_line_circ_30_layers"), # type: ignore pytest.lazy_fixture("q6_qvol"), # type: ignore + pytest.lazy_fixture("q8_qvol"), # type: ignore ], ) @pytest.mark.parametrize( "algorithm", [ - 
ContractionAlg.MPSxGate, - ContractionAlg.MPSxMPO, + SimulationAlgorithm.MPSxGate, + SimulationAlgorithm.MPSxMPO, + SimulationAlgorithm.TTNxGate, ], ) -def test_approx_circ_sim_chi(circuit: Circuit, algorithm: ContractionAlg) -> None: - prep_circ, _ = prepare_circuit(circuit) +def test_approx_circ_sim_chi(circuit: Circuit, algorithm: SimulationAlgorithm) -> None: + if algorithm in [SimulationAlgorithm.MPSxGate, SimulationAlgorithm.MPSxMPO]: + circuit, _ = prepare_circuit_mps(circuit) + with CuTensorNetHandle() as libhandle: - mps = simulate(libhandle, prep_circ, algorithm, ConfigMPS(chi=4)) - assert mps.is_valid() + cfg = Config(chi=4, leaf_size=2) + state = simulate(libhandle, circuit, algorithm, cfg) + assert state.is_valid() # Check that overlap is 1 - assert np.isclose(mps.vdot(mps), 1.0, atol=mps._cfg._atol) + assert np.isclose(state.vdot(state), 1.0, atol=cfg._atol) @pytest.mark.parametrize( @@ -249,6 +407,7 @@ def test_approx_circ_sim_chi(circuit: Circuit, algorithm: ContractionAlg) -> Non pytest.lazy_fixture("q2_x0cx01cx10"), # type: ignore pytest.lazy_fixture("q2_lcu2"), # type: ignore pytest.lazy_fixture("q3_cx01cz12x1rx0"), # type: ignore + pytest.lazy_fixture("q4_with_creates"), # type: ignore pytest.lazy_fixture("q5_line_circ_30_layers"), # type: ignore pytest.lazy_fixture("q6_qvol"), # type: ignore ], @@ -256,8 +415,9 @@ def test_approx_circ_sim_chi(circuit: Circuit, algorithm: ContractionAlg) -> Non @pytest.mark.parametrize( "algorithm", [ - ContractionAlg.MPSxGate, - ContractionAlg.MPSxMPO, + SimulationAlgorithm.MPSxGate, + SimulationAlgorithm.MPSxMPO, + SimulationAlgorithm.TTNxGate, ], ) @pytest.mark.parametrize( @@ -268,40 +428,44 @@ def test_approx_circ_sim_chi(circuit: Circuit, algorithm: ContractionAlg) -> Non ], ) def test_float_point_options( - circuit: Circuit, algorithm: ContractionAlg, fp_precision: Any + circuit: Circuit, algorithm: SimulationAlgorithm, fp_precision: Any ) -> None: - prep_circ, _ = prepare_circuit(circuit) + if algorithm in [SimulationAlgorithm.MPSxGate, SimulationAlgorithm.MPSxMPO]: + circuit, _ = prepare_circuit_mps(circuit) with CuTensorNetHandle() as libhandle: # Exact - mps = simulate( - libhandle, prep_circ, algorithm, ConfigMPS(float_precision=fp_precision) - ) - assert mps.is_valid() + cfg = Config(float_precision=fp_precision, leaf_size=2) + state = simulate(libhandle, circuit, algorithm, cfg) + assert state.is_valid() # Check that overlap is 1 - assert np.isclose(mps.vdot(mps), 1.0, atol=mps._cfg._atol) + assert np.isclose(state.vdot(state), 1.0, atol=cfg._atol) # Approximate, bound truncation fidelity - mps = simulate( + cfg = Config( + truncation_fidelity=0.99, float_precision=fp_precision, leaf_size=2 + ) + state = simulate( libhandle, - prep_circ, + circuit, algorithm, - ConfigMPS(truncation_fidelity=0.99, float_precision=fp_precision), + cfg, ) - assert mps.is_valid() + assert state.is_valid() # Check that overlap is 1 - assert np.isclose(mps.vdot(mps), 1.0, atol=mps._cfg._atol) + assert np.isclose(state.vdot(state), 1.0, atol=cfg._atol) # Approximate, bound chi - mps = simulate( + cfg = Config(chi=4, float_precision=fp_precision, leaf_size=2) + state = simulate( libhandle, - prep_circ, + circuit, algorithm, - ConfigMPS(chi=4, float_precision=fp_precision), + cfg, ) - assert mps.is_valid() + assert state.is_valid() # Check that overlap is 1 - assert np.isclose(mps.vdot(mps), 1.0, atol=mps._cfg._atol) + assert np.isclose(state.vdot(state), 1.0, atol=cfg._atol) @pytest.mark.parametrize( @@ -310,47 +474,74 @@ def 
test_float_point_options( pytest.lazy_fixture("q20_line_circ_20_layers"), # type: ignore ], ) -def test_circ_approx_explicit(circuit: Circuit) -> None: +def test_circ_approx_explicit_mps(circuit: Circuit) -> None: random.seed(1) with CuTensorNetHandle() as libhandle: # Finite gate fidelity # Check for MPSxGate + cfg = Config(truncation_fidelity=0.99, leaf_size=4, float_precision=np.float32) mps_gate = simulate( libhandle, circuit, - ContractionAlg.MPSxGate, - ConfigMPS(truncation_fidelity=0.99), + SimulationAlgorithm.MPSxGate, + cfg, ) - assert np.isclose(mps_gate.fidelity, 0.4, atol=1e-1) + assert np.isclose(mps_gate.get_fidelity(), 0.4, atol=1e-1) assert mps_gate.is_valid() - assert np.isclose(mps_gate.vdot(mps_gate), 1.0, atol=mps_gate._cfg._atol) + assert np.isclose(mps_gate.vdot(mps_gate), 1.0, atol=cfg._atol) # Check for MPSxMPO mps_mpo = simulate( libhandle, circuit, - ContractionAlg.MPSxMPO, - ConfigMPS(truncation_fidelity=0.99), + SimulationAlgorithm.MPSxMPO, + cfg, ) - assert np.isclose(mps_mpo.fidelity, 0.6, atol=1e-1) + assert np.isclose(mps_mpo.get_fidelity(), 0.6, atol=1e-1) assert mps_mpo.is_valid() - assert np.isclose(mps_mpo.vdot(mps_mpo), 1.0, atol=mps_mpo._cfg._atol) + assert np.isclose(mps_mpo.vdot(mps_mpo), 1.0, atol=cfg._atol) # Fixed virtual bond dimension # Check for MPSxGate - mps_gate = simulate( - libhandle, circuit, ContractionAlg.MPSxGate, ConfigMPS(chi=8) - ) - assert np.isclose(mps_gate.fidelity, 0.03, atol=1e-2) + cfg = Config(chi=8, leaf_size=4, float_precision=np.float32) + mps_gate = simulate(libhandle, circuit, SimulationAlgorithm.MPSxGate, cfg) + assert np.isclose(mps_gate.get_fidelity(), 0.03, atol=1e-2) assert mps_gate.is_valid() - assert np.isclose(mps_gate.vdot(mps_gate), 1.0, atol=mps_gate._cfg._atol) + assert np.isclose(mps_gate.vdot(mps_gate), 1.0, atol=cfg._atol) # Check for MPSxMPO - mps_mpo = simulate(libhandle, circuit, ContractionAlg.MPSxMPO, ConfigMPS(chi=8)) - assert np.isclose(mps_mpo.fidelity, 0.04, atol=1e-2) + mps_mpo = simulate(libhandle, circuit, SimulationAlgorithm.MPSxMPO, cfg) + assert np.isclose(mps_mpo.get_fidelity(), 0.05, atol=1e-2) assert mps_mpo.is_valid() - assert np.isclose(mps_mpo.vdot(mps_mpo), 1.0, atol=mps_mpo._cfg._atol) + assert np.isclose(mps_mpo.vdot(mps_mpo), 1.0, atol=cfg._atol) + + +@pytest.mark.parametrize( + "circuit", + [ + pytest.lazy_fixture("q15_qvol"), # type: ignore + ], +) +def test_circ_approx_explicit_ttn(circuit: Circuit) -> None: + random.seed(1) + + with CuTensorNetHandle() as libhandle: + # Finite gate fidelity + # Check for TTNxGate + cfg = Config(truncation_fidelity=0.99, leaf_size=3, float_precision=np.float32) + ttn_gate = simulate(libhandle, circuit, SimulationAlgorithm.TTNxGate, cfg) + assert np.isclose(ttn_gate.get_fidelity(), 0.769, atol=1e-3) + assert ttn_gate.is_valid() + assert np.isclose(ttn_gate.vdot(ttn_gate), 1.0, atol=cfg._atol) + + # Fixed virtual bond dimension + # Check for TTNxGate + cfg = Config(chi=120, leaf_size=3, float_precision=np.float32) + ttn_gate = simulate(libhandle, circuit, SimulationAlgorithm.TTNxGate, cfg) + assert np.isclose(ttn_gate.get_fidelity(), 0.857, atol=1e-3) + assert ttn_gate.is_valid() + assert np.isclose(ttn_gate.vdot(ttn_gate), 1.0, atol=cfg._atol) @pytest.mark.parametrize( @@ -385,10 +576,11 @@ def test_postselect_2q_circ(circuit: Circuit, postselect_dict: dict) -> None: sv = sv / np.sqrt(sv_prob) # Normalise with CuTensorNetHandle() as libhandle: - mps = simulate(libhandle, circuit, ContractionAlg.MPSxGate, ConfigMPS()) + cfg = Config() + mps = 
simulate(libhandle, circuit, SimulationAlgorithm.MPSxGate, cfg) prob = mps.postselect(postselect_dict) - assert np.isclose(prob, sv_prob, atol=mps._cfg._atol) - assert np.allclose(mps.get_statevector(), sv, atol=mps._cfg._atol) + assert np.isclose(prob, sv_prob, atol=cfg._atol) + assert np.allclose(mps.get_statevector(), sv, atol=cfg._atol) @pytest.mark.parametrize( @@ -415,10 +607,11 @@ def test_postselect_circ(circuit: Circuit, postselect_dict: dict) -> None: sv = sv / np.sqrt(sv_prob) # Normalise with CuTensorNetHandle() as libhandle: - mps = simulate(libhandle, circuit, ContractionAlg.MPSxGate, ConfigMPS()) + cfg = Config() + mps = simulate(libhandle, circuit, SimulationAlgorithm.MPSxGate, cfg) prob = mps.postselect(postselect_dict) - assert np.isclose(prob, sv_prob, atol=mps._cfg._atol) - assert np.allclose(mps.get_statevector(), sv, atol=mps._cfg._atol) + assert np.isclose(prob, sv_prob, atol=cfg._atol) + assert np.allclose(mps.get_statevector(), sv, atol=cfg._atol) @pytest.mark.parametrize( @@ -434,6 +627,7 @@ def test_postselect_circ(circuit: Circuit, postselect_dict: dict) -> None: pytest.lazy_fixture("q2_lcu2"), # type: ignore pytest.lazy_fixture("q2_lcu3"), # type: ignore pytest.lazy_fixture("q3_cx01cz12x1rx0"), # type: ignore + pytest.lazy_fixture("q4_with_creates"), # type: ignore pytest.lazy_fixture("q5_line_circ_30_layers"), # type: ignore ], ) @@ -460,9 +654,10 @@ def test_expectation_value(circuit: Circuit, observable: QubitPauliString) -> No # Simulate the circuit and obtain the expectation value with CuTensorNetHandle() as libhandle: - mps = simulate(libhandle, circuit, ContractionAlg.MPSxGate, ConfigMPS()) + cfg = Config() + mps = simulate(libhandle, circuit, SimulationAlgorithm.MPSxGate, cfg) assert np.isclose( - mps.expectation_value(observable), expectation_value, atol=mps._cfg._atol + mps.expectation_value(observable), expectation_value, atol=cfg._atol ) @@ -490,7 +685,7 @@ def test_sample_circ_2q(circuit: Circuit) -> None: # Compute the samples sample_dict = {0: 0, 1: 0, 2: 0, 3: 0} with CuTensorNetHandle() as libhandle: - mps = simulate(libhandle, circuit, ContractionAlg.MPSxGate, ConfigMPS()) + mps = simulate(libhandle, circuit, SimulationAlgorithm.MPSxGate, Config()) # Take samples measuring both qubits at once for _ in range(n_samples): @@ -517,7 +712,7 @@ def test_measure_circ(circuit: Circuit) -> None: qB = circuit.qubits[-3] # Third list significant qubit with CuTensorNetHandle() as libhandle: - mps = simulate(libhandle, circuit, ContractionAlg.MPSxGate, ConfigMPS()) + mps = simulate(libhandle, circuit, SimulationAlgorithm.MPSxGate, Config()) # Compute the probabilities of each outcome p = {(0, 0): 0.0, (0, 1): 0.0, (1, 0): 0.0, (1, 1): 0.0} diff --git a/tests/test_tensor_network_convert.py b/tests/test_tensor_network_convert.py index f6d09a03..d221a264 100644 --- a/tests/test_tensor_network_convert.py +++ b/tests/test_tensor_network_convert.py @@ -54,6 +54,7 @@ def circuit_overlap_contract(circuit_ket: Circuit) -> float: pytest.lazy_fixture("q3_cx01cz12x1rx0"), # type: ignore pytest.lazy_fixture("q4_lcu1"), # type: ignore pytest.lazy_fixture("q4_multicontrols"), # type: ignore + pytest.lazy_fixture("q4_with_creates"), # type: ignore ], ) def test_convert_statevec_overlap(circuit: Circuit) -> None: