diff --git a/.bazelversion b/.bazelversion index 03f488b07..f3c238740 100644 --- a/.bazelversion +++ b/.bazelversion @@ -1 +1,2 @@ -5.3.0 +6.5.0 +# NOTE: Update Bazel version in tensorflow/tools/ci_build/release/common.sh.oss \ No newline at end of file diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 43f043e98..763fec9e0 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -11,7 +11,7 @@ jobs: - uses: actions/checkout@v1 - uses: actions/setup-python@v1 with: - python-version: '3.9' + python-version: '3.10' architecture: 'x64' - name: Install Lint tools run: pip install --upgrade pip setuptools; pip install -r requirements.txt; @@ -26,7 +26,7 @@ jobs: - uses: actions/checkout@v1 - uses: actions/setup-python@v1 with: - python-version: '3.9' + python-version: '3.10' architecture: 'x64' - name: Install Format tools run: pip install --upgrade pip setuptools; pip install -r requirements.txt; sudo apt-get install -y clang-format-6.0 @@ -41,7 +41,7 @@ jobs: - uses: actions/checkout@v1 - uses: actions/setup-python@v1 with: - python-version: '3.9' + python-version: '3.10' architecture: 'x64' - name: Install Bazel on CI run: ./scripts/ci_install.sh @@ -61,7 +61,7 @@ jobs: - uses: actions/checkout@v1 - uses: actions/setup-python@v1 with: - python-version: '3.9' + python-version: '3.10' architecture: 'x64' - name: Install Bazel on CI run: ./scripts/ci_install.sh @@ -70,23 +70,28 @@ jobs: - name: Full Library Test run: ./scripts/test_all.sh - leak-tests: - name: Memory Leak tests - runs-on: ubuntu-20.04 - needs: [lint, format] - - steps: - - uses: actions/checkout@v1 - - uses: actions/setup-python@v1 - with: - python-version: '3.9' - architecture: 'x64' - - name: Install Bazel on CI - run: ./scripts/ci_install.sh - - name: Configure CI TF - run: echo "Y\n" | ./configure.sh - - name: Leak Test qsim and src - run: ./scripts/msan_test.sh + # 2024-11-30 [mhucka] temporarily turning off leak-tests because it produces + # false positives on GH that we can't immediately address. TODO: if updating + # TFQ to use Clang and the latest TF does not resolve this, find a way to + # skip the handful of failing tests and renable the rest of the msan tests. + # + # leak-tests: + # name: Memory Leak tests + # runs-on: ubuntu-20.04 + # needs: [lint, format] + # + # steps: + # - uses: actions/checkout@v1 + # - uses: actions/setup-python@v1 + # with: + # python-version: '3.10' + # architecture: 'x64' + # - name: Install Bazel on CI + # run: ./scripts/ci_install.sh + # - name: Configure CI TF + # run: echo "Y\n" | ./configure.sh + # - name: Leak Test qsim and src + # run: ./scripts/msan_test.sh tutorials-test: name: Tutorial tests @@ -97,7 +102,7 @@ jobs: - uses: actions/checkout@v1 - uses: actions/setup-python@v1 with: - python-version: '3.9' + python-version: '3.10' architecture: 'x64' - name: Install notebook dependencies run: pip install --upgrade pip seaborn==0.10.0 diff --git a/.github/workflows/cirq_compatibility.yaml b/.github/workflows/cirq_compatibility.yaml index f5e5b9629..1ad2e4195 100644 --- a/.github/workflows/cirq_compatibility.yaml +++ b/.github/workflows/cirq_compatibility.yaml @@ -1,24 +1,171 @@ -name: Cirq Compatibility +# Copyright 2024 The TensorFlow Quantum Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# Summary: GitHub CI workflow for testing TFQ against Cirq releases +# +# This workflow is executed every night on a schedule. By default, this +# workflow will save Bazel build artifacts if an error occurs during a run. +# +# For testing, this workflow can be invoked manually from the GitHub page at +# https://github.com/tensorflow/quantum/actions/workflows/cirq_compatibility.yaml +# Clicking the "Run workflow" button there will present a form interface with +# options for overridding some of the parameters for the run. +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +name: Cirq compatibility tests + +# Default values. These can be overridden when workflow dispatch is used. +env: + # Python version to test against. + py_version: '3.10' + # Bazel version. Note: this needs to match what is used in TF & TFQ. + bazel_version: 6.5.0 + # Machine architecture. + arch: x64 + # Additional .bazelrc options to use. + bazelrc_additions: | + common --announce_rc + build --verbose_failures + test --test_timeout=3000 on: + # Nightly runs. schedule: - - cron: "0 0 * * *" + - cron: 0 0 * * * + # Manual on-demand invocations. + workflow_dispatch: + inputs: + py_version: + description: Version of Python to use + bazel_version: + description: Version of Bazel Python to use + arch: + description: Computer architecture to use + use_bazel_disk_cache: + description: Use Bazel disk_cache between runs? + type: boolean + default: true + cache_bazel_tests: + description: Allow Bazel to cache test results? + type: boolean + default: true + save_artifacts: + description: Make Bazel build outputs downloadable? + type: boolean + default: true jobs: - consistency: - name: Nightly Compatibility - runs-on: ubuntu-16.04 + test-compatibility: + name: Run TFQ tests + runs-on: ubuntu-20.04 steps: - - uses: actions/checkout@v1 - - uses: actions/setup-python@v1 + - name: Check out a copy of the TFQ git repository + uses: actions/checkout@v4 + + - name: Set up Python + id: python + uses: actions/setup-python@v5 + with: + python-version: ${{github.event.inputs.py_version || env.py_version}} + architecture: ${{github.event.inputs.arch || env.arch}} + cache: pip + + - name: Install TensorFlow Quantum dependencies + run: | + pip install --upgrade pip setuptools wheel + pip install -r requirements.txt + + - name: Install the nightly build version of Cirq + run: | + pip install -U cirq --pre + + - name: Configure Bazel options + run: | + # If we didn't get a cache hit on the installed Python environment, + # something's changed, and we want to make sure to re-run all tests. + if [[ "${{steps.python.outputs.cache-hit}}" == "true" + && "${{github.event.inputs.cache_bazel_tests}}" != "false" ]]; then + echo "cache_bazel_tests=auto" >> "$GITHUB_ENV" + else + echo "cache_bazel_tests=no" >> "$GITHUB_ENV" + fi + # Use the disk cache unless told not to. 
+ if [[ "${{github.event.inputs.use_bazel_disk_cache}}" != "false" ]]; then + echo "use_bazel_disk_cache=true" >> "$GITHUB_ENV" + else + echo "use_bazel_disk_cache=false" >> "$GITHUB_ENV" + fi + + - name: Set up Bazel with caching + if: env.use_bazel_disk_cache == 'true' + uses: bazel-contrib/setup-bazel@0.9.1 + env: + USE_BAZEL_VERSION: ${{github.event.inputs.bazel_version || env.bazel_version}} + with: + disk-cache: ${{github.workflow}} + bazelisk-cache: true + external-cache: true + repository-cache: true + bazelrc: | + ${{env.bazelrc_additions}} + test --cache_test_results=${{env.cache_bazel_tests}} + + - name: Set up Bazel without caching + if: env.use_bazel_disk_cache == 'false' + uses: bazel-contrib/setup-bazel@0.9.1 + env: + USE_BAZEL_VERSION: ${{github.event.inputs.bazel_version || env.bazel_version}} + with: + bazelrc: | + ${{env.bazelrc_additions}} + test --cache_test_results=${{env.cache_bazel_tests}} + + - name: Configure TFQ + run: | + set -x -e + # Save information to the run log, in case it's needed for debugging. + which python + python --version + python -c 'import site; print(site.getsitepackages())' + python -c 'import tensorflow; print(tensorflow.version.VERSION)' + python -c 'import cirq; print(cirq.__version__)' + # Run the TFQ configuration script. + printf "Y\n" | ./configure.sh + + - name: Run TFQ tests + # TODO: when the msan tests are working again, replace the "touch" + # line with ./scripts/msan_test.sh 2>&1 | tee msan-tests-output.log + run: | + set -x -e + ./scripts/test_all.sh 2>&1 | tee main-tests-output.log + touch msan-tests-output.log + + - name: Make Bazel artifacts downloadable (if desired) + if: >- + github.event.inputs.save_artifacts == 'true' + && (failure() || github.event_name == 'workflow_dispatch') + uses: actions/upload-artifact@v4 with: - python-version: '3.8' - architecture: 'x64' - - name: Install Bazel on CI - run: ./scripts/ci_install.sh - - name: Configure CI TF - run: echo "Y\n" | ./configure.sh - - name: Install Cirq nightly - run: pip install -U cirq --pre - - name: Nightly tests - run: ./scripts/test_all.sh + name: bazel-out + retention-days: 7 + include-hidden-files: true + path: | + main-tests-output.log + msan-tests-output.log + /home/runner/.bazel/execroot/__main__/bazel-out/ + !/home/runner/.bazel/execroot/__main__/bazel-out/**/*.so + !/home/runner/.bazel/execroot/__main__/bazel-out/**/*.o + !/home/runner/.bazel/execroot/__main__/bazel-out/**/_objs + !/home/runner/.bazel/execroot/__main__/bazel-out/**/_solib_k8 diff --git a/WORKSPACE b/WORKSPACE index b47e25d2f..5f0025e9a 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -1,10 +1,13 @@ # This file includes external dependencies that are required to compile the # TensorFlow op. 
+ + load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -EIGEN_COMMIT = "3bb6a48d8c171cf20b5f8e48bfb4e424fbd4f79e" -EIGEN_SHA256 = "eca9847b3fe6249e0234a342b78f73feec07d29f534e914ba5f920f3e09383a3" + + +EIGEN_COMMIT = "aa6964bf3a34fd607837dd8123bc42465185c4f8" http_archive( @@ -16,7 +19,6 @@ cc_library( visibility = ["//visibility:public"], ) """, - sha256 = EIGEN_SHA256, strip_prefix = "eigen-{commit}".format(commit = EIGEN_COMMIT), urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/gitlab.com/libeigen/eigen/-/archive/{commit}/eigen-{commit}.tar.gz".format(commit = EIGEN_COMMIT), @@ -27,35 +29,41 @@ cc_library( http_archive( name = "qsim", sha256 = "b9c1eba09a885a938b5e73dfc2e02f5231cf3b01d899415caa24769346a731d5", + # patches = [ + # "//third_party/tf:qsim.patch", + # ], strip_prefix = "qsim-0.13.3", urls = ["https://github.com/quantumlib/qsim/archive/refs/tags/v0.13.3.zip"], ) http_archive( name = "org_tensorflow", - sha256 = "e52cda3bae45f0ae0fccd4055e9fa29892b414f70e2df94df9a3a10319c75fff", - strip_prefix = "tensorflow-2.11.0", + patches = [ + "//third_party/tf:tf.patch", + ], + # sha256 = "e52cda3bae45f0ae0fccd4055e9fa29892b414f70e2df94df9a3a10319c75fff", + strip_prefix = "tensorflow-2.15.0", urls = [ - "https://github.com/tensorflow/tensorflow/archive/refs/tags/v2.11.0.zip", + "https://github.com/tensorflow/tensorflow/archive/refs/tags/v2.15.0.zip", ], ) -load("@org_tensorflow//tensorflow:workspace3.bzl", "workspace") +load("@org_tensorflow//tensorflow:workspace3.bzl", "tf_workspace3") -workspace() +tf_workspace3() -load("@org_tensorflow//tensorflow:workspace2.bzl", "workspace") +load("@org_tensorflow//tensorflow:workspace2.bzl", "tf_workspace2") -workspace() +tf_workspace2() -load("@org_tensorflow//tensorflow:workspace1.bzl", "workspace") +load("@org_tensorflow//tensorflow:workspace1.bzl", "tf_workspace1") -workspace() +tf_workspace1() -load("@org_tensorflow//tensorflow:workspace0.bzl", "workspace") +load("@org_tensorflow//tensorflow:workspace0.bzl", "tf_workspace0") -workspace() +tf_workspace0() load("//third_party/tf:tf_configure.bzl", "tf_configure") @@ -72,4 +80,3 @@ bind( name = "six", actual = "@six_archive//:six", ) - diff --git a/configure.sh b/configure.sh index ff42047fe..36e2d08a6 100755 --- a/configure.sh +++ b/configure.sh @@ -73,21 +73,21 @@ done # Check if it's installed -if [[ $(pip show tensorflow) == *tensorflow* ]] || [[ $(pip show tf-nightly) == *tf-nightly* ]]; then - echo 'Using installed tensorflow' -else - # Uninstall CPU version if it is installed. - if [[ $(pip show tensorflow-cpu) == *tensorflow-cpu* ]]; then - echo 'Already have tensorflow non-gpu installed. Uninstalling......\n' - pip uninstall tensorflow - elif [[ $(pip show tf-nightly-cpu) == *tf-nightly-cpu* ]]; then - echo 'Already have tensorflow non-gpu installed. Uninstalling......\n' - pip uninstall tf-nightly - fi - # Install GPU version - echo 'Installing tensorflow .....\n' - pip install tensorflow -fi +# if [[ $(pip show tensorflow) == *tensorflow* ]] || [[ $(pip show tf-nightly) == *tf-nightly* ]]; then +# echo 'Using installed tensorflow' +# else +# # Uninstall CPU version if it is installed. +# if [[ $(pip show tensorflow-cpu) == *tensorflow-cpu* ]]; then +# echo 'Already have tensorflow non-gpu installed. Uninstalling......\n' +# pip uninstall tensorflow +# elif [[ $(pip show tf-nightly-cpu) == *tf-nightly-cpu* ]]; then +# echo 'Already have tensorflow non-gpu installed. 
Uninstalling......\n' +# pip uninstall tf-nightly +# fi +# # Install GPU version +# echo 'Installing tensorflow .....\n' +# pip install tensorflow +# fi diff --git a/docs/install.md b/docs/install.md index 3de77ecf9..61338735c 100644 --- a/docs/install.md +++ b/docs/install.md @@ -10,14 +10,14 @@ There are a few ways to set up your environment to use TensorFlow Quantum (TFQ): Python's pip package manager. * Or build TensorFlow Quantum from source. -TensorFlow Quantum is supported on Python 3.7, 3.8, and 3.9 and depends directly on [Cirq](https://github.com/quantumlib/Cirq). +TensorFlow Quantum is supported on Python 3.9, 3.10, and 3.11 and depends directly on [Cirq](https://github.com/quantumlib/Cirq). ## Pip package ### Requirements -* pip 19.0 or later (requires `manylinux2010` support) -* [TensorFlow == 2.11.0](https://www.tensorflow.org/install/pip) +* pip 19.0 or later (requires `manylinux2014` support) +* [TensorFlow == 2.15.0](https://www.tensorflow.org/install/pip) See the [TensorFlow install guide](https://www.tensorflow.org/install/pip) to set up your Python development environment and an (optional) virtual environment. @@ -27,7 +27,7 @@ Upgrade `pip` and install TensorFlow
@@ -57,13 +57,13 @@ The following steps are tested for Ubuntu-like systems.
### 1. Set up a Python 3 development environment
-First we need the Python 3.8 development tools.
+First we need the Python 3.10 development tools.
pip3 install --upgrade pip
-pip3 install tensorflow==2.11.0
+pip3 install tensorflow==2.15.0
@@ -72,7 +72,7 @@ First we need the Python 3.8 development tools.
Go to your workspace directory and make a virtual environment for TFQ development.
sudo apt update
-sudo apt-get install pkg-config zip g++ zlib1g-dev unzip python3.8
-sudo apt install python3.8 python3.8-dev python3.8-venv python3-pip
-python3.8 -m pip install --upgrade pip
+sudo apt-get install pkg-config zip g++ zlib1g-dev unzip python3.10
+sudo apt install python3.10 python3.10-dev python3.10-venv python3-pip
+python3.10 -m pip install --upgrade pip
@@ -84,21 +84,20 @@ As noted in the TensorFlow guide, the Bazel build system will be required.
-Our latest source builds use TensorFlow 2.11.0. To ensure compatibility we use `bazel` version 5.3.0. To remove any existing version of Bazel:
+Our latest source builds use TensorFlow 2.15.0. To ensure compatibility we use `bazel` version 6.5.0. To remove any existing version of Bazel:
-python3.8 -m venv quantum_env
+python3.10 -m venv quantum_env
source quantum_env/bin/activate
sudo apt-get remove bazel
-Download and install `bazel` version 5.3.0:
+Download and install `bazel` version 6.5.0:
- wget https://github.com/bazelbuild/bazel/releases/download/5.3.0/bazel_5.3.0-linux-x86_64.deb
+ wget https://github.com/bazelbuild/bazel/releases/download/6.5.0/bazel_6.5.0-linux-x86_64.deb
- sudo dpkg -i bazel_5.3.0-linux-x86_64.deb
+ sudo dpkg -i bazel_6.5.0-linux-x86_64.deb
@@ -122,7 +121,7 @@ Finally, confirm installation of the correct `bazel` version:
### 4. Build TensorFlow from source
Here we adapt instructions from the TensorFlow [build from source](https://www.tensorflow.org/install/source)
-guide, see the link for further details. TensorFlow Quantum is compatible with TensorFlow version 2.11.0.
+guide, see the link for further details. TensorFlow Quantum is compatible with TensorFlow version 2.15.0.
Download the
TensorFlow source code:
@@ -131,7 +130,7 @@ Download the
Be sure the virtual environment you created in step 2 is activated. Then, install the TensorFlow dependencies:
@@ -141,7 +140,7 @@ Be sure the virtual environment you created in step 2 is activated. Then, instal
cd tensorflow
-git checkout v2.11.0
+git checkout v2.15.0
pip install -U pip six numpy wheel setuptools mock 'future>=0.17.1'
pip install -U keras_applications --no-deps
pip install -U keras_preprocessing --no-deps
- pip install numpy==1.24.2
+ pip install numpy==1.23.5
pip install packaging requests
diff --git a/docs/tutorials/barren_plateaus.ipynb b/docs/tutorials/barren_plateaus.ipynb
index 3c9176eaa..94be3a950 100644
--- a/docs/tutorials/barren_plateaus.ipynb
+++ b/docs/tutorials/barren_plateaus.ipynb
@@ -97,7 +97,7 @@
},
"outputs": [],
"source": [
- "!pip install tensorflow==2.7.0"
+ "!pip install tensorflow==2.15.0"
]
},
{
@@ -120,7 +120,7 @@
},
"outputs": [],
"source": [
- "!pip install tensorflow-quantum==0.7.2"
+ "!pip install tensorflow-quantum==0.7.3"
]
},
{
diff --git a/docs/tutorials/gradients.ipynb b/docs/tutorials/gradients.ipynb
index 072718bcf..5eef20cb9 100644
--- a/docs/tutorials/gradients.ipynb
+++ b/docs/tutorials/gradients.ipynb
@@ -99,7 +99,7 @@
},
"outputs": [],
"source": [
- "!pip install tensorflow==2.7.0"
+ "!pip install tensorflow==2.15.0"
]
},
{
@@ -122,7 +122,7 @@
},
"outputs": [],
"source": [
- "!pip install tensorflow-quantum==0.7.2"
+ "!pip install tensorflow-quantum==0.7.3"
]
},
{
diff --git a/docs/tutorials/hello_many_worlds.ipynb b/docs/tutorials/hello_many_worlds.ipynb
index 229136219..49c88893b 100644
--- a/docs/tutorials/hello_many_worlds.ipynb
+++ b/docs/tutorials/hello_many_worlds.ipynb
@@ -103,7 +103,7 @@
},
"outputs": [],
"source": [
- "!pip install tensorflow==2.7.0"
+ "!pip install tensorflow==2.15.0"
]
},
{
@@ -129,7 +129,7 @@
},
"outputs": [],
"source": [
- "!pip install tensorflow-quantum==0.7.2"
+ "!pip install tensorflow-quantum==0.7.3"
]
},
{
@@ -255,7 +255,7 @@
"# Create a circuit on these qubits using the parameters you created above.\n",
"circuit = cirq.Circuit(\n",
" cirq.rx(a).on(q0),\n",
- " cirq.ry(b).on(q1), cirq.CNOT(control=q0, target=q1))\n",
+ " cirq.ry(b).on(q1), cirq.CNOT(q0, q1))\n",
"\n",
"SVGCircuit(circuit)"
]
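Note on the `cirq.CNOT` change above: current Cirq releases no longer accept the `control=`/`target=` keyword arguments, so the qubits are passed positionally, control first and target second. A minimal standalone sketch of the positional form (illustrative qubits, not part of the notebook):

import cirq

# Two illustrative qubits; q0 acts as the control, q1 as the target.
q0, q1 = cirq.GridQubit.rect(1, 2)
circuit = cirq.Circuit(cirq.rx(0.5).on(q0), cirq.CNOT(q0, q1))
print(circuit)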
diff --git a/docs/tutorials/mnist.ipynb b/docs/tutorials/mnist.ipynb
index 91405ed26..7efe3abe4 100644
--- a/docs/tutorials/mnist.ipynb
+++ b/docs/tutorials/mnist.ipynb
@@ -97,7 +97,7 @@
},
"outputs": [],
"source": [
- "!pip install tensorflow==2.7.0"
+ "!pip install tensorflow==2.15.0"
]
},
{
@@ -120,7 +120,7 @@
},
"outputs": [],
"source": [
- "!pip install tensorflow-quantum==0.7.2"
+ "!pip install tensorflow-quantum==0.7.3"
]
},
{
diff --git a/docs/tutorials/noise.ipynb b/docs/tutorials/noise.ipynb
index 0a0ebc290..4e40e72a3 100644
--- a/docs/tutorials/noise.ipynb
+++ b/docs/tutorials/noise.ipynb
@@ -83,7 +83,7 @@
},
"outputs": [],
"source": [
- "!pip install tensorflow==2.7.0 tensorflow-quantum==0.7.2"
+ "!pip install tensorflow==2.15.0 tensorflow-quantum==0.7.3"
]
},
{
diff --git a/docs/tutorials/qcnn.ipynb b/docs/tutorials/qcnn.ipynb
index f53182701..7b566a5b0 100644
--- a/docs/tutorials/qcnn.ipynb
+++ b/docs/tutorials/qcnn.ipynb
@@ -105,7 +105,7 @@
},
"outputs": [],
"source": [
- "!pip install tensorflow==2.7.0"
+ "!pip install tensorflow==2.15.0"
]
},
{
@@ -131,7 +131,7 @@
},
"outputs": [],
"source": [
- "!pip install tensorflow-quantum==0.7.2"
+ "!pip install tensorflow-quantum==0.7.3"
]
},
{
@@ -554,7 +554,7 @@
" source_basis_selector = one_qubit_unitary(source_qubit, symbols[3:6])\n",
" pool_circuit.append(sink_basis_selector)\n",
" pool_circuit.append(source_basis_selector)\n",
- " pool_circuit.append(cirq.CNOT(control=source_qubit, target=sink_qubit))\n",
+ " pool_circuit.append(cirq.CNOT(source_qubit, sink_qubit))\n",
" pool_circuit.append(sink_basis_selector**-1)\n",
" return pool_circuit"
]
diff --git a/docs/tutorials/quantum_data.ipynb b/docs/tutorials/quantum_data.ipynb
index 9e78b9493..8877807dc 100644
--- a/docs/tutorials/quantum_data.ipynb
+++ b/docs/tutorials/quantum_data.ipynb
@@ -111,7 +111,7 @@
}
],
"source": [
- "!pip install tensorflow==2.7.0 tensorflow-quantum==0.7.2"
+ "!pip install tensorflow==2.15.0 tensorflow-quantum==0.7.3"
]
},
{
diff --git a/docs/tutorials/quantum_reinforcement_learning.ipynb b/docs/tutorials/quantum_reinforcement_learning.ipynb
index fba0291e6..bc7644883 100644
--- a/docs/tutorials/quantum_reinforcement_learning.ipynb
+++ b/docs/tutorials/quantum_reinforcement_learning.ipynb
@@ -123,7 +123,7 @@
},
"outputs": [],
"source": [
- "!pip install tensorflow==2.7.0"
+ "!pip install tensorflow==2.15.0"
]
},
{
@@ -143,7 +143,7 @@
},
"outputs": [],
"source": [
- "!pip install tensorflow-quantum==0.7.2"
+ "!pip install tensorflow-quantum==0.7.3"
]
},
{
diff --git a/docs/tutorials/research_tools.ipynb b/docs/tutorials/research_tools.ipynb
index 538fcf46c..8f091f14c 100644
--- a/docs/tutorials/research_tools.ipynb
+++ b/docs/tutorials/research_tools.ipynb
@@ -83,7 +83,7 @@
},
"outputs": [],
"source": [
- "!pip install tensorflow==2.7.0 tensorflow-quantum==0.7.2 tensorboard_plugin_profile==2.4.0"
+ "!pip install tensorflow==2.15.0 tensorflow-quantum==0.7.3 tensorboard_plugin_profile==2.15.0"
]
},
{
@@ -155,8 +155,8 @@
"source": [
"def generate_circuit(qubits):\n",
" \"\"\"Generate a random circuit on qubits.\"\"\"\n",
- " random_circuit = cirq.generate_boixo_2018_supremacy_circuits_v2(\n",
- " qubits, cz_depth=2, seed=1234)\n",
+ " random_circuit = cirq.experiments.random_rotations_between_grid_interaction_layers_circuit(\n",
+ " qubits, depth=2)\n",
" return random_circuit\n",
"\n",
"def generate_data(circuit, n_samples):\n",
diff --git a/release/setup.py b/release/setup.py
index 24424b613..fd11b6fd0 100644
--- a/release/setup.py
+++ b/release/setup.py
@@ -50,16 +50,12 @@ def finalize_options(self):
self.install_lib = self.install_platlib
-REQUIRED_PACKAGES = [
- 'cirq-core==0.13.1', 'cirq-google>=0.13.1', 'sympy == 1.8',
- 'googleapis-common-protos==1.52.0', 'google-api-core==1.21.0',
- 'google-auth==1.18.0', 'protobuf==3.19.4'
-]
+REQUIRED_PACKAGES = ['cirq-core==1.3.0', 'cirq-google==1.3.0', 'sympy == 1.12']
# placed as extra to not have required overwrite existing nightly installs if
# they exist.
-EXTRA_PACKAGES = ['tensorflow == 2.11.0']
-CUR_VERSION = '0.7.3'
+EXTRA_PACKAGES = ['tensorflow == 2.15.0']
+CUR_VERSION = '0.7.4'
class BinaryDistribution(Distribution):
@@ -107,6 +103,8 @@ def has_ext_modules(self):
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
+ 'Programming Language :: Python :: 3.10',
+ 'Programming Language :: Python :: 3.11',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Mathematics',
diff --git a/requirements.txt b/requirements.txt
index ae8700878..d0fc187eb 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,15 +1,8 @@
-cirq-core==0.13.1
-cirq-google==0.13.1
-sympy==1.8
+cirq-core==1.3.0
+cirq-google==1.3.0
+sympy==1.12
numpy==1.24.2 # TensorFlow can detect if it was built against other versions.
nbformat==4.4.0
pylint==2.4.4
-yapf==0.28.0
-tensorflow==2.11.0
-# Needed for compatibility with cirq program protos.
-googleapis-common-protos==1.52.0
-google-api-core==1.21.0
-google-auth==1.18.0
-google-api-python-client==1.8.0
-grpcio==1.34.1
-protobuf==3.19.4
+yapf==0.40.2
+tensorflow==2.15.0
diff --git a/scripts/ci_install.sh b/scripts/ci_install.sh
index 04e6b3159..28c58c2d7 100755
--- a/scripts/ci_install.sh
+++ b/scripts/ci_install.sh
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-wget https://github.com/bazelbuild/bazel/releases/download/5.3.0/bazel_5.3.0-linux-x86_64.deb
-sudo dpkg -i bazel_5.3.0-linux-x86_64.deb
+wget https://github.com/bazelbuild/bazel/releases/download/6.5.0/bazel_6.5.0-linux-x86_64.deb
+sudo dpkg -i bazel_6.5.0-linux-x86_64.deb
pip install --upgrade pip setuptools wheel
pip install -r requirements.txt
\ No newline at end of file
diff --git a/scripts/test_all.sh b/scripts/test_all.sh
index 7a9fc7824..5d5405fac 100755
--- a/scripts/test_all.sh
+++ b/scripts/test_all.sh
@@ -14,7 +14,7 @@
# limitations under the License.
# ==============================================================================
echo "Testing All Bazel py_test and cc_tests.";
-test_outputs=$(bazel test -c opt --experimental_repo_remote_exec --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=1" --cxxopt="-std=c++17" --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" --notest_keep_going --test_output=errors //tensorflow_quantum/...)
+test_outputs=$(bazel test -c opt --experimental_repo_remote_exec --test_output=errors --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=1" --cxxopt="-std=c++17" --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" //tensorflow_quantum/...)
exit_code=$?
if [ "$exit_code" == "0" ]; then
echo "Testing Complete!";
diff --git a/tensorflow_quantum/core/ops/BUILD b/tensorflow_quantum/core/ops/BUILD
index 8087a8d3b..504cc2657 100644
--- a/tensorflow_quantum/core/ops/BUILD
+++ b/tensorflow_quantum/core/ops/BUILD
@@ -7,10 +7,10 @@ licenses(["notice"])
# Export for the PIP package.
exports_files(["__init__.py"])
-config_setting(
- name = "windows",
- constraint_values = ["@bazel_tools//platforms:windows"],
-)
+# config_setting(
+# name = "windows",
+# constraint_values = ["@bazel_tools//platforms:windows"],
+# )
py_library(
name = "ops",
@@ -38,45 +38,45 @@ cc_binary(
srcs = [
"tfq_adj_grad_op.cc",
],
- copts = select({
- ":windows": [
- "/D__CLANG_SUPPORT_DYN_ANNOTATION__",
- "/D_USE_MATH_DEFINES",
- "/DEIGEN_MPL2_ONLY",
- "/DEIGEN_MAX_ALIGN_BYTES=64",
- "/DEIGEN_HAS_TYPE_TRAITS=0",
- "/DTF_USE_SNAPPY",
- "/showIncludes",
- "/MD",
- "/O2",
- "/DNDEBUG",
- "/w",
- "-DWIN32_LEAN_AND_MEAN",
- "-DNOGDI",
- "/d2ReducedOptimizeHugeFunctions",
- "/arch:AVX",
- "/std:c++17",
- "-DTENSORFLOW_MONOLITHIC_BUILD",
- "/DPLATFORM_WINDOWS",
- "/DEIGEN_HAS_C99_MATH",
- "/DTENSORFLOW_USE_EIGEN_THREADPOOL",
- "/DEIGEN_AVOID_STL_ARRAY",
- "/Iexternal/gemmlowp",
- "/wd4018",
- "/wd4577",
- "/DNOGDI",
- "/UTF_COMPILE_LIBRARY",
- ],
- "//conditions:default": [
- "-pthread",
- "-std=c++17",
- "-D_GLIBCXX_USE_CXX11_ABI=1",
- ],
- }),
- features = select({
- ":windows": ["windows_export_all_symbols"],
- "//conditions:default": [],
- }),
+ # copts = select({
+ # ":windows": [
+ # "/D__CLANG_SUPPORT_DYN_ANNOTATION__",
+ # "/D_USE_MATH_DEFINES",
+ # "/DEIGEN_MPL2_ONLY",
+ # "/DEIGEN_MAX_ALIGN_BYTES=64",
+ # "/DEIGEN_HAS_TYPE_TRAITS=0",
+ # "/DTF_USE_SNAPPY",
+ # "/showIncludes",
+ # "/MD",
+ # "/O2",
+ # "/DNDEBUG",
+ # "/w",
+ # "-DWIN32_LEAN_AND_MEAN",
+ # "-DNOGDI",
+ # "/d2ReducedOptimizeHugeFunctions",
+ # "/arch:AVX",
+ # "/std:c++17",
+ # "-DTENSORFLOW_MONOLITHIC_BUILD",
+ # "/DPLATFORM_WINDOWS",
+ # "/DEIGEN_HAS_C99_MATH",
+ # "/DTENSORFLOW_USE_EIGEN_THREADPOOL",
+ # "/DEIGEN_AVOID_STL_ARRAY",
+ # "/Iexternal/gemmlowp",
+ # "/wd4018",
+ # "/wd4577",
+ # "/DNOGDI",
+ # "/UTF_COMPILE_LIBRARY",
+ # ],
+ # "//conditions:default": [
+ # "-pthread",
+ # "-std=c++17",
+ # "-D_GLIBCXX_USE_CXX11_ABI=1",
+ # ],
+ # }),
+ # features = select({
+ # ":windows": ["windows_export_all_symbols"],
+ # "//conditions:default": [],
+ # }),
linkshared = 1,
deps = [
":parse_context",
@@ -101,45 +101,45 @@ cc_binary(
"tfq_ps_symbol_replace_op.cc",
"tfq_ps_weights_from_symbols_op.cc",
],
- copts = select({
- ":windows": [
- "/D__CLANG_SUPPORT_DYN_ANNOTATION__",
- "/D_USE_MATH_DEFINES",
- "/DEIGEN_MPL2_ONLY",
- "/DEIGEN_MAX_ALIGN_BYTES=64",
- "/DEIGEN_HAS_TYPE_TRAITS=0",
- "/DTF_USE_SNAPPY",
- "/showIncludes",
- "/MD",
- "/O2",
- "/DNDEBUG",
- "/w",
- "-DWIN32_LEAN_AND_MEAN",
- "-DNOGDI",
- "/d2ReducedOptimizeHugeFunctions",
- "/arch:AVX",
- "/std:c++17",
- "-DTENSORFLOW_MONOLITHIC_BUILD",
- "/DPLATFORM_WINDOWS",
- "/DEIGEN_HAS_C99_MATH",
- "/DTENSORFLOW_USE_EIGEN_THREADPOOL",
- "/DEIGEN_AVOID_STL_ARRAY",
- "/Iexternal/gemmlowp",
- "/wd4018",
- "/wd4577",
- "/DNOGDI",
- "/UTF_COMPILE_LIBRARY",
- ],
- "//conditions:default": [
- "-pthread",
- "-std=c++17",
- "-D_GLIBCXX_USE_CXX11_ABI=1",
- ],
- }),
- features = select({
- ":windows": ["windows_export_all_symbols"],
- "//conditions:default": [],
- }),
+ # copts = select({
+ # ":windows": [
+ # "/D__CLANG_SUPPORT_DYN_ANNOTATION__",
+ # "/D_USE_MATH_DEFINES",
+ # "/DEIGEN_MPL2_ONLY",
+ # "/DEIGEN_MAX_ALIGN_BYTES=64",
+ # "/DEIGEN_HAS_TYPE_TRAITS=0",
+ # "/DTF_USE_SNAPPY",
+ # "/showIncludes",
+ # "/MD",
+ # "/O2",
+ # "/DNDEBUG",
+ # "/w",
+ # "-DWIN32_LEAN_AND_MEAN",
+ # "-DNOGDI",
+ # "/d2ReducedOptimizeHugeFunctions",
+ # "/arch:AVX",
+ # "/std:c++17",
+ # "-DTENSORFLOW_MONOLITHIC_BUILD",
+ # "/DPLATFORM_WINDOWS",
+ # "/DEIGEN_HAS_C99_MATH",
+ # "/DTENSORFLOW_USE_EIGEN_THREADPOOL",
+ # "/DEIGEN_AVOID_STL_ARRAY",
+ # "/Iexternal/gemmlowp",
+ # "/wd4018",
+ # "/wd4577",
+ # "/DNOGDI",
+ # "/UTF_COMPILE_LIBRARY",
+ # ],
+ # "//conditions:default": [
+ # "-pthread",
+ # "-std=c++17",
+ # "-D_GLIBCXX_USE_CXX11_ABI=1",
+ # ],
+ # }),
+ # features = select({
+ # ":windows": ["windows_export_all_symbols"],
+ # "//conditions:default": [],
+ # }),
linkshared = 1,
deps = [
":parse_context",
@@ -159,45 +159,45 @@ cc_binary(
"tfq_simulate_samples_op.cc",
"tfq_simulate_state_op.cc",
],
- copts = select({
- ":windows": [
- "/D__CLANG_SUPPORT_DYN_ANNOTATION__",
- "/D_USE_MATH_DEFINES",
- "/DEIGEN_MPL2_ONLY",
- "/DEIGEN_MAX_ALIGN_BYTES=64",
- "/DEIGEN_HAS_TYPE_TRAITS=0",
- "/DTF_USE_SNAPPY",
- "/showIncludes",
- "/MD",
- "/O2",
- "/DNDEBUG",
- "/w",
- "-DWIN32_LEAN_AND_MEAN",
- "-DNOGDI",
- "/d2ReducedOptimizeHugeFunctions",
- "/arch:AVX",
- "/std:c++17",
- "-DTENSORFLOW_MONOLITHIC_BUILD",
- "/DPLATFORM_WINDOWS",
- "/DEIGEN_HAS_C99_MATH",
- "/DTENSORFLOW_USE_EIGEN_THREADPOOL",
- "/DEIGEN_AVOID_STL_ARRAY",
- "/Iexternal/gemmlowp",
- "/wd4018",
- "/wd4577",
- "/DNOGDI",
- "/UTF_COMPILE_LIBRARY",
- ],
- "//conditions:default": [
- "-pthread",
- "-std=c++17",
- "-D_GLIBCXX_USE_CXX11_ABI=1",
- ],
- }),
- features = select({
- ":windows": ["windows_export_all_symbols"],
- "//conditions:default": [],
- }),
+ # copts = select({
+ # ":windows": [
+ # "/D__CLANG_SUPPORT_DYN_ANNOTATION__",
+ # "/D_USE_MATH_DEFINES",
+ # "/DEIGEN_MPL2_ONLY",
+ # "/DEIGEN_MAX_ALIGN_BYTES=64",
+ # "/DEIGEN_HAS_TYPE_TRAITS=0",
+ # "/DTF_USE_SNAPPY",
+ # "/showIncludes",
+ # "/MD",
+ # "/O2",
+ # "/DNDEBUG",
+ # "/w",
+ # "-DWIN32_LEAN_AND_MEAN",
+ # "-DNOGDI",
+ # "/d2ReducedOptimizeHugeFunctions",
+ # "/arch:AVX",
+ # "/std:c++17",
+ # "-DTENSORFLOW_MONOLITHIC_BUILD",
+ # "/DPLATFORM_WINDOWS",
+ # "/DEIGEN_HAS_C99_MATH",
+ # "/DTENSORFLOW_USE_EIGEN_THREADPOOL",
+ # "/DEIGEN_AVOID_STL_ARRAY",
+ # "/Iexternal/gemmlowp",
+ # "/wd4018",
+ # "/wd4577",
+ # "/DNOGDI",
+ # "/UTF_COMPILE_LIBRARY",
+ # ],
+ # "//conditions:default": [
+ # "-pthread",
+ # "-std=c++17",
+ # "-D_GLIBCXX_USE_CXX11_ABI=1",
+ # ],
+ # }),
+ # features = select({
+ # ":windows": ["windows_export_all_symbols"],
+ # "//conditions:default": [],
+ # }),
linkshared = 1,
deps = [
":parse_context",
@@ -225,45 +225,45 @@ cc_binary(
"tfq_circuit_append_op.cc",
"tfq_resolve_parameters_op.cc",
],
- copts = select({
- ":windows": [
- "/D__CLANG_SUPPORT_DYN_ANNOTATION__",
- "/D_USE_MATH_DEFINES",
- "/DEIGEN_MPL2_ONLY",
- "/DEIGEN_MAX_ALIGN_BYTES=64",
- "/DEIGEN_HAS_TYPE_TRAITS=0",
- "/DTF_USE_SNAPPY",
- "/showIncludes",
- "/MD",
- "/O2",
- "/DNDEBUG",
- "/w",
- "-DWIN32_LEAN_AND_MEAN",
- "-DNOGDI",
- "/d2ReducedOptimizeHugeFunctions",
- "/arch:AVX",
- "/std:c++17",
- "-DTENSORFLOW_MONOLITHIC_BUILD",
- "/DPLATFORM_WINDOWS",
- "/DEIGEN_HAS_C99_MATH",
- "/DTENSORFLOW_USE_EIGEN_THREADPOOL",
- "/DEIGEN_AVOID_STL_ARRAY",
- "/Iexternal/gemmlowp",
- "/wd4018",
- "/wd4577",
- "/DNOGDI",
- "/UTF_COMPILE_LIBRARY",
- ],
- "//conditions:default": [
- "-pthread",
- "-std=c++17",
- "-D_GLIBCXX_USE_CXX11_ABI=1",
- ],
- }),
- features = select({
- ":windows": ["windows_export_all_symbols"],
- "//conditions:default": [],
- }),
+ # copts = select({
+ # ":windows": [
+ # "/D__CLANG_SUPPORT_DYN_ANNOTATION__",
+ # "/D_USE_MATH_DEFINES",
+ # "/DEIGEN_MPL2_ONLY",
+ # "/DEIGEN_MAX_ALIGN_BYTES=64",
+ # "/DEIGEN_HAS_TYPE_TRAITS=0",
+ # "/DTF_USE_SNAPPY",
+ # "/showIncludes",
+ # "/MD",
+ # "/O2",
+ # "/DNDEBUG",
+ # "/w",
+ # "-DWIN32_LEAN_AND_MEAN",
+ # "-DNOGDI",
+ # "/d2ReducedOptimizeHugeFunctions",
+ # "/arch:AVX",
+ # "/std:c++17",
+ # "-DTENSORFLOW_MONOLITHIC_BUILD",
+ # "/DPLATFORM_WINDOWS",
+ # "/DEIGEN_HAS_C99_MATH",
+ # "/DTENSORFLOW_USE_EIGEN_THREADPOOL",
+ # "/DEIGEN_AVOID_STL_ARRAY",
+ # "/Iexternal/gemmlowp",
+ # "/wd4018",
+ # "/wd4577",
+ # "/DNOGDI",
+ # "/UTF_COMPILE_LIBRARY",
+ # ],
+ # "//conditions:default": [
+ # "-pthread",
+ # "-std=c++17",
+ # "-D_GLIBCXX_USE_CXX11_ABI=1",
+ # ],
+ # }),
+ # features = select({
+ # ":windows": ["windows_export_all_symbols"],
+ # "//conditions:default": [],
+ # }),
linkshared = 1,
deps = [
":parse_context",
@@ -273,8 +273,10 @@ cc_binary(
"//tensorflow_quantum/core/src:program_resolution",
"@com_google_absl//absl/container:flat_hash_map",
"@com_google_absl//absl/container:inlined_vector",
+ "@com_google_absl//absl/functional:any_invocable",
"@com_google_absl//absl/types:optional",
"@com_google_absl//absl/types:span",
+ "@com_google_absl//absl/status:statusor",
"@local_config_tf//:libtensorflow_framework",
"@local_config_tf//:tf_header_lib",
],
@@ -284,41 +286,41 @@ cc_library(
name = "parse_context",
srcs = ["parse_context.cc"],
hdrs = ["parse_context.h"],
- copts = select({
- ":windows": [
- "/D__CLANG_SUPPORT_DYN_ANNOTATION__",
- "/D_USE_MATH_DEFINES",
- "/DEIGEN_MPL2_ONLY",
- "/DEIGEN_MAX_ALIGN_BYTES=64",
- "/DEIGEN_HAS_TYPE_TRAITS=0",
- "/DTF_USE_SNAPPY",
- "/showIncludes",
- "/MD",
- "/O2",
- "/DNDEBUG",
- "/w",
- "-DWIN32_LEAN_AND_MEAN",
- "-DNOGDI",
- "/d2ReducedOptimizeHugeFunctions",
- "/arch:AVX",
- "/std:c++17",
- "-DTENSORFLOW_MONOLITHIC_BUILD",
- "/DPLATFORM_WINDOWS",
- "/DEIGEN_HAS_C99_MATH",
- "/DTENSORFLOW_USE_EIGEN_THREADPOOL",
- "/DEIGEN_AVOID_STL_ARRAY",
- "/Iexternal/gemmlowp",
- "/wd4018",
- "/wd4577",
- "/DNOGDI",
- "/UTF_COMPILE_LIBRARY",
- ],
- "//conditions:default": [
- "-pthread",
- "-std=c++17",
- "-D_GLIBCXX_USE_CXX11_ABI=1",
- ],
- }),
+ # copts = select({
+ # ":windows": [
+ # "/D__CLANG_SUPPORT_DYN_ANNOTATION__",
+ # "/D_USE_MATH_DEFINES",
+ # "/DEIGEN_MPL2_ONLY",
+ # "/DEIGEN_MAX_ALIGN_BYTES=64",
+ # "/DEIGEN_HAS_TYPE_TRAITS=0",
+ # "/DTF_USE_SNAPPY",
+ # "/showIncludes",
+ # "/MD",
+ # "/O2",
+ # "/DNDEBUG",
+ # "/w",
+ # "-DWIN32_LEAN_AND_MEAN",
+ # "-DNOGDI",
+ # "/d2ReducedOptimizeHugeFunctions",
+ # "/arch:AVX",
+ # "/std:c++17",
+ # "-DTENSORFLOW_MONOLITHIC_BUILD",
+ # "/DPLATFORM_WINDOWS",
+ # "/DEIGEN_HAS_C99_MATH",
+ # "/DTENSORFLOW_USE_EIGEN_THREADPOOL",
+ # "/DEIGEN_AVOID_STL_ARRAY",
+ # "/Iexternal/gemmlowp",
+ # "/wd4018",
+ # "/wd4577",
+ # "/DNOGDI",
+ # "/UTF_COMPILE_LIBRARY",
+ # ],
+ # "//conditions:default": [
+ # "-pthread",
+ # "-std=c++17",
+ # "-D_GLIBCXX_USE_CXX11_ABI=1",
+ # ],
+ # }),
deps = [
":tfq_simulate_utils",
"//tensorflow_quantum/core/proto:pauli_sum_cc_proto",
@@ -327,6 +329,8 @@ cc_library(
"//tensorflow_quantum/core/src:program_resolution",
"@com_google_absl//absl/container:flat_hash_map",
"@com_google_absl//absl/container:inlined_vector",
+ "@com_google_absl//absl/functional:any_invocable",
+ "@com_google_absl//absl/status:statusor",
"@local_config_tf//:libtensorflow_framework",
"@local_config_tf//:tf_header_lib",
],
@@ -337,45 +341,45 @@ cc_binary(
srcs = [
"tfq_calculate_unitary_op.cc",
],
- copts = select({
- ":windows": [
- "/D__CLANG_SUPPORT_DYN_ANNOTATION__",
- "/D_USE_MATH_DEFINES",
- "/DEIGEN_MPL2_ONLY",
- "/DEIGEN_MAX_ALIGN_BYTES=64",
- "/DEIGEN_HAS_TYPE_TRAITS=0",
- "/DTF_USE_SNAPPY",
- "/showIncludes",
- "/MD",
- "/O2",
- "/DNDEBUG",
- "/w",
- "-DWIN32_LEAN_AND_MEAN",
- "-DNOGDI",
- "/d2ReducedOptimizeHugeFunctions",
- "/arch:AVX",
- "/std:c++17",
- "-DTENSORFLOW_MONOLITHIC_BUILD",
- "/DPLATFORM_WINDOWS",
- "/DEIGEN_HAS_C99_MATH",
- "/DTENSORFLOW_USE_EIGEN_THREADPOOL",
- "/DEIGEN_AVOID_STL_ARRAY",
- "/Iexternal/gemmlowp",
- "/wd4018",
- "/wd4577",
- "/DNOGDI",
- "/UTF_COMPILE_LIBRARY",
- ],
- "//conditions:default": [
- "-pthread",
- "-std=c++17",
- "-D_GLIBCXX_USE_CXX11_ABI=1",
- ],
- }),
- features = select({
- ":windows": ["windows_export_all_symbols"],
- "//conditions:default": [],
- }),
+ # copts = select({
+ # ":windows": [
+ # "/D__CLANG_SUPPORT_DYN_ANNOTATION__",
+ # "/D_USE_MATH_DEFINES",
+ # "/DEIGEN_MPL2_ONLY",
+ # "/DEIGEN_MAX_ALIGN_BYTES=64",
+ # "/DEIGEN_HAS_TYPE_TRAITS=0",
+ # "/DTF_USE_SNAPPY",
+ # "/showIncludes",
+ # "/MD",
+ # "/O2",
+ # "/DNDEBUG",
+ # "/w",
+ # "-DWIN32_LEAN_AND_MEAN",
+ # "-DNOGDI",
+ # "/d2ReducedOptimizeHugeFunctions",
+ # "/arch:AVX",
+ # "/std:c++17",
+ # "-DTENSORFLOW_MONOLITHIC_BUILD",
+ # "/DPLATFORM_WINDOWS",
+ # "/DEIGEN_HAS_C99_MATH",
+ # "/DTENSORFLOW_USE_EIGEN_THREADPOOL",
+ # "/DEIGEN_AVOID_STL_ARRAY",
+ # "/Iexternal/gemmlowp",
+ # "/wd4018",
+ # "/wd4577",
+ # "/DNOGDI",
+ # "/UTF_COMPILE_LIBRARY",
+ # ],
+ # "//conditions:default": [
+ # "-pthread",
+ # "-std=c++17",
+ # "-D_GLIBCXX_USE_CXX11_ABI=1",
+ # ],
+ # }),
+ # features = select({
+ # ":windows": ["windows_export_all_symbols"],
+ # "//conditions:default": [],
+ # }),
linkshared = 1,
deps = [
":parse_context",
@@ -387,6 +391,7 @@ cc_binary(
"//tensorflow_quantum/core/src:util_qsim",
"@com_google_absl//absl/container:flat_hash_map",
"@com_google_absl//absl/container:inlined_vector",
+ "@com_google_absl//absl/functional:any_invocable",
"@com_google_absl//absl/types:optional",
"@com_google_absl//absl/types:span",
"@local_config_tf//:libtensorflow_framework",
@@ -399,41 +404,41 @@ cc_library(
name = "tfq_simulate_utils",
srcs = ["tfq_simulate_utils.cc"],
hdrs = ["tfq_simulate_utils.h"],
- copts = select({
- ":windows": [
- "/D__CLANG_SUPPORT_DYN_ANNOTATION__",
- "/D_USE_MATH_DEFINES",
- "/DEIGEN_MPL2_ONLY",
- "/DEIGEN_MAX_ALIGN_BYTES=64",
- "/DEIGEN_HAS_TYPE_TRAITS=0",
- "/DTF_USE_SNAPPY",
- "/showIncludes",
- "/MD",
- "/O2",
- "/DNDEBUG",
- "/w",
- "-DWIN32_LEAN_AND_MEAN",
- "-DNOGDI",
- "/d2ReducedOptimizeHugeFunctions",
- "/arch:AVX",
- "/std:c++17",
- "-DTENSORFLOW_MONOLITHIC_BUILD",
- "/DPLATFORM_WINDOWS",
- "/DEIGEN_HAS_C99_MATH",
- "/DTENSORFLOW_USE_EIGEN_THREADPOOL",
- "/DEIGEN_AVOID_STL_ARRAY",
- "/Iexternal/gemmlowp",
- "/wd4018",
- "/wd4577",
- "/DNOGDI",
- "/UTF_COMPILE_LIBRARY",
- ],
- "//conditions:default": [
- "-pthread",
- "-std=c++17",
- "-D_GLIBCXX_USE_CXX11_ABI=1",
- ],
- }),
+ # copts = select({
+ # ":windows": [
+ # "/D__CLANG_SUPPORT_DYN_ANNOTATION__",
+ # "/D_USE_MATH_DEFINES",
+ # "/DEIGEN_MPL2_ONLY",
+ # "/DEIGEN_MAX_ALIGN_BYTES=64",
+ # "/DEIGEN_HAS_TYPE_TRAITS=0",
+ # "/DTF_USE_SNAPPY",
+ # "/showIncludes",
+ # "/MD",
+ # "/O2",
+ # "/DNDEBUG",
+ # "/w",
+ # "-DWIN32_LEAN_AND_MEAN",
+ # "-DNOGDI",
+ # "/d2ReducedOptimizeHugeFunctions",
+ # "/arch:AVX",
+ # "/std:c++17",
+ # "-DTENSORFLOW_MONOLITHIC_BUILD",
+ # "/DPLATFORM_WINDOWS",
+ # "/DEIGEN_HAS_C99_MATH",
+ # "/DTENSORFLOW_USE_EIGEN_THREADPOOL",
+ # "/DEIGEN_AVOID_STL_ARRAY",
+ # "/Iexternal/gemmlowp",
+ # "/wd4018",
+ # "/wd4577",
+ # "/DNOGDI",
+ # "/UTF_COMPILE_LIBRARY",
+ # ],
+ # "//conditions:default": [
+ # "-pthread",
+ # "-std=c++17",
+ # "-D_GLIBCXX_USE_CXX11_ABI=1",
+ # ],
+ # }),
deps = [
"@local_config_tf//:libtensorflow_framework",
"@local_config_tf//:tf_header_lib",
diff --git a/tensorflow_quantum/core/ops/batch_util_test.py b/tensorflow_quantum/core/ops/batch_util_test.py
index 6b11becd7..074b01fcb 100644
--- a/tensorflow_quantum/core/ops/batch_util_test.py
+++ b/tensorflow_quantum/core/ops/batch_util_test.py
@@ -16,6 +16,7 @@
# Remove PYTHONPATH collisions for protobuf.
# pylint: disable=wrong-import-position
import sys
+
NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x]
sys.path = NEW_PATH
# pylint: enable=wrong-import-position
diff --git a/tensorflow_quantum/core/ops/circuit_execution_ops.py b/tensorflow_quantum/core/ops/circuit_execution_ops.py
index 1158e98e4..d82a1c83d 100644
--- a/tensorflow_quantum/core/ops/circuit_execution_ops.py
+++ b/tensorflow_quantum/core/ops/circuit_execution_ops.py
@@ -24,10 +24,12 @@
class TFQStateVectorSimulator(enum.Enum):
"""Enum to make specifying TFQ simulators user-friendly."""
+ # pylint: disable=invalid-name
expectation = tfq_simulate_ops.tfq_simulate_expectation
samples = tfq_simulate_ops.tfq_simulate_samples
state = tfq_simulate_ops.tfq_simulate_state
sampled_expectation = tfq_simulate_ops.tfq_simulate_sampled_expectation
+ # pylint: enable=invalid-name
def _check_quantum_concurrent(quantum_concurrent):
@@ -37,9 +39,9 @@ def _check_quantum_concurrent(quantum_concurrent):
def get_expectation_op(
- backend=None,
- *,
- quantum_concurrent=quantum_context.get_quantum_concurrent_op_mode()):
+ backend=None,
+ *,
+ quantum_concurrent=quantum_context.get_quantum_concurrent_op_mode()):
"""Get a TensorFlow op that will calculate batches of expectation values.
This function produces a non-differentiable TF op that will calculate
@@ -150,9 +152,9 @@ def get_expectation_op(
def get_sampling_op(
- backend=None,
- *,
- quantum_concurrent=quantum_context.get_quantum_concurrent_op_mode()):
+ backend=None,
+ *,
+ quantum_concurrent=quantum_context.get_quantum_concurrent_op_mode()):
"""Get a Tensorflow op that produces samples from given quantum circuits.
This function produces a non-differentiable op that will calculate
@@ -242,9 +244,9 @@ def get_sampling_op(
def get_state_op(
- backend=None,
- *,
- quantum_concurrent=quantum_context.get_quantum_concurrent_op_mode()):
+ backend=None,
+ *,
+ quantum_concurrent=quantum_context.get_quantum_concurrent_op_mode()):
"""Get a TensorFlow op that produces states from given quantum circuits.
This function produces a non-differentiable op that will calculate
@@ -332,9 +334,9 @@ def get_state_op(
def get_sampled_expectation_op(
- backend=None,
- *,
- quantum_concurrent=quantum_context.get_quantum_concurrent_op_mode()):
+ backend=None,
+ *,
+ quantum_concurrent=quantum_context.get_quantum_concurrent_op_mode()):
"""Get a TensorFlow op that will calculate sampled expectation values.
This function produces a non-differentiable TF op that will calculate
diff --git a/tensorflow_quantum/core/ops/circuit_execution_ops_test.py b/tensorflow_quantum/core/ops/circuit_execution_ops_test.py
index f94297cdc..f89349d70 100644
--- a/tensorflow_quantum/core/ops/circuit_execution_ops_test.py
+++ b/tensorflow_quantum/core/ops/circuit_execution_ops_test.py
@@ -16,17 +16,16 @@
# Remove PYTHONPATH collisions for protobuf.
# pylint: disable=wrong-import-position
import sys
+
NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x]
sys.path = NEW_PATH
# pylint: enable=wrong-import-position
-from unittest import mock
import numpy as np
import tensorflow as tf
from absl.testing import parameterized
from scipy import stats
import cirq
-import cirq_google
from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops
from tensorflow_quantum.python import util
@@ -95,13 +94,6 @@ def test_get_expectation_inputs(self):
circuit_execution_ops.get_expectation_op(
backend=cirq.DensityMatrixSimulator())
circuit_execution_ops.get_expectation_op()
- with self.assertRaisesRegex(NotImplementedError,
- expected_regex='Sample-based'):
- mock_engine = mock.Mock()
- circuit_execution_ops.get_expectation_op(
- cirq_google.QuantumEngineSampler(engine=mock_engine,
- processor_id='test',
- gate_set=cirq_google.XMON))
with self.assertRaisesRegex(
TypeError,
expected_regex="cirq.sim.simulator.SimulatesExpectationValues"):
@@ -118,11 +110,6 @@ def test_get_sampled_expectation_inputs(self):
backend=cirq.Simulator())
circuit_execution_ops.get_sampled_expectation_op(
backend=cirq.DensityMatrixSimulator())
- mock_engine = mock.Mock()
- circuit_execution_ops.get_sampled_expectation_op(
- cirq_google.QuantumEngineSampler(engine=mock_engine,
- processor_id='test',
- gate_set=cirq_google.XMON))
with self.assertRaisesRegex(TypeError, expected_regex="a Cirq.Sampler"):
circuit_execution_ops.get_sampled_expectation_op(backend="junk")
@@ -137,11 +124,6 @@ def test_get_samples_inputs(self):
circuit_execution_ops.get_sampling_op(backend=cirq.Simulator())
circuit_execution_ops.get_sampling_op(
backend=cirq.DensityMatrixSimulator())
- mock_engine = mock.Mock()
- circuit_execution_ops.get_sampling_op(
- backend=cirq_google.QuantumEngineSampler(engine=mock_engine,
- processor_id='test',
- gate_set=cirq_google.XMON))
with self.assertRaisesRegex(TypeError,
expected_regex="Expected a Cirq.Sampler"):
circuit_execution_ops.get_sampling_op(backend="junk")
@@ -159,15 +141,6 @@ def test_get_state_inputs(self):
with self.assertRaisesRegex(TypeError,
expected_regex="Cirq.SimulatesFinalState"):
circuit_execution_ops.get_state_op(backend="junk")
- with self.assertRaisesRegex(TypeError,
- expected_regex="Cirq.SimulatesFinalState"):
- mock_engine = mock.Mock()
- circuit_execution_ops.get_state_op(
- backend=cirq_google.QuantumEngineSampler(
- engine=mock_engine,
- processor_id='test',
- gate_set=cirq_google.XMON))
-
with self.assertRaisesRegex(TypeError,
expected_regex="must be type bool."):
circuit_execution_ops.get_state_op(quantum_concurrent='junk')
diff --git a/tensorflow_quantum/core/ops/cirq_ops.py b/tensorflow_quantum/core/ops/cirq_ops.py
index 2650e812b..472f9173b 100644
--- a/tensorflow_quantum/core/ops/cirq_ops.py
+++ b/tensorflow_quantum/core/ops/cirq_ops.py
@@ -19,7 +19,6 @@
import numpy as np
import tensorflow as tf
import cirq
-import cirq_google
from tensorflow_quantum.core.ops import batch_util
from tensorflow_quantum.core.proto import pauli_sum_pb2
@@ -472,7 +471,6 @@ def _no_grad(grad):
if not isinstance(num_samples.dtype.as_numpy_dtype(), numbers.Integral):
raise TypeError("num_samples tensor must be of integer type")
- serialized_programs = programs
programs, resolvers = _batch_deserialize_helper(programs, symbol_names,
symbol_values)
@@ -491,49 +489,12 @@ def _no_grad(grad):
]
max_n_qubits = max(len(p.all_qubits()) for p in programs)
- if isinstance(sampler, cirq_google.QuantumEngineSampler):
- # group samples from identical circuits to reduce communication
- # overhead. Have to keep track of the order in which things came
- # in to make sure the output is ordered correctly
- to_be_grouped = [
- (ser_prog.numpy(), resolver, index)
- for index, (
- ser_prog,
- resolver) in enumerate(zip(serialized_programs, resolvers))
- ]
-
- grouped = _group_tuples(to_be_grouped)
-
- # start all the necessary jobs
- results_mapping = {}
- for key, value in grouped.items():
- program = programs[value[0][1]]
- resolvers = [x[0] for x in value]
- orders = [x[1] for x in value]
-
- # sampler.run_sweep blocks until results are in, so go around it
- result = sampler._engine.run_sweep(
- program=program,
- params=resolvers,
- repetitions=num_samples,
- processor_ids=sampler._processor_ids,
- gate_set=sampler._gate_set)
- results_mapping[result] = orders
-
- # get all results
- cirq_results = [None] * len(programs)
- for key, value in results_mapping.items():
- this_results = key.results()
- for result, index in zip(this_results, value):
- cirq_results[index] = result
-
- else:
- # All other cirq.Samplers handled here.
- cirq_results = []
- for results in sampler.run_batch(programs,
- params_list=resolvers,
- repetitions=num_samples):
- cirq_results.extend(results)
+ # All cirq.Samplers are handled here via run_batch.
+ cirq_results = []
+ for results in sampler.run_batch(programs,
+ params_list=resolvers,
+ repetitions=num_samples):
+ cirq_results.extend(results)
results = []
for r in cirq_results:
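With the engine-specific branch removed, every backend goes through `cirq.Sampler.run_batch`, which pairs `programs[i]` with `params_list[i]` and returns one list of results per program. A minimal sketch of that calling pattern (illustrative circuit and symbol, not TFQ internals):

import cirq
import sympy

a = sympy.Symbol('a')
q = cirq.LineQubit(0)
programs = [cirq.Circuit(cirq.X(q)**a, cirq.measure(q, key='tfq'))]
resolvers = [cirq.ParamResolver({'a': 0.5})]

sampler = cirq.Simulator()
cirq_results = []
# Each outer element of run_batch's return value holds the results for the
# corresponding program; extending flattens them in program order.
for results in sampler.run_batch(programs, params_list=resolvers, repetitions=100):
    cirq_results.extend(results)
print(len(cirq_results))  # 1 result object holding 100 repetitions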
diff --git a/tensorflow_quantum/core/ops/cirq_ops_test.py b/tensorflow_quantum/core/ops/cirq_ops_test.py
index 1b54771a9..0d87855d2 100644
--- a/tensorflow_quantum/core/ops/cirq_ops_test.py
+++ b/tensorflow_quantum/core/ops/cirq_ops_test.py
@@ -16,16 +16,15 @@
# Remove PYTHONPATH collisions for protobuf.
# pylint: disable=wrong-import-position
import sys
+
NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x]
sys.path = NEW_PATH
# pylint: enable=wrong-import-position
-from unittest import mock
import numpy as np
import tensorflow as tf
from absl.testing import parameterized
import cirq
-import cirq_google
from tensorflow_quantum.core.ops import cirq_ops
from tensorflow_quantum.core.serialize import serializer
@@ -348,11 +347,6 @@ def test_get_cirq_sampling_op(self):
cirq_ops._get_cirq_samples()
cirq_ops._get_cirq_samples(cirq.Simulator())
cirq_ops._get_cirq_samples(cirq.DensityMatrixSimulator())
- mock_engine = mock.Mock()
- cirq_ops._get_cirq_samples(
- cirq_google.QuantumEngineSampler(engine=mock_engine,
- processor_id='test',
- gate_set=cirq_google.XMON))
def test_cirq_sampling_op_inputs(self):
"""test input checking in the cirq sampling op."""
@@ -451,7 +445,7 @@ class DummySampler(cirq.Sampler):
def run_sweep(self, program, params, repetitions):
"""Returns all ones in the correct sample shape."""
return [
- cirq.Result(
+ cirq.ResultDict(
params=param,
measurements={
'tfq':
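The switch from `cirq.Result` to `cirq.ResultDict` in the dummy sampler reflects that `cirq.Result` is now an abstract interface; concrete results are built from a parameter resolver plus raw measurement arrays. A minimal sketch of constructing one (the shapes and key are illustrative):

import numpy as np
import cirq

# measurements maps each measurement key to an int array of shape
# (repetitions, num_qubits).
result = cirq.ResultDict(
    params=cirq.ParamResolver({}),
    measurements={'tfq': np.ones((10, 1), dtype=np.int8)})
print(result.measurements['tfq'].shape)  # (10, 1)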
diff --git a/tensorflow_quantum/core/ops/math_ops/BUILD b/tensorflow_quantum/core/ops/math_ops/BUILD
index 6eb8a0320..3c390f8e3 100644
--- a/tensorflow_quantum/core/ops/math_ops/BUILD
+++ b/tensorflow_quantum/core/ops/math_ops/BUILD
@@ -7,10 +7,10 @@ licenses(["notice"])
# Export for the PIP package.
exports_files(["__init__.py"])
-config_setting(
- name = "windows",
- constraint_values = ["@bazel_tools//platforms:windows"],
-)
+# config_setting(
+# name = "windows",
+# constraint_values = ["@bazel_tools//platforms:windows"],
+# )
cc_binary(
name = "_tfq_math_ops.so",
@@ -21,45 +21,45 @@ cc_binary(
"tfq_simulate_1d_samples.cc",
"tfq_simulate_1d_sampled_expectation.cc",
],
- copts = select({
- ":windows": [
- "/D__CLANG_SUPPORT_DYN_ANNOTATION__",
- "/D_USE_MATH_DEFINES",
- "/DEIGEN_MPL2_ONLY",
- "/DEIGEN_MAX_ALIGN_BYTES=64",
- "/DEIGEN_HAS_TYPE_TRAITS=0",
- "/DTF_USE_SNAPPY",
- "/showIncludes",
- "/MD",
- "/O2",
- "/DNDEBUG",
- "/w",
- "-DWIN32_LEAN_AND_MEAN",
- "-DNOGDI",
- "/d2ReducedOptimizeHugeFunctions",
- "/arch:AVX",
- "/std:c++17",
- "-DTENSORFLOW_MONOLITHIC_BUILD",
- "/DPLATFORM_WINDOWS",
- "/DEIGEN_HAS_C99_MATH",
- "/DTENSORFLOW_USE_EIGEN_THREADPOOL",
- "/DEIGEN_AVOID_STL_ARRAY",
- "/Iexternal/gemmlowp",
- "/wd4018",
- "/wd4577",
- "/DNOGDI",
- "/UTF_COMPILE_LIBRARY",
- ],
- "//conditions:default": [
- "-pthread",
- "-std=c++17",
- "-D_GLIBCXX_USE_CXX11_ABI=1",
- ],
- }),
- features = select({
- ":windows": ["windows_export_all_symbols"],
- "//conditions:default": [],
- }),
+ # copts = select({
+ # ":windows": [
+ # "/D__CLANG_SUPPORT_DYN_ANNOTATION__",
+ # "/D_USE_MATH_DEFINES",
+ # "/DEIGEN_MPL2_ONLY",
+ # "/DEIGEN_MAX_ALIGN_BYTES=64",
+ # "/DEIGEN_HAS_TYPE_TRAITS=0",
+ # "/DTF_USE_SNAPPY",
+ # "/showIncludes",
+ # "/MD",
+ # "/O2",
+ # "/DNDEBUG",
+ # "/w",
+ # "-DWIN32_LEAN_AND_MEAN",
+ # "-DNOGDI",
+ # "/d2ReducedOptimizeHugeFunctions",
+ # "/arch:AVX",
+ # "/std:c++17",
+ # "-DTENSORFLOW_MONOLITHIC_BUILD",
+ # "/DPLATFORM_WINDOWS",
+ # "/DEIGEN_HAS_C99_MATH",
+ # "/DTENSORFLOW_USE_EIGEN_THREADPOOL",
+ # "/DEIGEN_AVOID_STL_ARRAY",
+ # "/Iexternal/gemmlowp",
+ # "/wd4018",
+ # "/wd4577",
+ # "/DNOGDI",
+ # "/UTF_COMPILE_LIBRARY",
+ # ],
+ # "//conditions:default": [
+ # "-pthread",
+ # "-std=c++17",
+ # "-D_GLIBCXX_USE_CXX11_ABI=1",
+ # ],
+ # }),
+ # features = select({
+ # ":windows": ["windows_export_all_symbols"],
+ # "//conditions:default": [],
+ # }),
linkshared = 1,
deps = [
# cirq cc proto
@@ -68,10 +68,12 @@ cc_binary(
"//tensorflow_quantum/core/src:adj_util",
"//tensorflow_quantum/core/src:circuit_parser_qsim",
"//tensorflow_quantum/core/src:util_qsim",
+ "@com_google_absl//absl/functional:any_invocable",
"@qsim//lib:mps_simulator",
"@qsim//lib:mps_statespace",
"@qsim//lib:qsim_lib",
- "@eigen//:eigen3",
+
+ # "@eigen//:eigen3",
# tensorflow core framework
# tensorflow core lib
# tensorflow core protos
diff --git a/tensorflow_quantum/core/ops/math_ops/fidelity_op_test.py b/tensorflow_quantum/core/ops/math_ops/fidelity_op_test.py
index 1ab8031ea..9d45d013a 100644
--- a/tensorflow_quantum/core/ops/math_ops/fidelity_op_test.py
+++ b/tensorflow_quantum/core/ops/math_ops/fidelity_op_test.py
@@ -16,6 +16,7 @@
# Remove PYTHONPATH collisions for protobuf.
# pylint: disable=wrong-import-position
import sys
+
NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x]
sys.path = NEW_PATH
# pylint: enable=wrong-import-position
diff --git a/tensorflow_quantum/core/ops/math_ops/inner_product_grad_test.py b/tensorflow_quantum/core/ops/math_ops/inner_product_grad_test.py
index 7fa74ee40..898c7461b 100644
--- a/tensorflow_quantum/core/ops/math_ops/inner_product_grad_test.py
+++ b/tensorflow_quantum/core/ops/math_ops/inner_product_grad_test.py
@@ -16,6 +16,7 @@
# Remove PYTHONPATH collisions for protobuf.
# pylint: disable=wrong-import-position
import sys
+
NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x]
sys.path = NEW_PATH
# pylint: enable=wrong-import-position
diff --git a/tensorflow_quantum/core/ops/math_ops/inner_product_op_test.py b/tensorflow_quantum/core/ops/math_ops/inner_product_op_test.py
index ae83857b6..7322506b7 100644
--- a/tensorflow_quantum/core/ops/math_ops/inner_product_op_test.py
+++ b/tensorflow_quantum/core/ops/math_ops/inner_product_op_test.py
@@ -16,6 +16,7 @@
# Remove PYTHONPATH collisions for protobuf.
# pylint: disable=wrong-import-position
import sys
+
NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x]
sys.path = NEW_PATH
# pylint: enable=wrong-import-position
diff --git a/tensorflow_quantum/core/ops/math_ops/simulate_mps_test.py b/tensorflow_quantum/core/ops/math_ops/simulate_mps_test.py
index 114706d64..d80629861 100644
--- a/tensorflow_quantum/core/ops/math_ops/simulate_mps_test.py
+++ b/tensorflow_quantum/core/ops/math_ops/simulate_mps_test.py
@@ -16,6 +16,7 @@
# Remove PYTHONPATH collisions for protobuf.
# pylint: disable=wrong-import-position
import sys
+
NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x]
sys.path = NEW_PATH
# pylint: enable=wrong-import-position
diff --git a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc
index 74751f9cc..6a5c9db49 100644
--- a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc
+++ b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc
@@ -54,6 +54,11 @@ class TfqInnerProductOp : public tensorflow::OpKernel {
tensorflow::errors::InvalidArgument(absl::StrCat(
"Expected 4 inputs, got ", num_inputs, " inputs.")));
+ OP_REQUIRES(
+ context, context->input(3).dims() == 2,
+ tensorflow::errors::InvalidArgument(absl::StrCat(
+ "other_programs must be rank 2. Got ", context->input(3).dims())));
+
// Create the output Tensor.
const int output_dim_batch_size = context->input(0).dim_size(0);
const int output_dim_internal_size = context->input(3).dim_size(1);
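The added OP_REQUIRES check rejects `other_programs` tensors that are not rank 2; callers are expected to pass a `[batch_size, n_other_programs]` tensor of serialized circuits. A hedged sketch of a call shape that satisfies the check, assuming the public `tfq.math.inner_product` wrapper forwards to this kernel:

import cirq
import sympy
import tensorflow as tf
import tensorflow_quantum as tfq

q = cirq.GridQubit(0, 0)
s = sympy.Symbol('s')
programs = tfq.convert_to_tensor([cirq.Circuit(cirq.X(q)**s)])
# Nested list -> rank-2 tensor of serialized circuits: [batch, n_other].
other_programs = tfq.convert_to_tensor([[cirq.Circuit(cirq.H(q))]])
out = tfq.math.inner_product(programs, tf.constant(['s']),
                             tf.constant([[0.5]]), other_programs)
print(out.shape)  # (1, 1)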
diff --git a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product_grad.cc b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product_grad.cc
index 3db493b11..198f92c63 100644
--- a/tensorflow_quantum/core/ops/math_ops/tfq_inner_product_grad.cc
+++ b/tensorflow_quantum/core/ops/math_ops/tfq_inner_product_grad.cc
@@ -55,6 +55,11 @@ class TfqInnerProductGradOp : public tensorflow::OpKernel {
tensorflow::errors::InvalidArgument(absl::StrCat(
"Expected 5 inputs, got ", num_inputs, " inputs.")));
+ OP_REQUIRES(
+ context, context->input(3).dims() == 2,
+ tensorflow::errors::InvalidArgument(absl::StrCat(
+ "other_programs must be rank 2. Got ", context->input(3).dims())));
+
// Create the output Tensor.
const int output_dim_batch_size = context->input(0).dim_size(0);
const int output_dim_internal_size = context->input(3).dim_size(1);
diff --git a/tensorflow_quantum/core/ops/math_ops/tfq_simulate_1d_expectation.cc b/tensorflow_quantum/core/ops/math_ops/tfq_simulate_1d_expectation.cc
index c00b43a9b..aacf82cb6 100644
--- a/tensorflow_quantum/core/ops/math_ops/tfq_simulate_1d_expectation.cc
+++ b/tensorflow_quantum/core/ops/math_ops/tfq_simulate_1d_expectation.cc
@@ -63,6 +63,11 @@ class TfqSimulateMPS1DExpectationOp : public tensorflow::OpKernel {
tensorflow::errors::InvalidArgument(absl::StrCat(
"Expected 4 inputs, got ", num_inputs, " inputs.")));
+ OP_REQUIRES(
+ context, context->input(3).dims() == 2,
+ tensorflow::errors::InvalidArgument(absl::StrCat(
+ "pauli_sums must be rank 2. Got ", context->input(3).dims())));
+
// Create the output Tensor.
const int output_dim_batch_size = context->input(0).dim_size(0);
const int output_dim_op_size = context->input(3).dim_size(1);
diff --git a/tensorflow_quantum/core/ops/math_ops/tfq_simulate_1d_sampled_expectation.cc b/tensorflow_quantum/core/ops/math_ops/tfq_simulate_1d_sampled_expectation.cc
index ba94e8c72..e7014eb46 100644
--- a/tensorflow_quantum/core/ops/math_ops/tfq_simulate_1d_sampled_expectation.cc
+++ b/tensorflow_quantum/core/ops/math_ops/tfq_simulate_1d_sampled_expectation.cc
@@ -65,6 +65,11 @@ class TfqSimulateMPS1DSampledExpectationOp : public tensorflow::OpKernel {
tensorflow::errors::InvalidArgument(absl::StrCat(
"Expected 5 inputs, got ", num_inputs, " inputs.")));
+ OP_REQUIRES(
+ context, context->input(3).dims() == 2,
+ tensorflow::errors::InvalidArgument(absl::StrCat(
+ "pauli_sums must be rank 2. Got ", context->input(3).dims())));
+
// Create the output Tensor.
const int output_dim_batch_size = context->input(0).dim_size(0);
const int output_dim_op_size = context->input(3).dim_size(1);
diff --git a/tensorflow_quantum/core/ops/noise/BUILD b/tensorflow_quantum/core/ops/noise/BUILD
index 3758037e5..8548b791f 100644
--- a/tensorflow_quantum/core/ops/noise/BUILD
+++ b/tensorflow_quantum/core/ops/noise/BUILD
@@ -7,10 +7,6 @@ licenses(["notice"])
# Export for the PIP package.
exports_files(["__init__.py"])
-config_setting(
- name = "windows",
- constraint_values = ["@bazel_tools//platforms:windows"],
-)
cc_binary(
name = "_tfq_noise_ops.so",
@@ -19,45 +15,45 @@ cc_binary(
"tfq_noisy_sampled_expectation.cc",
"tfq_noisy_samples.cc"
],
- copts = select({
- ":windows": [
- "/D__CLANG_SUPPORT_DYN_ANNOTATION__",
- "/D_USE_MATH_DEFINES",
- "/DEIGEN_MPL2_ONLY",
- "/DEIGEN_MAX_ALIGN_BYTES=64",
- "/DEIGEN_HAS_TYPE_TRAITS=0",
- "/DTF_USE_SNAPPY",
- "/showIncludes",
- "/MD",
- "/O2",
- "/DNDEBUG",
- "/w",
- "-DWIN32_LEAN_AND_MEAN",
- "-DNOGDI",
- "/d2ReducedOptimizeHugeFunctions",
- "/arch:AVX",
- "/std:c++17",
- "-DTENSORFLOW_MONOLITHIC_BUILD",
- "/DPLATFORM_WINDOWS",
- "/DEIGEN_HAS_C99_MATH",
- "/DTENSORFLOW_USE_EIGEN_THREADPOOL",
- "/DEIGEN_AVOID_STL_ARRAY",
- "/Iexternal/gemmlowp",
- "/wd4018",
- "/wd4577",
- "/DNOGDI",
- "/UTF_COMPILE_LIBRARY",
- ],
- "//conditions:default": [
- "-pthread",
- "-std=c++17",
- "-D_GLIBCXX_USE_CXX11_ABI=1",
- ],
- }),
- features = select({
- ":windows": ["windows_export_all_symbols"],
- "//conditions:default": [],
- }),
+ # copts = select({
+ # ":windows": [
+ # "/D__CLANG_SUPPORT_DYN_ANNOTATION__",
+ # "/D_USE_MATH_DEFINES",
+ # "/DEIGEN_MPL2_ONLY",
+ # "/DEIGEN_MAX_ALIGN_BYTES=64",
+ # "/DEIGEN_HAS_TYPE_TRAITS=0",
+ # "/DTF_USE_SNAPPY",
+ # "/showIncludes",
+ # "/MD",
+ # "/O2",
+ # "/DNDEBUG",
+ # "/w",
+ # "-DWIN32_LEAN_AND_MEAN",
+ # "-DNOGDI",
+ # "/d2ReducedOptimizeHugeFunctions",
+ # "/arch:AVX",
+ # "/std:c++17",
+ # "-DTENSORFLOW_MONOLITHIC_BUILD",
+ # "/DPLATFORM_WINDOWS",
+ # "/DEIGEN_HAS_C99_MATH",
+ # "/DTENSORFLOW_USE_EIGEN_THREADPOOL",
+ # "/DEIGEN_AVOID_STL_ARRAY",
+ # "/Iexternal/gemmlowp",
+ # "/wd4018",
+ # "/wd4577",
+ # "/DNOGDI",
+ # "/UTF_COMPILE_LIBRARY",
+ # ],
+ # "//conditions:default": [
+ # "-pthread",
+ # "-std=c++17",
+ # "-D_GLIBCXX_USE_CXX11_ABI=1",
+ # ],
+ # }),
+ # features = select({
+ # ":windows": ["windows_export_all_symbols"],
+ # "//conditions:default": [],
+ # }),
linkshared = 1,
deps = [
# cirq cc proto
diff --git a/tensorflow_quantum/core/ops/noise/noisy_expectation_op_test.py b/tensorflow_quantum/core/ops/noise/noisy_expectation_op_test.py
index 1e73500b8..953829318 100644
--- a/tensorflow_quantum/core/ops/noise/noisy_expectation_op_test.py
+++ b/tensorflow_quantum/core/ops/noise/noisy_expectation_op_test.py
@@ -16,6 +16,7 @@
# Remove PYTHONPATH collisions for protobuf.
# pylint: disable=wrong-import-position
import sys
+
NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x]
sys.path = NEW_PATH
# pylint: enable=wrong-import-position
diff --git a/tensorflow_quantum/core/ops/noise/noisy_sampled_expectation_op_test.py b/tensorflow_quantum/core/ops/noise/noisy_sampled_expectation_op_test.py
index 35d1cc113..c76122070 100644
--- a/tensorflow_quantum/core/ops/noise/noisy_sampled_expectation_op_test.py
+++ b/tensorflow_quantum/core/ops/noise/noisy_sampled_expectation_op_test.py
@@ -16,6 +16,7 @@
# Remove PYTHONPATH collisions for protobuf.
# pylint: disable=wrong-import-position
import sys
+
NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x]
sys.path = NEW_PATH
# pylint: enable=wrong-import-position
diff --git a/tensorflow_quantum/core/ops/noise/noisy_samples_op_test.py b/tensorflow_quantum/core/ops/noise/noisy_samples_op_test.py
index b952e8d40..7790bda8b 100644
--- a/tensorflow_quantum/core/ops/noise/noisy_samples_op_test.py
+++ b/tensorflow_quantum/core/ops/noise/noisy_samples_op_test.py
@@ -16,6 +16,7 @@
# Remove PYTHONPATH collisions for protobuf.
# pylint: disable=wrong-import-position
import sys
+
NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x]
sys.path = NEW_PATH
# pylint: enable=wrong-import-position
diff --git a/tensorflow_quantum/core/ops/noise/tfq_noisy_expectation.cc b/tensorflow_quantum/core/ops/noise/tfq_noisy_expectation.cc
index c67fa01f7..5cbf0b50d 100644
--- a/tensorflow_quantum/core/ops/noise/tfq_noisy_expectation.cc
+++ b/tensorflow_quantum/core/ops/noise/tfq_noisy_expectation.cc
@@ -65,6 +65,11 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {
tensorflow::errors::InvalidArgument(absl::StrCat(
"Expected 5 inputs, got ", num_inputs, " inputs.")));
+ OP_REQUIRES(
+ context, context->input(3).dims() == 2,
+ tensorflow::errors::InvalidArgument(absl::StrCat(
+ "pauli_sums must be rank 2. Got ", context->input(3).dims())));
+
// Create the output Tensor.
const int output_dim_batch_size = context->input(0).dim_size(0);
const int output_dim_op_size = context->input(3).dim_size(1);
diff --git a/tensorflow_quantum/core/ops/noise/tfq_noisy_sampled_expectation.cc b/tensorflow_quantum/core/ops/noise/tfq_noisy_sampled_expectation.cc
index aa0c85691..89263b56a 100644
--- a/tensorflow_quantum/core/ops/noise/tfq_noisy_sampled_expectation.cc
+++ b/tensorflow_quantum/core/ops/noise/tfq_noisy_sampled_expectation.cc
@@ -66,6 +66,11 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
tensorflow::errors::InvalidArgument(absl::StrCat(
"Expected 5 inputs, got ", num_inputs, " inputs.")));
+ OP_REQUIRES(
+ context, context->input(3).dims() == 2,
+ tensorflow::errors::InvalidArgument(absl::StrCat(
+ "pauli_sums must be rank 2. Got ", context->input(3).dims())));
+
// Create the output Tensor.
const int output_dim_batch_size = context->input(0).dim_size(0);
const int output_dim_op_size = context->input(3).dim_size(1);
diff --git a/tensorflow_quantum/core/ops/tfq_adj_grad_op.cc b/tensorflow_quantum/core/ops/tfq_adj_grad_op.cc
index e7252baee..c96a9cb0d 100644
--- a/tensorflow_quantum/core/ops/tfq_adj_grad_op.cc
+++ b/tensorflow_quantum/core/ops/tfq_adj_grad_op.cc
@@ -55,6 +55,16 @@ class TfqAdjointGradientOp : public tensorflow::OpKernel {
tensorflow::errors::InvalidArgument(absl::StrCat(
"Expected 5 inputs, got ", num_inputs, " inputs.")));
+ OP_REQUIRES(
+ context, context->input(2).dims() == 2,
+ tensorflow::errors::InvalidArgument(absl::StrCat(
+ "symbol_values must be rank 2. Got ", context->input(3).dims())));
+
+ OP_REQUIRES(
+ context, context->input(3).dims() == 2,
+ tensorflow::errors::InvalidArgument(absl::StrCat(
+ "pauli_sums must be rank 2. Got ", context->input(3).dims())));
+
// Create the output Tensor.
const int output_dim_batch_size = context->input(0).dim_size(0);
const int output_dim_param_size = context->input(2).dim_size(1);
diff --git a/tensorflow_quantum/core/ops/tfq_adj_grad_op_test.py b/tensorflow_quantum/core/ops/tfq_adj_grad_op_test.py
index 388bb163f..bba67db0a 100644
--- a/tensorflow_quantum/core/ops/tfq_adj_grad_op_test.py
+++ b/tensorflow_quantum/core/ops/tfq_adj_grad_op_test.py
@@ -16,6 +16,7 @@
# Remove PYTHONPATH collisions for protobuf.
# pylint: disable=wrong-import-position
import sys
+
NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x]
sys.path = NEW_PATH
# pylint: enable=wrong-import-position
diff --git a/tensorflow_quantum/core/ops/tfq_ps_util_ops_test.py b/tensorflow_quantum/core/ops/tfq_ps_util_ops_test.py
index 14bccd9bf..96059b1d4 100644
--- a/tensorflow_quantum/core/ops/tfq_ps_util_ops_test.py
+++ b/tensorflow_quantum/core/ops/tfq_ps_util_ops_test.py
@@ -16,6 +16,7 @@
# Remove PYTHONPATH collisions for protobuf.
# pylint: disable=wrong-import-position
import sys
+
NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x]
sys.path = NEW_PATH
# pylint: enable=wrong-import-position
diff --git a/tensorflow_quantum/core/ops/tfq_simulate_expectation_op.cc b/tensorflow_quantum/core/ops/tfq_simulate_expectation_op.cc
index bca6d2f63..7583437ca 100644
--- a/tensorflow_quantum/core/ops/tfq_simulate_expectation_op.cc
+++ b/tensorflow_quantum/core/ops/tfq_simulate_expectation_op.cc
@@ -54,6 +54,11 @@ class TfqSimulateExpectationOp : public tensorflow::OpKernel {
tensorflow::errors::InvalidArgument(absl::StrCat(
"Expected 4 inputs, got ", num_inputs, " inputs.")));
+ OP_REQUIRES(
+ context, context->input(3).dims() == 2,
+ tensorflow::errors::InvalidArgument(absl::StrCat(
+ "pauli_sums must be rank 2. Got ", context->input(3).dims())));
+
// Create the output Tensor.
const int output_dim_batch_size = context->input(0).dim_size(0);
const int output_dim_op_size = context->input(3).dim_size(1);
diff --git a/tensorflow_quantum/core/ops/tfq_simulate_ops_test.py b/tensorflow_quantum/core/ops/tfq_simulate_ops_test.py
index 4cdbe42e5..93c1770e2 100644
--- a/tensorflow_quantum/core/ops/tfq_simulate_ops_test.py
+++ b/tensorflow_quantum/core/ops/tfq_simulate_ops_test.py
@@ -16,6 +16,7 @@
# Remove PYTHONPATH collisions for protobuf.
# pylint: disable=wrong-import-position
import sys
+
NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x]
sys.path = NEW_PATH
# pylint: enable=wrong-import-position
diff --git a/tensorflow_quantum/core/ops/tfq_simulate_sampled_expectation_op.cc b/tensorflow_quantum/core/ops/tfq_simulate_sampled_expectation_op.cc
index e0ed05a49..b9f9ee982 100644
--- a/tensorflow_quantum/core/ops/tfq_simulate_sampled_expectation_op.cc
+++ b/tensorflow_quantum/core/ops/tfq_simulate_sampled_expectation_op.cc
@@ -58,6 +58,11 @@ class TfqSimulateSampledExpectationOp : public tensorflow::OpKernel {
tensorflow::errors::InvalidArgument(absl::StrCat(
"Expected 5 inputs, got ", num_inputs, " inputs.")));
+ OP_REQUIRES(
+ context, context->input(3).dims() == 2,
+ tensorflow::errors::InvalidArgument(absl::StrCat(
+ "pauli_sums must be rank 2. Got ", context->input(3).dims())));
+
// Create the output Tensor.
const int output_dim_batch_size = context->input(0).dim_size(0);
const int output_dim_op_size = context->input(3).dim_size(1);
diff --git a/tensorflow_quantum/core/ops/tfq_unitary_op_test.py b/tensorflow_quantum/core/ops/tfq_unitary_op_test.py
index 212094056..5bba1df59 100644
--- a/tensorflow_quantum/core/ops/tfq_unitary_op_test.py
+++ b/tensorflow_quantum/core/ops/tfq_unitary_op_test.py
@@ -16,6 +16,7 @@
# Remove PYTHONPATH collisions for protobuf.
# pylint: disable=wrong-import-position
import sys
+
NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x]
sys.path = NEW_PATH
# pylint: enable=wrong-import-position
diff --git a/tensorflow_quantum/core/ops/tfq_utility_ops_test.py b/tensorflow_quantum/core/ops/tfq_utility_ops_test.py
index 00c5ff791..8faf14aaf 100644
--- a/tensorflow_quantum/core/ops/tfq_utility_ops_test.py
+++ b/tensorflow_quantum/core/ops/tfq_utility_ops_test.py
@@ -16,6 +16,7 @@
# Remove PYTHONPATH collisions for protobuf.
# pylint: disable=wrong-import-position
import sys
+
NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x]
sys.path = NEW_PATH
# pylint: enable=wrong-import-position
diff --git a/tensorflow_quantum/core/serialize/op_deserializer.py b/tensorflow_quantum/core/serialize/op_deserializer.py
index 667ee0ef0..bda2a8f0b 100644
--- a/tensorflow_quantum/core/serialize/op_deserializer.py
+++ b/tensorflow_quantum/core/serialize/op_deserializer.py
@@ -57,10 +57,10 @@ def qubit_from_proto(proto_id):
def _arg_from_proto(
- arg_proto,
- *,
- arg_function_language,
- required_arg_name=None,
+ arg_proto,
+ *,
+ arg_function_language,
+ required_arg_name=None,
):
"""Extracts a python value from an argument value proto.
Args:
diff --git a/tensorflow_quantum/core/serialize/op_deserializer_test.py b/tensorflow_quantum/core/serialize/op_deserializer_test.py
index ce1748b65..50c6decbe 100644
--- a/tensorflow_quantum/core/serialize/op_deserializer_test.py
+++ b/tensorflow_quantum/core/serialize/op_deserializer_test.py
@@ -15,6 +15,7 @@
# Remove PYTHONPATH collisions for protobuf.
# pylint: disable=wrong-import-position
import sys
+
NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x]
sys.path = NEW_PATH
# pylint: enable=wrong-import-position
@@ -38,13 +39,16 @@ def op_proto(json_dict):
@cirq.value_equality
-class GateWithAttribute(cirq.SingleQubitGate):
+class GateWithAttribute(cirq.Gate):
"""GateAttribute helper class."""
def __init__(self, val, not_req=None):
self.val = val
self.not_req = not_req
+ def num_qubits(self):
+ return 1
+
def _value_equality_values_(self):
return (self.val,)
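
Background for the SingleQubitGate changes here and in the serializer tests below (not part of the patch): newer Cirq releases removed cirq.SingleQubitGate, and the drop-in replacement is to subclass cirq.Gate and report the qubit count, as these test helpers now do. A minimal sketch of the pattern, using a hypothetical gate name:

    import cirq
    import numpy as np

    class ExampleOneQubitGate(cirq.Gate):
        """Single-qubit gate written without cirq.SingleQubitGate."""

        def num_qubits(self) -> int:
            # Cirq uses this to validate that the gate acts on one qubit.
            return 1

        def _unitary_(self):
            return np.array([[0.0, 1.0], [1.0, 0.0]])  # placeholder X gate

    op = ExampleOneQubitGate().on(cirq.GridQubit(0, 0))
    print(cirq.unitary(op))
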
diff --git a/tensorflow_quantum/core/serialize/op_serializer.py b/tensorflow_quantum/core/serialize/op_serializer.py
index 509216def..bdb1effc5 100644
--- a/tensorflow_quantum/core/serialize/op_serializer.py
+++ b/tensorflow_quantum/core/serialize/op_serializer.py
@@ -176,11 +176,11 @@ def can_serialize_operation(self, op):
return supported_gate_type and self.can_serialize_predicate(op)
def to_proto(
- self,
- op,
- msg=None,
- *,
- arg_function_language='',
+ self,
+ op,
+ msg=None,
+ *,
+ arg_function_language='',
):
"""Returns the cirq_google.api.v2.Operation message as a proto dict."""
diff --git a/tensorflow_quantum/core/serialize/op_serializer_test.py b/tensorflow_quantum/core/serialize/op_serializer_test.py
index a485091e7..921d1e020 100644
--- a/tensorflow_quantum/core/serialize/op_serializer_test.py
+++ b/tensorflow_quantum/core/serialize/op_serializer_test.py
@@ -15,6 +15,7 @@
# Remove PYTHONPATH collisions for protobuf.
# pylint: disable=wrong-import-position
import sys
+
NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x]
sys.path = NEW_PATH
# pylint: enable=wrong-import-position
@@ -38,32 +39,41 @@ def op_proto(json):
return op
-class GateWithAttribute(cirq.SingleQubitGate):
+class GateWithAttribute(cirq.Gate):
"""GateAttribute helper class."""
def __init__(self, val):
self.val = val
+ def num_qubits(self):
+ return 1
+
-class GateWithProperty(cirq.SingleQubitGate):
+class GateWithProperty(cirq.Gate):
"""GateProperty helper class."""
def __init__(self, val, not_req=None):
self._val = val
self._not_req = not_req
+ def num_qubits(self):
+ return 1
+
@property
def val(self):
"""get val."""
return self._val
-class GateWithMethod(cirq.SingleQubitGate):
+class GateWithMethod(cirq.Gate):
"""GateMethod helper class."""
def __init__(self, val):
self._val = val
+ def num_qubits(self):
+ return 1
+
def get_val(self):
"""get val."""
return self._val
diff --git a/tensorflow_quantum/core/serialize/serializable_gate_set.py b/tensorflow_quantum/core/serialize/serializable_gate_set.py
index 977f05dfb..bdf1e97b1 100644
--- a/tensorflow_quantum/core/serialize/serializable_gate_set.py
+++ b/tensorflow_quantum/core/serialize/serializable_gate_set.py
@@ -140,11 +140,11 @@ def serialize(self, program, msg=None, *, arg_function_language=None):
return msg
def serialize_op(
- self,
- op,
- msg=None,
- *,
- arg_function_language='',
+ self,
+ op,
+ msg=None,
+ *,
+ arg_function_language='',
):
"""Serialize an Operation to cirq_google.api.v2.Operation proto.
@@ -195,10 +195,10 @@ def deserialize(self, proto, device=None):
raise NotImplementedError('Program proto does not contain a circuit.')
def deserialize_op(
- self,
- operation_proto,
- *,
- arg_function_language='',
+ self,
+ operation_proto,
+ *,
+ arg_function_language='',
):
"""Deserialize an Operation from a cirq_google.api.v2.Operation.
@@ -231,10 +231,10 @@ def _serialize_circuit(self, circuit, msg, *, arg_function_language):
arg_function_language=arg_function_language)
def _deserialize_circuit(
- self,
- circuit_proto,
- *,
- arg_function_language,
+ self,
+ circuit_proto,
+ *,
+ arg_function_language,
):
moments = []
for i, moment_proto in enumerate(circuit_proto.moments):
diff --git a/tensorflow_quantum/core/serialize/serializable_gate_set_test.py b/tensorflow_quantum/core/serialize/serializable_gate_set_test.py
index 1e6fd9861..e8f94b1db 100644
--- a/tensorflow_quantum/core/serialize/serializable_gate_set_test.py
+++ b/tensorflow_quantum/core/serialize/serializable_gate_set_test.py
@@ -15,6 +15,7 @@
# Remove PYTHONPATH collisions for protobuf.
# pylint: disable=wrong-import-position
import sys
+
NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x]
sys.path = NEW_PATH
# pylint: enable=wrong-import-position
diff --git a/tensorflow_quantum/core/serialize/serializer_test.py b/tensorflow_quantum/core/serialize/serializer_test.py
index a43da0cf0..3a89a03ea 100644
--- a/tensorflow_quantum/core/serialize/serializer_test.py
+++ b/tensorflow_quantum/core/serialize/serializer_test.py
@@ -16,6 +16,7 @@
# Remove PYTHONPATH collisions for protobuf.
# pylint: disable=wrong-import-position
import sys
+
NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x]
sys.path = NEW_PATH
# pylint: enable=wrong-import-position
@@ -93,16 +94,25 @@ def _build_op_proto(gate_id, arg_names, arg_vals, qubit_ids):
circuit_proto = program_proto.circuit
circuit_proto.scheduling_strategy = circuit_proto.MOMENT_BY_MOMENT
- circuit_proto.moments.add(operations=[program_pb2.Operation(
- gate = program_pb2.Gate(id=gate_id),
- args = {arg_names[i]: (program_pb2.Arg(symbol=arg_vals[i]) \
- if isinstance(arg_vals[i], str) else \
- program_pb2.Arg(
- arg_value=program_pb2.ArgValue(
- float_value=np.round(float(arg_vals[i]), 6)))) \
- for i in range(len(arg_vals))},
- qubits=[program_pb2.Qubit(
- id=q_id) for q_id in qubit_ids])])
+
+ qubit_protos = [program_pb2.Qubit(id=q_id) for q_id in qubit_ids]
+
+ def _create_arg(value):
+ """Creates a program_pb2.Arg based on the value type."""
+ if isinstance(value, str):
+ return program_pb2.Arg(symbol=value)
+ return program_pb2.Arg(arg_value=program_pb2.ArgValue(
+ float_value=round(float(value), 6)))
+
+ all_operations = [
+ program_pb2.Operation(gate=program_pb2.Gate(id=gate_id),
+ args={
+ name: _create_arg(value)
+ for name, value in zip(arg_names, arg_vals)
+ },
+ qubits=qubit_protos)
+ ]
+ circuit_proto.moments.add(operations=all_operations)
# Add in empty control information
t = program_proto.circuit.moments[0].operations[0]
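
A quick illustration (not part of the patch) of what the refactored _create_arg helper above produces, assuming the same program_pb2 module the test already imports:

    # Mirrors the helper added above; shown only to illustrate the
    # symbol-vs-number mapping. The program_pb2 import path is assumed.
    def _create_arg(value):
        if isinstance(value, str):
            return program_pb2.Arg(symbol=value)
        return program_pb2.Arg(arg_value=program_pb2.ArgValue(
            float_value=round(float(value), 6)))

    print(_create_arg('alpha'))    # Arg with symbol="alpha"
    print(_create_arg(0.1234567))  # Arg with arg_value.float_value=0.123457
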
diff --git a/tensorflow_quantum/core/src/BUILD b/tensorflow_quantum/core/src/BUILD
index 5595a5ca2..9b9419b5b 100644
--- a/tensorflow_quantum/core/src/BUILD
+++ b/tensorflow_quantum/core/src/BUILD
@@ -56,6 +56,7 @@ cc_library(
"//tensorflow_quantum/core/proto:projector_sum_cc_proto",
"@com_google_absl//absl/container:flat_hash_map",
"@com_google_absl//absl/status",
+ "@com_google_absl//absl/status:statusor",
"@local_config_tf//:libtensorflow_framework",
"@local_config_tf//:tf_header_lib",
"@qsim//lib:channel",
@@ -99,6 +100,8 @@ cc_library(
"//tensorflow_quantum/core/proto:pauli_sum_cc_proto",
"//tensorflow_quantum/core/proto:projector_sum_cc_proto",
"@com_google_absl//absl/container:inlined_vector", # unclear why needed.
+ "@com_google_absl//absl/status",
+ "@com_google_absl//absl/status:statusor",
"@local_config_tf//:libtensorflow_framework",
"@local_config_tf//:tf_header_lib",
"@qsim//lib:qsim_lib",
@@ -114,6 +117,9 @@ cc_test(
":util_qsim",
"@com_google_absl//absl/container:flat_hash_map",
"@com_google_googletest//:gtest_main",
+ "@com_google_absl//absl/functional:any_invocable",
+ "@com_google_absl//absl/status:statusor",
+ "@com_google_absl//absl/status",
"@local_config_tf//:libtensorflow_framework",
"@local_config_tf//:tf_header_lib",
"@qsim//lib:qsim_lib",
diff --git a/tensorflow_quantum/core/src/circuit_parser_qsim.cc b/tensorflow_quantum/core/src/circuit_parser_qsim.cc
index 1024d28c7..2b3e81d19 100644
--- a/tensorflow_quantum/core/src/circuit_parser_qsim.cc
+++ b/tensorflow_quantum/core/src/circuit_parser_qsim.cc
@@ -58,8 +58,7 @@ inline Status ParseProtoArg(
// iterator