
Merge pull request #15 from jaeyoo/upgrade_cirq
Upgrade cirq from 0.13.1 to ~= 1.0, etc.
jaeyoo authored May 3, 2023
2 parents 598c415 + 1e3a13e commit 0b59038
Showing 53 changed files with 1,095 additions and 800 deletions.
2 changes: 1 addition & 1 deletion docs/tutorials/hello_many_worlds.ipynb
@@ -255,7 +255,7 @@
"# Create a circuit on these qubits using the parameters you created above.\n",
"circuit = cirq.Circuit(\n",
" cirq.rx(a).on(q0),\n",
" cirq.ry(b).on(q1), cirq.CNOT(control=q0, target=q1))\n",
" cirq.ry(b).on(q1), cirq.CNOT(q0, q1))\n",
"\n",
"SVGCircuit(circuit)"
]
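The notebook cell drops the keyword form of CNOT; with cirq ~= 1.0 the control and target qubits are passed positionally. A minimal sketch of the updated cell, assuming cirq and sympy are installed (the qubit layout here is illustrative):

    import cirq
    import sympy

    # Symbols and qubits as in the tutorial; layout chosen for illustration.
    a, b = sympy.symbols('a b')
    q0, q1 = cirq.GridQubit.rect(1, 2)

    # cirq 1.x: pass control and target positionally instead of
    # CNOT(control=q0, target=q1).
    circuit = cirq.Circuit(
        cirq.rx(a).on(q0),
        cirq.ry(b).on(q1),
        cirq.CNOT(q0, q1))
    print(circuit)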
4 changes: 2 additions & 2 deletions release/setup.py
@@ -51,15 +51,15 @@ def finalize_options(self):


REQUIRED_PACKAGES = [
'cirq-core==0.13.1', 'cirq-google==0.13.1', 'sympy == 1.8',
'cirq-core~=1.0', 'cirq-google~=1.0', 'sympy == 1.8',
'googleapis-common-protos==1.52.0', 'google-api-core==1.21.0',
'google-auth==1.18.0', 'protobuf==3.19.5'
]

# placed as extra to not have required overwrite existing nightly installs if
# they exist.
EXTRA_PACKAGES = ['tensorflow == 2.11.0']
CUR_VERSION = '0.7.3'
CUR_VERSION = '0.7.4'


class BinaryDistribution(Distribution):
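The hard pin on cirq 0.13.1 becomes a compatible-release constraint. Under PEP 440, ~=1.0 is shorthand for >=1.0, ==1.*, so any 1.x release satisfies it while 2.0 would not. A small check of that reading, assuming the packaging library is available:

    from packaging.specifiers import SpecifierSet

    spec = SpecifierSet("~=1.0")  # compatible release: >=1.0, ==1.*
    for version in ["0.13.1", "1.0.0", "1.3.2", "2.0.0"]:
        print(version, version in spec)
    # 0.13.1 False / 1.0.0 True / 1.3.2 True / 2.0.0 False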
4 changes: 2 additions & 2 deletions requirements.txt
@@ -1,5 +1,5 @@
cirq-core==0.13.1
cirq-google==0.13.1
cirq-core~=1.0
cirq-google~=1.0
sympy==1.8
numpy==1.24.2 # TensorFlow can detect if it was built against other versions.
nbformat==4.4.0
12 changes: 11 additions & 1 deletion scripts/test_all.sh
@@ -14,7 +14,17 @@
# limitations under the License.
# ==============================================================================
echo "Testing All Bazel py_test and cc_tests.";
test_outputs=$(bazel test -c opt --experimental_repo_remote_exec --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=1" --cxxopt="-std=c++17" --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" --notest_keep_going --test_output=errors //tensorflow_quantum/...)
ENABLE_CUDA=${1}

if [[ ${ENABLE_CUDA} == "gpu" ]]; then
echo "GPU mode. CUDA config is set."
CUDA_CONFIG="--config=cuda"
else
echo "CPU mode."
CUDA_CONFIG=""
fi

test_outputs=$(bazel test -c opt ${CUDA_CONFIG} --experimental_repo_remote_exec --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=1" --cxxopt="-std=c++17" --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" --test_output=errors //tensorflow_quantum/...)
exit_code=$?
if [ "$exit_code" == "0" ]; then
echo "Testing Complete!";
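With this change the test script takes an optional first argument: passing gpu (for example, bash scripts/test_all.sh gpu) adds --config=cuda to the bazel invocation, while any other value or no argument runs the CPU-only configuration. The --notest_keep_going flag is also dropped from the bazel command.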
32 changes: 10 additions & 22 deletions tensorflow_quantum/core/ops/circuit_execution_ops_test.py
@@ -27,6 +27,7 @@
from scipy import stats
import cirq
import cirq_google
from cirq_google.engine.abstract_processor import AbstractProcessor

from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops
from tensorflow_quantum.python import util
@@ -115,11 +116,9 @@ def test_get_expectation_inputs(self):
circuit_execution_ops.get_expectation_op()
with self.assertRaisesRegex(NotImplementedError,
expected_regex='Sample-based'):
mock_engine = mock.Mock()
mock_processor = mock.create_autospec(AbstractProcessor)
circuit_execution_ops.get_expectation_op(
cirq_google.QuantumEngineSampler(engine=mock_engine,
processor_id='test',
gate_set=cirq_google.XMON))
cirq_google.ProcessorSampler(processor=mock_processor))
with self.assertRaisesRegex(
TypeError,
expected_regex="cirq.sim.simulator.SimulatesExpectationValues"):
@@ -145,11 +144,9 @@ def test_get_sampled_expectation_inputs(self):
backend=cirq.Simulator())
circuit_execution_ops.get_sampled_expectation_op(
backend=cirq.DensityMatrixSimulator())
mock_engine = mock.Mock()
mock_processor = mock.create_autospec(AbstractProcessor)
circuit_execution_ops.get_sampled_expectation_op(
cirq_google.QuantumEngineSampler(engine=mock_engine,
processor_id='test',
gate_set=cirq_google.XMON))
cirq_google.ProcessorSampler(processor=mock_processor))
with self.assertRaisesRegex(TypeError, expected_regex="a Cirq.Sampler"):
circuit_execution_ops.get_sampled_expectation_op(backend="junk")

@@ -174,11 +171,9 @@ def test_get_samples_inputs(self):
circuit_execution_ops.get_sampling_op(backend=cirq.Simulator())
circuit_execution_ops.get_sampling_op(
backend=cirq.DensityMatrixSimulator())
mock_engine = mock.Mock()
mock_processor = mock.create_autospec(AbstractProcessor)
circuit_execution_ops.get_sampling_op(
backend=cirq_google.QuantumEngineSampler(engine=mock_engine,
processor_id='test',
gate_set=cirq_google.XMON))
backend=cirq_google.ProcessorSampler(processor=mock_processor))
with self.assertRaisesRegex(TypeError,
expected_regex="Expected a Cirq.Sampler"):
circuit_execution_ops.get_sampling_op(backend="junk")
@@ -207,12 +202,9 @@ def test_get_state_inputs(self):
circuit_execution_ops.get_state_op(backend="junk")
with self.assertRaisesRegex(TypeError,
expected_regex="Cirq.SimulatesFinalState"):
mock_engine = mock.Mock()
mock_processor = mock.create_autospec(AbstractProcessor)
circuit_execution_ops.get_state_op(
backend=cirq_google.QuantumEngineSampler(
engine=mock_engine,
processor_id='test',
gate_set=cirq_google.XMON))
backend=cirq_google.ProcessorSampler(processor=mock_processor))

with self.assertRaisesRegex(TypeError,
expected_regex="must be type bool."):
@@ -339,7 +331,7 @@ def test_simulate_state_large(self, op_and_sim):
symbol_names = []
circuit_batch, resolver_batch = \
util.random_circuit_resolver_batch(
cirq.GridQubit.rect(4, 4), 5)
cirq.GridQubit.rect(3, 3), 5)

symbol_values_array = np.array(
[[resolver[symbol]
@@ -351,10 +343,6 @@

cirq_states = batch_util.batch_calculate_state(circuit_batch,
resolver_batch, sim)
# Due to numpy memory allocation error with large circuits,
# we deallocate these variables.
del circuit_batch
del resolver_batch

self.assertAllClose(cirq_states, op_states, atol=1e-5, rtol=1e-5)

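With cirq-google ~= 1.0 the tests move off QuantumEngineSampler and build a ProcessorSampler around an autospec'd AbstractProcessor, rather than mocking an engine plus processor_id and gate_set. A minimal sketch of the new mock pattern, assuming cirq-google ~= 1.0; the same swap appears in cirq_ops.py and cirq_ops_test.py below:

    from unittest import mock

    import cirq_google
    from cirq_google.engine.abstract_processor import AbstractProcessor

    # The sampler is now constructed from a processor object; autospec keeps
    # the mock's interface in sync with AbstractProcessor.
    mock_processor = mock.create_autospec(AbstractProcessor)
    sampler = cirq_google.ProcessorSampler(processor=mock_processor)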
2 changes: 1 addition & 1 deletion tensorflow_quantum/core/ops/cirq_ops.py
@@ -491,7 +491,7 @@ def _no_grad(grad):
]
max_n_qubits = max(len(p.all_qubits()) for p in programs)

if isinstance(sampler, cirq_google.QuantumEngineSampler):
if isinstance(sampler, cirq_google.ProcessorSampler):
# group samples from identical circuits to reduce communication
# overhead. Have to keep track of the order in which things came
# in to make sure the output is ordered correctly
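The sampling op keys its batching behavior off the sampler type: engine-backed samplers get identical circuits grouped to cut communication overhead, with the original ordering restored afterwards, while local simulators are sampled directly. A rough sketch of that dispatch under the new type, assuming cirq-google ~= 1.0 (the helper name is illustrative, not the op's actual code):

    import cirq
    import cirq_google

    def groups_identical_circuits(sampler: cirq.Sampler) -> bool:
        # Engine-backed samplers pay a per-request cost, so identical circuits
        # are grouped before submission; local simulators skip the grouping.
        return isinstance(sampler, cirq_google.ProcessorSampler)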
11 changes: 6 additions & 5 deletions tensorflow_quantum/core/ops/cirq_ops_test.py
@@ -26,6 +26,7 @@
from absl.testing import parameterized
import cirq
import cirq_google
from cirq_google.engine.abstract_processor import AbstractProcessor

from tensorflow_quantum.core.ops import cirq_ops
from tensorflow_quantum.core.serialize import serializer
@@ -348,11 +349,9 @@ def test_get_cirq_sampling_op(self):
cirq_ops._get_cirq_samples()
cirq_ops._get_cirq_samples(cirq.Simulator())
cirq_ops._get_cirq_samples(cirq.DensityMatrixSimulator())
mock_engine = mock.Mock()
mock_processor = mock.create_autospec(AbstractProcessor)
cirq_ops._get_cirq_samples(
cirq_google.QuantumEngineSampler(engine=mock_engine,
processor_id='test',
gate_set=cirq_google.XMON))
cirq_google.ProcessorSampler(processor=mock_processor))

def test_cirq_sampling_op_inputs(self):
"""test input checking in the cirq sampling op."""
@@ -451,7 +450,9 @@ class DummySampler(cirq.Sampler):
def run_sweep(self, program, params, repetitions):
"""Returns all ones in the correct sample shape."""
return [
cirq.Result(
cirq_google.EngineResult(
job_id="1",
job_finished_time="1",
params=param,
measurements={
'tfq':
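The DummySampler test double now returns cirq_google.EngineResult, which wraps the measurement dictionary together with engine job metadata (job_id, job_finished_time). A minimal sketch of constructing one, assuming cirq-google ~= 1.0; the measurement payload here is illustrative rather than the test's actual array:

    import numpy as np
    import cirq
    import cirq_google

    result = cirq_google.EngineResult(
        job_id="1",
        job_finished_time="1",
        params=cirq.ParamResolver({}),
        # One measurement key with shape (repetitions, num_qubits).
        measurements={'tfq': np.ones((5, 2), dtype=int)})
    print(result.measurements['tfq'].shape)  # (5, 2)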
12 changes: 6 additions & 6 deletions tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc
@@ -174,7 +174,7 @@ class TfqInnerProductOp : public tensorflow::OpKernel {
// Simulate programs one by one. Parallelizing over state vectors
// we no longer parallelize over circuits. Each time we encounter a
// a larger circuit we will grow the Statevector as necessary.
for (int i = 0; i < fused_circuits.size(); i++) {
for (size_t i = 0; i < fused_circuits.size(); i++) {
int nq = num_qubits[i];
if (nq > largest_nq) {
// need to switch to larger statespace.
@@ -186,18 +186,18 @@
// the state if there is a possibility that circuit[i] and
// circuit[i + 1] produce the same state.
ss.SetStateZero(sv);
for (int j = 0; j < fused_circuits[i].size(); j++) {
for (size_t j = 0; j < fused_circuits[i].size(); j++) {
qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv);
}
for (int j = 0; j < other_fused_circuits[i].size(); j++) {
for (size_t j = 0; j < other_fused_circuits[i].size(); j++) {
// (#679) Just ignore empty program
if (fused_circuits[i].size() == 0) {
(*output_tensor)(i, j) = std::complex<float>(1, 0);
continue;
}

ss.SetStateZero(scratch);
for (int k = 0; k < other_fused_circuits[i][j].size(); k++) {
for (size_t k = 0; k < other_fused_circuits[i][j].size(); k++) {
qsim::ApplyFusedGate(sim, other_fused_circuits[i][j][k], scratch);
}

@@ -255,13 +255,13 @@ class TfqInnerProductOp : public tensorflow::OpKernel {
// no need to update scratch_state since ComputeExpectation
// will take care of things for us.
ss.SetStateZero(sv);
for (int j = 0; j < fused_circuits[cur_batch_index].size(); j++) {
for (size_t j = 0; j < fused_circuits[cur_batch_index].size(); j++) {
qsim::ApplyFusedGate(sim, fused_circuits[cur_batch_index][j], sv);
}
}

ss.SetStateZero(scratch);
for (int k = 0;
for (size_t k = 0;
k <
other_fused_circuits[cur_batch_index][cur_internal_index].size();
k++) {
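In the C++ kernels, loop counters that run up to a std::vector's size() change from int to size_t. size() returns an unsigned size_t, so the old signed counters trip signed/unsigned comparison warnings (-Wsign-compare) when that warning is enabled; the same substitution appears in the gradient and noisy-expectation kernels below.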
@@ -398,13 +398,13 @@ class TfqInnerProductGradOp : public tensorflow::OpKernel {
// if applicable compute control qubit mask and control value bits.
uint64_t mask = 0;
uint64_t cbits = 0;
for (int k = 0; k < cur_gate.controlled_by.size(); k++) {
for (size_t k = 0; k < cur_gate.controlled_by.size(); k++) {
uint64_t control_loc = cur_gate.controlled_by[k];
mask |= uint64_t{1} << control_loc;
cbits |= ((cur_gate.cmask >> k) & 1) << control_loc;
}

for (int k = 0;
for (size_t k = 0;
k < gradient_gates[cur_batch_index][l - 1].grad_gates.size();
k++) {
// Copy sv_adj onto scratch2 in anticipation of non-unitary
28 changes: 14 additions & 14 deletions tensorflow_quantum/core/ops/noise/tfq_noisy_expectation.cc
@@ -175,8 +175,8 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {

tensorflow::GuardedPhiloxRandom random_gen;
int max_n_shots = 1;
for (int i = 0; i < num_samples.size(); i++) {
for (int j = 0; j < num_samples[i].size(); j++) {
for (size_t i = 0; i < num_samples.size(); i++) {
for (size_t j = 0; j < num_samples[i].size(); j++) {
max_n_shots = std::max(max_n_shots, num_samples[i][j]);
}
}
@@ -188,12 +188,12 @@
// Simulate programs one by one. Parallelizing over state vectors
// we no longer parallelize over circuits. Each time we encounter a
// a larger circuit we will grow the Statevector as necessary.
for (int i = 0; i < ncircuits.size(); i++) {
for (size_t i = 0; i < ncircuits.size(); i++) {
int nq = num_qubits[i];

// (#679) Just ignore empty program
if (ncircuits[i].channels.size() == 0) {
for (int j = 0; j < pauli_sums[i].size(); j++) {
for (size_t j = 0; j < pauli_sums[i].size(); j++) {
(*output_tensor)(i, j) = -2.0;
}
continue;
@@ -220,7 +220,7 @@
sv, unused_stats);

// Use this trajectory as a source for all expectation calculations.
for (int j = 0; j < pauli_sums[i].size(); j++) {
for (size_t j = 0; j < pauli_sums[i].size(); j++) {
if (run_samples[j] >= num_samples[i][j]) {
continue;
}
@@ -232,14 +232,14 @@
run_samples[j]++;
}
bool break_loop = true;
for (int j = 0; j < num_samples[i].size(); j++) {
for (size_t j = 0; j < num_samples[i].size(); j++) {
if (run_samples[j] < num_samples[i][j]) {
break_loop = false;
break;
}
}
if (break_loop) {
for (int j = 0; j < num_samples[i].size(); j++) {
for (size_t j = 0; j < num_samples[i].size(); j++) {
rolling_sums[j] /= num_samples[i][j];
(*output_tensor)(i, j) = static_cast<float>(rolling_sums[j]);
}
@@ -280,8 +280,8 @@

tensorflow::GuardedPhiloxRandom random_gen;
int max_n_shots = 1;
for (int i = 0; i < num_samples.size(); i++) {
for (int j = 0; j < num_samples[i].size(); j++) {
for (size_t i = 0; i < num_samples.size(); i++) {
for (size_t j = 0; j < num_samples[i].size(); j++) {
max_n_shots = std::max(max_n_shots, num_samples[i][j]);
}
}
@@ -304,13 +304,13 @@
random_gen.ReserveSamples128(ncircuits.size() * max_n_shots + 1);
tensorflow::random::SimplePhilox rand_source(&local_gen);

for (int i = 0; i < ncircuits.size(); i++) {
for (size_t i = 0; i < ncircuits.size(); i++) {
int nq = num_qubits[i];
int rep_offset = rep_offsets[start][i];

// (#679) Just ignore empty program
if (ncircuits[i].channels.size() == 0) {
for (int j = 0; j < pauli_sums[i].size(); j++) {
for (size_t j = 0; j < pauli_sums[i].size(); j++) {
(*output_tensor)(i, j) = -2.0;
}
continue;
@@ -337,7 +337,7 @@
sim, sv, unused_stats);

// Compute expectations across all ops using this trajectory.
for (int j = 0; j < pauli_sums[i].size(); j++) {
for (size_t j = 0; j < pauli_sums[i].size(); j++) {
int p_reps = (num_samples[i][j] + num_threads - 1) / num_threads;
if (run_samples[j] >= p_reps + rep_offset) {
continue;
@@ -354,7 +354,7 @@

// Check if we have run enough trajectories for all ops.
bool break_loop = true;
for (int j = 0; j < num_samples[i].size(); j++) {
for (size_t j = 0; j < num_samples[i].size(); j++) {
int p_reps = (num_samples[i][j] + num_threads - 1) / num_threads;
if (run_samples[j] < p_reps + rep_offset) {
break_loop = false;
Expand All @@ -364,7 +364,7 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {
if (break_loop) {
// Lock writing to this batch index in output_tensor.
batch_locks[i].lock();
for (int j = 0; j < num_samples[i].size(); j++) {
for (size_t j = 0; j < num_samples[i].size(); j++) {
rolling_sums[j] /= num_samples[i][j];
(*output_tensor)(i, j) += static_cast<float>(rolling_sums[j]);
}

