
Commit b4c7ea1

Merge pull request tensorflow#776 from Sinestro38/master
Fix warnings and errors for TF/absl/C++ changes
2 parents: 140277d + 25313f4

21 files changed (+204, -163 lines)

tensorflow_quantum/core/ops/math_ops/tfq_inner_product.cc

Lines changed: 6 additions & 6 deletions
@@ -174,7 +174,7 @@ class TfqInnerProductOp : public tensorflow::OpKernel {
     // Simulate programs one by one. Parallelizing over state vectors
     // we no longer parallelize over circuits. Each time we encounter a
     // a larger circuit we will grow the Statevector as necessary.
-    for (int i = 0; i < fused_circuits.size(); i++) {
+    for (size_t i = 0; i < fused_circuits.size(); i++) {
       int nq = num_qubits[i];
       if (nq > largest_nq) {
         // need to switch to larger statespace.
@@ -186,18 +186,18 @@ class TfqInnerProductOp : public tensorflow::OpKernel {
       // the state if there is a possibility that circuit[i] and
       // circuit[i + 1] produce the same state.
       ss.SetStateZero(sv);
-      for (int j = 0; j < fused_circuits[i].size(); j++) {
+      for (size_t j = 0; j < fused_circuits[i].size(); j++) {
         qsim::ApplyFusedGate(sim, fused_circuits[i][j], sv);
       }
-      for (int j = 0; j < other_fused_circuits[i].size(); j++) {
+      for (size_t j = 0; j < other_fused_circuits[i].size(); j++) {
         // (#679) Just ignore empty program
         if (fused_circuits[i].size() == 0) {
           (*output_tensor)(i, j) = std::complex<float>(1, 0);
           continue;
         }

         ss.SetStateZero(scratch);
-        for (int k = 0; k < other_fused_circuits[i][j].size(); k++) {
+        for (size_t k = 0; k < other_fused_circuits[i][j].size(); k++) {
           qsim::ApplyFusedGate(sim, other_fused_circuits[i][j][k], scratch);
         }

@@ -255,13 +255,13 @@ class TfqInnerProductOp : public tensorflow::OpKernel {
         // no need to update scratch_state since ComputeExpectation
         // will take care of things for us.
         ss.SetStateZero(sv);
-        for (int j = 0; j < fused_circuits[cur_batch_index].size(); j++) {
+        for (size_t j = 0; j < fused_circuits[cur_batch_index].size(); j++) {
           qsim::ApplyFusedGate(sim, fused_circuits[cur_batch_index][j], sv);
         }
       }

       ss.SetStateZero(scratch);
-      for (int k = 0;
+      for (size_t k = 0;
           k <
               other_fused_circuits[cur_batch_index][cur_internal_index].size();
           k++) {
tensorflow_quantum/core/ops/math_ops/tfq_inner_product_grad.cc

Lines changed: 2 additions & 2 deletions
@@ -398,13 +398,13 @@ class TfqInnerProductGradOp : public tensorflow::OpKernel {
         // if applicable compute control qubit mask and control value bits.
         uint64_t mask = 0;
         uint64_t cbits = 0;
-        for (int k = 0; k < cur_gate.controlled_by.size(); k++) {
+        for (size_t k = 0; k < cur_gate.controlled_by.size(); k++) {
           uint64_t control_loc = cur_gate.controlled_by[k];
           mask |= uint64_t{1} << control_loc;
           cbits |= ((cur_gate.cmask >> k) & 1) << control_loc;
         }

-        for (int k = 0;
+        for (size_t k = 0;
             k < gradient_gates[cur_batch_index][l - 1].grad_gates.size();
             k++) {
           // Copy sv_adj onto scratch2 in anticipation of non-unitary
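
For context on the first loop in the hunk above: it packs each control qubit location into a bit mask and its required control value into cbits. Here is a minimal standalone sketch, with hypothetical inputs, that mirrors the same bit manipulation (controlled_by and cmask stand in for the fields read from cur_gate):

// control_mask_demo.cc -- standalone illustration, not part of this commit.
#include <cstddef>
#include <cstdint>
#include <vector>

int main() {
  // Gate controlled on qubits 1 and 3, with required values 1 and 0.
  std::vector<uint64_t> controlled_by = {1, 3};
  uint64_t cmask = 0b01;  // k-th bit = required value of the k-th control
  uint64_t mask = 0, cbits = 0;
  for (std::size_t k = 0; k < controlled_by.size(); k++) {
    uint64_t control_loc = controlled_by[k];
    mask |= uint64_t{1} << control_loc;          // mark qubit as a control
    cbits |= ((cmask >> k) & 1) << control_loc;  // place its required value
  }
  // mask  == 0b1010 (qubits 1 and 3 act as controls)
  // cbits == 0b0010 (qubit 1 must read 1, qubit 3 must read 0)
  return 0;
}
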

tensorflow_quantum/core/ops/noise/tfq_noisy_expectation.cc

Lines changed: 14 additions & 14 deletions
@@ -175,8 +175,8 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {

     tensorflow::GuardedPhiloxRandom random_gen;
     int max_n_shots = 1;
-    for (int i = 0; i < num_samples.size(); i++) {
-      for (int j = 0; j < num_samples[i].size(); j++) {
+    for (size_t i = 0; i < num_samples.size(); i++) {
+      for (size_t j = 0; j < num_samples[i].size(); j++) {
        max_n_shots = std::max(max_n_shots, num_samples[i][j]);
      }
    }
@@ -188,12 +188,12 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {
     // Simulate programs one by one. Parallelizing over state vectors
     // we no longer parallelize over circuits. Each time we encounter a
     // a larger circuit we will grow the Statevector as necessary.
-    for (int i = 0; i < ncircuits.size(); i++) {
+    for (size_t i = 0; i < ncircuits.size(); i++) {
       int nq = num_qubits[i];

       // (#679) Just ignore empty program
       if (ncircuits[i].channels.size() == 0) {
-        for (int j = 0; j < pauli_sums[i].size(); j++) {
+        for (size_t j = 0; j < pauli_sums[i].size(); j++) {
           (*output_tensor)(i, j) = -2.0;
         }
         continue;
@@ -220,7 +220,7 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {
                 sv, unused_stats);

       // Use this trajectory as a source for all expectation calculations.
-      for (int j = 0; j < pauli_sums[i].size(); j++) {
+      for (size_t j = 0; j < pauli_sums[i].size(); j++) {
         if (run_samples[j] >= num_samples[i][j]) {
           continue;
         }
@@ -232,14 +232,14 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {
         run_samples[j]++;
       }
       bool break_loop = true;
-      for (int j = 0; j < num_samples[i].size(); j++) {
+      for (size_t j = 0; j < num_samples[i].size(); j++) {
         if (run_samples[j] < num_samples[i][j]) {
           break_loop = false;
           break;
         }
       }
       if (break_loop) {
-        for (int j = 0; j < num_samples[i].size(); j++) {
+        for (size_t j = 0; j < num_samples[i].size(); j++) {
           rolling_sums[j] /= num_samples[i][j];
           (*output_tensor)(i, j) = static_cast<float>(rolling_sums[j]);
         }
@@ -280,8 +280,8 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {

     tensorflow::GuardedPhiloxRandom random_gen;
     int max_n_shots = 1;
-    for (int i = 0; i < num_samples.size(); i++) {
-      for (int j = 0; j < num_samples[i].size(); j++) {
+    for (size_t i = 0; i < num_samples.size(); i++) {
+      for (size_t j = 0; j < num_samples[i].size(); j++) {
        max_n_shots = std::max(max_n_shots, num_samples[i][j]);
      }
    }
@@ -304,13 +304,13 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {
          random_gen.ReserveSamples128(ncircuits.size() * max_n_shots + 1);
      tensorflow::random::SimplePhilox rand_source(&local_gen);

-     for (int i = 0; i < ncircuits.size(); i++) {
+     for (size_t i = 0; i < ncircuits.size(); i++) {
        int nq = num_qubits[i];
        int rep_offset = rep_offsets[start][i];

        // (#679) Just ignore empty program
        if (ncircuits[i].channels.size() == 0) {
-         for (int j = 0; j < pauli_sums[i].size(); j++) {
+         for (size_t j = 0; j < pauli_sums[i].size(); j++) {
           (*output_tensor)(i, j) = -2.0;
         }
         continue;
@@ -337,7 +337,7 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {
                  sim, sv, unused_stats);

        // Compute expectations across all ops using this trajectory.
-       for (int j = 0; j < pauli_sums[i].size(); j++) {
+       for (size_t j = 0; j < pauli_sums[i].size(); j++) {
          int p_reps = (num_samples[i][j] + num_threads - 1) / num_threads;
          if (run_samples[j] >= p_reps + rep_offset) {
            continue;
@@ -354,7 +354,7 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {

        // Check if we have run enough trajectories for all ops.
        bool break_loop = true;
-       for (int j = 0; j < num_samples[i].size(); j++) {
+       for (size_t j = 0; j < num_samples[i].size(); j++) {
          int p_reps = (num_samples[i][j] + num_threads - 1) / num_threads;
          if (run_samples[j] < p_reps + rep_offset) {
            break_loop = false;
@@ -364,7 +364,7 @@ class TfqNoisyExpectationOp : public tensorflow::OpKernel {
        if (break_loop) {
          // Lock writing to this batch index in output_tensor.
          batch_locks[i].lock();
-         for (int j = 0; j < num_samples[i].size(); j++) {
+         for (size_t j = 0; j < num_samples[i].size(); j++) {
            rolling_sums[j] /= num_samples[i][j];
            (*output_tensor)(i, j) += static_cast<float>(rolling_sums[j]);
          }
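
Aside on the p_reps expression in the threaded hunks above: (num_samples[i][j] + num_threads - 1) / num_threads is integer ceiling division, so the per-thread trajectory quotas always cover the requested shot total. A tiny standalone check with made-up numbers (not TFQ code):

// ceil_div_demo.cc -- standalone illustration, not part of this commit.
#include <cstdio>

int main() {
  int num_samples = 1000;
  int num_threads = 16;
  // Each thread runs at most p_reps trajectories; p_reps * num_threads is
  // guaranteed to be >= num_samples.
  int p_reps = (num_samples + num_threads - 1) / num_threads;
  std::printf("p_reps = %d (16 * 63 = 1008 >= 1000)\n", p_reps);
  return 0;
}
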

tensorflow_quantum/core/ops/noise/tfq_noisy_sampled_expectation.cc

Lines changed: 14 additions & 14 deletions
@@ -177,8 +177,8 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
     tensorflow::GuardedPhiloxRandom random_gen;
     int max_psum_length = 1;
     int max_n_shots = 1;
-    for (int i = 0; i < pauli_sums.size(); i++) {
-      for (int j = 0; j < pauli_sums[i].size(); j++) {
+    for (size_t i = 0; i < pauli_sums.size(); i++) {
+      for (size_t j = 0; j < pauli_sums[i].size(); j++) {
        max_psum_length =
            std::max(max_psum_length, pauli_sums[i][j].terms().size());
        max_n_shots = std::max(max_n_shots, num_samples[i][j]);
@@ -192,12 +192,12 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
     // Simulate programs one by one. Parallelizing over state vectors
     // we no longer parallelize over circuits. Each time we encounter a
     // a larger circuit we will grow the Statevector as necessary.
-    for (int i = 0; i < ncircuits.size(); i++) {
+    for (size_t i = 0; i < ncircuits.size(); i++) {
       int nq = num_qubits[i];

       // (#679) Just ignore empty program
       if (ncircuits[i].channels.empty()) {
-        for (int j = 0; j < pauli_sums[i].size(); j++) {
+        for (size_t j = 0; j < pauli_sums[i].size(); j++) {
           (*output_tensor)(i, j) = -2.0;
         }
         continue;
@@ -224,7 +224,7 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
                 sv, unused_stats);

       // Use this trajectory as a source for all expectation calculations.
-      for (int j = 0; j < pauli_sums[i].size(); j++) {
+      for (size_t j = 0; j < pauli_sums[i].size(); j++) {
         if (run_samples[j] >= num_samples[i][j]) {
           continue;
         }
@@ -236,14 +236,14 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
         run_samples[j]++;
       }
       bool break_loop = true;
-      for (int j = 0; j < num_samples[i].size(); j++) {
+      for (size_t j = 0; j < num_samples[i].size(); j++) {
         if (run_samples[j] < num_samples[i][j]) {
           break_loop = false;
           break;
         }
       }
       if (break_loop) {
-        for (int j = 0; j < num_samples[i].size(); j++) {
+        for (size_t j = 0; j < num_samples[i].size(); j++) {
           rolling_sums[j] /= num_samples[i][j];
           (*output_tensor)(i, j) = static_cast<float>(rolling_sums[j]);
         }
@@ -285,8 +285,8 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
     tensorflow::GuardedPhiloxRandom random_gen;
     int max_psum_length = 1;
     int max_n_shots = 1;
-    for (int i = 0; i < pauli_sums.size(); i++) {
-      for (int j = 0; j < pauli_sums[i].size(); j++) {
+    for (size_t i = 0; i < pauli_sums.size(); i++) {
+      for (size_t j = 0; j < pauli_sums[i].size(); j++) {
        max_psum_length =
            std::max(max_psum_length, pauli_sums[i][j].terms().size());
        max_n_shots = std::max(max_n_shots, num_samples[i][j]);
@@ -310,13 +310,13 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
      auto local_gen = random_gen.ReserveSamples128(num_rand);
      tensorflow::random::SimplePhilox rand_source(&local_gen);

-     for (int i = 0; i < ncircuits.size(); i++) {
+     for (size_t i = 0; i < ncircuits.size(); i++) {
        int nq = num_qubits[i];
        int rep_offset = rep_offsets[start][i];

        // (#679) Just ignore empty program
        if (ncircuits[i].channels.empty()) {
-         for (int j = 0; j < pauli_sums[i].size(); j++) {
+         for (size_t j = 0; j < pauli_sums[i].size(); j++) {
           (*output_tensor)(i, j) = -2.0;
         }
         continue;
@@ -343,7 +343,7 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
                  sim, sv, unused_stats);

        // Compute expectations across all ops using this trajectory.
-       for (int j = 0; j < pauli_sums[i].size(); j++) {
+       for (size_t j = 0; j < pauli_sums[i].size(); j++) {
         int p_reps = (num_samples[i][j] + num_threads - 1) / num_threads;
         if (run_samples[j] >= p_reps + rep_offset) {
           continue;
@@ -360,7 +360,7 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {

        // Check if we have run enough trajectories for all ops.
        bool break_loop = true;
-       for (int j = 0; j < num_samples[i].size(); j++) {
+       for (size_t j = 0; j < num_samples[i].size(); j++) {
         int p_reps = (num_samples[i][j] + num_threads - 1) / num_threads;
         if (run_samples[j] < p_reps + rep_offset) {
           break_loop = false;
@@ -370,7 +370,7 @@ class TfqNoisySampledExpectationOp : public tensorflow::OpKernel {
       if (break_loop) {
         // Lock writing to this batch index in output_tensor.
         batch_locks[i].lock();
-        for (int j = 0; j < num_samples[i].size(); j++) {
+        for (size_t j = 0; j < num_samples[i].size(); j++) {
          rolling_sums[j] /= num_samples[i][j];
          (*output_tensor)(i, j) += static_cast<float>(rolling_sums[j]);
        }
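
Aside on the rolling_sums bookkeeping in the hunks above: each sampled trajectory contributes one expectation estimate, and the accumulated sum is divided by the shot count once enough trajectories have run. A minimal standalone sketch of that averaging pattern, using hypothetical per-trajectory estimates rather than TFQ data:

// rolling_average_demo.cc -- standalone illustration, not part of this commit.
#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  // Hypothetical expectation estimates, one per noisy trajectory.
  std::vector<double> trajectory_estimates = {0.9, 1.1, 0.95, 1.05};
  double rolling_sum = 0.0;
  for (std::size_t t = 0; t < trajectory_estimates.size(); t++) {
    rolling_sum += trajectory_estimates[t];
  }
  rolling_sum /= trajectory_estimates.size();  // final Monte Carlo average
  std::printf("averaged expectation = %f\n", rolling_sum);  // prints 1.000000
  return 0;
}
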

tensorflow_quantum/core/ops/noise/tfq_noisy_samples.cc

Lines changed: 2 additions & 2 deletions
@@ -159,7 +159,7 @@ class TfqNoisySamplesOp : public tensorflow::OpKernel {
     // Simulate programs one by one. Parallelizing over state vectors
     // we no longer parallelize over circuits. Each time we encounter a
     // a larger circuit we will grow the Statevector as nescessary.
-    for (int i = 0; i < ncircuits.size(); i++) {
+    for (size_t i = 0; i < ncircuits.size(); i++) {
       int nq = num_qubits[i];

       if (nq > largest_nq) {
@@ -252,7 +252,7 @@ class TfqNoisySamplesOp : public tensorflow::OpKernel {
     auto local_gen = random_gen.ReserveSamples32(needed_random);
     tensorflow::random::SimplePhilox rand_source(&local_gen);

-    for (int i = 0; i < ncircuits.size(); i++) {
+    for (size_t i = 0; i < ncircuits.size(); i++) {
      int nq = num_qubits[i];
      int j = start > 0 ? offset_prefix_sum[start - 1][i] : 0;
      int needed_samples = offset_prefix_sum[start][i] - j;
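
Aside on the offset_prefix_sum indexing in the last hunk: assuming offset_prefix_sum[t][i] is the running total of samples assigned to circuit i by threads 0..t (which is what these expressions suggest), each thread draws a disjoint half-open slice of the sample budget. A standalone sketch with made-up counts for a single circuit:

// prefix_sum_demo.cc -- standalone illustration, not part of this commit.
#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  // Hypothetical cumulative sample counts for one circuit across 3 threads.
  std::vector<int> offset_prefix_sum_i = {40, 80, 100};
  for (std::size_t start = 0; start < offset_prefix_sum_i.size(); start++) {
    // Thread `start` covers samples [j, offset_prefix_sum_i[start]).
    int j = start > 0 ? offset_prefix_sum_i[start - 1] : 0;
    int needed_samples = offset_prefix_sum_i[start] - j;
    std::printf("thread %zu: samples [%d, %d), count %d\n", start, j,
                offset_prefix_sum_i[start], needed_samples);
  }
  return 0;
}
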
