From 9259ccb3d81108896ef5bd7d1aa2f632ff5f3e1f Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Mon, 27 Apr 2020 23:56:58 -0700 Subject: [PATCH] Do not record on gradient_tape during gradient calculation. PiperOrigin-RevId: 308772699 --- tensorflow_privacy/privacy/optimizers/dp_optimizer.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tensorflow_privacy/privacy/optimizers/dp_optimizer.py b/tensorflow_privacy/privacy/optimizers/dp_optimizer.py index 91b72da..210ade8 100644 --- a/tensorflow_privacy/privacy/optimizers/dp_optimizer.py +++ b/tensorflow_privacy/privacy/optimizers/dp_optimizer.py @@ -96,7 +96,8 @@ def make_optimizer_class(cls): """Process one microbatch (record) with privacy helper.""" microbatch_loss = tf.reduce_mean( input_tensor=tf.gather(microbatches_losses, [i])) - grads = gradient_tape.gradient(microbatch_loss, var_list) + with gradient_tape.stop_recording(): + grads = gradient_tape.gradient(microbatch_loss, var_list) sample_state = self._dp_sum_query.accumulate_record( sample_params, sample_state, grads) return sample_state