Add an assert that training is differentially private for the TF1 vectorized optimizer.

In Keras training in TF 2.0+, compute_gradients() is not called; only apply_gradients() is called. Without a call to compute_gradients(), the DP gradient is never computed and a normal (non-private) gradient is used instead.

PiperOrigin-RevId: 461021412
parent 64c6b5ea25
commit 328795aa36
2 changed files with 33 additions and 0 deletions
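For context (not part of this commit): a minimal sketch of the two call paths the new assert distinguishes. The import path and the graph-mode setup are assumptions; the optimizer name and constructor arguments mirror the test added below.

# Sketch only. The import path is assumed; VectorizedDPSGD is the vectorized
# DP optimizer this commit modifies.
import tensorflow as tf
from tensorflow_privacy.privacy.optimizers.dp_optimizer_vectorized import VectorizedDPSGD

# The vectorized optimizer targets TF1-style graphs (its compute_gradients()
# raises NotImplementedError in eager mode), so build a graph here. Building
# the graph is enough to show the assert; no session run is needed.
tf.compat.v1.disable_eager_execution()

var0 = tf.Variable([0.0])
loss = tf.reduce_mean(tf.square(var0 - 1.0))  # illustrative loss tensor

opt = VectorizedDPSGD(
    l2_norm_clip=4.0, noise_multiplier=2.0, num_microbatches=1,
    learning_rate=2.0)

# TF1-style path: compute_gradients() clips and noises the gradients, so
# apply_gradients() applies a differentially private update.
grads_and_vars = opt.compute_gradients(loss, [var0])
train_op = opt.apply_gradients(grads_and_vars)

# Keras training in TF 2.0+ skips compute_gradients() and calls only
# apply_gradients(); with this commit that path now fails at graph
# construction instead of silently training with non-private gradients.
keras_style_opt = VectorizedDPSGD(
    l2_norm_clip=4.0, noise_multiplier=2.0, num_microbatches=1,
    learning_rate=2.0)
keras_style_opt.apply_gradients([(tf.constant([0.1]), var0)])  # AssertionError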
@@ -103,6 +103,7 @@ def make_vectorized_optimizer_class(cls):
       self._l2_norm_clip = l2_norm_clip
       self._noise_multiplier = noise_multiplier
       self._num_microbatches = num_microbatches
+      self._was_compute_gradients_called = False
 
     def compute_gradients(self,
                           loss,
@@ -113,6 +114,7 @@ def make_vectorized_optimizer_class(cls):
                           grad_loss=None,
                           gradient_tape=None):
       """DP-SGD version of base class method."""
+      self._was_compute_gradients_called = True
       if callable(loss):
         # TF is running in Eager mode
         raise NotImplementedError('Vectorized optimizer unavailable for TF2.')
@@ -175,6 +177,17 @@ def make_vectorized_optimizer_class(cls):
 
       return list(zip(final_grads, var_list))
 
+    def apply_gradients(self, grads_and_vars, global_step=None, name=None):
+      # pylint: disable=g-doc-args, g-doc-return-or-yield
+      """DP-SGD version of base class method."""
+      assert self._was_compute_gradients_called, (
+          'compute_gradients() on the differentially private optimizer was not'
+          ' called. Which means that the training is not differentially '
+          'private. It happens for example in Keras training in TensorFlow '
+          '2.0+.')
+      return super(DPOptimizerClass, self).apply_gradients(
+          grads_and_vars=grads_and_vars, global_step=global_step, name=name)
+
   return DPOptimizerClass
 
 
@@ -197,6 +197,26 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
     # Test standard deviation is close to l2_norm_clip * noise_multiplier.
     self.assertNear(np.std(grads), 2.0 * 4.0, 0.5)
 
+  @parameterized.named_parameters(('DPGradientDescent', VectorizedDPSGD),
+                                  ('DPAdagrad', VectorizedDPAdagrad),
+                                  ('DPAdam', VectorizedDPAdam))
+  def testAssertOnNoCallOfComputeGradients(self, cls):
+    opt = cls(
+        l2_norm_clip=4.0,
+        noise_multiplier=2.0,
+        num_microbatches=1,
+        learning_rate=2.0)
+
+    with self.assertRaises(AssertionError):
+      grads_and_vars = tf.Variable([0.0])
+      opt.apply_gradients(grads_and_vars)
+
+    # Expect no call exception if compute_gradients is called.
+    var0 = tf.Variable([0.0])
+    data0 = tf.Variable([[0.0]])
+    grads_and_vars = opt.compute_gradients(self._loss(data0, var0), [var0])
+    opt.apply_gradients(grads_and_vars)
+
 
 if __name__ == '__main__':
   tf.test.main()