From febafd830d43bbdc15b196c0d61aba389a15e4c5 Mon Sep 17 00:00:00 2001
From: Nicolas Papernot
Date: Mon, 29 Apr 2019 14:00:20 -0700
Subject: [PATCH] update API calls for TF2

PiperOrigin-RevId: 245817981
---
 privacy/dp_query/gaussian_query.py      | 15 ++++++++-----
 privacy/dp_query/normalized_query.py    |  8 +++++--
 privacy/optimizers/dp_optimizer.py      | 14 +++++++++---
 tutorials/mnist_dpsgd_tutorial_eager.py | 30 +++++++++++++------------
 4 files changed, 43 insertions(+), 24 deletions(-)

diff --git a/privacy/dp_query/gaussian_query.py b/privacy/dp_query/gaussian_query.py
index a29648d..8dbd8c9 100644
--- a/privacy/dp_query/gaussian_query.py
+++ b/privacy/dp_query/gaussian_query.py
@@ -46,8 +46,8 @@ class GaussianSumQuery(dp_query.DPQuery):
       stddev: The stddev of the noise added to the sum.
       ledger: The privacy ledger to which queries should be recorded.
     """
-    self._l2_norm_clip = tf.to_float(l2_norm_clip)
-    self._stddev = tf.to_float(stddev)
+    self._l2_norm_clip = tf.cast(l2_norm_clip, tf.float32)
+    self._stddev = tf.cast(stddev, tf.float32)
     self._ledger = ledger
 
   def initial_global_state(self):
@@ -127,8 +127,13 @@ class GaussianSumQuery(dp_query.DPQuery):
       A tuple (estimate, new_global_state) where "estimate" is the estimated
       sum of the records and "new_global_state" is the updated global state.
     """
-    def add_noise(v):
-      return v + tf.random_normal(tf.shape(v), stddev=self._stddev)
+    if LooseVersion(tf.__version__) < LooseVersion('2.0.0'):
+      def add_noise(v):
+        return v + tf.random_normal(tf.shape(v), stddev=self._stddev)
+    else:
+      random_normal = tf.random_normal_initializer(stddev=self._stddev)
+      def add_noise(v):
+        return v + random_normal(tf.shape(v))
 
     return nest.map_structure(add_noise, sample_state), global_state
 
@@ -162,4 +167,4 @@ class GaussianAverageQuery(normalized_query.NormalizedQuery):
     """
     super(GaussianAverageQuery, self).__init__(
         numerator_query=GaussianSumQuery(l2_norm_clip, sum_stddev, ledger),
-        denominator=tf.to_float(denominator))
+        denominator=tf.cast(denominator, tf.float32))
diff --git a/privacy/dp_query/normalized_query.py b/privacy/dp_query/normalized_query.py
index de78fe0..57e96d0 100644
--- a/privacy/dp_query/normalized_query.py
+++ b/privacy/dp_query/normalized_query.py
@@ -19,11 +19,15 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
+from distutils.version import LooseVersion
 import tensorflow as tf
 
 from privacy.dp_query import dp_query
 
-nest = tf.contrib.framework.nest
+if LooseVersion(tf.__version__) < LooseVersion('2.0.0'):
+  nest = tf.contrib.framework.nest
+else:
+  nest = tf.nest
 
 
 class NormalizedQuery(dp_query.DPQuery):
@@ -37,7 +41,7 @@ class NormalizedQuery(dp_query.DPQuery):
       denominator: A value for the denominator.
""" self._numerator = numerator_query - self._denominator = tf.to_float(denominator) + self._denominator = tf.cast(denominator, tf.float32) def initial_global_state(self): """Returns the initial global state for the NormalizedQuery.""" diff --git a/privacy/optimizers/dp_optimizer.py b/privacy/optimizers/dp_optimizer.py index a0f7ebf..64a16c1 100644 --- a/privacy/optimizers/dp_optimizer.py +++ b/privacy/optimizers/dp_optimizer.py @@ -17,6 +17,7 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function +from distutils.version import LooseVersion import tensorflow as tf from privacy.analysis import privacy_ledger @@ -25,8 +26,15 @@ from privacy.dp_query import gaussian_query def make_optimizer_class(cls): """Constructs a DP optimizer class from an existing one.""" - if (tf.train.Optimizer.compute_gradients.__code__ is - not cls.compute_gradients.__code__): + if LooseVersion(tf.__version__) < LooseVersion('2.0.0'): + parent_code = tf.train.Optimizer.compute_gradients.__code__ + child_code = cls.compute_gradients.__code__ + GATE_OP = tf.train.Optimizer.GATE_OP # pylint: disable=invalid-name + else: + parent_code = tf.optimizers.Optimizer.compute_gradients.__code__ + child_code = cls._compute_gradients.__code__ # pylint: disable=protected-access + GATE_OP = None # pylint: disable=invalid-name + if child_code is not parent_code: tf.logging.warning( 'WARNING: Calling make_optimizer_class() on class %s that overrides ' 'method compute_gradients(). Check to ensure that ' @@ -55,7 +63,7 @@ def make_optimizer_class(cls): def compute_gradients(self, loss, var_list, - gate_gradients=tf.train.Optimizer.GATE_OP, + gate_gradients=GATE_OP, aggregation_method=None, colocate_gradients_with_ops=False, grad_loss=None, diff --git a/tutorials/mnist_dpsgd_tutorial_eager.py b/tutorials/mnist_dpsgd_tutorial_eager.py index 1e9b423..356b276 100644 --- a/tutorials/mnist_dpsgd_tutorial_eager.py +++ b/tutorials/mnist_dpsgd_tutorial_eager.py @@ -16,6 +16,8 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function +from absl import app +from absl import flags import numpy as np import tensorflow as tf @@ -32,18 +34,18 @@ except: # pylint: disable=bare-except tf.enable_eager_execution() -tf.flags.DEFINE_boolean('dpsgd', True, 'If True, train with DP-SGD. If False, ' - 'train with vanilla SGD.') -tf.flags.DEFINE_float('learning_rate', 0.15, 'Learning rate for training') -tf.flags.DEFINE_float('noise_multiplier', 1.1, - 'Ratio of the standard deviation to the clipping norm') -tf.flags.DEFINE_float('l2_norm_clip', 1.0, 'Clipping norm') -tf.flags.DEFINE_integer('batch_size', 250, 'Batch size') -tf.flags.DEFINE_integer('epochs', 60, 'Number of epochs') -tf.flags.DEFINE_integer('microbatches', 250, 'Number of microbatches ' - '(must evenly divide batch_size)') +flags.DEFINE_boolean('dpsgd', True, 'If True, train with DP-SGD. 
+                     'train with vanilla SGD.')
+flags.DEFINE_float('learning_rate', 0.15, 'Learning rate for training')
+flags.DEFINE_float('noise_multiplier', 1.1,
+                   'Ratio of the standard deviation to the clipping norm')
+flags.DEFINE_float('l2_norm_clip', 1.0, 'Clipping norm')
+flags.DEFINE_integer('batch_size', 250, 'Batch size')
+flags.DEFINE_integer('epochs', 60, 'Number of epochs')
+flags.DEFINE_integer('microbatches', 250, 'Number of microbatches '
+                     '(must evenly divide batch_size)')
 
-FLAGS = tf.app.flags.FLAGS
+FLAGS = flags.FLAGS
 
 
 def compute_epsilon(steps):
@@ -118,8 +120,8 @@
       # In Eager mode, the optimizer takes a function that returns the loss.
       def loss_fn():
         logits = mnist_model(images, training=True)  # pylint: disable=undefined-loop-variable,cell-var-from-loop
-        loss = tf.losses.sparse_softmax_cross_entropy(
-            labels, logits, reduction=tf.losses.Reduction.NONE)  # pylint: disable=undefined-loop-variable,cell-var-from-loop
+        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
+            labels=labels, logits=logits)  # pylint: disable=undefined-loop-variable,cell-var-from-loop
         # If training without privacy, the loss is a scalar not a vector.
         if not FLAGS.dpsgd:
           loss = tf.reduce_mean(loss)
@@ -149,4 +151,4 @@
     print('Trained with vanilla non-private SGD optimizer')
 
 if __name__ == '__main__':
-  tf.app.run(main)
+  app.run(main)
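
--
Notes (illustrative sketches, not part of the applied diff):

Every file above is gated the same way: compare tf.__version__ against
'2.0.0' with distutils' LooseVersion, bind the divergent symbol to a local
name once, and leave the rest of the module version-agnostic. A minimal
runnable sketch of that pattern, using the nest utility as the example:

    from distutils.version import LooseVersion
    import tensorflow as tf

    if LooseVersion(tf.__version__) < LooseVersion('2.0.0'):
      nest = tf.contrib.framework.nest  # TF 1.x: nest lives under contrib.
    else:
      nest = tf.nest  # TF 2.x: nest is a top-level module.

    # Callers never branch on the version again:
    print(nest.flatten({'a': [1, 2], 'b': 3}))  # [1, 2, 3]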
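In gaussian_query.py, the TF 2.x branch replaces the removed tf.random_normal
with a tf.random_normal_initializer instance that is built once and then
called with each record's shape. A self-contained sketch of that branch alone
(the stddev value and tensor shape here are made up for illustration):

    import tensorflow as tf

    stddev = 1.1
    random_normal = tf.random_normal_initializer(stddev=stddev)

    def add_noise(v):
      # Draw Gaussian noise with the same shape as v and add it in.
      return v + random_normal(tf.shape(v))

    noisy = add_noise(tf.zeros([3, 2]))
    assert noisy.shape == (3, 2)

Building the initializer once outside add_noise avoids re-creating it for
every tensor that nest.map_structure visits.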
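The eager tutorial drops the tf.flags and tf.app wrappers, which no longer
exist in TF 2.x, in favor of absl-py directly. The skeleton of the new flag
handling, reduced to a single flag for illustration:

    from absl import app
    from absl import flags

    flags.DEFINE_integer('batch_size', 250, 'Batch size')
    FLAGS = flags.FLAGS

    def main(unused_argv):
      print('batch size:', FLAGS.batch_size)

    if __name__ == '__main__':
      app.run(main)

The loss rewrite serves the same purpose as the old Reduction.NONE argument:
tf.nn.sparse_softmax_cross_entropy_with_logits already returns one loss per
example, which the DP optimizer needs for per-microbatch clipping.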