From 0ebd134d990747da5bd94ac17946d32f7afa8bc1 Mon Sep 17 00:00:00 2001
From: Nicolas Papernot
Date: Mon, 18 Mar 2019 22:41:42 -0700
Subject: [PATCH] Closes #33

PiperOrigin-RevId: 239129202
---
 privacy/optimizers/dp_optimizer.py      | 23 +++++++++++++++--------
 tutorials/mnist_dpsgd_tutorial.py       |  9 +++++++--
 tutorials/mnist_dpsgd_tutorial_eager.py |  8 +++++++-
 tutorials/mnist_dpsgd_tutorial_keras.py |  9 +++++++--
 4 files changed, 36 insertions(+), 13 deletions(-)

diff --git a/privacy/optimizers/dp_optimizer.py b/privacy/optimizers/dp_optimizer.py
index b5ce15d..12dafc6 100644
--- a/privacy/optimizers/dp_optimizer.py
+++ b/privacy/optimizers/dp_optimizer.py
@@ -183,14 +183,21 @@ def make_gaussian_optimizer_class(cls):
 
   return DPGaussianOptimizerClass
 
+# Compatibility with tf 1 and 2 APIs
+try:
+  AdagradOptimizer = tf.train.AdagradOptimizer
+  AdamOptimizer = tf.train.AdamOptimizer
+  GradientDescentOptimizer = tf.train.GradientDescentOptimizer
+except:  # pylint: disable=bare-except
+  AdagradOptimizer = tf.optimizers.Adagrad
+  AdamOptimizer = tf.optimizers.Adam
+  GradientDescentOptimizer = tf.optimizers.SGD  # pylint: disable=invalid-name
 
-DPAdagradOptimizer = make_optimizer_class(tf.train.AdagradOptimizer)
-DPAdamOptimizer = make_optimizer_class(tf.train.AdamOptimizer)
-DPGradientDescentOptimizer = make_optimizer_class(
-    tf.train.GradientDescentOptimizer)
+DPAdagradOptimizer = make_optimizer_class(AdagradOptimizer)
+DPAdamOptimizer = make_optimizer_class(AdamOptimizer)
+DPGradientDescentOptimizer = make_optimizer_class(GradientDescentOptimizer)
 
-DPAdagradGaussianOptimizer = make_gaussian_optimizer_class(
-    tf.train.AdagradOptimizer)
-DPAdamGaussianOptimizer = make_gaussian_optimizer_class(tf.train.AdamOptimizer)
+DPAdagradGaussianOptimizer = make_gaussian_optimizer_class(AdagradOptimizer)
+DPAdamGaussianOptimizer = make_gaussian_optimizer_class(AdamOptimizer)
 DPGradientDescentGaussianOptimizer = make_gaussian_optimizer_class(
-    tf.train.GradientDescentOptimizer)
+    GradientDescentOptimizer)
diff --git a/tutorials/mnist_dpsgd_tutorial.py b/tutorials/mnist_dpsgd_tutorial.py
index 6282124..454ce88 100644
--- a/tutorials/mnist_dpsgd_tutorial.py
+++ b/tutorials/mnist_dpsgd_tutorial.py
@@ -25,6 +25,12 @@ from privacy.analysis.rdp_accountant import compute_rdp
 from privacy.analysis.rdp_accountant import get_privacy_spent
 from privacy.optimizers import dp_optimizer
 
+# Compatibility with tf 1 and 2 APIs
+try:
+  GradientDescentOptimizer = tf.train.GradientDescentOptimizer
+except:  # pylint: disable=bare-except
+  GradientDescentOptimizer = tf.optimizers.SGD  # pylint: disable=invalid-name
+
 tf.flags.DEFINE_boolean('dpsgd', True, 'If True, train with DP-SGD. If False, '
                         'train with vanilla SGD.')
 tf.flags.DEFINE_float('learning_rate', .15, 'Learning rate for training')
@@ -81,8 +87,7 @@ def cnn_model_fn(features, labels, mode):
           population_size=60000)
       opt_loss = vector_loss
     else:
-      optimizer = tf.train.GradientDescentOptimizer(
-          learning_rate=FLAGS.learning_rate)
+      optimizer = GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)
       opt_loss = scalar_loss
     global_step = tf.train.get_global_step()
     train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
diff --git a/tutorials/mnist_dpsgd_tutorial_eager.py b/tutorials/mnist_dpsgd_tutorial_eager.py
index 785c59f..1adb88f 100644
--- a/tutorials/mnist_dpsgd_tutorial_eager.py
+++ b/tutorials/mnist_dpsgd_tutorial_eager.py
@@ -24,6 +24,12 @@ from privacy.analysis.rdp_accountant import get_privacy_spent
 from privacy.optimizers.dp_optimizer import DPGradientDescentOptimizer
 from privacy.optimizers.gaussian_query import GaussianAverageQuery
 
+# Compatibility with tf 1 and 2 APIs
+try:
+  GradientDescentOptimizer = tf.train.GradientDescentOptimizer
+except:  # pylint: disable=bare-except
+  GradientDescentOptimizer = tf.optimizers.SGD  # pylint: disable=invalid-name
+
 tf.enable_eager_execution()
 
 tf.flags.DEFINE_boolean('dpsgd', True, 'If True, train with DP-SGD. If False, '
@@ -97,7 +103,7 @@ def main(_):
         FLAGS.microbatches,
         learning_rate=FLAGS.learning_rate)
   else:
-    opt = tf.train.GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)
+    opt = GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)
 
   # Training loop.
   steps_per_epoch = 60000 // FLAGS.batch_size
diff --git a/tutorials/mnist_dpsgd_tutorial_keras.py b/tutorials/mnist_dpsgd_tutorial_keras.py
index 12d2de1..0638412 100644
--- a/tutorials/mnist_dpsgd_tutorial_keras.py
+++ b/tutorials/mnist_dpsgd_tutorial_keras.py
@@ -55,6 +55,12 @@ from privacy.analysis.rdp_accountant import get_privacy_spent
 from privacy.optimizers.dp_optimizer import DPGradientDescentOptimizer
 from privacy.optimizers.gaussian_query import GaussianAverageQuery
 
+# Compatibility with tf 1 and 2 APIs
+try:
+  GradientDescentOptimizer = tf.train.GradientDescentOptimizer
+except:  # pylint: disable=bare-except
+  GradientDescentOptimizer = tf.optimizers.SGD  # pylint: disable=invalid-name
+
 tf.flags.DEFINE_boolean('dpsgd', True, 'If True, train with DP-SGD. If False, '
                         'train with vanilla SGD.')
 tf.flags.DEFINE_float('learning_rate', 0.15, 'Learning rate for training')
@@ -133,8 +139,7 @@ def main(unused_argv):
         learning_rate=FLAGS.learning_rate,
         unroll_microbatches=True)
   else:
-    optimizer = tf.train.GradientDescentOptimizer(
-        learning_rate=FLAGS.learning_rate)
+    optimizer = GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)
 
   def keras_loss_fn(labels, logits):
     """This removes the mandatory named arguments for this loss fn."""
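
A minimal standalone sketch of the compatibility shim this patch introduces: the base optimizer class is resolved once at import time, so the DP wrappers and the tutorial scripts stay agnostic to whether the TF 1.x API (tf.train) or the TF 2.x API (tf.optimizers) is present. The 0.15 learning rate below simply mirrors the tutorials' default flag value.

import tensorflow as tf

# Under TF 1.x this attribute exists; under TF 2.x it does not, and the
# lookup raises AttributeError, which the bare except catches.
try:
  GradientDescentOptimizer = tf.train.GradientDescentOptimizer
except:  # pylint: disable=bare-except
  # TF 2.x exposes the Keras SGD optimizer as tf.optimizers.SGD.
  GradientDescentOptimizer = tf.optimizers.SGD  # pylint: disable=invalid-name

# Either branch yields a class that is constructed the same way downstream.
optimizer = GradientDescentOptimizer(learning_rate=0.15)

Resolving the alias once at import time avoids an explicit tf.__version__ check and leaves the rest of each script unchanged across both major versions.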