PiperOrigin-RevId: 239129202
Author: Nicolas Papernot, 2019-03-18 22:41:42 -07:00 (committed by A. Unique TensorFlower)
parent 0aad84ab3f
commit 0ebd134d99
4 changed files with 36 additions and 13 deletions

File 1 of 4

@@ -183,14 +183,21 @@ def make_gaussian_optimizer_class(cls):
   return DPGaussianOptimizerClass
 
-DPAdagradOptimizer = make_optimizer_class(tf.train.AdagradOptimizer)
-DPAdamOptimizer = make_optimizer_class(tf.train.AdamOptimizer)
-DPGradientDescentOptimizer = make_optimizer_class(
-    tf.train.GradientDescentOptimizer)
-DPAdagradGaussianOptimizer = make_gaussian_optimizer_class(
-    tf.train.AdagradOptimizer)
-DPAdamGaussianOptimizer = make_gaussian_optimizer_class(tf.train.AdamOptimizer)
+# Compatibility with tf 1 and 2 APIs
+try:
+  AdagradOptimizer = tf.train.AdagradOptimizer
+  AdamOptimizer = tf.train.AdamOptimizer
+  GradientDescentOptimizer = tf.train.GradientDescentOptimizer
+except:  # pylint: disable=bare-except
+  AdagradOptimizer = tf.optimizers.Adagrad
+  AdamOptimizer = tf.optimizers.Adam
+  GradientDescentOptimizer = tf.optimizers.SGD  # pylint: disable=invalid-name
+
+DPAdagradOptimizer = make_optimizer_class(AdagradOptimizer)
+DPAdamOptimizer = make_optimizer_class(AdamOptimizer)
+DPGradientDescentOptimizer = make_optimizer_class(GradientDescentOptimizer)
+DPAdagradGaussianOptimizer = make_gaussian_optimizer_class(AdagradOptimizer)
+DPAdamGaussianOptimizer = make_gaussian_optimizer_class(AdamOptimizer)
 DPGradientDescentGaussianOptimizer = make_gaussian_optimizer_class(
-    tf.train.GradientDescentOptimizer)
+    GradientDescentOptimizer)
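For orientation, the aliasing pattern this file introduces can be read in isolation; the following is a minimal sketch, not part of the commit. Under TF 1.x the tf.train lookups succeed, while under TF 2.x they raise AttributeError, which the bare except above absorbs before binding the Keras optimizers:

import tensorflow as tf

# Resolve an optimizer class that exists under both APIs. The commit uses a
# bare except; AttributeError (what the tf.train lookup raises under TF 2)
# is named here only for illustration.
try:
  GradientDescentOptimizer = tf.train.GradientDescentOptimizer  # TF 1.x
except AttributeError:
  GradientDescentOptimizer = tf.optimizers.SGD  # TF 2.x

# Both classes accept learning_rate as a keyword, so callers can instantiate
# the alias without knowing which API resolved.
opt = GradientDescentOptimizer(learning_rate=0.15)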

File 2 of 4

@@ -25,6 +25,12 @@ from privacy.analysis.rdp_accountant import compute_rdp
 from privacy.analysis.rdp_accountant import get_privacy_spent
 from privacy.optimizers import dp_optimizer
 
+# Compatibility with tf 1 and 2 APIs
+try:
+  GradientDescentOptimizer = tf.train.GradientDescentOptimizer
+except:  # pylint: disable=bare-except
+  GradientDescentOptimizer = tf.optimizers.SGD  # pylint: disable=invalid-name
+
 tf.flags.DEFINE_boolean('dpsgd', True, 'If True, train with DP-SGD. If False, '
                         'train with vanilla SGD.')
 tf.flags.DEFINE_float('learning_rate', .15, 'Learning rate for training')
@@ -81,8 +87,7 @@ def cnn_model_fn(features, labels, mode):
         population_size=60000)
     opt_loss = vector_loss
   else:
-    optimizer = tf.train.GradientDescentOptimizer(
-        learning_rate=FLAGS.learning_rate)
+    optimizer = GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)
     opt_loss = scalar_loss
   global_step = tf.train.get_global_step()
   train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
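A caveat worth flagging, as an observation rather than a change in this commit: the unchanged train_op line keeps the TF 1.x Optimizer.minimize(loss, global_step=...) signature. The Keras tf.optimizers.SGD bound by the except branch instead exposes minimize(loss, var_list) and tracks its own iterations counter, so a TF 2-only sketch of the same step would look roughly like:

# Hypothetical TF 2 equivalent; model is a placeholder tf.keras.Model.
# Keras optimizers take an explicit variable list and no global_step.
train_op = optimizer.minimize(opt_loss, var_list=model.trainable_variables)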

File 3 of 4

@@ -24,6 +24,12 @@ from privacy.analysis.rdp_accountant import get_privacy_spent
 from privacy.optimizers.dp_optimizer import DPGradientDescentOptimizer
 from privacy.optimizers.gaussian_query import GaussianAverageQuery
 
+# Compatibility with tf 1 and 2 APIs
+try:
+  GradientDescentOptimizer = tf.train.GradientDescentOptimizer
+except:  # pylint: disable=bare-except
+  GradientDescentOptimizer = tf.optimizers.SGD  # pylint: disable=invalid-name
+
 tf.enable_eager_execution()
 
 tf.flags.DEFINE_boolean('dpsgd', True, 'If True, train with DP-SGD. If False, '
@@ -97,7 +103,7 @@ def main(_):
         FLAGS.microbatches,
         learning_rate=FLAGS.learning_rate)
   else:
-    opt = tf.train.GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)
+    opt = GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)
 
   # Training loop.
   steps_per_epoch = 60000 // FLAGS.batch_size
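Because this tutorial enables eager execution, the non-DP branch ends up driving the optimizer step by step inside the training loop. A minimal sketch of one such step, assuming placeholder model, loss_fn, images, and labels:

# Hypothetical eager step: compute gradients with a tape, then let whichever
# class the alias resolved to apply them; apply_gradients exists on both.
with tf.GradientTape() as tape:
  loss = loss_fn(labels, model(images))
grads = tape.gradient(loss, model.variables)
opt.apply_gradients(zip(grads, model.variables))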

File 4 of 4

@@ -55,6 +55,12 @@ from privacy.analysis.rdp_accountant import get_privacy_spent
 from privacy.optimizers.dp_optimizer import DPGradientDescentOptimizer
 from privacy.optimizers.gaussian_query import GaussianAverageQuery
 
+# Compatibility with tf 1 and 2 APIs
+try:
+  GradientDescentOptimizer = tf.train.GradientDescentOptimizer
+except:  # pylint: disable=bare-except
+  GradientDescentOptimizer = tf.optimizers.SGD  # pylint: disable=invalid-name
+
 tf.flags.DEFINE_boolean('dpsgd', True, 'If True, train with DP-SGD. If False, '
                         'train with vanilla SGD.')
 tf.flags.DEFINE_float('learning_rate', 0.15, 'Learning rate for training')
@@ -133,8 +139,7 @@ def main(unused_argv):
         learning_rate=FLAGS.learning_rate,
         unroll_microbatches=True)
   else:
-    optimizer = tf.train.GradientDescentOptimizer(
-        learning_rate=FLAGS.learning_rate)
+    optimizer = GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)
 
   def keras_loss_fn(labels, logits):
     """This removes the mandatory named arguments for this loss fn."""