forked from 626_privacy/tensorflow_privacy
parent 0aad84ab3f
commit 0ebd134d99
4 changed files with 36 additions and 13 deletions
@@ -183,14 +183,21 @@ def make_gaussian_optimizer_class(cls):
   return DPGaussianOptimizerClass
 
+# Compatibility with tf 1 and 2 APIs
+try:
+  AdagradOptimizer = tf.train.AdagradOptimizer
+  AdamOptimizer = tf.train.AdamOptimizer
+  GradientDescentOptimizer = tf.train.GradientDescentOptimizer
+except:  # pylint: disable=bare-except
+  AdagradOptimizer = tf.optimizers.Adagrad
+  AdamOptimizer = tf.optimizers.Adam
+  GradientDescentOptimizer = tf.optimizers.SGD  # pylint: disable=invalid-name
+
-DPAdagradOptimizer = make_optimizer_class(tf.train.AdagradOptimizer)
-DPAdamOptimizer = make_optimizer_class(tf.train.AdamOptimizer)
-DPGradientDescentOptimizer = make_optimizer_class(
-    tf.train.GradientDescentOptimizer)
+DPAdagradOptimizer = make_optimizer_class(AdagradOptimizer)
+DPAdamOptimizer = make_optimizer_class(AdamOptimizer)
+DPGradientDescentOptimizer = make_optimizer_class(GradientDescentOptimizer)
 
-DPAdagradGaussianOptimizer = make_gaussian_optimizer_class(
-    tf.train.AdagradOptimizer)
-DPAdamGaussianOptimizer = make_gaussian_optimizer_class(tf.train.AdamOptimizer)
+DPAdagradGaussianOptimizer = make_gaussian_optimizer_class(AdagradOptimizer)
+DPAdamGaussianOptimizer = make_gaussian_optimizer_class(AdamOptimizer)
 DPGradientDescentGaussianOptimizer = make_gaussian_optimizer_class(
-    tf.train.GradientDescentOptimizer)
+    GradientDescentOptimizer)
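The change has the same shape in all four files: bind the optimizer names once at import time so the rest of the module is agnostic to the TensorFlow major version. A minimal standalone sketch of the shim, assuming only an imported `tf` module (the final `opt = ...` line is illustrative and not part of the diff):

    import tensorflow as tf

    # TF 1.x exposes optimizers under tf.train; on TF 2.x the attribute is
    # missing, the lookup raises AttributeError, and we fall back to the
    # equivalent Keras optimizer under tf.optimizers.
    try:
      GradientDescentOptimizer = tf.train.GradientDescentOptimizer
    except AttributeError:
      GradientDescentOptimizer = tf.optimizers.SGD

    # Either class is constructed the same way downstream:
    opt = GradientDescentOptimizer(learning_rate=0.15)

The diff itself uses a bare `except` with a pylint suppression; `AttributeError` is the exception actually raised when `tf.train` lacks the class, so the sketch narrows the clause to that.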
@@ -25,6 +25,12 @@ from privacy.analysis.rdp_accountant import compute_rdp
 from privacy.analysis.rdp_accountant import get_privacy_spent
 from privacy.optimizers import dp_optimizer
 
+# Compatibility with tf 1 and 2 APIs
+try:
+  GradientDescentOptimizer = tf.train.GradientDescentOptimizer
+except:  # pylint: disable=bare-except
+  GradientDescentOptimizer = tf.optimizers.SGD  # pylint: disable=invalid-name
+
 tf.flags.DEFINE_boolean('dpsgd', True, 'If True, train with DP-SGD. If False, '
                         'train with vanilla SGD.')
 tf.flags.DEFINE_float('learning_rate', .15, 'Learning rate for training')
@@ -81,8 +87,7 @@ def cnn_model_fn(features, labels, mode):
         population_size=60000)
     opt_loss = vector_loss
   else:
-    optimizer = tf.train.GradientDescentOptimizer(
-        learning_rate=FLAGS.learning_rate)
+    optimizer = GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)
     opt_loss = scalar_loss
   global_step = tf.train.get_global_step()
   train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
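One caveat on the unchanged context here: `optimizer.minimize(loss=opt_loss, global_step=global_step)` is the TF 1.x `Optimizer.minimize` signature. The `tf.optimizers.SGD` picked by the fallback is a Keras optimizer whose `minimize` takes a loss callable and an explicit `var_list`, with no `global_step` parameter, so exercising this vanilla branch under TF 2 would need a small adaptation along these lines (`model_vars` is a stand-in for the trainable variables, not a name from the diff):

    # Hypothetical TF 2 form of the same training step; not part of the diff.
    optimizer.minimize(lambda: scalar_loss, var_list=model_vars)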
@@ -24,6 +24,12 @@ from privacy.analysis.rdp_accountant import get_privacy_spent
 from privacy.optimizers.dp_optimizer import DPGradientDescentOptimizer
 from privacy.optimizers.gaussian_query import GaussianAverageQuery
 
+# Compatibility with tf 1 and 2 APIs
+try:
+  GradientDescentOptimizer = tf.train.GradientDescentOptimizer
+except:  # pylint: disable=bare-except
+  GradientDescentOptimizer = tf.optimizers.SGD  # pylint: disable=invalid-name
+
 tf.enable_eager_execution()
 
 tf.flags.DEFINE_boolean('dpsgd', True, 'If True, train with DP-SGD. If False, '
@@ -97,7 +103,7 @@ def main(_):
       FLAGS.microbatches,
       learning_rate=FLAGS.learning_rate)
   else:
-    opt = tf.train.GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)
+    opt = GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)
 
   # Training loop.
   steps_per_epoch = 60000 // FLAGS.batch_size
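The context lines above show only the tail of the DP branch. A hedged reconstruction of the whole if/else in this eager tutorial, inferred from the visible imports and arguments (`FLAGS.l2_norm_clip` and `FLAGS.noise_multiplier` are assumed flag names, not visible in the diff):

    if FLAGS.dpsgd:
      # Assumed construction: clip norm and noise stddev for the average query.
      dp_average_query = GaussianAverageQuery(
          FLAGS.l2_norm_clip,
          FLAGS.l2_norm_clip * FLAGS.noise_multiplier,
          FLAGS.microbatches)
      opt = DPGradientDescentOptimizer(
          dp_average_query,
          FLAGS.microbatches,
          learning_rate=FLAGS.learning_rate)
    else:
      # The line the commit actually changes: construct through the alias.
      opt = GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)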
@@ -55,6 +55,12 @@ from privacy.analysis.rdp_accountant import get_privacy_spent
 from privacy.optimizers.dp_optimizer import DPGradientDescentOptimizer
 from privacy.optimizers.gaussian_query import GaussianAverageQuery
 
+# Compatibility with tf 1 and 2 APIs
+try:
+  GradientDescentOptimizer = tf.train.GradientDescentOptimizer
+except:  # pylint: disable=bare-except
+  GradientDescentOptimizer = tf.optimizers.SGD  # pylint: disable=invalid-name
+
 tf.flags.DEFINE_boolean('dpsgd', True, 'If True, train with DP-SGD. If False, '
                         'train with vanilla SGD.')
 tf.flags.DEFINE_float('learning_rate', 0.15, 'Learning rate for training')
@@ -133,8 +139,7 @@ def main(unused_argv):
       learning_rate=FLAGS.learning_rate,
       unroll_microbatches=True)
   else:
-    optimizer = tf.train.GradientDescentOptimizer(
-        learning_rate=FLAGS.learning_rate)
+    optimizer = GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)
 
   def keras_loss_fn(labels, logits):
     """This removes the mandatory named arguments for this loss fn."""
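Downstream, the selected optimizer is presumably handed to Keras; a sketch of that wiring (`model` and the metric choice are assumptions, not shown in the diff):

    # Illustrative only: compile the model with whichever optimizer the
    # if/else above selected, using the positional-argument loss wrapper.
    model.compile(optimizer=optimizer, loss=keras_loss_fn, metrics=['accuracy'])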