Clean-up pass to eliminate warnings: replacing deprecated endpoints with recommended versions and annotating test sizes.
PiperOrigin-RevId: 246901723
Parent: 85280ab568
Commit: 9cece21d92
6 changed files with 80 additions and 50 deletions
@@ -13,17 +13,21 @@ py_library(
     deps = [
         ":dp_query",
         ":normalized_query",
         "//third_party/py/distutils",
         "//third_party/py/tensorflow",
     ],
 )
 
 py_test(
     name = "gaussian_query_test",
+    size = "small",
     srcs = ["gaussian_query_test.py"],
     deps = [
         ":gaussian_query",
         ":test_utils",
         "//third_party/py/absl/testing:parameterized",
         "//third_party/py/numpy",
         "//third_party/py/six",
         "//third_party/py/tensorflow",
     ],
 )
@@ -33,12 +37,14 @@ py_library(
     srcs = ["no_privacy_query.py"],
     deps = [
         ":dp_query",
         "//third_party/py/distutils",
         "//third_party/py/tensorflow",
     ],
 )
 
 py_test(
     name = "no_privacy_query_test",
+    size = "small",
     srcs = ["no_privacy_query_test.py"],
     deps = [
         ":no_privacy_query",
@@ -53,12 +59,14 @@ py_library(
     srcs = ["normalized_query.py"],
     deps = [
         ":dp_query",
         "//third_party/py/distutils",
         "//third_party/py/tensorflow",
     ],
 )
 
 py_test(
     name = "normalized_query_test",
+    size = "small",
     srcs = ["normalized_query_test.py"],
     deps = [
         ":gaussian_query",
@@ -73,18 +81,22 @@ py_library(
     srcs = ["nested_query.py"],
     deps = [
         ":dp_query",
         "//third_party/py/distutils",
         "//third_party/py/tensorflow",
     ],
 )
 
 py_test(
     name = "nested_query_test",
+    size = "small",
     srcs = ["nested_query_test.py"],
     deps = [
         ":gaussian_query",
         ":nested_query",
         ":test_utils",
         "//third_party/py/absl/testing:parameterized",
         "//third_party/py/distutils",
         "//third_party/py/numpy",
         "//third_party/py/tensorflow",
     ],
 )
@@ -27,9 +27,9 @@ from privacy.dp_query import gaussian_query
 def make_optimizer_class(cls):
   """Constructs a DP optimizer class from an existing one."""
   if LooseVersion(tf.__version__) < LooseVersion('2.0.0'):
-    parent_code = tf.train.Optimizer.compute_gradients.__code__
+    parent_code = tf.compat.v1.train.Optimizer.compute_gradients.__code__
     child_code = cls.compute_gradients.__code__
-    GATE_OP = tf.train.Optimizer.GATE_OP  # pylint: disable=invalid-name
+    GATE_OP = tf.compat.v1.train.Optimizer.GATE_OP  # pylint: disable=invalid-name
   else:
     parent_code = tf.optimizers.Optimizer._compute_gradients.__code__  # pylint: disable=protected-access
     child_code = cls._compute_gradients.__code__  # pylint: disable=protected-access
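This hunk keeps make_optimizer_class working on both major TensorFlow lines: under 1.x the Optimizer symbols are now reached through tf.compat.v1, which is available from TF 1.13 onward and does not trigger the "tf.train.* is deprecated" warnings, while the __code__ comparison lets the factory detect whether cls overrides compute_gradients. A self-contained sketch of the version-gating idiom, using an illustrative symbol:

from distutils.version import LooseVersion

import tensorflow as tf

if LooseVersion(tf.__version__) < LooseVersion('2.0.0'):
  # TF 1.x: address the endpoint through tf.compat.v1 so the bare
  # tf.train.* deprecation warning never fires.
  GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
else:
  # TF 2.x: the Keras-style optimizer replaces tf.train.GradientDescentOptimizer.
  GradientDescentOptimizer = tf.optimizers.SGD  # pylint: disable=invalid-name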
@@ -211,9 +211,9 @@ def make_gaussian_optimizer_class(cls):
 
 # Compatibility with tf 1 and 2 APIs
 try:
-  AdagradOptimizer = tf.train.AdagradOptimizer
-  AdamOptimizer = tf.train.AdamOptimizer
-  GradientDescentOptimizer = tf.train.GradientDescentOptimizer
+  AdagradOptimizer = tf.compat.v1.train.AdagradOptimizer
+  AdamOptimizer = tf.compat.v1.train.AdamOptimizer
+  GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
 except:  # pylint: disable=bare-except
   AdagradOptimizer = tf.optimizers.Adagrad
   AdamOptimizer = tf.optimizers.Adam
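Where no version comparison is needed, the module relies on the second compatibility idiom: try the v1 endpoint and fall back to the TF 2.x Keras-style optimizer when the attribute lookup fails. A short sketch of the idiom, catching AttributeError explicitly here even though the diff keeps the original bare except:

import tensorflow as tf

try:
  # Preferred: the non-deprecated v1 endpoint.
  AdamOptimizer = tf.compat.v1.train.AdamOptimizer
except AttributeError:
  # Fallback for builds where the v1 attribute chain is unavailable.
  AdamOptimizer = tf.optimizers.Adam  # pylint: disable=invalid-name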
@@ -36,6 +36,10 @@ from __future__ import division
 from __future__ import print_function
 
 import os
+
+from absl import app
+from absl import flags
 
 import numpy as np
 import tensorflow as tf
 import tensorflow_datasets as tfds
@@ -45,20 +49,22 @@ from privacy.analysis.rdp_accountant import compute_rdp
 from privacy.analysis.rdp_accountant import get_privacy_spent
 from privacy.optimizers import dp_optimizer
 
-tf.flags.DEFINE_boolean('dpsgd', True, 'If True, train with DP-SGD. If False, '
+flags.DEFINE_boolean(
+    'dpsgd', True, 'If True, train with DP-SGD. If False, '
     'train with vanilla SGD.')
-tf.flags.DEFINE_float('learning_rate', 0.001, 'Learning rate for training')
-tf.flags.DEFINE_float('noise_multiplier', 0.001,
+flags.DEFINE_float('learning_rate', 0.001, 'Learning rate for training')
+flags.DEFINE_float('noise_multiplier', 0.001,
                    'Ratio of the standard deviation to the clipping norm')
-tf.flags.DEFINE_float('l2_norm_clip', 1.0, 'Clipping norm')
-tf.flags.DEFINE_integer('batch_size', 256, 'Batch size')
-tf.flags.DEFINE_integer('epochs', 60, 'Number of epochs')
-tf.flags.DEFINE_integer('microbatches', 256, 'Number of microbatches '
+flags.DEFINE_float('l2_norm_clip', 1.0, 'Clipping norm')
+flags.DEFINE_integer('batch_size', 256, 'Batch size')
+flags.DEFINE_integer('epochs', 60, 'Number of epochs')
+flags.DEFINE_integer(
+    'microbatches', 256, 'Number of microbatches '
     '(must evenly divide batch_size)')
-tf.flags.DEFINE_string('model_dir', None, 'Model directory')
-tf.flags.DEFINE_string('data_dir', None, 'Directory containing the PTB data.')
+flags.DEFINE_string('model_dir', None, 'Model directory')
+flags.DEFINE_string('data_dir', None, 'Directory containing the PTB data.')
 
-FLAGS = tf.flags.FLAGS
+FLAGS = flags.FLAGS
 
 SEQ_LEN = 80
 NB_TRAIN = 45000
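The flag migration repeated across these tutorials is mechanical: tf.flags has long been a thin alias for absl.flags and is deprecated, so the tutorials now import absl directly, reflowing the multi-line DEFINE calls to absl style along the way. A condensed sketch of the resulting pattern (flag names from the diff, values illustrative):

from absl import flags

flags.DEFINE_float('learning_rate', 0.001, 'Learning rate for training')
flags.DEFINE_integer('batch_size', 256, 'Batch size')

FLAGS = flags.FLAGS

# FLAGS values become readable only after command-line parsing, which
# app.run() performs; touching FLAGS.batch_size before that raises an
# UnparsedFlagAccessError.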
@@ -217,4 +223,4 @@ def main(unused_argv):
   print('Trained with vanilla non-private SGD optimizer')
 
 if __name__ == '__main__':
-  tf.app.run()
+  app.run(main)
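The entry point moves for the same reason: tf.app.run() is a deprecated wrapper that TF 2.x drops, whereas absl's app.run takes the main callable explicitly and parses all defined flags before invoking it. A minimal sketch:

from absl import app


def main(unused_argv):
  # absl passes in the argv left over after flag parsing; unused here.
  print('Hello from main')


if __name__ == '__main__':
  app.run(main)  # parses flags, then calls main with the remaining argv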
@@ -18,6 +18,9 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
+from absl import app
+from absl import flags
+
 import numpy as np
 import tensorflow as tf
 
@@ -28,26 +31,28 @@ from privacy.optimizers import dp_optimizer
 
 # Compatibility with tf 1 and 2 APIs
 try:
-  GradientDescentOptimizer = tf.train.GradientDescentOptimizer
+  GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
 except:  # pylint: disable=bare-except
   GradientDescentOptimizer = tf.optimizers.SGD  # pylint: disable=invalid-name
 
-tf.flags.DEFINE_boolean('dpsgd', True, 'If True, train with DP-SGD. If False, '
+FLAGS = flags.FLAGS
+
+flags.DEFINE_boolean(
+    'dpsgd', True, 'If True, train with DP-SGD. If False, '
     'train with vanilla SGD.')
-tf.flags.DEFINE_float('learning_rate', .15, 'Learning rate for training')
-tf.flags.DEFINE_float('noise_multiplier', 1.1,
+flags.DEFINE_float('learning_rate', .15, 'Learning rate for training')
+flags.DEFINE_float('noise_multiplier', 1.1,
                    'Ratio of the standard deviation to the clipping norm')
-tf.flags.DEFINE_float('l2_norm_clip', 1.0, 'Clipping norm')
-tf.flags.DEFINE_integer('batch_size', 256, 'Batch size')
-tf.flags.DEFINE_integer('epochs', 60, 'Number of epochs')
-tf.flags.DEFINE_integer('microbatches', 256, 'Number of microbatches '
+flags.DEFINE_float('l2_norm_clip', 1.0, 'Clipping norm')
+flags.DEFINE_integer('batch_size', 256, 'Batch size')
+flags.DEFINE_integer('epochs', 60, 'Number of epochs')
+flags.DEFINE_integer(
+    'microbatches', 256, 'Number of microbatches '
     '(must evenly divide batch_size)')
-tf.flags.DEFINE_string('model_dir', None, 'Model directory')
-
-FLAGS = tf.flags.FLAGS
+flags.DEFINE_string('model_dir', None, 'Model directory')
 
 
-class EpsilonPrintingTrainingHook(tf.train.SessionRunHook):
+class EpsilonPrintingTrainingHook(tf.estimator.SessionRunHook):
   """Training hook to print current value of epsilon after an epoch."""
 
   def __init__(self, ledger):
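One more renamed endpoint hides in the hunk above: tf.train.SessionRunHook is deprecated in favor of tf.estimator.SessionRunHook, and only the base class changes, not the hook protocol. A minimal hypothetical hook in the spirit of EpsilonPrintingTrainingHook (the privacy-ledger accounting is elided; after_run and end are standard SessionRunHook callbacks):

import tensorflow as tf


class StepCountingHook(tf.estimator.SessionRunHook):
  """Hypothetical hook that counts calls to session.run()."""

  def __init__(self):
    self._steps = 0

  def after_run(self, run_context, run_values):
    # Called once after every session.run(); run_context.session is
    # available if extra tensors need evaluating here.
    self._steps += 1

  def end(self, session):
    print('Session finished after %d run calls.' % self._steps)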
@@ -203,4 +208,4 @@ def main(unused_argv):
   print('Test accuracy after %d epochs is: %.3f' % (epoch, test_accuracy))
 
 if __name__ == '__main__':
-  tf.app.run()
+  app.run(main)
@@ -18,7 +18,9 @@ from __future__ import print_function
 
+from absl import app
+from absl import flags
 
 from distutils.version import LooseVersion
 
 import numpy as np
 import tensorflow as tf
 
@@ -28,8 +30,8 @@ from privacy.dp_query.gaussian_query import GaussianAverageQuery
 from privacy.optimizers.dp_optimizer import DPGradientDescentOptimizer
 
 if LooseVersion(tf.__version__) < LooseVersion('2.0.0'):
-  GradientDescentOptimizer = tf.train.GradientDescentOptimizer
-  tf.enable_eager_execution()
+  GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
+  tf.compat.v1.enable_eager_execution()
 else:
   GradientDescentOptimizer = tf.optimizers.SGD  # pylint: disable=invalid-name
 
@@ -17,6 +17,9 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
+from absl import app
+from absl import flags
+
 import numpy as np
 import tensorflow as tf
 
@@ -27,23 +30,25 @@ from privacy.optimizers.dp_optimizer import DPGradientDescentOptimizer
 
 # Compatibility with tf 1 and 2 APIs
 try:
-  GradientDescentOptimizer = tf.train.GradientDescentOptimizer
+  GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
 except:  # pylint: disable=bare-except
   GradientDescentOptimizer = tf.optimizers.SGD  # pylint: disable=invalid-name
 
-tf.flags.DEFINE_boolean('dpsgd', True, 'If True, train with DP-SGD. If False, '
+flags.DEFINE_boolean(
+    'dpsgd', True, 'If True, train with DP-SGD. If False, '
     'train with vanilla SGD.')
-tf.flags.DEFINE_float('learning_rate', 0.15, 'Learning rate for training')
-tf.flags.DEFINE_float('noise_multiplier', 1.1,
+flags.DEFINE_float('learning_rate', 0.15, 'Learning rate for training')
+flags.DEFINE_float('noise_multiplier', 1.1,
                    'Ratio of the standard deviation to the clipping norm')
-tf.flags.DEFINE_float('l2_norm_clip', 1.0, 'Clipping norm')
-tf.flags.DEFINE_integer('batch_size', 250, 'Batch size')
-tf.flags.DEFINE_integer('epochs', 60, 'Number of epochs')
-tf.flags.DEFINE_integer('microbatches', 250, 'Number of microbatches '
+flags.DEFINE_float('l2_norm_clip', 1.0, 'Clipping norm')
+flags.DEFINE_integer('batch_size', 250, 'Batch size')
+flags.DEFINE_integer('epochs', 60, 'Number of epochs')
+flags.DEFINE_integer(
+    'microbatches', 250, 'Number of microbatches '
     '(must evenly divide batch_size)')
-tf.flags.DEFINE_string('model_dir', None, 'Model directory')
+flags.DEFINE_string('model_dir', None, 'Model directory')
 
-FLAGS = tf.flags.FLAGS
+FLAGS = flags.FLAGS
 
 
 def compute_epsilon(steps):
@@ -146,4 +151,4 @@ def main(unused_argv):
   print('Trained with vanilla non-private SGD optimizer')
 
 if __name__ == '__main__':
-  tf.app.run()
+  app.run(main)