Clean-up pass to eliminate warnings: replacing deprecated endpoints with recommended versions and annotating test sizes.

PiperOrigin-RevId: 246901723
commit 9cece21d92 (parent 85280ab568)
Author: Ilya Mironov, 2019-05-06 14:50:04 -07:00 (committed by A. Unique TensorFlower)

6 changed files with 80 additions and 50 deletions

privacy/dp_query/BUILD

@@ -13,17 +13,21 @@ py_library(
     deps = [
         ":dp_query",
         ":normalized_query",
+        "//third_party/py/distutils",
         "//third_party/py/tensorflow",
     ],
 )
 
 py_test(
     name = "gaussian_query_test",
+    size = "small",
     srcs = ["gaussian_query_test.py"],
     deps = [
         ":gaussian_query",
         ":test_utils",
         "//third_party/py/absl/testing:parameterized",
+        "//third_party/py/numpy",
+        "//third_party/py/six",
         "//third_party/py/tensorflow",
     ],
 )
@@ -33,12 +37,14 @@ py_library(
     srcs = ["no_privacy_query.py"],
     deps = [
         ":dp_query",
+        "//third_party/py/distutils",
         "//third_party/py/tensorflow",
     ],
 )
 
 py_test(
     name = "no_privacy_query_test",
+    size = "small",
     srcs = ["no_privacy_query_test.py"],
     deps = [
         ":no_privacy_query",
@@ -53,12 +59,14 @@ py_library(
     srcs = ["normalized_query.py"],
     deps = [
         ":dp_query",
+        "//third_party/py/distutils",
         "//third_party/py/tensorflow",
     ],
 )
 
 py_test(
     name = "normalized_query_test",
+    size = "small",
     srcs = ["normalized_query_test.py"],
     deps = [
         ":gaussian_query",
@@ -73,18 +81,22 @@ py_library(
     srcs = ["nested_query.py"],
     deps = [
         ":dp_query",
+        "//third_party/py/distutils",
         "//third_party/py/tensorflow",
     ],
 )
 
 py_test(
     name = "nested_query_test",
+    size = "small",
     srcs = ["nested_query_test.py"],
     deps = [
         ":gaussian_query",
         ":nested_query",
         ":test_utils",
         "//third_party/py/absl/testing:parameterized",
+        "//third_party/py/distutils",
+        "//third_party/py/numpy",
         "//third_party/py/tensorflow",
     ],
 )
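Note on the BUILD hunks above: size = "small" caps each test at Bazel's short timeout (one minute by default) so the runner can schedule it accordingly, and the new //third_party/py/distutils deps back the distutils.version.LooseVersion checks used to gate on tf.__version__ (see the optimizer hunks below). A minimal sketch of why LooseVersion is preferred over comparing version strings directly; the version values here are illustrative:

    from distutils.version import LooseVersion

    # Plain string comparison orders versions lexicographically, which breaks
    # as soon as a component gains a digit; LooseVersion compares numerically.
    assert '9.0.0' > '10.0.0'                              # wrong order: '9' > '1'
    assert LooseVersion('9.0.0') < LooseVersion('10.0.0')  # numeric, as intended
    assert LooseVersion('1.13.1') < LooseVersion('2.0.0')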

privacy/optimizers/dp_optimizer.py

@@ -27,9 +27,9 @@ from privacy.dp_query import gaussian_query
 def make_optimizer_class(cls):
   """Constructs a DP optimizer class from an existing one."""
   if LooseVersion(tf.__version__) < LooseVersion('2.0.0'):
-    parent_code = tf.train.Optimizer.compute_gradients.__code__
+    parent_code = tf.compat.v1.train.Optimizer.compute_gradients.__code__
     child_code = cls.compute_gradients.__code__
-    GATE_OP = tf.train.Optimizer.GATE_OP  # pylint: disable=invalid-name
+    GATE_OP = tf.compat.v1.train.Optimizer.GATE_OP  # pylint: disable=invalid-name
   else:
     parent_code = tf.optimizers.Optimizer._compute_gradients.__code__  # pylint: disable=protected-access
     child_code = cls._compute_gradients.__code__  # pylint: disable=protected-access
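The parent_code/child_code pair above detects whether cls already overrides compute_gradients: a subclass that does not override a method shares the parent's code object, so comparing __code__ attributes distinguishes inherited from overridden implementations. A toy, self-contained illustration of that mechanism, using hypothetical class names:

    class Parent(object):
      def compute_gradients(self):
        return 'parent'

    class Inheriting(Parent):
      pass  # no override: shares Parent's code object

    class Overriding(Parent):
      def compute_gradients(self):
        return 'overridden'

    assert Inheriting.compute_gradients.__code__ is Parent.compute_gradients.__code__
    assert Overriding.compute_gradients.__code__ is not Parent.compute_gradients.__code__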
@@ -211,9 +211,9 @@ def make_gaussian_optimizer_class(cls):
 
 # Compatibility with tf 1 and 2 APIs
 try:
-  AdagradOptimizer = tf.train.AdagradOptimizer
-  AdamOptimizer = tf.train.AdamOptimizer
-  GradientDescentOptimizer = tf.train.GradientDescentOptimizer
+  AdagradOptimizer = tf.compat.v1.train.AdagradOptimizer
+  AdamOptimizer = tf.compat.v1.train.AdamOptimizer
+  GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
 except:  # pylint: disable=bare-except
   AdagradOptimizer = tf.optimizers.Adagrad
   AdamOptimizer = tf.optimizers.Adam
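tf.compat.v1.train is the non-deprecated spelling of the TF 1.x endpoints and also exists in TF 2.x, so after this change the except branch only matters on releases without tf.compat.v1. A sketch of how the resulting alias is consumed downstream; the learning rate and the narrowed AttributeError are illustrative, not the module's exact code:

    import tensorflow as tf

    try:
      GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
    except AttributeError:  # releases without the tf.compat.v1 endpoints
      GradientDescentOptimizer = tf.optimizers.SGD

    # The rest of the module is written once against the alias,
    # whichever API family the installed TF provides.
    opt = GradientDescentOptimizer(learning_rate=0.15)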

tutorials/lm_dpsgd_tutorial.py

@@ -36,6 +36,10 @@ from __future__ import division
 from __future__ import print_function
 
 import os
+
+from absl import app
+from absl import flags
+
 import numpy as np
 import tensorflow as tf
 import tensorflow_datasets as tfds
@@ -45,20 +49,22 @@ from privacy.analysis.rdp_accountant import compute_rdp
 from privacy.analysis.rdp_accountant import get_privacy_spent
 from privacy.optimizers import dp_optimizer
 
-tf.flags.DEFINE_boolean('dpsgd', True, 'If True, train with DP-SGD. If False, '
-                        'train with vanilla SGD.')
-tf.flags.DEFINE_float('learning_rate', 0.001, 'Learning rate for training')
-tf.flags.DEFINE_float('noise_multiplier', 0.001,
-                      'Ratio of the standard deviation to the clipping norm')
-tf.flags.DEFINE_float('l2_norm_clip', 1.0, 'Clipping norm')
-tf.flags.DEFINE_integer('batch_size', 256, 'Batch size')
-tf.flags.DEFINE_integer('epochs', 60, 'Number of epochs')
-tf.flags.DEFINE_integer('microbatches', 256, 'Number of microbatches '
-                        '(must evenly divide batch_size)')
-tf.flags.DEFINE_string('model_dir', None, 'Model directory')
-tf.flags.DEFINE_string('data_dir', None, 'Directory containing the PTB data.')
+flags.DEFINE_boolean(
+    'dpsgd', True, 'If True, train with DP-SGD. If False, '
+    'train with vanilla SGD.')
+flags.DEFINE_float('learning_rate', 0.001, 'Learning rate for training')
+flags.DEFINE_float('noise_multiplier', 0.001,
+                   'Ratio of the standard deviation to the clipping norm')
+flags.DEFINE_float('l2_norm_clip', 1.0, 'Clipping norm')
+flags.DEFINE_integer('batch_size', 256, 'Batch size')
+flags.DEFINE_integer('epochs', 60, 'Number of epochs')
+flags.DEFINE_integer(
+    'microbatches', 256, 'Number of microbatches '
+    '(must evenly divide batch_size)')
+flags.DEFINE_string('model_dir', None, 'Model directory')
+flags.DEFINE_string('data_dir', None, 'Directory containing the PTB data.')
 
-FLAGS = tf.flags.FLAGS
+FLAGS = flags.FLAGS
 
 SEQ_LEN = 80
 NB_TRAIN = 45000
@@ -217,4 +223,4 @@ def main(unused_argv):
     print('Trained with vanilla non-private SGD optimizer')
 
 if __name__ == '__main__':
-  tf.app.run()
+  app.run(main)
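tf.flags and tf.app were thin, deprecated aliases of the absl-py modules, so each tutorial switches to the absl originals. The replacement pattern, reduced to a runnable sketch with one illustrative flag:

    from absl import app
    from absl import flags

    FLAGS = flags.FLAGS
    flags.DEFINE_integer('batch_size', 256, 'Batch size')  # illustrative flag

    def main(unused_argv):
      # app.run() parses sys.argv into FLAGS before invoking main().
      print('batch_size =', FLAGS.batch_size)

    if __name__ == '__main__':
      app.run(main)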

tutorials/mnist_dpsgd_tutorial.py

@@ -18,6 +18,9 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
+from absl import app
+from absl import flags
+
 import numpy as np
 import tensorflow as tf
@@ -28,26 +31,28 @@ from privacy.optimizers import dp_optimizer
 
 # Compatibility with tf 1 and 2 APIs
 try:
-  GradientDescentOptimizer = tf.train.GradientDescentOptimizer
+  GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
 except:  # pylint: disable=bare-except
   GradientDescentOptimizer = tf.optimizers.SGD  # pylint: disable=invalid-name
 
-tf.flags.DEFINE_boolean('dpsgd', True, 'If True, train with DP-SGD. If False, '
-                        'train with vanilla SGD.')
-tf.flags.DEFINE_float('learning_rate', .15, 'Learning rate for training')
-tf.flags.DEFINE_float('noise_multiplier', 1.1,
-                      'Ratio of the standard deviation to the clipping norm')
-tf.flags.DEFINE_float('l2_norm_clip', 1.0, 'Clipping norm')
-tf.flags.DEFINE_integer('batch_size', 256, 'Batch size')
-tf.flags.DEFINE_integer('epochs', 60, 'Number of epochs')
-tf.flags.DEFINE_integer('microbatches', 256, 'Number of microbatches '
-                        '(must evenly divide batch_size)')
-tf.flags.DEFINE_string('model_dir', None, 'Model directory')
+FLAGS = flags.FLAGS
 
-FLAGS = tf.flags.FLAGS
+flags.DEFINE_boolean(
+    'dpsgd', True, 'If True, train with DP-SGD. If False, '
+    'train with vanilla SGD.')
+flags.DEFINE_float('learning_rate', .15, 'Learning rate for training')
+flags.DEFINE_float('noise_multiplier', 1.1,
+                   'Ratio of the standard deviation to the clipping norm')
+flags.DEFINE_float('l2_norm_clip', 1.0, 'Clipping norm')
+flags.DEFINE_integer('batch_size', 256, 'Batch size')
+flags.DEFINE_integer('epochs', 60, 'Number of epochs')
+flags.DEFINE_integer(
+    'microbatches', 256, 'Number of microbatches '
+    '(must evenly divide batch_size)')
+flags.DEFINE_string('model_dir', None, 'Model directory')
 
 
-class EpsilonPrintingTrainingHook(tf.train.SessionRunHook):
+class EpsilonPrintingTrainingHook(tf.estimator.SessionRunHook):
   """Training hook to print current value of epsilon after an epoch."""
 
   def __init__(self, ledger):
@@ -203,4 +208,4 @@ def main(unused_argv):
     print('Test accuracy after %d epochs is: %.3f' % (epoch, test_accuracy))
 
 if __name__ == '__main__':
-  tf.app.run()
+  app.run(main)
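tf.estimator.SessionRunHook is the supported alias of the deprecated tf.train.SessionRunHook; the hook contract itself is unchanged. A minimal, hypothetical hook under the new name (assuming TF 1.13+ where the tf.estimator alias exists):

    import tensorflow as tf

    class StepCountingHook(tf.estimator.SessionRunHook):
      """Illustrative hook that counts training steps."""

      def __init__(self):
        self._steps = 0

      def after_run(self, run_context, run_values):
        # Invoked once after every session.run() issued by the training loop.
        self._steps += 1

      def end(self, session):
        print('Ran %d steps' % self._steps)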

tutorials/mnist_dpsgd_tutorial_eager.py

@@ -18,7 +18,9 @@ from __future__ import print_function
 
 from absl import app
 from absl import flags
+from distutils.version import LooseVersion
+
 import numpy as np
 import tensorflow as tf
@@ -28,8 +30,8 @@ from privacy.dp_query.gaussian_query import GaussianAverageQuery
 from privacy.optimizers.dp_optimizer import DPGradientDescentOptimizer
 
 if LooseVersion(tf.__version__) < LooseVersion('2.0.0'):
-  GradientDescentOptimizer = tf.train.GradientDescentOptimizer
-  tf.enable_eager_execution()
+  GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
+  tf.compat.v1.enable_eager_execution()
 else:
   GradientDescentOptimizer = tf.optimizers.SGD  # pylint: disable=invalid-name
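Under TF 1.x the process starts in graph mode and tf.compat.v1.enable_eager_execution() must run before any ops are built; under 2.x eager is already the default, so the version gate makes the script work unchanged on both. A condensed sketch of the same guard:

    from distutils.version import LooseVersion

    import tensorflow as tf

    if LooseVersion(tf.__version__) < LooseVersion('2.0.0'):
      # Opt in to eager mode before constructing any graph state.
      tf.compat.v1.enable_eager_execution()

    print(tf.constant([1.0, 2.0]) * 2)  # evaluates immediately either way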

tutorials/mnist_dpsgd_tutorial_keras.py

@@ -17,6 +17,9 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
+from absl import app
+from absl import flags
+
 import numpy as np
 import tensorflow as tf
@@ -27,23 +30,25 @@ from privacy.optimizers.dp_optimizer import DPGradientDescentOptimizer
 
 # Compatibility with tf 1 and 2 APIs
 try:
-  GradientDescentOptimizer = tf.train.GradientDescentOptimizer
+  GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
 except:  # pylint: disable=bare-except
   GradientDescentOptimizer = tf.optimizers.SGD  # pylint: disable=invalid-name
 
-tf.flags.DEFINE_boolean('dpsgd', True, 'If True, train with DP-SGD. If False, '
-                        'train with vanilla SGD.')
-tf.flags.DEFINE_float('learning_rate', 0.15, 'Learning rate for training')
-tf.flags.DEFINE_float('noise_multiplier', 1.1,
-                      'Ratio of the standard deviation to the clipping norm')
-tf.flags.DEFINE_float('l2_norm_clip', 1.0, 'Clipping norm')
-tf.flags.DEFINE_integer('batch_size', 250, 'Batch size')
-tf.flags.DEFINE_integer('epochs', 60, 'Number of epochs')
-tf.flags.DEFINE_integer('microbatches', 250, 'Number of microbatches '
-                        '(must evenly divide batch_size)')
-tf.flags.DEFINE_string('model_dir', None, 'Model directory')
+flags.DEFINE_boolean(
+    'dpsgd', True, 'If True, train with DP-SGD. If False, '
+    'train with vanilla SGD.')
+flags.DEFINE_float('learning_rate', 0.15, 'Learning rate for training')
+flags.DEFINE_float('noise_multiplier', 1.1,
+                   'Ratio of the standard deviation to the clipping norm')
+flags.DEFINE_float('l2_norm_clip', 1.0, 'Clipping norm')
+flags.DEFINE_integer('batch_size', 250, 'Batch size')
+flags.DEFINE_integer('epochs', 60, 'Number of epochs')
+flags.DEFINE_integer(
+    'microbatches', 250, 'Number of microbatches '
+    '(must evenly divide batch_size)')
+flags.DEFINE_string('model_dir', None, 'Model directory')
 
-FLAGS = tf.flags.FLAGS
+FLAGS = flags.FLAGS
 
 
 def compute_epsilon(steps):
@@ -146,4 +151,4 @@ def main(unused_argv):
     print('Trained with vanilla non-private SGD optimizer')
 
 if __name__ == '__main__':
-  tf.app.run()
+  app.run(main)
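For context, the compute_epsilon seen in the hunk header above combines the two rdp_accountant endpoints these tutorials import. A sketch of that accounting under assumed parameter defaults (the values here are illustrative, not necessarily the tutorial's exact ones):

    from privacy.analysis.rdp_accountant import compute_rdp
    from privacy.analysis.rdp_accountant import get_privacy_spent

    def compute_epsilon_sketch(steps, batch_size=250, num_examples=60000,
                               noise_multiplier=1.1, target_delta=1e-5):
      """Epsilon spent after `steps` iterations of DP-SGD, via RDP accounting."""
      orders = [1 + x / 10. for x in range(1, 100)] + list(range(12, 64))
      sampling_probability = batch_size / float(num_examples)
      rdp = compute_rdp(q=sampling_probability,
                        noise_multiplier=noise_multiplier,
                        steps=steps,
                        orders=orders)
      # get_privacy_spent returns an (epsilon, delta, optimal_order) triple.
      return get_privacy_spent(orders, rdp, target_delta=target_delta)[0]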