From c8b1c97b471f33c779200a6b212b4264cbfc3c2e Mon Sep 17 00:00:00 2001
From: Steve Chien
Date: Tue, 6 Apr 2021 13:29:05 -0700
Subject: [PATCH] Small updates in preparation for auto-generating documentation.

PiperOrigin-RevId: 367073829
---
 .../privacy/optimizers/dp_optimizer.py            | 15 ++++++++++-----
 .../privacy/optimizers/dp_optimizer_keras.py      |  9 ++++++---
 .../optimizers/dp_optimizer_keras_vectorized.py   |  8 +++++---
 .../privacy/optimizers/dp_optimizer_vectorized.py | 10 ++++++----
 4 files changed, 27 insertions(+), 15 deletions(-)

diff --git a/tensorflow_privacy/privacy/optimizers/dp_optimizer.py b/tensorflow_privacy/privacy/optimizers/dp_optimizer.py
index 1e4c281..fb44361 100644
--- a/tensorflow_privacy/privacy/optimizers/dp_optimizer.py
+++ b/tensorflow_privacy/privacy/optimizers/dp_optimizer.py
@@ -53,14 +53,14 @@ def make_optimizer_class(cls):
       """Initialize the DPOptimizerClass.
 
       Args:
-        dp_sum_query: DPQuery object, specifying differential privacy
+        dp_sum_query: `DPQuery` object, specifying differential privacy
           mechanism to use.
-        num_microbatches: How many microbatches into which the minibatch is
-          split. If None, will default to the size of the minibatch, and
+        num_microbatches: Number of microbatches into which each minibatch is
+          split. If `None`, will default to the size of the minibatch, and
           per-example gradients will be computed.
         unroll_microbatches: If true, processes microbatches within a Python
-          loop instead of a tf.while_loop. Can be used if using a tf.while_loop
-          raises an exception.
+          loop instead of a `tf.while_loop`. Can be used if using a
+          `tf.while_loop` raises an exception.
       """
       super(DPOptimizerClass, self).__init__(*args, **kwargs)
       self._dp_sum_query = dp_sum_query
@@ -205,6 +205,8 @@ def make_optimizer_class(cls):
       return super(DPOptimizerClass,
                    self).apply_gradients(grads_and_vars, global_step, name)
 
+  DPOptimizerClass.__doc__ = ('DP subclass of {}.').format(cls.__name__)
+
   return DPOptimizerClass
 
 
@@ -265,6 +267,9 @@ def make_gaussian_optimizer_class(cls):
     def ledger(self):
       return self._dp_sum_query.ledger
 
+  DPGaussianOptimizerClass.__doc__ = ('DP subclass of {} using Gaussian '
+                                      'averaging.').format(cls.__name__)
+
   return DPGaussianOptimizerClass
 
 AdagradOptimizer = tf.train.AdagradOptimizer
diff --git a/tensorflow_privacy/privacy/optimizers/dp_optimizer_keras.py b/tensorflow_privacy/privacy/optimizers/dp_optimizer_keras.py
index 5efa61d..d5ffe02 100644
--- a/tensorflow_privacy/privacy/optimizers/dp_optimizer_keras.py
+++ b/tensorflow_privacy/privacy/optimizers/dp_optimizer_keras.py
@@ -49,9 +49,9 @@ def make_keras_optimizer_class(cls):
       """Initialize the DPOptimizerClass.
 
       Args:
-        l2_norm_clip: Clipping norm (max L2 norm of per microbatch gradients)
-        noise_multiplier: Ratio of the standard deviation to the clipping norm
-        num_microbatches: The number of microbatches into which each minibatch
+        l2_norm_clip: Clipping norm (max L2 norm of per microbatch gradients).
+        noise_multiplier: Ratio of the standard deviation to the clipping norm.
+        num_microbatches: Number of microbatches into which each minibatch
           is split.
       """
       super(DPOptimizerClass, self).__init__(*args, **kwargs)
@@ -169,6 +169,9 @@ def make_keras_optimizer_class(cls):
       return super(DPOptimizerClass,
                    self).apply_gradients(grads_and_vars, global_step, name)
 
+  DPOptimizerClass.__doc__ = ('DP subclass of {} using Gaussian '
+                              'averaging.').format(cls.__name__)
+
   return DPOptimizerClass
 
 
diff --git a/tensorflow_privacy/privacy/optimizers/dp_optimizer_keras_vectorized.py b/tensorflow_privacy/privacy/optimizers/dp_optimizer_keras_vectorized.py
index 9be6b31..5d736b2 100644
--- a/tensorflow_privacy/privacy/optimizers/dp_optimizer_keras_vectorized.py
+++ b/tensorflow_privacy/privacy/optimizers/dp_optimizer_keras_vectorized.py
@@ -62,9 +62,9 @@ def make_vectorized_keras_optimizer_class(cls):
       """Initialize the DPOptimizerClass.
 
      Args:
-        l2_norm_clip: Clipping norm (max L2 norm of per microbatch gradients)
-        noise_multiplier: Ratio of the standard deviation to the clipping norm
-        num_microbatches: The number of microbatches into which each minibatch
+        l2_norm_clip: Clipping norm (max L2 norm of per microbatch gradients).
+        noise_multiplier: Ratio of the standard deviation to the clipping norm.
+        num_microbatches: Number of microbatches into which each minibatch
           is split.
       """
       super(DPOptimizerClass, self).__init__(*args, **kwargs)
@@ -177,6 +177,8 @@ def make_vectorized_keras_optimizer_class(cls):
       return super(DPOptimizerClass,
                    self).apply_gradients(grads_and_vars, global_step, name)
 
+  DPOptimizerClass.__doc__ = ('Vectorized DP subclass of {} using Gaussian '
+                              'averaging.').format(cls.__name__)
   return DPOptimizerClass
 
 
diff --git a/tensorflow_privacy/privacy/optimizers/dp_optimizer_vectorized.py b/tensorflow_privacy/privacy/optimizers/dp_optimizer_vectorized.py
index 5edafd4..54517ce 100644
--- a/tensorflow_privacy/privacy/optimizers/dp_optimizer_vectorized.py
+++ b/tensorflow_privacy/privacy/optimizers/dp_optimizer_vectorized.py
@@ -51,10 +51,10 @@ def make_vectorized_optimizer_class(cls):
      """Initialize the DPOptimizerClass.
 
       Args:
-        l2_norm_clip: Clipping norm (max L2 norm of per microbatch gradients)
-        noise_multiplier: Ratio of the standard deviation to the clipping norm
-        num_microbatches: How many microbatches into which the minibatch is
-          split. If None, will default to the size of the minibatch, and
+        l2_norm_clip: Clipping norm (max L2 norm of per microbatch gradients).
+        noise_multiplier: Ratio of the standard deviation to the clipping norm.
+        num_microbatches: Number of microbatches into which each minibatch is
+          split. If `None`, will default to the size of the minibatch, and
           per-example gradients will be computed.
       """
       super(DPOptimizerClass, self).__init__(*args, **kwargs)
@@ -136,6 +136,8 @@ def make_vectorized_optimizer_class(cls):
 
       return list(zip(final_grads, var_list))
 
+  DPOptimizerClass.__doc__ = ('Vectorized DP subclass of {} using Gaussian '
+                              'averaging.').format(cls.__name__)
   return DPOptimizerClass