Small updates in preparation for auto-generating documentation.

PiperOrigin-RevId: 367073829
Steve Chien 2021-04-06 13:29:05 -07:00 committed by A. Unique TensorFlower
parent 693dd666c3
commit c8b1c97b47
4 changed files with 27 additions and 15 deletions

@@ -53,14 +53,14 @@ def make_optimizer_class(cls):
       """Initialize the DPOptimizerClass.
 
       Args:
-        dp_sum_query: DPQuery object, specifying differential privacy
+        dp_sum_query: `DPQuery` object, specifying differential privacy
           mechanism to use.
-        num_microbatches: How many microbatches into which the minibatch is
-          split. If None, will default to the size of the minibatch, and
+        num_microbatches: Number of microbatches into which each minibatch is
+          split. If `None`, will default to the size of the minibatch, and
           per-example gradients will be computed.
         unroll_microbatches: If true, processes microbatches within a Python
-          loop instead of a tf.while_loop. Can be used if using a tf.while_loop
-          raises an exception.
+          loop instead of a `tf.while_loop`. Can be used if using a
+          `tf.while_loop` raises an exception.
       """
       super(DPOptimizerClass, self).__init__(*args, **kwargs)
       self._dp_sum_query = dp_sum_query
@@ -205,6 +205,8 @@ def make_optimizer_class(cls):
       return super(DPOptimizerClass,
                    self).apply_gradients(grads_and_vars, global_step, name)
 
+  DPOptimizerClass.__doc__ = ('DP subclass of {}.').format(cls.__name__)
+
   return DPOptimizerClass
@@ -265,6 +267,9 @@ def make_gaussian_optimizer_class(cls):
     def ledger(self):
       return self._dp_sum_query.ledger
 
+  DPGaussianOptimizerClass.__doc__ = ('DP subclass of {} using Gaussian '
+                                      'averaging.').format(cls.__name__)
+
   return DPGaussianOptimizerClass
 
 AdagradOptimizer = tf.train.AdagradOptimizer
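
The added `__doc__` assignments are the substance of this change: because these optimizer classes are built by factory functions, a literal docstring on the inner class would read the same for every wrapped optimizer, so the docstring is stamped on at class-creation time from the wrapped class's name. A minimal sketch of the pattern with toy names (not the library code):

    class SGD(object):
      """Toy stand-in for a concrete optimizer class."""

    def make_wrapper_class(cls):
      """Returns a subclass of `cls` with a generated docstring."""

      class WrapperClass(cls):
        pass

      # Same idea as the lines added above: derive the docstring from the
      # wrapped class name so auto-generated API docs name the parent class.
      WrapperClass.__doc__ = ('DP subclass of {}.').format(cls.__name__)
      return WrapperClass

    print(make_wrapper_class(SGD).__doc__)  # -> DP subclass of SGD.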

@@ -49,9 +49,9 @@ def make_keras_optimizer_class(cls):
       """Initialize the DPOptimizerClass.
 
       Args:
-        l2_norm_clip: Clipping norm (max L2 norm of per microbatch gradients)
-        noise_multiplier: Ratio of the standard deviation to the clipping norm
-        num_microbatches: The number of microbatches into which each minibatch
+        l2_norm_clip: Clipping norm (max L2 norm of per microbatch gradients).
+        noise_multiplier: Ratio of the standard deviation to the clipping norm.
+        num_microbatches: Number of microbatches into which each minibatch
           is split.
       """
       super(DPOptimizerClass, self).__init__(*args, **kwargs)
@@ -169,6 +169,9 @@ def make_keras_optimizer_class(cls):
       return super(DPOptimizerClass,
                    self).apply_gradients(grads_and_vars, global_step, name)
 
+  DPOptimizerClass.__doc__ = ('DP subclass of {} using Gaussian '
+                              'averaging.').format(cls.__name__)
+
   return DPOptimizerClass
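
For context, the parameters documented above are the ones a caller passes when constructing one of the generated Keras optimizers. A hypothetical usage sketch, assuming the exported name `DPKerasSGDOptimizer` produced by `make_keras_optimizer_class` (check the package's exports for the exact symbol):

    from tensorflow_privacy.privacy.optimizers import dp_optimizer_keras

    # The first three arguments correspond to the docstring entries above;
    # remaining keyword arguments (e.g. learning_rate) are forwarded to the
    # base Keras optimizer.
    optimizer = dp_optimizer_keras.DPKerasSGDOptimizer(
        l2_norm_clip=1.0,      # max L2 norm of each per-microbatch gradient
        noise_multiplier=1.1,  # noise stddev as a multiple of the clipping norm
        num_microbatches=32,   # each minibatch is split into this many pieces
        learning_rate=0.15)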

@@ -62,9 +62,9 @@ def make_vectorized_keras_optimizer_class(cls):
       """Initialize the DPOptimizerClass.
 
       Args:
-        l2_norm_clip: Clipping norm (max L2 norm of per microbatch gradients)
-        noise_multiplier: Ratio of the standard deviation to the clipping norm
-        num_microbatches: The number of microbatches into which each minibatch
+        l2_norm_clip: Clipping norm (max L2 norm of per microbatch gradients).
+        noise_multiplier: Ratio of the standard deviation to the clipping norm.
+        num_microbatches: Number of microbatches into which each minibatch
           is split.
       """
       super(DPOptimizerClass, self).__init__(*args, **kwargs)
@@ -177,6 +177,8 @@ def make_vectorized_keras_optimizer_class(cls):
       return super(DPOptimizerClass,
                    self).apply_gradients(grads_and_vars, global_step, name)
 
+  DPOptimizerClass.__doc__ = ('Vectorized DP subclass of {} using Gaussian '
+                              'averaging.').format(cls.__name__)
 
   return DPOptimizerClass

@@ -51,10 +51,10 @@ def make_vectorized_optimizer_class(cls):
       """Initialize the DPOptimizerClass.
 
       Args:
-        l2_norm_clip: Clipping norm (max L2 norm of per microbatch gradients)
-        noise_multiplier: Ratio of the standard deviation to the clipping norm
-        num_microbatches: How many microbatches into which the minibatch is
-          split. If None, will default to the size of the minibatch, and
+        l2_norm_clip: Clipping norm (max L2 norm of per microbatch gradients).
+        noise_multiplier: Ratio of the standard deviation to the clipping norm.
+        num_microbatches: Number of microbatches into which each minibatch is
+          split. If `None`, will default to the size of the minibatch, and
           per-example gradients will be computed.
       """
       super(DPOptimizerClass, self).__init__(*args, **kwargs)
@@ -136,6 +136,8 @@ def make_vectorized_optimizer_class(cls):
       return list(zip(final_grads, var_list))
 
+  DPOptimizerClass.__doc__ = ('Vectorized DP subclass of {} using Gaussian '
+                              'averaging.').format(cls.__name__)
 
   return DPOptimizerClass
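
After this change, each generated class carries a descriptive docstring, which is what documentation tooling (and plain `help()`) will display. An illustrative check, assuming the module exports a `VectorizedDPSGD` alias (the exact exported names are not shown in this diff):

    from tensorflow_privacy.privacy.optimizers import dp_optimizer_vectorized

    # Prints something like:
    #   Vectorized DP subclass of GradientDescentOptimizer using Gaussian averaging.
    print(dp_optimizer_vectorized.VectorizedDPSGD.__doc__)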