Skips adding noise when noise_multiplier is 0 for fast clipping.
PiperOrigin-RevId: 522396275
commit c4628d5dbc
parent de9836883d

1 changed file with 11 additions and 8 deletions
@@ -231,16 +231,19 @@ def make_dp_model_class(cls):
                 self._num_microbatches,
             )
         )
-        grads = gradient_clipping_utils.add_aggregate_noise(
-            self,
-            clipped_grads,
-            eff_num_microbatches,
-            self._l2_norm_clip,
-            self._noise_multiplier,
-        )
+        if self._noise_multiplier > 0:
+          grads = gradient_clipping_utils.add_aggregate_noise(
+              self,
+              clipped_grads,
+              eff_num_microbatches,
+              self._l2_norm_clip,
+              self._noise_multiplier,
+          )
+        else:
+          grads = clipped_grads
         output_metrics[privatized_loss_name] = weighted_loss
       else:
-        logging.info('Computing gradients using microbatching.')
+        logging.info('Computing gradients using original clipping algorithm.')
         # Computes per-example clipped gradients directly. This is called
         # if at least one of the layers cannot use the "fast" gradient clipping
         # algorithm.
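The change only affects the fast-clipping path: when `noise_multiplier` is 0, the noise op is skipped entirely and the clipped gradients are used as-is. Below is a minimal, self-contained sketch of that clip-then-conditionally-noise pattern. It is an illustration only: `clip_gradient` and `clip_and_noise_gradient` are hypothetical names, and the NumPy-based noise is a stand-in for `gradient_clipping_utils.add_aggregate_noise`, not a reproduction of its real signature or microbatch handling.

```python
# Illustrative sketch only; not TensorFlow Privacy's API.
import numpy as np


def clip_gradient(grad: np.ndarray, l2_norm_clip: float) -> np.ndarray:
  """Scales `grad` so that its L2 norm is at most `l2_norm_clip`."""
  norm = np.linalg.norm(grad)
  return grad * min(1.0, l2_norm_clip / (norm + 1e-12))


def clip_and_noise_gradient(
    grad: np.ndarray,
    l2_norm_clip: float,
    noise_multiplier: float,
    rng: np.random.Generator,
) -> np.ndarray:
  """Clips a gradient and adds Gaussian noise only when noise is requested."""
  clipped = clip_gradient(grad, l2_norm_clip)
  if noise_multiplier > 0:
    # DP-SGD-style noise: stddev proportional to the clipping norm.
    noise = rng.normal(scale=l2_norm_clip * noise_multiplier, size=clipped.shape)
    return clipped + noise
  # noise_multiplier == 0: return the clipped gradients unchanged, mirroring
  # the new `else: grads = clipped_grads` branch in the diff above.
  return clipped


if __name__ == '__main__':
  rng = np.random.default_rng(0)
  grad = np.array([3.0, 4.0])  # L2 norm is 5.0.
  print(clip_and_noise_gradient(grad, l2_norm_clip=1.0, noise_multiplier=0.0, rng=rng))
```

With `noise_multiplier=0.0` the call reduces to pure clipping, which is why guarding the noise step saves work on clipping-only runs (for example, when measuring the utility cost of clipping alone).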