Skips adding noise when noise_multiplier is 0 for fast clipping.
PiperOrigin-RevId: 522396275
parent de9836883d
commit c4628d5dbc
1 changed file with 11 additions and 8 deletions
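For context, a hedged usage sketch of the behavior this commit enables: constructing a DP Keras model with noise_multiplier=0 now exercises the fast-clipping path with clipping only and no noise-addition op. The DPSequential constructor keywords and the layer_registry helper shown here are assumptions about the surrounding API, not something introduced by this commit.

import tensorflow as tf

from tensorflow_privacy.privacy.fast_gradient_clipping import layer_registry
from tensorflow_privacy.privacy.keras_models import dp_keras_model

# Assumed constructor keywords; l2_norm_clip and noise_multiplier mirror the
# self._l2_norm_clip / self._noise_multiplier attributes used in the diff.
model = dp_keras_model.DPSequential(
    l2_norm_clip=1.0,
    noise_multiplier=0.0,  # with this commit: clip gradients, skip add_aggregate_noise
    layer_registry=layer_registry.make_default_layer_registry(),
    layers=[
        tf.keras.layers.InputLayer(input_shape=(16,)),
        tf.keras.layers.Dense(1),
    ],
)
# model.compile(...) / model.fit(...) then proceed as usual; only the noise
# step of the private training loop changes.

With noise_multiplier=0.0 the training step reduces to clipping-only SGD, which is useful for isolating the effect of clipping from the effect of noise.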
@@ -231,16 +231,19 @@ def make_dp_model_class(cls):
                 self._num_microbatches,
             )
         )
-        grads = gradient_clipping_utils.add_aggregate_noise(
-            self,
-            clipped_grads,
-            eff_num_microbatches,
-            self._l2_norm_clip,
-            self._noise_multiplier,
-        )
+        if self._noise_multiplier > 0:
+          grads = gradient_clipping_utils.add_aggregate_noise(
+              self,
+              clipped_grads,
+              eff_num_microbatches,
+              self._l2_norm_clip,
+              self._noise_multiplier,
+          )
+        else:
+          grads = clipped_grads
         output_metrics[privatized_loss_name] = weighted_loss
       else:
-        logging.info('Computing gradients using microbatching.')
+        logging.info('Computing gradients using original clipping algorithm.')
         # Computes per-example clipped gradients directly. This is called
         # if at least one of the layers cannot use the "fast" gradient clipping
         # algorithm.
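For reference, a minimal self-contained sketch of the guard the new branch implements, assuming the usual DP-SGD recipe in which Gaussian noise with standard deviation proportional to l2_norm_clip * noise_multiplier is added to the aggregated clipped gradients. The helper below is illustrative only and is not the library's add_aggregate_noise.

import tensorflow as tf


def maybe_add_aggregate_noise(clipped_grads, l2_norm_clip, noise_multiplier,
                              num_microbatches):
  """Adds Gaussian noise to aggregated clipped gradients unless the multiplier is 0."""
  if noise_multiplier <= 0:
    # Mirrors the new `else` branch: reuse the clipped gradients directly
    # instead of building zero-stddev noise ops.
    return clipped_grads
  # Assumes mean-aggregated gradients; the real implementation scales the
  # noise according to the loss reduction it detects.
  stddev = l2_norm_clip * noise_multiplier / num_microbatches
  return [g + tf.random.normal(tf.shape(g), stddev=stddev) for g in clipped_grads]

The guard means a noise_multiplier of 0 simply reuses the clipped gradients, so the training graph contains no pointless noise-sampling ops.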