Fix DP optimizers to handle gradients that are None.
PiperOrigin-RevId: 244429987
parent 134b7d2093
commit 31219a5f3f
1 changed file with 4 additions and 1 deletion
@@ -111,7 +111,10 @@ def make_optimizer_class(cls):
             tf.reduce_mean(tf.gather(microbatches_losses,
                                      [i])), var_list, gate_gradients,
             aggregation_method, colocate_gradients_with_ops, grad_loss))
-        grads_list = list(grads)
+        grads_list = [
+            g if g is not None else tf.zeros_like(v)
+            for (g, v) in zip(list(grads), var_list)
+        ]
         sample_state = self._dp_average_query.accumulate_record(
             sample_params, sample_state, grads_list)
         return sample_state
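
For context, the change replaces any None gradient returned by compute_gradients with a zeros tensor of the matching variable shape before the gradients are handed to the DP average query. A minimal sketch of that pattern follows; the helper name replace_none_gradients is hypothetical, while grads and var_list correspond to the names in the diff above.

    import tensorflow as tf

    def replace_none_gradients(grads, var_list):
      """Substitute a zeros tensor for every None gradient.

      compute_gradients() yields None for variables that do not influence
      the current microbatch loss; the DP average query needs a concrete
      tensor per variable, so fall back to zeros with the variable's shape
      and dtype.
      """
      return [
          g if g is not None else tf.zeros_like(v)
          for (g, v) in zip(list(grads), var_list)
      ]

With this substitution, a variable untouched by a given microbatch simply contributes zero to the accumulated record instead of causing accumulate_record to fail on a None entry.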