forked from 626_privacy/tensorflow_privacy
Use mean loss within each microbatch.
PiperOrigin-RevId: 233832864
This commit is contained in:
parent 72305bcb10
commit f37c9d1ea1
2 changed files with 8 additions and 8 deletions
@@ -71,7 +71,8 @@ def make_optimizer_class(cls):
       def process_microbatch(i, sample_state):
         """Process one microbatch (record) with privacy helper."""
         grads, _ = zip(*super(cls, self).compute_gradients(
-            tf.gather(microbatches_losses, [i]), var_list, gate_gradients,
+            tf.reduce_mean(tf.gather(microbatches_losses,
+                                     [i])), var_list, gate_gradients,
             aggregation_method, colocate_gradients_with_ops, grad_loss))
         grads_list = list(grads)
         sample_state = self._dp_average_query.accumulate_record(
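In plain terms, the change above averages the per-record losses inside the gathered microbatch before differentiating; previously the non-scalar slice was differentiated as-is, which effectively sums it. A minimal sketch of the difference, assuming the losses have already been reshaped to [num_microbatches, records_per_microbatch] (the tensor values and the index i below are illustrative, not the library's):

import tensorflow as tf  # TF 1.x-style API, as used by this repository

# Hypothetical per-record losses: 2 microbatches of 2 records each.
microbatches_losses = tf.constant([[1.0, 3.0],
                                   [2.0, 4.0]])
i = 0  # index of the microbatch being processed

# Before this commit: the gathered slice (shape [1, 2]) went straight into
# compute_gradients, so the gradient scaled with the records per microbatch.
loss_before = tf.gather(microbatches_losses, [i])

# After this commit: the mean over the microbatch is taken first, so the
# gradient magnitude no longer depends on the microbatch size.
loss_after = tf.reduce_mean(tf.gather(microbatches_losses, [i]))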
@@ -155,4 +156,3 @@ DPAdagradGaussianOptimizer = make_gaussian_optimizer_class(
 DPAdamGaussianOptimizer = make_gaussian_optimizer_class(tf.train.AdamOptimizer)
 DPGradientDescentGaussianOptimizer = make_gaussian_optimizer_class(
     tf.train.GradientDescentOptimizer)
-
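The module-level aliases touched above all come from the same factory: make_gaussian_optimizer_class takes a standard tf.train optimizer class and returns a DP-aware subclass. A schematic of that pattern, not the library's implementation (the names make_hypothetical_dp_class and DPHypotheticalOptimizer are invented for illustration, and the DP clipping/noising step is elided):

import tensorflow as tf  # TF 1.x-style optimizers, as in this repository

def make_hypothetical_dp_class(cls):
  """Returns a subclass of `cls` that keeps its interface intact."""

  class DPHypotheticalOptimizer(cls):

    def compute_gradients(self, loss, var_list=None, **kwargs):
      # A real DP optimizer would split `loss` into microbatches, clip and
      # noise the per-microbatch gradients here; this sketch passes through.
      return super(DPHypotheticalOptimizer, self).compute_gradients(
          loss, var_list=var_list, **kwargs)

  return DPHypotheticalOptimizer

# Mirrors the alias style shown in the hunk above:
DPHypotheticalGradientDescentOptimizer = make_hypothetical_dp_class(
    tf.train.GradientDescentOptimizer)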
@@ -37,16 +37,16 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
   # Parameters for testing: optimizer, num_microbatches, expected answer.
   @parameterized.named_parameters(
       ('DPGradientDescent 1', dp_optimizer.DPGradientDescentOptimizer, 1,
-       [-10.0, -10.0]),
+       [-2.5, -2.5]),
       ('DPGradientDescent 2', dp_optimizer.DPGradientDescentOptimizer, 2,
-       [-5.0, -5.0]),
+       [-2.5, -2.5]),
       ('DPGradientDescent 4', dp_optimizer.DPGradientDescentOptimizer, 4,
        [-2.5, -2.5]),
-      ('DPAdagrad 1', dp_optimizer.DPAdagradOptimizer, 1, [-10.0, -10.0]),
-      ('DPAdagrad 2', dp_optimizer.DPAdagradOptimizer, 2, [-5.0, -5.0]),
+      ('DPAdagrad 1', dp_optimizer.DPAdagradOptimizer, 1, [-2.5, -2.5]),
+      ('DPAdagrad 2', dp_optimizer.DPAdagradOptimizer, 2, [-2.5, -2.5]),
       ('DPAdagrad 4', dp_optimizer.DPAdagradOptimizer, 4, [-2.5, -2.5]),
-      ('DPAdam 1', dp_optimizer.DPAdamOptimizer, 1, [-10.0, -10.0]),
-      ('DPAdam 2', dp_optimizer.DPAdamOptimizer, 2, [-5.0, -5.0]),
+      ('DPAdam 1', dp_optimizer.DPAdamOptimizer, 1, [-2.5, -2.5]),
+      ('DPAdam 2', dp_optimizer.DPAdamOptimizer, 2, [-2.5, -2.5]),
       ('DPAdam 4', dp_optimizer.DPAdamOptimizer, 4, [-2.5, -2.5]))
   def testBaseline(self, cls, num_microbatches, expected_answer):
     with self.cached_session() as sess:
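The updated expectations are a direct consequence of the averaging: if the four records each contribute a per-record gradient of [-2.5, -2.5], then summing losses within a microbatch made the final step scale with microbatch size ([-10, -10] for one microbatch, [-5, -5] for two), while averaging yields [-2.5, -2.5] for any num_microbatches. A small NumPy sketch of that arithmetic (the per-record gradient value is assumed, chosen only to reproduce the numbers above; this is not the test itself):

import numpy as np

per_record_grads = np.full((4, 2), -2.5)  # assumed per-record gradients

for num_microbatches in (1, 2, 4):
  groups = per_record_grads.reshape(num_microbatches, -1, 2)
  # Old behavior: gradient of the summed microbatch loss equals the sum of
  # per-record gradients, then the DP query averages over microbatches.
  old_step = groups.sum(axis=1).mean(axis=0)
  # New behavior: gradient of the mean microbatch loss equals the mean of
  # per-record gradients, then averaged over microbatches.
  new_step = groups.mean(axis=1).mean(axis=0)
  print(num_microbatches, old_step, new_step)
# Old expectations: [-10, -10], [-5, -5], [-2.5, -2.5]; new: [-2.5, -2.5] throughout.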