Expose differentially private RMSPropOptimizer.
PiperOrigin-RevId: 311072544
commit da9fb28b37
parent 10335f6177

2 changed files with 20 additions and 6 deletions
@@ -228,12 +228,15 @@ def make_gaussian_optimizer_class(cls):
 AdagradOptimizer = tf.train.AdagradOptimizer
 AdamOptimizer = tf.train.AdamOptimizer
 GradientDescentOptimizer = tf.train.GradientDescentOptimizer
+RMSPropOptimizer = tf.train.RMSPropOptimizer
 
 DPAdagradOptimizer = make_optimizer_class(AdagradOptimizer)
 DPAdamOptimizer = make_optimizer_class(AdamOptimizer)
 DPGradientDescentOptimizer = make_optimizer_class(GradientDescentOptimizer)
+DPRMSPropOptimizer = make_optimizer_class(RMSPropOptimizer)
 
 DPAdagradGaussianOptimizer = make_gaussian_optimizer_class(AdagradOptimizer)
 DPAdamGaussianOptimizer = make_gaussian_optimizer_class(AdamOptimizer)
 DPGradientDescentGaussianOptimizer = make_gaussian_optimizer_class(
     GradientDescentOptimizer)
+DPRMSPropGaussianOptimizer = make_gaussian_optimizer_class(RMSPropOptimizer)
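For context, a minimal usage sketch of the two classes this change exposes. The import paths and constructor arguments follow the patterns visible in this diff and the existing DP optimizers; they are illustrative assumptions, not part of the commit.

from tensorflow_privacy.privacy.dp_query import gaussian_query
from tensorflow_privacy.privacy.optimizers import dp_optimizer

# Query-based wrapper: pass an explicit DP sum query, as the test hunks below
# do (clip norm and noise stddev here are illustrative values).
dp_sum_query = gaussian_query.GaussianSumQuery(1.0, 0.5)
opt = dp_optimizer.DPRMSPropOptimizer(
    dp_sum_query, num_microbatches=1, learning_rate=0.001)

# Gaussian convenience wrapper: the sum query is built internally from the
# clipping norm and noise multiplier (keyword names assumed from the other
# make_gaussian_optimizer_class optimizers).
gaussian_opt = dp_optimizer.DPRMSPropGaussianOptimizer(
    l2_norm_clip=1.0,
    noise_multiplier=0.5,
    num_microbatches=1,
    learning_rate=0.001)

# Both behave like tf.train.RMSPropOptimizer, except that the loss handed to
# minimize()/compute_gradients() should be a per-example (vector) loss so it
# can be split into microbatches before clipping and noising.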
@@ -48,7 +48,13 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
       ('DPAdagrad 4', dp_optimizer.DPAdagradOptimizer, 4, [-2.5, -2.5]),
       ('DPAdam 1', dp_optimizer.DPAdamOptimizer, 1, [-2.5, -2.5]),
       ('DPAdam 2', dp_optimizer.DPAdamOptimizer, 2, [-2.5, -2.5]),
-      ('DPAdam 4', dp_optimizer.DPAdamOptimizer, 4, [-2.5, -2.5]))
+      ('DPAdam 4', dp_optimizer.DPAdamOptimizer, 4, [-2.5, -2.5]),
+      ('DPRMSPropOptimizer 1', dp_optimizer.DPRMSPropOptimizer, 1,
+       [-2.5, -2.5]),
+      ('DPRMSPropOptimizer 2', dp_optimizer.DPRMSPropOptimizer, 2,
+       [-2.5, -2.5]),
+      ('DPRMSPropOptimizer 4', dp_optimizer.DPRMSPropOptimizer, 4, [-2.5, -2.5])
+  )
   def testBaseline(self, cls, num_microbatches, expected_answer):
     with self.cached_session() as sess:
       var0 = tf.Variable([1.0, 2.0])
@@ -76,7 +82,8 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
   @parameterized.named_parameters(
       ('DPGradientDescent', dp_optimizer.DPGradientDescentOptimizer),
       ('DPAdagrad', dp_optimizer.DPAdagradOptimizer),
-      ('DPAdam', dp_optimizer.DPAdamOptimizer))
+      ('DPAdam', dp_optimizer.DPAdamOptimizer),
+      ('DPRMSPropOptimizer', dp_optimizer.DPRMSPropOptimizer))
   def testClippingNorm(self, cls):
     with self.cached_session() as sess:
       var0 = tf.Variable([0.0, 0.0])
@@ -99,7 +106,8 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
   @parameterized.named_parameters(
       ('DPGradientDescent', dp_optimizer.DPGradientDescentOptimizer),
       ('DPAdagrad', dp_optimizer.DPAdagradOptimizer),
-      ('DPAdam', dp_optimizer.DPAdamOptimizer))
+      ('DPAdam', dp_optimizer.DPAdamOptimizer),
+      ('DPRMSPropOptimizer', dp_optimizer.DPRMSPropOptimizer))
   def testNoiseMultiplier(self, cls):
     with self.cached_session() as sess:
       var0 = tf.Variable([0.0])
@@ -182,7 +190,8 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
   @parameterized.named_parameters(
       ('DPGradientDescent', dp_optimizer.DPGradientDescentOptimizer),
       ('DPAdagrad', dp_optimizer.DPAdagradOptimizer),
-      ('DPAdam', dp_optimizer.DPAdamOptimizer))
+      ('DPAdam', dp_optimizer.DPAdamOptimizer),
+      ('DPRMSPropOptimizer', dp_optimizer.DPRMSPropOptimizer))
   def testUnrollMicrobatches(self, cls):
     with self.cached_session() as sess:
       var0 = tf.Variable([1.0, 2.0])
@@ -213,7 +222,8 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
   @parameterized.named_parameters(
       ('DPGradientDescent', dp_optimizer.DPGradientDescentGaussianOptimizer),
       ('DPAdagrad', dp_optimizer.DPAdagradGaussianOptimizer),
-      ('DPAdam', dp_optimizer.DPAdamGaussianOptimizer))
+      ('DPAdam', dp_optimizer.DPAdamGaussianOptimizer),
+      ('DPRMSPropOptimizer', dp_optimizer.DPRMSPropGaussianOptimizer))
   def testDPGaussianOptimizerClass(self, cls):
     with self.cached_session() as sess:
       var0 = tf.Variable([0.0])
@@ -241,7 +251,8 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
   @parameterized.named_parameters(
       ('DPGradientDescent', dp_optimizer.DPGradientDescentOptimizer),
       ('DPAdagrad', dp_optimizer.DPAdagradOptimizer),
-      ('DPAdam', dp_optimizer.DPAdamOptimizer))
+      ('DPAdam', dp_optimizer.DPAdamOptimizer),
+      ('DPRMSPropOptimizer', dp_optimizer.DPRMSPropOptimizer))
   def testAssertOnNoCallOfComputeGradients(self, cls):
     dp_sum_query = gaussian_query.GaussianSumQuery(1.0e9, 0.0)
     opt = cls(dp_sum_query, num_microbatches=1, learning_rate=1.0)