From b37aef1751a74a23a78018808b026cb29d501b3a Mon Sep 17 00:00:00 2001 From: Michael Reneer Date: Fri, 28 Jan 2022 12:30:35 -0800 Subject: [PATCH] Fixed some lint errors in TensorFlow Privacy. * Fixed `g-backslash-continuation` * Fixed `g-generic-assert` * Fixed `g-generic-assert` * Fixed `raise-missing-from` * Fixed `unused-argument` PiperOrigin-RevId: 424931881 --- tensorflow_privacy/privacy/bolt_on/losses.py | 8 ++-- .../privacy/bolt_on/losses_test.py | 2 +- .../privacy/bolt_on/optimizers.py | 9 ++--- .../privacy/bolt_on/optimizers_test.py | 40 ++++++++----------- 4 files changed, 24 insertions(+), 35 deletions(-) diff --git a/tensorflow_privacy/privacy/bolt_on/losses.py b/tensorflow_privacy/privacy/bolt_on/losses.py index fe02e2d..c8a8cc7 100644 --- a/tensorflow_privacy/privacy/bolt_on/losses.py +++ b/tensorflow_privacy/privacy/bolt_on/losses.py @@ -169,17 +169,15 @@ class StrongConvexHuber(losses.Loss, StrongConvexMixin): """See super class.""" max_class_weight = self.max_class_weight(class_weight, self.dtype) delta = _ops.convert_to_tensor_v2(self.delta, dtype=self.dtype) - return self.C * max_class_weight / (delta * - tf.constant(2, dtype=self.dtype)) + \ - self.reg_lambda + return self.C * max_class_weight / ( + delta * tf.constant(2, dtype=self.dtype)) + self.reg_lambda def lipchitz_constant(self, class_weight): """See super class.""" # if class_weight is provided, # it should be a vector of the same size of number of classes max_class_weight = self.max_class_weight(class_weight, self.dtype) - lc = self.C * max_class_weight + \ - self.reg_lambda * self.radius() + lc = self.C * max_class_weight + self.reg_lambda * self.radius() return lc def kernel_regularizer(self): diff --git a/tensorflow_privacy/privacy/bolt_on/losses_test.py b/tensorflow_privacy/privacy/bolt_on/losses_test.py index 5a94b83..4b99f03 100644 --- a/tensorflow_privacy/privacy/bolt_on/losses_test.py +++ b/tensorflow_privacy/privacy/bolt_on/losses_test.py @@ -90,7 +90,7 @@ class 
StrongConvexMixinTests(keras_parameterized.TestCase): """ loss = StrongConvexMixin() ret = getattr(loss, fn, None)(*args) - self.assertEqual(ret, None) + self.assertIsNone(ret) class BinaryCrossesntropyTests(keras_parameterized.TestCase): diff --git a/tensorflow_privacy/privacy/bolt_on/optimizers.py b/tensorflow_privacy/privacy/bolt_on/optimizers.py index 87b621e..1d5156b 100644 --- a/tensorflow_privacy/privacy/bolt_on/optimizers.py +++ b/tensorflow_privacy/privacy/bolt_on/optimizers.py @@ -194,9 +194,8 @@ class BoltOn(optimizer_v2.OptimizerV2): distribution = self.noise_distribution.lower() if distribution == _accepted_distributions[0]: # laplace per_class_epsilon = self.epsilon / (output_dim) - l2_sensitivity = (2 * - loss.lipchitz_constant(self.class_weights)) / \ - (loss.gamma() * self.n_samples * self.batch_size) + l2_sensitivity = (2 * loss.lipchitz_constant(self.class_weights)) / ( + loss.gamma() * self.n_samples * self.batch_size) unit_vector = tf.random.normal( shape=(input_dim, output_dim), mean=0, @@ -239,11 +238,11 @@ class BoltOn(optimizer_v2.OptimizerV2): optim = object.__getattribute__(self, '_internal_optimizer') try: return object.__getattribute__(optim, name) - except AttributeError: + except AttributeError as e: raise AttributeError( "Neither '{0}' nor '{1}' object has attribute '{2}'" ''.format(self.__class__.__name__, - self._internal_optimizer.__class__.__name__, name)) + self._internal_optimizer.__class__.__name__, name)) from e def __setattr__(self, key, value): """Set attribute to self instance if its the internal optimizer. 
diff --git a/tensorflow_privacy/privacy/bolt_on/optimizers_test.py b/tensorflow_privacy/privacy/bolt_on/optimizers_test.py index 8591fc3..be2a4e2 100644 --- a/tensorflow_privacy/privacy/bolt_on/optimizers_test.py +++ b/tensorflow_privacy/privacy/bolt_on/optimizers_test.py @@ -213,9 +213,8 @@ class BoltonOptimizerTest(keras_parameterized.TestCase): loss = TestLoss(1, 1, 1) bolton = opt.BoltOn(TestOptimizer(), loss) model = TestModel(1) - model.layers[0].kernel = \ - model.layers[0].kernel_initializer((model.layer_input_shape[0], - model.n_outputs)) + model.layers[0].kernel = model.layers[0].kernel_initializer( + (model.layer_input_shape[0], model.n_outputs)) bolton._is_init = True # pylint: disable=protected-access bolton.layers = model.layers bolton.epsilon = 2 @@ -282,9 +281,8 @@ class BoltonOptimizerTest(keras_parameterized.TestCase): bolton = opt.BoltOn(TestOptimizer(), loss) model = TestModel(n_out, shape, init_value) model.compile(bolton, loss) - model.layers[0].kernel = \ - model.layers[0].kernel_initializer((model.layer_input_shape[0], - model.n_outputs)) + model.layers[0].kernel = model.layers[0].kernel_initializer( + (model.layer_input_shape[0], model.n_outputs)) bolton._is_init = True # pylint: disable=protected-access bolton.layers = model.layers bolton.epsilon = 2 @@ -321,9 +319,8 @@ class BoltonOptimizerTest(keras_parameterized.TestCase): bolton = opt.BoltOn(TestOptimizer(), loss) model = TestModel(1, (1,), 1) model.compile(bolton, loss) - model.layers[0].kernel = \ - model.layers[0].kernel_initializer((model.layer_input_shape[0], - model.n_outputs)) + model.layers[0].kernel = model.layers[0].kernel_initializer( + (model.layer_input_shape[0], model.n_outputs)) with bolton(noise, epsilon, model.layers, class_weights, 1, 1) as _: pass return _ops.convert_to_tensor_v2(bolton.epsilon, dtype=tf.float32) @@ -360,9 +357,8 @@ class BoltonOptimizerTest(keras_parameterized.TestCase): bolton = opt.BoltOn(TestOptimizer(), loss) model = TestModel(1, (1,), 1) 
model.compile(bolton, loss) - model.layers[0].kernel = \ - model.layers[0].kernel_initializer((model.layer_input_shape[0], - model.n_outputs)) + model.layers[0].kernel = model.layers[0].kernel_initializer( + (model.layer_input_shape[0], model.n_outputs)) with bolton(noise, epsilon, model.layers, 1, 1, 1) as _: pass @@ -392,9 +388,8 @@ class BoltonOptimizerTest(keras_parameterized.TestCase): bolton = opt.BoltOn(TestOptimizer(), loss) model = TestModel(1, (1,), 1) model.compile(bolton, loss) - model.layers[0].kernel = \ - model.layers[0].kernel_initializer((model.layer_input_shape[0], - model.n_outputs)) + model.layers[0].kernel = model.layers[0].kernel_initializer( + (model.layer_input_shape[0], model.n_outputs)) getattr(bolton, fn)(*args) with self.assertRaisesRegexp(Exception, err_msg): # pylint: disable=deprecated-method @@ -463,12 +458,10 @@ class BoltonOptimizerTest(keras_parameterized.TestCase): bolton = opt.BoltOn(optimizer, loss) model = TestModel(3) model.compile(optimizer, loss) - model.layers[0].kernel = \ - model.layers[0].kernel_initializer((model.layer_input_shape[0], - model.n_outputs)) - model.layers[0].kernel = \ - model.layers[0].kernel_initializer((model.layer_input_shape[0], - model.n_outputs)) + model.layers[0].kernel = model.layers[0].kernel_initializer( + (model.layer_input_shape[0], model.n_outputs)) + model.layers[0].kernel = model.layers[0].kernel_initializer( + (model.layer_input_shape[0], model.n_outputs)) bolton._is_init = True # pylint: disable=protected-access bolton.layers = model.layers bolton.epsilon = 2 @@ -505,9 +498,8 @@ class BoltonOptimizerTest(keras_parameterized.TestCase): bolton = opt.BoltOn(TestOptimizer(), loss) model = TestModel(1, (1,), 1) model.compile(bolton, loss) - model.layers[0].kernel = \ - model.layers[0].kernel_initializer((model.layer_input_shape[0], - model.n_outputs)) + model.layers[0].kernel = model.layers[0].kernel_initializer( + (model.layer_input_shape[0], model.n_outputs)) bolton._is_init = True # 
pylint: disable=protected-access bolton.noise_distribution = 'laplace' bolton.epsilon = 1