From ddf17c90914fa97bc71bf2b1d8668f48b09e4899 Mon Sep 17 00:00:00 2001
From: npapernot
Date: Mon, 29 Jul 2019 21:55:14 +0000
Subject: [PATCH] more lint

---
 privacy/bolton/losses.py          | 57 ++++++++++++++++---------------
 privacy/bolton/models.py          | 26 ++++++++------
 privacy/bolton/models_test.py     |  5 +--
 privacy/bolton/optimizers_test.py |  8 ++---
 4 files changed, 51 insertions(+), 45 deletions(-)

diff --git a/privacy/bolton/losses.py b/privacy/bolton/losses.py
index 6aa270f..9bd15e8 100644
--- a/privacy/bolton/losses.py
+++ b/privacy/bolton/losses.py
@@ -11,21 +11,23 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-"""Loss functions for bolton method"""
+"""Loss functions for bolton method."""
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
+
 import tensorflow as tf
-from tensorflow.python.keras import losses
-from tensorflow.python.keras.utils import losses_utils
 from tensorflow.python.framework import ops as _ops
+from tensorflow.python.keras import losses
 from tensorflow.python.keras.regularizers import L1L2
+from tensorflow.python.keras.utils import losses_utils
 from tensorflow.python.platform import tf_logging as logging
 
 
 class StrongConvexMixin:
-  """
+  """Strong Convex Mixin base class.
+
   Strong Convex Mixin base class for any loss function that will be used with
   Bolton model. Subclasses must be strongly convex and implement the
   associated constants. They must also conform to the requirements of tf losses
@@ -85,7 +87,7 @@ class StrongConvexMixin:
     return None
 
   def max_class_weight(self, class_weight, dtype):
-    """the maximum weighting in class weights (max value) as a scalar tensor
+    """The maximum weighting in class weights (max value) as a scalar tensor.
 
     Args:
       class_weight: class weights used
@@ -103,7 +105,7 @@ class StrongConvexHuber(losses.Loss, StrongConvexMixin):
 
   def __init__(self,
                reg_lambda,
-               C,
+               c_arg,
                radius_constant,
                delta,
                reduction=losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE,
@@ -117,31 +119,30 @@
       delta: delta value in huber loss. When to switch from quadratic to
         absolute deviation.
       reduction: reduction type to use. See super class
-      name: Name of the loss instance
       dtype: tf datatype to use for tensor conversions.
 
     Returns:
       Loss values per sample.
""" - if C <= 0: - raise ValueError('c: {0}, should be >= 0'.format(C)) + if c_arg <= 0: + raise ValueError("c: {0}, should be >= 0".format(c_arg)) if reg_lambda <= 0: raise ValueError("reg lambda: {0} must be positive".format(reg_lambda)) if radius_constant <= 0: - raise ValueError('radius_constant: {0}, should be >= 0'.format( + raise ValueError("radius_constant: {0}, should be >= 0".format( radius_constant )) if delta <= 0: - raise ValueError('delta: {0}, should be >= 0'.format( + raise ValueError("delta: {0}, should be >= 0".format( delta )) - self.C = C # pylint: disable=invalid-name + self.C = c_arg # pylint: disable=invalid-name self.delta = delta self.radius_constant = radius_constant self.dtype = dtype self.reg_lambda = tf.constant(reg_lambda, dtype=self.dtype) super(StrongConvexHuber, self).__init__( - name='strongconvexhuber', + name="strongconvexhuber", reduction=reduction, ) @@ -179,7 +180,7 @@ class StrongConvexHuber(losses.Loss, StrongConvexMixin): max_class_weight = self.max_class_weight(class_weight, self.dtype) delta = _ops.convert_to_tensor_v2(self.delta, dtype=self.dtype - ) + ) return self.C * max_class_weight / (delta * tf.constant(2, dtype=self.dtype)) + \ self.reg_lambda @@ -213,53 +214,53 @@ class StrongConvexBinaryCrossentropy( def __init__(self, reg_lambda, - C, + c_arg, radius_constant, from_logits=True, label_smoothing=0, reduction=losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE, dtype=tf.float32): - """ + """StrongConvexBinaryCrossentropy class. + Args: reg_lambda: Weight regularization constant - C: Penalty parameter C of the loss term + c_arg: Penalty parameter C of the loss term radius_constant: constant defining the length of the radius - reduction: reduction type to use. See super class from_logits: True if the input are unscaled logits. False if they are already scaled. label_smoothing: amount of smoothing to perform on labels relaxation of trust in labels, e.g. (1 -> 1-x, 0 -> 0+x). Note, the impact of this parameter's effect on privacy is not known and thus the default should be used. - name: Name of the loss instance + reduction: reduction type to use. See super class dtype: tf datatype to use for tensor conversions. """ if label_smoothing != 0: - logging.warning('The impact of label smoothing on privacy is unknown. ' - 'Use label smoothing at your own risk as it may not ' - 'guarantee privacy.') + logging.warning("The impact of label smoothing on privacy is unknown. " + "Use label smoothing at your own risk as it may not " + "guarantee privacy.") if reg_lambda <= 0: raise ValueError("reg lambda: {0} must be positive".format(reg_lambda)) - if C <= 0: - raise ValueError('c: {0}, should be >= 0'.format(C)) + if c_arg <= 0: + raise ValueError("c: {0}, should be >= 0".format(c_arg)) if radius_constant <= 0: - raise ValueError('radius_constant: {0}, should be >= 0'.format( + raise ValueError("radius_constant: {0}, should be >= 0".format( radius_constant )) self.dtype = dtype - self.C = C # pylint: disable=invalid-name + self.C = c_arg # pylint: disable=invalid-name self.reg_lambda = tf.constant(reg_lambda, dtype=self.dtype) super(StrongConvexBinaryCrossentropy, self).__init__( reduction=reduction, - name='strongconvexbinarycrossentropy', + name="strongconvexbinarycrossentropy", from_logits=from_logits, label_smoothing=label_smoothing, ) self.radius_constant = radius_constant def call(self, y_true, y_pred): - """Computes loss + """Computes loss. Args: y_true: Ground truth values. 
diff --git a/privacy/bolton/models.py b/privacy/bolton/models.py
index 8883d34..b1aa367 100644
--- a/privacy/bolton/models.py
+++ b/privacy/bolton/models.py
@@ -125,17 +125,21 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
     4. Use a strongly convex loss function (see compile)
 
     See super implementation for more details.
-    Args:
-      x:
-      y:
-      batch_size:
-      class_weight: the class weights to be used. Can be a scalar or 1D tensor
-                    whose dim == n_classes.
-      n_samples: the number of individual samples in x.
-      epsilon: privacy parameter, which trades off between utility an privacy.
-        See the bolton paper for more description.
-      noise_distribution: the distribution to pull noise from.
-      steps_per_epoch:
+      Args:
+        x:
+        y:
+        batch_size:
+        class_weight: the class weights to be used. Can be a scalar or 1D tensor
+           whose dim == n_classes.
+        n_samples: the number of individual samples in x.
+        epsilon: privacy parameter, which trades off between utility and privacy.
+          See the bolton paper for more description.
+        noise_distribution: the distribution to pull noise from.
+        steps_per_epoch:
+        kwargs: kwargs to keras Model.fit. See super.
+
+      Returns:
+        output
     """
     if class_weight is None:
       class_weight_ = self.calculate_class_weights(class_weight)
diff --git a/privacy/bolton/models_test.py b/privacy/bolton/models_test.py
index 4f1b3ab..ead15eb 100644
--- a/privacy/bolton/models_test.py
+++ b/privacy/bolton/models_test.py
@@ -32,10 +32,10 @@ from privacy.bolton.optimizers import Bolton
 class TestLoss(losses.Loss, StrongConvexMixin):
   """Test loss function for testing Bolton model."""
 
-  def __init__(self, reg_lambda, C_arg, radius_constant, name='test'):
+  def __init__(self, reg_lambda, c_arg, radius_constant, name='test'):
     super(TestLoss, self).__init__(name=name)
     self.reg_lambda = reg_lambda
-    self.C = C_arg  # pylint: disable=invalid-name
+    self.C = c_arg  # pylint: disable=invalid-name
     self.radius_constant = radius_constant
 
   def radius(self):
@@ -506,6 +506,7 @@ class FitTests(keras_parameterized.TestCase):
        'num_classes': 2,
        'err_msg': 'Detected array length:'},
   ])
+
   def test_class_errors(self,
                         class_weights,
                         class_counts,
diff --git a/privacy/bolton/optimizers_test.py b/privacy/bolton/optimizers_test.py
index 32a9f63..d5adbe1 100644
--- a/privacy/bolton/optimizers_test.py
+++ b/privacy/bolton/optimizers_test.py
@@ -68,10 +68,10 @@ class TestModel(Model):  # pylint: disable=abstract-method
 class TestLoss(losses.Loss, StrongConvexMixin):
   """Test loss function for testing Bolton model."""
 
-  def __init__(self, reg_lambda, C_arg, radius_constant, name='test'):
+  def __init__(self, reg_lambda, c_arg, radius_constant, name='test'):
     super(TestLoss, self).__init__(name=name)
     self.reg_lambda = reg_lambda
-    self.C = C_arg  # pylint: disable=invalid-name
+    self.C = c_arg  # pylint: disable=invalid-name
     self.radius_constant = radius_constant
 
   def radius(self):
@@ -80,7 +80,7 @@ class TestLoss(losses.Loss, StrongConvexMixin):
     W is a convex set that forms the hypothesis space.
 
     Returns:
-      radius
+      a tensor
     """
     return _ops.convert_to_tensor_v2(self.radius_constant, dtype=tf.float32)
 
@@ -107,7 +107,7 @@ class TestLoss(losses.Loss, StrongConvexMixin):
       class_weight: class weights used
 
     Returns:
-      L
+      constant L
     """
     return _ops.convert_to_tensor_v2(1, dtype=tf.float32)
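Reviewer note (not part of the patch): both test files define a TestLoss that
fills in the StrongConvexMixin contract. For readers outside the repository, a
minimal sketch of that contract follows. The method set (radius, gamma, beta,
lipchitz_constant, kernel_regularizer) is inferred from the tests and losses
touched here; the constants are placeholders, and 0.5 * squared error is used
only because it is 1-strongly convex. This is an illustration, not the
library's recommended loss.

    # Sketch of a StrongConvexMixin implementation in the style of the
    # TestLoss classes in this patch.
    import tensorflow as tf
    from tensorflow.python.framework import ops as _ops
    from tensorflow.python.keras import losses
    from tensorflow.python.keras.regularizers import L1L2

    from privacy.bolton.losses import StrongConvexMixin


    class SketchLoss(losses.Loss, StrongConvexMixin):
      """Minimal strongly convex loss skeleton (illustrative only)."""

      def __init__(self, reg_lambda, c_arg, radius_constant, name='sketch'):
        super(SketchLoss, self).__init__(name=name)
        self.reg_lambda = reg_lambda
        self.C = c_arg  # pylint: disable=invalid-name
        self.radius_constant = radius_constant

      def radius(self):
        # Radius R of the hypothesis space W, as a tensor.
        return _ops.convert_to_tensor_v2(self.radius_constant, dtype=tf.float32)

      def gamma(self):
        # Strong convexity constant.
        return _ops.convert_to_tensor_v2(self.reg_lambda, dtype=tf.float32)

      def beta(self, class_weight):
        # Smoothness constant; a real loss derives this from its curvature.
        return _ops.convert_to_tensor_v2(1, dtype=tf.float32)

      def lipchitz_constant(self, class_weight):
        # Lipschitz constant L (spelling follows the repository's method name).
        return _ops.convert_to_tensor_v2(1, dtype=tf.float32)

      def kernel_regularizer(self):
        # L2 penalty tied to reg_lambda, as in the concrete losses above.
        return L1L2(l2=self.reg_lambda)

      def call(self, y_true, y_pred):
        # 0.5 * squared error per sample; 1-strongly convex in y_pred.
        return 0.5 * tf.reduce_sum(
            tf.math.squared_difference(y_true, y_pred), axis=-1)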