more lint

commit ddf17c9091 (parent f06443d50e)
Author: npapernot
Date: 2019-07-29 21:55:14 +00:00
4 changed files with 51 additions and 45 deletions

@@ -11,21 +11,23 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-"""Loss functions for bolton method"""
+"""Loss functions for bolton method."""
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

 import tensorflow as tf
-from tensorflow.python.keras import losses
-from tensorflow.python.keras.utils import losses_utils
 from tensorflow.python.framework import ops as _ops
+from tensorflow.python.keras import losses
 from tensorflow.python.keras.regularizers import L1L2
+from tensorflow.python.keras.utils import losses_utils
 from tensorflow.python.platform import tf_logging as logging


 class StrongConvexMixin:
-  """
+  """Strong Convex Mixin base class.
+
   Strong Convex Mixin base class for any loss function that will be used with
   Bolton model. Subclasses must be strongly convex and implement the
   associated constants. They must also conform to the requirements of tf losses
@@ -85,7 +87,7 @@ class StrongConvexMixin:
     return None

   def max_class_weight(self, class_weight, dtype):
-    """the maximum weighting in class weights (max value) as a scalar tensor
+    """The maximum weighting in class weights (max value) as a scalar tensor.

     Args:
       class_weight: class weights used
@@ -103,7 +105,7 @@ class StrongConvexHuber(losses.Loss, StrongConvexMixin):

   def __init__(self,
                reg_lambda,
-               C,
+               c_arg,
                radius_constant,
                delta,
                reduction=losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE,
@@ -117,31 +119,30 @@ class StrongConvexHuber(losses.Loss, StrongConvexMixin):
       delta: delta value in huber loss. When to switch from quadratic to
         absolute deviation.
       reduction: reduction type to use. See super class
-      name: Name of the loss instance
       dtype: tf datatype to use for tensor conversions.

     Returns:
       Loss values per sample.
     """
-    if C <= 0:
-      raise ValueError('c: {0}, should be >= 0'.format(C))
+    if c_arg <= 0:
+      raise ValueError("c: {0}, should be >= 0".format(c_arg))
     if reg_lambda <= 0:
       raise ValueError("reg lambda: {0} must be positive".format(reg_lambda))
     if radius_constant <= 0:
-      raise ValueError('radius_constant: {0}, should be >= 0'.format(
+      raise ValueError("radius_constant: {0}, should be >= 0".format(
           radius_constant
       ))
     if delta <= 0:
-      raise ValueError('delta: {0}, should be >= 0'.format(
+      raise ValueError("delta: {0}, should be >= 0".format(
           delta
       ))
-    self.C = C  # pylint: disable=invalid-name
+    self.C = c_arg  # pylint: disable=invalid-name
     self.delta = delta
     self.radius_constant = radius_constant
     self.dtype = dtype
     self.reg_lambda = tf.constant(reg_lambda, dtype=self.dtype)
     super(StrongConvexHuber, self).__init__(
-        name='strongconvexhuber',
+        name="strongconvexhuber",
         reduction=reduction,
     )
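
A minimal usage sketch (not part of this commit; the import path is assumed from the test imports further down): after the rename, callers pass c_arg in place of C, and the checks above still reject non-positive values.

    from privacy.bolton.losses import StrongConvexHuber

    # Valid construction after the rename; the value is still stored as self.C.
    huber = StrongConvexHuber(reg_lambda=1.0, c_arg=1.0,
                              radius_constant=1.0, delta=1.0)

    # A non-positive c_arg trips the renamed check.
    try:
      StrongConvexHuber(reg_lambda=1.0, c_arg=-1.0,
                        radius_constant=1.0, delta=1.0)
    except ValueError as err:
      print(err)  # c: -1.0, should be >= 0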
@@ -179,7 +180,7 @@ class StrongConvexHuber(losses.Loss, StrongConvexMixin):
     max_class_weight = self.max_class_weight(class_weight, self.dtype)
     delta = _ops.convert_to_tensor_v2(self.delta,
                                       dtype=self.dtype
                                       )
     return self.C * max_class_weight / (delta *
                                         tf.constant(2, dtype=self.dtype)) + \
            self.reg_lambda
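
For reference, the value returned by this smoothness (beta) computation is C * max_class_weight / (2 * delta) + reg_lambda; with C = 1, a maximum class weight of 1, delta = 1, and reg_lambda = 1 it evaluates to 0.5 + 1 = 1.5.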
@@ -213,53 +214,53 @@ class StrongConvexBinaryCrossentropy(
   def __init__(self,
                reg_lambda,
-               C,
+               c_arg,
                radius_constant,
                from_logits=True,
                label_smoothing=0,
                reduction=losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE,
                dtype=tf.float32):
-    """
+    """StrongConvexBinaryCrossentropy class.
+
     Args:
       reg_lambda: Weight regularization constant
-      C: Penalty parameter C of the loss term
+      c_arg: Penalty parameter C of the loss term
       radius_constant: constant defining the length of the radius
-      reduction: reduction type to use. See super class
       from_logits: True if the input are unscaled logits. False if they are
         already scaled.
       label_smoothing: amount of smoothing to perform on labels
         relaxation of trust in labels, e.g. (1 -> 1-x, 0 -> 0+x). Note, the
         impact of this parameter's effect on privacy is not known and thus the
         default should be used.
-      name: Name of the loss instance
+      reduction: reduction type to use. See super class
       dtype: tf datatype to use for tensor conversions.
     """
     if label_smoothing != 0:
-      logging.warning('The impact of label smoothing on privacy is unknown. '
-                      'Use label smoothing at your own risk as it may not '
-                      'guarantee privacy.')
+      logging.warning("The impact of label smoothing on privacy is unknown. "
+                      "Use label smoothing at your own risk as it may not "
+                      "guarantee privacy.")

     if reg_lambda <= 0:
       raise ValueError("reg lambda: {0} must be positive".format(reg_lambda))
-    if C <= 0:
-      raise ValueError('c: {0}, should be >= 0'.format(C))
+    if c_arg <= 0:
+      raise ValueError("c: {0}, should be >= 0".format(c_arg))
     if radius_constant <= 0:
-      raise ValueError('radius_constant: {0}, should be >= 0'.format(
+      raise ValueError("radius_constant: {0}, should be >= 0".format(
           radius_constant
       ))

     self.dtype = dtype
-    self.C = C  # pylint: disable=invalid-name
+    self.C = c_arg  # pylint: disable=invalid-name
     self.reg_lambda = tf.constant(reg_lambda, dtype=self.dtype)
     super(StrongConvexBinaryCrossentropy, self).__init__(
         reduction=reduction,
-        name='strongconvexbinarycrossentropy',
+        name="strongconvexbinarycrossentropy",
         from_logits=from_logits,
         label_smoothing=label_smoothing,
     )
     self.radius_constant = radius_constant

   def call(self, y_true, y_pred):
-    """Computes loss
+    """Computes loss.

     Args:
       y_true: Ground truth values.
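
A companion sketch for the binary-crossentropy loss under the same assumed import path; per the docstring above, label_smoothing is left at its default of 0 because its effect on privacy is unknown.

    from privacy.bolton.losses import StrongConvexBinaryCrossentropy

    bce = StrongConvexBinaryCrossentropy(
        reg_lambda=1.0,
        c_arg=1.0,            # renamed from C in this commit
        radius_constant=1.0,
        from_logits=True,     # inputs are unscaled logits
        label_smoothing=0,    # default; smoothing's privacy impact is unknown
    )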


@@ -125,17 +125,21 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
     4. Use a strongly convex loss function (see compile)
     See super implementation for more details.

     Args:
       x:
       y:
       batch_size:
       class_weight: the class weights to be used. Can be a scalar or 1D tensor
         whose dim == n_classes.
       n_samples: the number of individual samples in x.
       epsilon: privacy parameter, which trades off between utility and privacy.
         See the bolton paper for more description.
       noise_distribution: the distribution to pull noise from.
       steps_per_epoch:
+      kwargs: kwargs to keras Model.fit. See super.
+
+    Returns:
+      output
     """
     if class_weight is None:
       class_weight_ = self.calculate_class_weights(class_weight)
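
A hedged end-to-end sketch of the fit() call documented above. The BoltonModel constructor argument, the compile step, and the 'laplace' distribution name are assumptions based on the surrounding module, not shown in this diff.

    import tensorflow as tf

    from privacy.bolton.losses import StrongConvexBinaryCrossentropy
    from privacy.bolton.models import BoltonModel  # assumed import path

    x = tf.random.uniform((100, 4))                             # toy features
    y = tf.cast(tf.random.uniform((100, 1)) > 0.5, tf.float32)  # toy labels

    model = BoltonModel(1)  # assumed: number of outputs
    model.compile(optimizer='sgd',
                  loss=StrongConvexBinaryCrossentropy(
                      reg_lambda=1.0, c_arg=1.0, radius_constant=1.0))
    model.fit(x, y,
              class_weight=None,             # resolved via calculate_class_weights
              n_samples=100,                 # individual samples in x
              epsilon=2,                     # privacy/utility trade-off
              noise_distribution='laplace')  # distribution noise is pulled from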


@@ -32,10 +32,10 @@ from privacy.bolton.optimizers import Bolton
 class TestLoss(losses.Loss, StrongConvexMixin):
   """Test loss function for testing Bolton model."""

-  def __init__(self, reg_lambda, C_arg, radius_constant, name='test'):
+  def __init__(self, reg_lambda, c_arg, radius_constant, name='test'):
     super(TestLoss, self).__init__(name=name)
     self.reg_lambda = reg_lambda
-    self.C = C_arg  # pylint: disable=invalid-name
+    self.C = c_arg  # pylint: disable=invalid-name
     self.radius_constant = radius_constant

   def radius(self):
@@ -506,6 +506,7 @@ class FitTests(keras_parameterized.TestCase):
        'num_classes': 2,
        'err_msg': 'Detected array length:'},
   ])
   def test_class_errors(self,
                         class_weights,
                         class_counts,


@@ -68,10 +68,10 @@ class TestModel(Model):  # pylint: disable=abstract-method
 class TestLoss(losses.Loss, StrongConvexMixin):
   """Test loss function for testing Bolton model."""

-  def __init__(self, reg_lambda, C_arg, radius_constant, name='test'):
+  def __init__(self, reg_lambda, c_arg, radius_constant, name='test'):
     super(TestLoss, self).__init__(name=name)
     self.reg_lambda = reg_lambda
-    self.C = C_arg  # pylint: disable=invalid-name
+    self.C = c_arg  # pylint: disable=invalid-name
     self.radius_constant = radius_constant

   def radius(self):
@@ -80,7 +80,7 @@ class TestLoss(losses.Loss, StrongConvexMixin):
     W is a convex set that forms the hypothesis space.

     Returns:
-      radius
+      a tensor
     """
     return _ops.convert_to_tensor_v2(self.radius_constant, dtype=tf.float32)
@@ -107,7 +107,7 @@ class TestLoss(losses.Loss, StrongConvexMixin):
       class_weight: class weights used

     Returns:
-      L constant
+      L
     """
     return _ops.convert_to_tensor_v2(1, dtype=tf.float32)
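
Together, these two hunks pin down part of the StrongConvexMixin contract the tests exercise: the convexity constants are returned as scalar tensors. A minimal illustrative subclass (names hypothetical; the real mixin also expects beta, gamma, and a call implementation, elided here):

    import tensorflow as tf
    from tensorflow.python.framework import ops as _ops
    from tensorflow.python.keras import losses

    from privacy.bolton.losses import StrongConvexMixin

    class ToyLoss(losses.Loss, StrongConvexMixin):
      """Hypothetical loss mirroring TestLoss's constant-valued contract."""

      def __init__(self, reg_lambda, c_arg, radius_constant, name='toy'):
        super(ToyLoss, self).__init__(name=name)
        self.reg_lambda = reg_lambda
        self.C = c_arg  # pylint: disable=invalid-name
        self.radius_constant = radius_constant

      def radius(self):
        # Radius, R, of the hypothesis space W, as a tensor.
        return _ops.convert_to_tensor_v2(self.radius_constant, dtype=tf.float32)

      def lipchitz_constant(self, class_weight):  # spelling follows the codebase
        # Lipschitz constant L, as a tensor.
        return _ops.convert_to_tensor_v2(1, dtype=tf.float32)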