format fixes

npapernot 2019-07-29 21:20:40 +00:00
parent 0317ce8077
commit 19ce36777d
6 changed files with 76 additions and 61 deletions


@@ -212,12 +212,12 @@ class StrongConvexBinaryCrossentropy(
   """Strongly Convex BinaryCrossentropy loss using l2 weight regularization."""

   def __init__(self,
-               reg_lambda: float,
-               C: float,
-               radius_constant: float,
-               from_logits: bool = True,
-               label_smoothing: float = 0,
-               reduction: str = losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE,
+               reg_lambda,
+               C,
+               radius_constant,
+               from_logits=True,
+               label_smoothing=0,
+               reduction=losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE,
                dtype=tf.float32):
     """
     Args:
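The hunk above drops the Python type annotations from the constructor signature. For orientation, a minimal sketch of instantiating this loss after the change; the module path is an assumption inferred from the privacy.bolton imports later in this commit:

# Sketch only: the path privacy.bolton.losses is assumed, mirroring the
# `from privacy.bolton.optimizers import Bolton` import further down.
from privacy.bolton.losses import StrongConvexBinaryCrossentropy

loss = StrongConvexBinaryCrossentropy(
    reg_lambda=1.0,       # l2 regularization coefficient
    C=1.0,                # penalty constant from the Bolton analysis
    radius_constant=1.0,  # scales the radius R of the hypothesis space
    from_logits=True)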


@@ -367,12 +367,13 @@ class HuberTests(keras_parameterized.TestCase):
     },
   ])
   def test_calculation(self, logits, y_true, delta, result):
-    """Test the call method to ensure it returns the correct value
+    """Test the call method to ensure it returns the correct value.

     Args:
       logits: unscaled output of model
       y_true: label
+      delta:
       result: correct loss calculation value
     """
     logits = tf.Variable(logits, False, dtype=tf.float32)
     y_true = tf.Variable(y_true, False, dtype=tf.float32)


@@ -86,8 +86,16 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
               **kwargs):  # pylint: disable=arguments-differ
     """See super class. Default optimizer used in Bolton method is SGD.

-    Missing args.
+    Args:
+      optimizer:
+      loss:
+      metrics:
+      loss_weights:
+      sample_weight_mode:
+      weighted_metrics:
+      target_tensors:
+      distribute:
+      kernel_initializer:
     """
     if not isinstance(loss, StrongConvexMixin):
       raise ValueError('loss function must be a Strongly Convex and therefore '
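The isinstance check above is why compile only accepts losses built on StrongConvexMixin. A hedged sketch of a compile call that passes it, reusing the loss sketched earlier; BoltonModel(1, 1) mirrors the construction in the tests below, and 'sgd' follows the docstring's note that SGD is the default:

# Sketch: compile a BoltonModel with a strongly convex loss so that the
# isinstance(loss, StrongConvexMixin) check above passes.
model = BoltonModel(1, 1)
model.compile(optimizer='sgd',
              loss=StrongConvexBinaryCrossentropy(
                  reg_lambda=1.0, C=1.0, radius_constant=1.0))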
@@ -126,15 +134,15 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
           **kwargs):  # pylint: disable=arguments-differ
     """Reroutes to super fit with Bolton delta-epsilon privacy requirements.

     Note, inputs must be normalized s.t. ||x|| < 1.
     Requirements are as follows:
       1. Adds noise to weights after training (output perturbation).
       2. Projects weights to R after each batch
       3. Limits learning rate
       4. Use a strongly convex loss function (see compile)
     See super implementation for more details.

     Args:
       n_samples: the number of individual samples in x.
       epsilon: privacy parameter, which trades off between utility an privacy.
         See the bolton paper for more description.
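Requirement 2 above, projecting the weights back into the radius-R ball after each batch, is an ordinary l2 projection. A small illustration of that step, written from the description rather than the library's actual code:

import tensorflow as tf

def project_to_ball(weights, radius):
  # l2-project a weight tensor into the ball ||w|| <= radius (requirement 2);
  # illustration only, not the library's implementation.
  norm = tf.norm(weights, ord=2)
  scale = tf.minimum(1.0, radius / tf.maximum(norm, 1e-12))
  return weights * scale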
@@ -189,20 +197,20 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
                     n_samples=None,
                     steps_per_epoch=None,
                     **kwargs):  # pylint: disable=arguments-differ
-    """Fit with a generator..
+    """Fit with a generator.

     This method is the same as fit except for when the passed dataset
     is a generator. See super method and fit for more details.

     Args:
-      n_samples: number of individual samples in x
+      generator:
+      class_weight: the class weights to be used. Can be a scalar or 1D tensor
+        whose dim == n_classes.
       noise_distribution: the distribution to get noise from.
       epsilon: privacy parameter, which trades off utility and privacy. See
         Bolton paper for more description.
-      class_weight: the class weights to be used. Can be a scalar or 1D tensor
-        whose dim == n_classes.
-    See the super method for descriptions on the rest of the arguments.
+      n_samples: number of individual samples in x
+      steps_per_epoch:
     """
     if class_weight is None:
       class_weight = self.calculate_class_weights(class_weight)
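The reordered Args now follow the fit_generator signature. A hedged usage sketch, reusing the model from the compile sketch above; the generator is illustrative, and noise_distribution='laplace' with epsilon=2 are assumed defaults, not taken from this diff:

# Illustration only: batches normalized so that ||x|| < 1, as the fit
# docstring requires; rows of 4 values in [0, 0.4) have l2 norm < 1.
def batches():
  while True:
    x = tf.random.uniform((32, 4), maxval=0.4)
    y = tf.cast(tf.random.uniform((32, 1)) > 0.5, tf.float32)
    yield x, y

model.fit_generator(batches(),
                    noise_distribution='laplace',
                    epsilon=2,
                    n_samples=1000,
                    steps_per_epoch=10)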
@@ -242,7 +250,7 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
         the number of samples for each class
       num_classes: If class_weights is not None, then the number of
         classes.
     Returns:
       class_weights as 1D tensor, to be passed to model's fit method.
     """
     # Value checking
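For intuition about the 1D tensor this method returns: a 'balanced' class-weight scheme typically weights each class inversely to its frequency. A sketch under that assumption about the method's internals (not quoted from the diff):

import numpy as np

def balanced_class_weights(class_counts):
  # Assumed 'balanced' heuristic: n_samples / (num_classes * class_counts),
  # one weight per class, matching the Returns description above.
  counts = np.asarray(class_counts, dtype=np.float64)
  return counts.sum() / (len(counts) * counts)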


@@ -32,10 +32,10 @@ from privacy.bolton.optimizers import Bolton
 class TestLoss(losses.Loss, StrongConvexMixin):
   """Test loss function for testing Bolton model."""

-  def __init__(self, reg_lambda, C, radius_constant, name='test'):
+  def __init__(self, reg_lambda, C_arg, radius_constant, name='test'):
     super(TestLoss, self).__init__(name=name)
     self.reg_lambda = reg_lambda
-    self.C = C  # pylint: disable=invalid-name
+    self.C = C_arg  # pylint: disable=invalid-name
     self.radius_constant = radius_constant

   def radius(self):
@@ -43,7 +43,7 @@ class TestLoss(losses.Loss, StrongConvexMixin):
     W is a convex set that forms the hypothesis space.

     Returns:
       radius
     """
     return _ops.convert_to_tensor_v2(1, dtype=tf.float32)
@@ -70,7 +70,7 @@ class TestLoss(losses.Loss, StrongConvexMixin):
     Args:
       class_weight: class weights used

     Returns:
       L
     """
     return _ops.convert_to_tensor_v2(1, dtype=tf.float32)
@@ -207,7 +207,7 @@ class InitTests(keras_parameterized.TestCase):
       n_outputs: number of output neurons
       loss: instantiated TestLoss instance
       optimizer: instanced TestOptimizer instance
     """
     # test compilaton of invalid tf.optimizer and non instantiated loss.
     with self.cached_session():
       with self.assertRaises((ValueError, AttributeError)):
@@ -228,9 +228,10 @@ def _cat_dataset(n_samples, input_dim, n_classes, generator=False):
     input_dim: input dimensionality
     n_classes: output dimensionality
     generator: False for array, True for generator
+
   Returns:
     X as (n_samples, input_dim), Y as (n_samples, n_outputs)
   """
   x_stack = []
   y_stack = []
   for i_class in range(n_classes):
@@ -512,7 +513,7 @@ class FitTests(keras_parameterized.TestCase):
                                  num_classes,
                                  err_msg):
     """Tests the BOltonModel calculate_class_weights method.

     This test passes invalid params which should raise the expected errors.

     Args:
@@ -520,7 +521,7 @@ class FitTests(keras_parameterized.TestCase):
       class_counts: count of number of samples for each class
       num_classes: number of outputs neurons
       err_msg:
     """
     clf = models.BoltonModel(1, 1)
     with self.assertRaisesRegexp(ValueError, err_msg):  # pylint: disable=deprecated-method
       clf.calculate_class_weights(class_weights,


@@ -298,9 +298,9 @@ class Bolton(optimizer_v2.OptimizerV2):
     return self

   def __call__(self,
-               noise_distribution: str,
-               epsilon: float,
-               layers: list,
+               noise_distribution,
+               epsilon,
+               layers,
                class_weights,
                n_samples,
                batch_size
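The `return self` at the top of this hunk is what lets the optimizer work as a context manager: calling it stores the privacy parameters, and leaving the context applies the output perturbation described in the fit docstring. A sketch of that pattern; the Bolton constructor arguments and the variable names are assumptions, not shown in this diff:

# Hedged sketch of the context-manager usage implied by __call__ above;
# Bolton(optimizer, loss) is an assumed signature.
bolton = Bolton(tf.optimizers.SGD(), loss)
with bolton(noise_distribution='laplace',
            epsilon=2,
            layers=model.layers,
            class_weights=class_weights,
            n_samples=1000,
            batch_size=32) as _:
  pass  # training steps would run here; weights are perturbed on exit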


@@ -51,11 +51,8 @@ class TestModel(Model):  # pylint: disable=abstract-method
     Args:
       n_outputs: number of output neurons
-      epsilon: level of privacy guarantee
-      noise_distribution: distribution to pull weight perturbations from
-      weights_initializer: initializer for weights
-      seed: random seed to use
-      dtype: data type to use for tensors
+      input_shape:
+      init_value:
     """
     super(TestModel, self).__init__(name='bolton', dynamic=False)
     self.n_outputs = n_outputs
@@ -71,18 +68,19 @@ class TestModel(Model):  # pylint: disable=abstract-method
 class TestLoss(losses.Loss, StrongConvexMixin):
   """Test loss function for testing Bolton model."""

-  def __init__(self, reg_lambda, C, radius_constant, name='test'):
+  def __init__(self, reg_lambda, C_arg, radius_constant, name='test'):
     super(TestLoss, self).__init__(name=name)
     self.reg_lambda = reg_lambda
-    self.C = C  # pylint: disable=invalid-name
+    self.C = C_arg  # pylint: disable=invalid-name
     self.radius_constant = radius_constant

   def radius(self):
     """Radius, R, of the hypothesis space W.

     W is a convex set that forms the hypothesis space.

-    Returns: radius
+    Returns:
+      radius
     """
     return _ops.convert_to_tensor_v2(self.radius_constant, dtype=tf.float32)
@@ -105,10 +103,11 @@ class TestLoss(losses.Loss, StrongConvexMixin):
   def lipchitz_constant(self, class_weight):  # pylint: disable=unused-argument
     """Lipchitz constant, L.

     Args:
       class_weight: class weights used

-    Returns: L
+    Returns:
+      L
     """
     return _ops.convert_to_tensor_v2(1, dtype=tf.float32)
@@ -143,7 +142,7 @@ class TestLoss(losses.Loss, StrongConvexMixin):
 class TestOptimizer(OptimizerV2):
-  """Optimizer used for testing the Bolton optimizer"""
+  """Optimizer used for testing the Bolton optimizer."""

   def __init__(self):
     super(TestOptimizer, self).__init__('test')
@@ -263,8 +262,12 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
   def test_project(self, r, shape, n_out, init_value, result):
     """test that a fn of Bolton optimizer is working as expected.

-    Missing args:
+    Args:
+      r:
+      shape:
+      n_out:
+      init_value:
+      result:
     """
     tf.random.set_seed(1)
     @tf.function
@@ -451,7 +454,7 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
   ])
   def test_not_reroute_fn(self, fn, args):
     """Test function is not rerouted.

     Test that a fn that should not be rerouted to the internal optimizer is
     in fact not rerouted.
@@ -490,7 +493,7 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
   ])
   def test_reroute_attr(self, attr):
     """Test a function is rerouted.

     Test that attribute of internal optimizer is correctly rerouted to the
     internal optimizer.
@@ -509,7 +512,7 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
   ])
   def test_attribute_error(self, attr):
     """Test rerouting of attributes.

     Test that attribute of internal optimizer is correctly rerouted to the
     internal optimizer
@@ -524,7 +527,7 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
 class SchedulerTest(keras_parameterized.TestCase):
-  """GammaBeta Scheduler tests"""
+  """GammaBeta Scheduler tests."""

   @parameterized.named_parameters([
       {'testcase_name': 'not in context',
@@ -533,10 +536,10 @@ class SchedulerTest(keras_parameterized.TestCase):
       }
   ])
   def test_bad_call(self, err_msg):
-    """ test that attribute of internal optimizer is correctly rerouted to
-    the internal optimizer
+    """Test attribute of internal opt correctly rerouted to the internal opt.

-    Missing args
+    Args:
+      err_msg:
     """
     scheduler = opt.GammaBetaDecreasingStep()
     with self.assertRaisesRegexp(Exception, err_msg):  # pylint: disable=deprecated-method
@@ -555,11 +558,13 @@ class SchedulerTest(keras_parameterized.TestCase):
   ])
   def test_call(self, step, res):
     """Test call.

     Test that attribute of internal optimizer is correctly rerouted to the
     internal optimizer

-    Missing Args:
+    Args:
+      step:
+      res:
     """
     beta = _ops.convert_to_tensor_v2(2, dtype=tf.float32)
     gamma = _ops.convert_to_tensor_v2(1, dtype=tf.float32)
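For reference on step and res: the GammaBetaDecreasingStep schedule in the Bolton setting is usually read as eta_t = min(1/beta, 1/(gamma * t)); with beta = 2 and gamma = 1 as constructed here, the rate stays at 0.5 until 1/t falls below it. A sketch of that rule, taken as an assumption from the Bolton analysis rather than from this diff:

def gamma_beta_step(step, beta=2.0, gamma=1.0):
  # Assumed schedule: eta_t = min(1/beta, 1/(gamma * t)).
  return min(1.0 / beta, 1.0 / (gamma * step))

# With beta=2, gamma=1: steps 1 and 2 give 0.5; step 4 gives 0.25.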