format fixes

npapernot 2019-07-29 21:20:40 +00:00
parent 0317ce8077
commit 19ce36777d
6 changed files with 76 additions and 61 deletions

privacy/bolton/losses.py

@@ -212,12 +212,12 @@ class StrongConvexBinaryCrossentropy(
"""Strongly Convex BinaryCrossentropy loss using l2 weight regularization."""
def __init__(self,
reg_lambda: float,
C: float,
radius_constant: float,
from_logits: bool = True,
label_smoothing: float = 0,
reduction: str = losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE,
reg_lambda,
C,
radius_constant,
from_logits=True,
label_smoothing=0,
reduction=losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE,
dtype=tf.float32):
"""
Args:

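The hunk above only drops the PEP 484 annotations; the call pattern is unchanged. A minimal instantiation sketch with illustrative values (the module path is assumed from the privacy.bolton imports visible in the test hunks below):

import tensorflow as tf
from privacy.bolton.losses import StrongConvexBinaryCrossentropy

# reg_lambda: l2 regularization strength; C: penalty constant;
# radius_constant: scales the radius of the hypothesis space.
bce = StrongConvexBinaryCrossentropy(
    reg_lambda=1.0,
    C=1.0,
    radius_constant=1.0,
    from_logits=True)  # remaining arguments keep the defaults shown above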
privacy/bolton/losses_test.py

@@ -367,12 +367,13 @@ class HuberTests(keras_parameterized.TestCase):
},
])
def test_calculation(self, logits, y_true, delta, result):
"""Test the call method to ensure it returns the correct value
"""Test the call method to ensure it returns the correct value.
Args:
logits: unscaled output of model
y_true: label
result: correct loss calculation value
Args:
logits: unscaled output of model
y_true: label
delta:
result: correct loss calculation value
"""
logits = tf.Variable(logits, False, dtype=tf.float32)
y_true = tf.Variable(y_true, False, dtype=tf.float32)

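The expected values in test_calculation can be reproduced with the stock Keras Huber loss; a small self-contained sketch with illustrative numbers, not part of the diff:

import tensorflow as tf

logits = tf.constant([0.5], dtype=tf.float32)
y_true = tf.constant([1.0], dtype=tf.float32)
# Keras Huber: 0.5 * err**2 if |err| <= delta, else delta * (|err| - 0.5 * delta).
loss = tf.keras.losses.Huber(delta=1.0)(y_true, logits)  # err = 0.5 -> 0.125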
privacy/bolton/models.py

@@ -86,8 +86,16 @@ class BoltonModel(Model): # pylint: disable=abstract-method
**kwargs): # pylint: disable=arguments-differ
"""See super class. Default optimizer used in Bolton method is SGD.
Missing args.
Args:
optimizer:
loss:
metrics:
loss_weights:
sample_weight_mode:
weighted_metrics:
target_tensors:
distribute:
kernel_initializer:
"""
if not isinstance(loss, StrongConvexMixin):
raise ValueError('loss function must be a Strongly Convex and therefore '
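As the isinstance check above enforces, compile accepts only losses that mix in StrongConvexMixin. A hedged sketch of a valid call; the constructor mirrors models.BoltonModel(1, 1) from the tests below, and the argument values are illustrative:

from privacy.bolton import models
from privacy.bolton.losses import StrongConvexBinaryCrossentropy

model = models.BoltonModel(1, 1)
loss = StrongConvexBinaryCrossentropy(reg_lambda=1.0, C=1.0, radius_constant=1.0)
model.compile(optimizer='SGD', loss=loss)  # SGD is the documented default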
@@ -126,15 +134,15 @@ class BoltonModel(Model): # pylint: disable=abstract-method
**kwargs): # pylint: disable=arguments-differ
"""Reroutes to super fit with Bolton delta-epsilon privacy requirements.
Note, inputs must be normalized s.t. ||x|| < 1.
Requirements are as follows:
Note, inputs must be normalized s.t. ||x|| < 1.
Requirements are as follows:
1. Adds noise to weights after training (output perturbation).
2. Projects weights to R after each batch
3. Limits learning rate
4. Use a strongly convex loss function (see compile)
See super implementation for more details.
See super implementation for more details.
Args:
Args:
n_samples: the number of individual samples in x.
epsilon: privacy parameter, which trades off between utility and privacy.
See the Bolton paper for more description.
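The input requirement stated above (||x|| < 1) is typically met by rescaling the data before fit; a minimal sketch assuming a 2-D float array of samples:

import numpy as np

def normalize_rows(x, eps=1e-6):
  # Divide by the largest row norm so every sample satisfies ||x_i|| < 1.
  max_norm = np.linalg.norm(x, axis=1).max()
  return x / (max_norm + eps)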
@@ -189,20 +197,20 @@ class BoltonModel(Model): # pylint: disable=abstract-method
n_samples=None,
steps_per_epoch=None,
**kwargs): # pylint: disable=arguments-differ
"""Fit with a generator..
"""Fit with a generator.
This method is the same as fit except for when the passed dataset
is a generator. See super method and fit for more details.
Args:
n_samples: number of individual samples in x
generator:
class_weight: the class weights to be used. Can be a scalar or 1D tensor
whose dim == n_classes.
noise_distribution: the distribution to get noise from.
epsilon: privacy parameter, which trades off utility and privacy. See
Bolton paper for more description.
class_weight: the class weights to be used. Can be a scalar or 1D tensor
whose dim == n_classes.
See the super method for descriptions on the rest of the arguments.
n_samples: number of individual samples in x
steps_per_epoch:
"""
if class_weight is None:
class_weight = self.calculate_class_weights(class_weight)
@@ -242,7 +250,7 @@ class BoltonModel(Model): # pylint: disable=abstract-method
the number of samples for each class
num_classes: If class_weights is not None, then the number of
classes.
Returns:
Returns:
class_weights as 1D tensor, to be passed to model's fit method.
"""
# Value checking

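For intuition on the 1D tensor this method returns, the conventional 'balanced' scheme weights each class by n_samples / (n_classes * class_counts); whether that exact formula matches the implementation is an assumption here:

import tensorflow as tf

class_counts = tf.constant([90.0, 10.0])
n_samples = tf.reduce_sum(class_counts)
# Balanced weighting: rarer classes receive larger weights.
class_weights = n_samples / (2 * class_counts)  # -> [0.5555, 5.0]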
privacy/bolton/models_test.py

@@ -32,10 +32,10 @@ from privacy.bolton.optimizers import Bolton
class TestLoss(losses.Loss, StrongConvexMixin):
"""Test loss function for testing Bolton model."""
def __init__(self, reg_lambda, C, radius_constant, name='test'):
def __init__(self, reg_lambda, C_arg, radius_constant, name='test'):
super(TestLoss, self).__init__(name=name)
self.reg_lambda = reg_lambda
self.C = C # pylint: disable=invalid-name
self.C = C_arg # pylint: disable=invalid-name
self.radius_constant = radius_constant
def radius(self):
@@ -43,7 +43,7 @@ class TestLoss(losses.Loss, StrongConvexMixin):
W is a convex set that forms the hypothesis space.
Returns:
Returns:
radius
"""
return _ops.convert_to_tensor_v2(1, dtype=tf.float32)
@@ -70,7 +70,7 @@ class TestLoss(losses.Loss, StrongConvexMixin):
Args:
class_weight: class weights used
Returns:
Returns:
L
"""
return _ops.convert_to_tensor_v2(1, dtype=tf.float32)
@@ -207,7 +207,7 @@ class InitTests(keras_parameterized.TestCase):
n_outputs: number of output neurons
loss: instantiated TestLoss instance
optimizer: instantiated TestOptimizer instance
"""
"""
# test compilation of an invalid tf.optimizer and a non-instantiated loss.
with self.cached_session():
with self.assertRaises((ValueError, AttributeError)):
@@ -228,9 +228,10 @@ def _cat_dataset(n_samples, input_dim, n_classes, generator=False):
input_dim: input dimensionality
n_classes: output dimensionality
generator: False for array, True for generator
Returns:
X as (n_samples, input_dim), Y as (n_samples, n_outputs)
"""
"""
x_stack = []
y_stack = []
for i_class in range(n_classes):
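The loop body is cut off by the hunk; below is a self-contained sketch of what such a helper typically builds (one Gaussian blob per class with one-hot labels), an illustration rather than the project's exact code:

import numpy as np

def cat_dataset(n_samples, input_dim, n_classes):
  x_stack, y_stack = [], []
  for i_class in range(n_classes):
    x_stack.append(np.random.normal(i_class, 0.1, (n_samples, input_dim)))
    y_stack.append(np.full(n_samples, i_class))
  x = np.concatenate(x_stack).astype(np.float32)
  y = np.eye(n_classes)[np.concatenate(y_stack)]  # one-hot labels
  return x, y.astype(np.float32)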
@@ -512,7 +513,7 @@ class FitTests(keras_parameterized.TestCase):
num_classes,
err_msg):
"""Tests the BOltonModel calculate_class_weights method.
This test passes invalid params which should raise the expected errors.
Args:
@@ -520,7 +521,7 @@
class_counts: count of number of samples for each class
num_classes: number of outputs neurons
err_msg:
"""
"""
clf = models.BoltonModel(1, 1)
with self.assertRaisesRegexp(ValueError, err_msg): # pylint: disable=deprecated-method
clf.calculate_class_weights(class_weights,

privacy/bolton/optimizers.py

@@ -298,9 +298,9 @@ class Bolton(optimizer_v2.OptimizerV2):
return self
def __call__(self,
noise_distribution: str,
epsilon: float,
layers: list,
noise_distribution,
epsilon,
layers,
class_weights,
n_samples,
batch_size

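The 'return self' above together with this __call__ signature suggests the optimizer is meant to be used as a context manager around training. A hedged usage sketch; that Bolton wraps an internal optimizer and a strongly convex loss at construction is an assumption, and all values are illustrative:

import tensorflow as tf
from privacy.bolton.optimizers import Bolton

bolton = Bolton(tf.optimizers.SGD(), loss)  # 'loss' as in the earlier sketches
with bolton(noise_distribution='laplace',
            epsilon=2.0,
            layers=model.layers,  # 'model' as in the earlier sketches
            class_weights=1,
            n_samples=100,
            batch_size=32) as private_opt:
  pass  # training batches run here; weights are perturbed afterwards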
privacy/bolton/optimizers_test.py

@@ -51,11 +51,8 @@ class TestModel(Model): # pylint: disable=abstract-method
Args:
n_outputs: number of output neurons
epsilon: level of privacy guarantee
noise_distribution: distribution to pull weight perturbations from
weights_initializer: initializer for weights
seed: random seed to use
dtype: data type to use for tensors
input_shape:
init_value:
"""
super(TestModel, self).__init__(name='bolton', dynamic=False)
self.n_outputs = n_outputs
@@ -71,18 +68,19 @@ class TestLoss(losses.Loss, StrongConvexMixin):
class TestLoss(losses.Loss, StrongConvexMixin):
"""Test loss function for testing Bolton model."""
def __init__(self, reg_lambda, C, radius_constant, name='test'):
def __init__(self, reg_lambda, C_arg, radius_constant, name='test'):
super(TestLoss, self).__init__(name=name)
self.reg_lambda = reg_lambda
self.C = C # pylint: disable=invalid-name
self.C = C_arg # pylint: disable=invalid-name
self.radius_constant = radius_constant
def radius(self):
"""Radius, R, of the hypothesis space W.
W is a convex set that forms the hypothesis space.
W is a convex set that forms the hypothesis space.
Returns: radius
Returns:
radius
"""
return _ops.convert_to_tensor_v2(self.radius_constant, dtype=tf.float32)
@@ -105,10 +103,11 @@ class TestLoss(losses.Loss, StrongConvexMixin):
def lipchitz_constant(self, class_weight): # pylint: disable=unused-argument
"""Lipchitz constant, L.
Args:
class_weight: class weights used
Args:
class_weight: class weights used
Returns: L
Returns:
L
"""
return _ops.convert_to_tensor_v2(1, dtype=tf.float32)
@@ -143,7 +142,7 @@ class TestLoss(losses.Loss, StrongConvexMixin):
class TestOptimizer(OptimizerV2):
"""Optimizer used for testing the Bolton optimizer"""
"""Optimizer used for testing the Bolton optimizer."""
def __init__(self):
super(TestOptimizer, self).__init__('test')
@@ -263,8 +262,12 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
def test_project(self, r, shape, n_out, init_value, result):
"""test that a fn of Bolton optimizer is working as expected.
Missing args:
Args:
r:
shape:
n_out:
init_value:
result:
"""
tf.random.set_seed(1)
@tf.function
@@ -451,7 +454,7 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
])
def test_not_reroute_fn(self, fn, args):
"""Test function is not rerouted.
Test that a fn that should not be rerouted to the internal optimizer is
in fact not rerouted.
@@ -490,7 +493,7 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
])
def test_reroute_attr(self, attr):
"""Test a function is rerouted.
Test that attribute lookups are correctly rerouted to the
internal optimizer.
@@ -509,7 +512,7 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
])
def test_attribute_error(self, attr):
"""Test rerouting of attributes.
Test that attribute lookups are correctly rerouted to the
internal optimizer.
@@ -524,7 +527,7 @@ class SchedulerTest(keras_parameterized.TestCase):
class SchedulerTest(keras_parameterized.TestCase):
"""GammaBeta Scheduler tests"""
"""GammaBeta Scheduler tests."""
@parameterized.named_parameters([
{'testcase_name': 'not in context',
@@ -533,10 +536,10 @@ class SchedulerTest(keras_parameterized.TestCase):
}
])
def test_bad_call(self, err_msg):
""" test that attribute of internal optimizer is correctly rerouted to
the internal optimizer
"""Test attribute of internal opt correctly rerouted to the internal opt.
Missing args
Args:
err_msg:
"""
scheduler = opt.GammaBetaDecreasingStep()
with self.assertRaisesRegexp(Exception, err_msg): # pylint: disable=deprecated-method
@@ -555,11 +558,13 @@ class SchedulerTest(keras_parameterized.TestCase):
])
def test_call(self, step, res):
"""Test call.
Test that attribute lookups are correctly rerouted to the
internal optimizer.
Missing Args:
Args:
step:
res:
"""
beta = _ops.convert_to_tensor_v2(2, dtype=tf.float32)
gamma = _ops.convert_to_tensor_v2(1, dtype=tf.float32)
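For intuition about the values this test expects: with beta = 2 and gamma = 1 as built here, a Bolton-style decreasing step size would be eta_t = min(1/beta, 1/(gamma * t)). A hedged sketch, assuming that is the schedule under test:

def decreasing_step(step, beta=2.0, gamma=1.0):
  # Assumed schedule: capped at 1/beta, then decaying as 1/(gamma * t).
  return min(1.0 / beta, 1.0 / (gamma * step))

print(decreasing_step(1), decreasing_step(4))  # 0.5 0.25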