more lint

npapernot 2019-07-29 22:09:21 +00:00
parent 32c76e588a
commit ed93cf6f44
4 changed files with 113 additions and 108 deletions

View file

@@ -117,29 +117,29 @@ class BoltonModel(Model): # pylint: disable=abstract-method
**kwargs): # pylint: disable=arguments-differ
"""Reroutes to super fit with Bolton delta-epsilon privacy requirements.
Note, inputs must be normalized s.t. ||x|| < 1.
Requirements are as follows:
1. Adds noise to weights after training (output perturbation).
2. Projects weights to R after each batch.
3. Limits learning rate.
4. Uses a strongly convex loss function (see compile).
See super implementation for more details.
Args:
x: inputs to fit on; per the note above, must be normalized s.t. ||x|| < 1.
y: target labels to fit on.
batch_size: number of samples per gradient update (see super).
class_weight: the class weights to be used. Can be a scalar or 1D tensor
whose dim == n_classes.
n_samples: the number of individual samples in x.
epsilon: privacy parameter, which trades off between utility and privacy.
See the Bolton paper for more description.
noise_distribution: the distribution to pull noise from.
steps_per_epoch: number of batches per epoch (see super).
kwargs: kwargs to keras Model.fit. See super.
Returns:
output
"""
if class_weight is None:
class_weight_ = self.calculate_class_weights(class_weight)
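For orientation, a minimal usage sketch of the fit API documented above (not part of this diff; the loss constructor arguments, data shapes, and import paths are illustrative assumptions based on this repository's layout at the time):

import tensorflow as tf
from privacy.bolton import models
from privacy.bolton.losses import StrongConvexBinaryCrossentropy

clf = models.BoltonModel(1, 1)  # as instantiated in the tests below
clf.compile(optimizer=tf.optimizers.SGD(),
            loss=StrongConvexBinaryCrossentropy(1.0, 1.0, 1.0))
x = tf.random.uniform((100, 4), maxval=0.1)  # normalized s.t. ||x|| < 1
y = tf.ones((100, 1))
clf.fit(x, y,
        epsilon=2.0,                   # privacy parameter
        noise_distribution='laplace',  # distribution to pull noise from
        n_samples=100)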
@@ -188,18 +188,18 @@ class BoltonModel(Model): # pylint: disable=abstract-method
**kwargs): # pylint: disable=arguments-differ
"""Fit with a generator.
This method is the same as fit except when the passed dataset
is a generator. See super method and fit for more details.
Args:
generator: a generator yielding batches of (x, y), as in super's fit_generator.
class_weight: the class weights to be used. Can be a scalar or 1D tensor
whose dim == n_classes.
noise_distribution: the distribution to get noise from.
epsilon: privacy parameter, which trades off utility and privacy. See
Bolton paper for more description.
n_samples: number of individual samples in x.
steps_per_epoch: number of batches per epoch (see super).
"""
if class_weight is None:
class_weight = self.calculate_class_weights(class_weight)
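A hedged sketch of the generator path (the generator contract follows keras Model.fit_generator; values are illustrative):

def gen():
    while True:
        yield tf.random.uniform((32, 4), maxval=0.1), tf.ones((32, 1))

clf.fit_generator(gen(),
                  n_samples=100,
                  epsilon=2.0,
                  noise_distribution='laplace',
                  steps_per_epoch=3)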
@@ -233,14 +233,14 @@ class BoltonModel(Model): # pylint: disable=abstract-method
num_classes=None):
"""Calculates class weighting to be used in training.
Args:
class_weights: str specifying type, array giving weights, or None.
class_counts: If class_weights is not None, then an array of
the number of samples for each class.
num_classes: If class_weights is not None, then the number of
classes.
Returns:
class_weights as 1D tensor, to be passed to model's fit method.
"""
# Value checking
class_keys = ['balanced']
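The 'balanced' key checked here suggests sklearn-style weighting; a hypothetical call (the n_samples / (num_classes * class_counts) formula is an assumption borrowed from that convention, not read from this diff):

weights = clf.calculate_class_weights(class_weights='balanced',
                                      class_counts=[90, 10],
                                      num_classes=2)
# sklearn-style 'balanced' would give [100/(2*90), 100/(2*10)] = [0.56, 5.0]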

View file

@@ -203,10 +203,10 @@ class InitTests(keras_parameterized.TestCase):
def test_bad_compile(self, n_outputs, loss, optimizer):
"""test bad compilations of BoltonModel that should raise errors.
Args:
n_outputs: number of output neurons
loss: instantiated TestLoss instance
optimizer: instantiated TestOptimizer instance
"""
# test compilation of an invalid tf.optimizer and a non-instantiated loss.
with self.cached_session():
@@ -218,19 +218,19 @@ class InitTests(keras_parameterized.TestCase):
def _cat_dataset(n_samples, input_dim, n_classes, generator=False):
"""Creates a categorically encoded dataset.
Creates a categorically encoded dataset (y is categorical) and returns it
either as a static array or as a generator. Samples are split evenly
across the output classes, and each output class is a different point in
the input space.
Args:
n_samples: number of rows
input_dim: input dimensionality
n_classes: output dimensionality
generator: False for array, True for generator
Returns:
X as (n_samples, input_dim), Y as (n_samples, n_outputs)
"""
x_stack = []
y_stack = []
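A sketch of how the body plausibly continues (an assumption consistent with the docstring: each class is a distinct constant point in input space, with n_samples rows per class):

for i_class in range(n_classes):
    x_stack.append(tf.constant(i_class, tf.float32, (n_samples, input_dim)))
    y_stack.append(tf.constant(i_class, tf.float32, (n_samples, n_classes)))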
@@ -514,13 +514,13 @@ class FitTests(keras_parameterized.TestCase):
err_msg):
"""Tests the BOltonModel calculate_class_weights method.
This test passes invalid params which should raise the expected errors.
Args:
class_weights: the class_weights to use.
class_counts: count of number of samples for each class.
num_classes: number of output neurons.
err_msg: The expected error message.
"""
clf = models.BoltonModel(1, 1)
with self.assertRaisesRegexp(ValueError, err_msg): # pylint: disable=deprecated-method

View file

@@ -119,11 +119,11 @@ class Bolton(optimizer_v2.OptimizerV2):
):
"""Constructor.
Args:
optimizer: Optimizer_v2 or subclass to be used as the optimizer
(wrapped).
loss: StrongConvexLoss function that the model is being compiled with.
dtype: dtype for the optimizer's computations (e.g. tf.float32).
"""
if not isinstance(loss, StrongConvexMixin):
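A hedged construction sketch (the loss must subclass StrongConvexMixin, per the check here; the import path is an assumption):

from privacy.bolton import optimizers as opt

loss = StrongConvexBinaryCrossentropy(1.0, 1.0, 1.0)
private_opt = opt.Bolton(tf.optimizers.SGD(), loss, dtype=tf.float32)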
@@ -182,15 +182,15 @@ class Bolton(optimizer_v2.OptimizerV2):
def get_noise(self, input_dim, output_dim):
"""Sample noise to be added to weights for privacy guarantee.
Args:
input_dim: the input dimensionality for the weights
output_dim: the output dimensionality for the weights.
Returns:
Noise in shape of layer's weights to be added to the weights.
Raises:
Exception: if called outside of the optimizer's context (see the guard below).
"""
if not self._is_init:
raise Exception('This method must be called from within the optimizer\'s '
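get_noise is only valid inside the optimizer's context, which the Exception here guards. A sketch of that flow, with the context-entry arguments inferred from the hunk below (names and order are assumptions):

with private_opt('laplace', epsilon, model.layers, class_weights,
                 n_samples, batch_size) as optimizer:
    noise = optimizer.get_noise(input_dim, output_dim)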
@@ -228,7 +228,9 @@ class Bolton(optimizer_v2.OptimizerV2):
return self._internal_optimizer.from_config(*args, **kwargs)
def __getattr__(self, name):
"""return _internal_optimizer off self instance, and everything else
"""Get attr.
Returns _internal_optimizer from the self instance, and everything else
from the _internal_optimizer instance.
Args:
@@ -253,6 +255,7 @@ class Bolton(optimizer_v2.OptimizerV2):
def __setattr__(self, key, value):
""" Set attribute to self instance if its the internal optimizer.
Reroute everything else to the _internal_optimizer.
Args:
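Together, these two dunder methods make the wrapper transparent; sketched behavior (attribute names illustrative):

private_opt.iterations           # not Bolton state, so fetched from the wrapped optimizer
private_opt.learning_rate = 0.1  # likewise rerouted to the wrapped optimizer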
@@ -318,6 +321,7 @@ class Bolton(optimizer_v2.OptimizerV2):
batch_size
):
"""Accepts required values for bolton method from context entry point.
Stores them on the optimizer for use throughout fitting.
Args:
@@ -327,7 +331,7 @@ class Bolton(optimizer_v2.OptimizerV2):
layers: list of Keras/Tensorflow layers. Can be found as model.layers
class_weights: class_weights used, which may either be a scalar or 1D
tensor with dim == n_classes.
n_samples: number of rows/individual samples in the training set
batch_size: batch size used.
"""
if epsilon <= 0:

View file

@@ -77,10 +77,10 @@ class TestLoss(losses.Loss, StrongConvexMixin):
def radius(self):
"""Radius, R, of the hypothesis space W.
W is a convex set that forms the hypothesis space.
Returns:
a tensor
"""
return _ops.convert_to_tensor_v2(self.radius_constant, dtype=tf.float32)
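radius() supplies the R used by the projection step that fit's docstring mentions ("Projects weights to R after each batch"). A minimal sketch of such a projection, not the repository's implementation:

def project_to_ball(w, r):
    # Scale w back onto the l2 ball of radius r if it has escaped.
    norm = tf.norm(w)
    return tf.cond(norm > r, lambda: w * (r / norm), lambda: w)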
@@ -103,11 +103,11 @@ class TestLoss(losses.Loss, StrongConvexMixin):
def lipchitz_constant(self, class_weight): # pylint: disable=unused-argument
"""Lipchitz constant, L.
Args:
class_weight: class weights used
Returns:
constant L
"""
return _ops.convert_to_tensor_v2(1, dtype=tf.float32)
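For context, from the output-perturbation literature (e.g. Chaudhuri et al., 2011; not stated in this diff): for an L-Lipschitz, gamma-strongly convex loss over n samples, the l2-sensitivity of the empirical minimizer is at most 2L/(n*gamma), which is how this constant feeds the noise scale. A worked instance with the test value L = 1:

L_const, gamma, n = 1.0, 1.0, 100
sensitivity_bound = 2 * L_const / (n * gamma)  # = 0.02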
@@ -262,12 +262,12 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
def test_project(self, r, shape, n_out, init_value, result):
"""test that a fn of Bolton optimizer is working as expected.
Args:
r: Radius value for StrongConvex loss function.
shape: input dimensionality
n_out: output dimensionality
init_value: the initial value for 'constant' kernel initializer
result: the expected output after projection.
"""
tf.random.set_seed(1)
@tf.function
@@ -455,12 +455,12 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
def test_not_reroute_fn(self, fn, args):
"""Test function is not rerouted.
Test that a fn that should not be rerouted to the internal optimizer is
in fact not rerouted.
Args:
fn: fn to test
args: arguments to that fn
"""
@tf.function
def test_run(fn, args):
@@ -494,11 +494,11 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
def test_reroute_attr(self, attr):
"""Test a function is rerouted.
Test that attribute of internal optimizer is correctly rerouted to the
internal optimizer.
Args:
attr: attribute to test
"""
loss = TestLoss(1, 1, 1)
internal_optimizer = TestOptimizer()
@@ -513,11 +513,11 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
def test_attribute_error(self, attr):
"""Test rerouting of attributes.
Test that an invalid attribute of the internal optimizer raises the
expected error.
Args:
attr: attribute to test
"""
loss = TestLoss(1, 1, 1)
internal_optimizer = TestOptimizer()
@@ -538,8 +538,8 @@ class SchedulerTest(keras_parameterized.TestCase):
def test_bad_call(self, err_msg):
"""Test attribute of internal opt correctly rerouted to the internal opt.
Args:
err_msg: The expected error message from the scheduler bad call.
"""
scheduler = opt.GammaBetaDecreasingStep()
with self.assertRaisesRegexp(Exception, err_msg): # pylint: disable=deprecated-method
@@ -558,12 +558,13 @@ class SchedulerTest(keras_parameterized.TestCase):
])
def test_call(self, step, res):
"""Test call.
Test that the scheduler returns the expected learning rate for a given step.
Args:
step: step number to 'GammaBetaDecreasingStep' 'Scheduler'.
res: expected result from call to 'GammaBetaDecreasingStep' 'Scheduler'.
"""
beta = _ops.convert_to_tensor_v2(2, dtype=tf.float32)
gamma = _ops.convert_to_tensor_v2(1, dtype=tf.float32)
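A hedged reading of what test_call should verify, based on the BoltOn paper's decreasing step-size schedule (an assumption; the hunk does not show the formula): the scheduler returns min(1/beta, 1/(gamma * step)).

def expected_lr(step, beta=2.0, gamma=1.0):
    # BoltOn-style decreasing step size (assumed form)
    return min(1.0 / beta, 1.0 / (gamma * step))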