more lint

npapernot 2019-07-29 22:09:21 +00:00
parent 32c76e588a
commit ed93cf6f44
4 changed files with 113 additions and 108 deletions


@@ -117,29 +117,29 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
          **kwargs):  # pylint: disable=arguments-differ
    """Reroutes to super fit with Bolton delta-epsilon privacy requirements.

    Note, inputs must be normalized s.t. ||x|| < 1.
    Requirements are as follows:
      1. Adds noise to weights after training (output perturbation).
      2. Projects weights to R after each batch
      3. Limits learning rate
      4. Use a strongly convex loss function (see compile)
    See super implementation for more details.

    Args:
      x:
      y:
      batch_size:
      class_weight: the class weights to be used. Can be a scalar or 1D tensor
        whose dim == n_classes.
      n_samples: the number of individual samples in x.
      epsilon: privacy parameter, which trades off between utility and privacy.
        See the bolton paper for more description.
      noise_distribution: the distribution to pull noise from.
      steps_per_epoch:
      kwargs: kwargs to keras Model.fit. See super.

    Returns:
      output
    """
    if class_weight is None:
      class_weight_ = self.calculate_class_weights(class_weight)
@@ -188,18 +188,18 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
                    **kwargs):  # pylint: disable=arguments-differ
    """Fit with a generator.

    This method is the same as fit except for when the passed dataset
    is a generator. See super method and fit for more details.

    Args:
      generator:
      class_weight: the class weights to be used. Can be a scalar or 1D tensor
        whose dim == n_classes.
      noise_distribution: the distribution to get noise from.
      epsilon: privacy parameter, which trades off utility and privacy. See
        Bolton paper for more description.
      n_samples: number of individual samples in x
      steps_per_epoch:
    """
    if class_weight is None:
      class_weight = self.calculate_class_weights(class_weight)
@@ -233,14 +233,14 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
                              num_classes=None):
    """Calculates class weighting to be used in training.

    Args:
      class_weights: str specifying type, array giving weights, or None.
      class_counts: If class_weights is not None, then an array of
        the number of samples for each class
      num_classes: If class_weights is not None, then the number of
        classes.
    Returns:
      class_weights as 1D tensor, to be passed to model's fit method.
    """
    # Value checking
    class_keys = ['balanced']
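The 'balanced' option named in class_keys matches the standard balanced-weighting scheme; a hedged numpy sketch (the exact formula used by the library is an assumption):

```python
# Sketch of 'balanced' class weighting, assumed to follow the standard
# sklearn-style formula: w_i = n_samples / (n_classes * count_i).
import numpy as np

def balanced_class_weights(class_counts):
  """Rare classes get proportionally larger weights."""
  class_counts = np.asarray(class_counts, dtype=np.float64)
  n_samples = class_counts.sum()
  n_classes = len(class_counts)
  return n_samples / (n_classes * class_counts)

print(balanced_class_weights([75, 25]))  # -> [0.66666667 2.        ]
```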


@@ -203,10 +203,10 @@ class InitTests(keras_parameterized.TestCase):
  def test_bad_compile(self, n_outputs, loss, optimizer):
    """test bad compilations of BoltonModel that should raise errors.

    Args:
      n_outputs: number of output neurons
      loss: instantiated TestLoss instance
      optimizer: instantiated TestOptimizer instance
    """
    # test compilation of invalid tf.optimizer and non-instantiated loss.
    with self.cached_session():
@@ -218,19 +218,19 @@ class InitTests(keras_parameterized.TestCase):
def _cat_dataset(n_samples, input_dim, n_classes, generator=False):
  """Creates a categorically encoded dataset.

  Creates a categorically encoded dataset (y is categorical).
  Returns the specified dataset either as a static array or as a generator.
  Will have evenly split samples across each output class.
  Each output class will be a different point in the input space.

  Args:
    n_samples: number of rows
    input_dim: input dimensionality
    n_classes: output dimensionality
    generator: False for array, True for generator

  Returns:
    X as (n_samples, input_dim), Y as (n_samples, n_outputs)
  """
  x_stack = []
  y_stack = []
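A hedged numpy sketch of the construction this docstring describes (the exact layout is an assumption): each class sits at its own point in input space, with one-hot labels and an even class split:

```python
# Illustrative sketch, not the test file's code: one input point per class,
# one-hot labels, samples split evenly across classes.
import numpy as np

def cat_dataset(n_samples, input_dim, n_classes):
  x_stack, y_stack = [], []
  for i in range(n_classes):
    n_i = n_samples // n_classes
    x_stack.append(np.full((n_i, input_dim), i, dtype=np.float32))  # class i's point
    y_stack.append(np.eye(n_classes, dtype=np.float32)[np.full(n_i, i)])  # one-hot
  return np.concatenate(x_stack), np.concatenate(y_stack)

x, y = cat_dataset(6, input_dim=2, n_classes=3)
print(x.shape, y.shape)  # (6, 2) (6, 3)
```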
@@ -514,13 +514,13 @@ class FitTests(keras_parameterized.TestCase):
                                            err_msg):
    """Tests the BoltonModel calculate_class_weights method.

    This test passes invalid params which should raise the expected errors.

    Args:
      class_weights: the class_weights to use.
      class_counts: count of number of samples for each class.
      num_classes: number of output neurons.
      err_msg: The expected error message.
    """
    clf = models.BoltonModel(1, 1)
    with self.assertRaisesRegexp(ValueError, err_msg):  # pylint: disable=deprecated-method


@@ -119,11 +119,11 @@ class Bolton(optimizer_v2.OptimizerV2):
              ):
    """Constructor.

    Args:
      optimizer: Optimizer_v2 or subclass to be used as the optimizer
        (wrapped).
      loss: StrongConvexLoss function that the model is being compiled with.
      dtype: dtype
    """
    if not isinstance(loss, StrongConvexMixin):
@@ -182,15 +182,15 @@ class Bolton(optimizer_v2.OptimizerV2):
  def get_noise(self, input_dim, output_dim):
    """Sample noise to be added to weights for privacy guarantee.

    Args:
      input_dim: the input dimensionality for the weights
      output_dim: the output dimensionality for the weights

    Returns:
      Noise in shape of layer's weights to be added to the weights.

    Raises:
      Exception:
    """
    if not self._is_init:
      raise Exception('This method must be called from within the optimizer\'s '
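For intuition, a hedged sketch of the kind of noise this method draws, following the BoltOn paper's output-perturbation scheme (the sensitivity formula and the sampling procedure below are assumptions, not this file's code):

```python
# Illustrative sketch of multidimensional-Laplace output-perturbation noise,
# in the style of the BoltOn paper (Wu et al., 2017). Assumed: the sample is
# a uniform direction on the unit sphere scaled by a Gamma-distributed radius,
# giving density proportional to exp(-epsilon * ||b|| / l2_sensitivity).
import numpy as np

def laplace_weight_noise(input_dim, output_dim, l2_sensitivity, epsilon):
  shape = (input_dim, output_dim)
  direction = np.random.normal(size=shape)
  direction /= np.linalg.norm(direction)  # uniform direction on the unit sphere
  d = input_dim * output_dim
  radius = np.random.gamma(shape=d, scale=l2_sensitivity / epsilon)
  return radius * direction               # noise in the shape of the weights
```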
@@ -228,7 +228,9 @@ class Bolton(optimizer_v2.OptimizerV2):
    return self._internal_optimizer.from_config(*args, **kwargs)

  def __getattr__(self, name):
-   """return _internal_optimizer off self instance, and everything else
+   """Get attr.
+
+   return _internal_optimizer off self instance, and everything else
    from the _internal_optimizer instance.

    Args:
@@ -253,6 +255,7 @@ class Bolton(optimizer_v2.OptimizerV2):
  def __setattr__(self, key, value):
    """Set attribute to self instance if it's the internal optimizer.
+
    Reroute everything else to the _internal_optimizer.

    Args:
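Outside the diff, the delegation pattern these two methods implement can be sketched in a few lines (illustrative only, not the library's code):

```python
# Sketch of the wrapper pattern: unknown attribute reads and writes fall
# through to the wrapped internal optimizer; wrapper-owned fields stay local.
class Wrapper:
  _OWN_ATTRS = {'_internal_optimizer', 'dtype'}  # assumed wrapper-owned names

  def __init__(self, internal_optimizer):
    self._internal_optimizer = internal_optimizer  # routed by __setattr__ below

  def __getattr__(self, name):
    # Only called when normal lookup fails: reroute to the wrapped optimizer.
    return getattr(self._internal_optimizer, name)

  def __setattr__(self, key, value):
    if key in self._OWN_ATTRS:
      super().__setattr__(key, value)  # keep wrapper-owned attributes local
    else:
      setattr(self._internal_optimizer, key, value)  # reroute everything else
```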
@@ -318,6 +321,7 @@ class Bolton(optimizer_v2.OptimizerV2):
               batch_size
               ):
    """Accepts required values for bolton method from context entry point.
+
    Stores them on the optimizer for use throughout fitting.

    Args:
@@ -327,7 +331,7 @@ class Bolton(optimizer_v2.OptimizerV2):
      layers: list of Keras/Tensorflow layers. Can be found as model.layers
      class_weights: class_weights used, which may either be a scalar or 1D
        tensor with dim == n_classes.
-     n_samples number of rows/individual samples in the training set
+     n_samples: number of rows/individual samples in the training set
      batch_size: batch size used.
    """
    if epsilon <= 0:
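A self-contained sketch of the "context entry point" pattern this docstring describes (illustrative, not the library's code): calling the object primes it with the fitting parameters, and exiting the context is where the noise step would run:

```python
# Sketch of a context-primed optimizer; parameter names follow the docstring
# above, the mechanics are an assumption.
class ContextPrimedOptimizer:
  def __call__(self, noise_distribution, epsilon, layers, class_weights,
               n_samples, batch_size):
    if epsilon <= 0:
      raise ValueError('epsilon must be > 0')
    # store required values for use throughout fitting
    self.noise_distribution = noise_distribution
    self.epsilon = epsilon
    self.layers = layers
    self.class_weights = class_weights
    self.n_samples = n_samples
    self.batch_size = batch_size
    return self

  def __enter__(self):
    return self

  def __exit__(self, *exc):
    # the real optimizer would add output-perturbation noise to the weights here
    return False

with ContextPrimedOptimizer()('laplace', 2, [], None, 100, 20) as opt_:
  pass  # training loop runs with the primed optimizer
```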


@@ -77,10 +77,10 @@ class TestLoss(losses.Loss, StrongConvexMixin):
  def radius(self):
    """Radius, R, of the hypothesis space W.

    W is a convex set that forms the hypothesis space.

    Returns:
      a tensor
    """
    return _ops.convert_to_tensor_v2(self.radius_constant, dtype=tf.float32)
@@ -103,11 +103,11 @@ class TestLoss(losses.Loss, StrongConvexMixin):
  def lipchitz_constant(self, class_weight):  # pylint: disable=unused-argument
    """Lipchitz constant, L.

    Args:
      class_weight: class weights used

    Returns:
      constant L
    """
    return _ops.convert_to_tensor_v2(1, dtype=tf.float32)
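These two properties belong to the StrongConvexMixin contract that the BoltOn analysis relies on; a toy sketch of such a loss's constants (names and formulas are illustrative assumptions):

```python
# Toy sketch of the constants a strongly convex loss exposes: radius R of the
# hypothesis ball W, strong-convexity constant gamma, and the Lipschitz
# constant. Illustrative only; the real mixin lives in the bolton losses module.
class ToyStrongConvexLoss:
  """L2-regularized loss: gamma equals the regularization strength (assumed)."""

  def __init__(self, reg_lambda, radius_constant, lipschitz=1.0):
    self.reg_lambda = reg_lambda
    self.radius_constant = radius_constant
    self.lipschitz = lipschitz

  def radius(self):
    return self.radius_constant  # R: radius of the hypothesis ball W

  def gamma(self):
    return self.reg_lambda       # strong-convexity constant

  def lipchitz_constant(self, class_weight=None):
    # spelled as in the library method above
    return self.lipschitz
```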
@@ -262,12 +262,12 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
  def test_project(self, r, shape, n_out, init_value, result):
    """test that a fn of Bolton optimizer is working as expected.

    Args:
      r: Radius value for StrongConvex loss function.
      shape: input dimensionality
      n_out: output dimensionality
      init_value: the initial value for 'constant' kernel initializer
      result: the expected output after projection.
    """
    tf.random.set_seed(1)
    @tf.function
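The projection under test is the textbook step the fit docstring above calls "projects weights to R"; a numpy sketch of what is assumed to be the same operation:

```python
# Projection onto the radius-R ball: if the weights leave the ball,
# rescale them back onto its surface.
import numpy as np

def project_to_ball(w, r):
  norm = np.linalg.norm(w)
  return w if norm <= r else w * (r / norm)

w = np.array([3.0, 4.0])          # ||w|| = 5
print(project_to_ball(w, r=1.0))  # -> [0.6 0.8], back on the unit ball
```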
@@ -455,12 +455,12 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
  def test_not_reroute_fn(self, fn, args):
    """Test function is not rerouted.

    Test that a fn that should not be rerouted to the internal optimizer is
    in fact not rerouted.

    Args:
      fn: fn to test
      args: arguments to that fn
    """
    @tf.function
    def test_run(fn, args):
@@ -494,11 +494,11 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
  def test_reroute_attr(self, attr):
    """Test a function is rerouted.

    Test that attribute of internal optimizer is correctly rerouted to the
    internal optimizer.

    Args:
      attr: attribute to test
    """
    loss = TestLoss(1, 1, 1)
    internal_optimizer = TestOptimizer()
@@ -513,11 +513,11 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
  def test_attribute_error(self, attr):
    """Test rerouting of attributes.

    Test that attribute of internal optimizer is correctly rerouted to the
    internal optimizer

    Args:
      attr: attribute to test
    """
    loss = TestLoss(1, 1, 1)
    internal_optimizer = TestOptimizer()
@@ -538,8 +538,8 @@ class SchedulerTest(keras_parameterized.TestCase):
  def test_bad_call(self, err_msg):
    """Test attribute of internal opt correctly rerouted to the internal opt.

    Args:
      err_msg: The expected error message from the scheduler bad call.
    """
    scheduler = opt.GammaBetaDecreasingStep()
    with self.assertRaisesRegexp(Exception, err_msg):  # pylint: disable=deprecated-method
@@ -558,12 +558,13 @@ class SchedulerTest(keras_parameterized.TestCase):
      ])
  def test_call(self, step, res):
    """Test call.
+
    Test that attribute of internal optimizer is correctly rerouted to the
    internal optimizer

    Args:
      step: step number to 'GammaBetaDecreasingStep' 'Scheduler'.
      res: expected result from call to 'GammaBetaDecreasingStep' 'Scheduler'.
    """
    beta = _ops.convert_to_tensor_v2(2, dtype=tf.float32)
    gamma = _ops.convert_to_tensor_v2(1, dtype=tf.float32)
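For reference, a sketch of the schedule these test values imply, matching the decreasing step size described in the BoltOn paper (the exact form is an assumption): eta_t = min(1/beta, 1/(gamma * t)). With beta = 2 and gamma = 1 this gives 0.5 for early steps, then 1/t:

```python
# Hedged sketch of the GammaBetaDecreasingStep schedule (assumed form):
# learning rate decays as min(1/beta, 1/(gamma * step)).
def gamma_beta_decreasing_step(step, beta=2.0, gamma=1.0):
  return min(1.0 / beta, 1.0 / (gamma * step))

for step in (1, 2, 10):
  print(step, gamma_beta_decreasing_step(step))  # -> 0.5, 0.5, 0.1
```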