more fixes

npapernot 2019-07-25 16:13:32 +00:00
parent 8e6bcf9b4a
commit 8974a95b9a
8 changed files with 114 additions and 107 deletions

View file

@@ -16,7 +16,7 @@ import sys
 from distutils.version import LooseVersion
 import tensorflow as tf

-if LooseVersion(tf.__version__) < LooseVersion('2.0.0'):
+if LooseVersion(tf.__version__) < LooseVersion("2.0.0"):
   raise ImportError("Please upgrade your version "
                     "of tensorflow from: {0} to at least 2.0.0 to "
                     "use privacy/bolton".format(LooseVersion(tf.__version__)))

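For reference, this guard works because LooseVersion compares dotted version strings numerically, component by component; a minimal sketch of the comparison (the version strings here are illustrative, not from the diff):

    from distutils.version import LooseVersion

    # A TF 1.x version compares below the 2.0.0 threshold, so the ImportError fires.
    assert LooseVersion("1.15.0") < LooseVersion("2.0.0")
    # A TF 2.x version passes the check and the import proceeds.
    assert not LooseVersion("2.1.0") < LooseVersion("2.0.0")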
View file

@@ -102,11 +102,11 @@ class StrongConvexHuber(losses.Loss, StrongConvexMixin):
   """Strong Convex version of Huber loss using l2 weight regularization."""

   def __init__(self,
-               reg_lambda: float,
-               C: float,
-               radius_constant: float,
-               delta: float,
-               reduction: str = losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE,
+               reg_lambda,
+               C,
+               radius_constant,
+               delta,
+               reduction=losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE,
                dtype=tf.float32):
     """Constructor.

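With the annotations gone, the loss is constructed exactly as before; a hedged usage sketch with illustrative values (the import path is inferred from the test imports later in this commit):

    from privacy.bolton.losses import StrongConvexHuber

    # Positional order matches the signature above:
    # reg_lambda, C, radius_constant, delta; reduction and dtype keep their defaults.
    loss = StrongConvexHuber(1.0, 1.0, 1.0, 1.0)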
View file

@ -261,8 +261,9 @@ class HuberTests(keras_parameterized.TestCase):
Args: Args:
reg_lambda: initialization value for reg_lambda arg reg_lambda: initialization value for reg_lambda arg
C: initialization value for C arg c: initialization value for C arg
radius_constant: initialization value for radius_constant arg radius_constant: initialization value for radius_constant arg
delta: the delta parameter for the huber loss
""" """
# test valid domains for each variable # test valid domains for each variable
loss = StrongConvexHuber(reg_lambda, c, radius_constant, delta) loss = StrongConvexHuber(reg_lambda, c, radius_constant, delta)
@@ -295,11 +296,11 @@ class HuberTests(keras_parameterized.TestCase):
       },
   ])
   def test_bad_init_params(self, reg_lambda, c, radius_constant, delta):
-    """Test invalid domain for given params. Should return ValueError
+    """Test invalid domain for given params. Should return ValueError.

     Args:
       reg_lambda: initialization value for reg_lambda arg
-      C: initialization value for C arg
+      c: initialization value for C arg
       radius_constant: initialization value for radius_constant arg
       delta: the delta parameter for the huber loss
     """
@@ -406,7 +407,7 @@ class HuberTests(keras_parameterized.TestCase):
       },
   ])
   def test_fns(self, init_args, fn, args, result):
-    """Test that fn of BinaryCrossentropy loss returns the correct result
+    """Test that fn of BinaryCrossentropy loss returns the correct result.

     Args:
       init_args: init values for loss instance

View file

@@ -86,10 +86,12 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
               **kwargs):  # pylint: disable=arguments-differ
     """See super class. Default optimizer used in Bolton method is SGD.
+
+    Missing args.
     """
     if not isinstance(loss, StrongConvexMixin):
-      raise ValueError("loss function must be a Strongly Convex and therefore "
-                       "extend the StrongConvexMixin.")
+      raise ValueError('loss function must be a Strongly Convex and therefore '
+                       'extend the StrongConvexMixin.')
     if not self._layers_instantiated:  # compile may be called multiple times
       # for instance, if the input/outputs are not defined until fit.
       self.output_layer = tf.keras.layers.Dense(
@@ -150,7 +152,7 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
       data_size = n_samples
     elif hasattr(x, 'shape'):
       data_size = x.shape[0]
-    elif hasattr(x, "__len__"):
+    elif hasattr(x, '__len__'):
       data_size = len(x)
     else:
       data_size = None
@@ -187,9 +189,11 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
                     n_samples=None,
                     steps_per_epoch=None,
                     **kwargs):  # pylint: disable=arguments-differ
-    """
+    """Fit with a generator.
+
     This method is the same as fit except for when the passed dataset
     is a generator. See super method and fit for more details.
+
     Args:
       n_samples: number of individual samples in x
       noise_distribution: the distribution to get noise from.
@@ -206,7 +210,7 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
       data_size = n_samples
     elif hasattr(generator, 'shape'):
       data_size = generator.shape[0]
-    elif hasattr(generator, "__len__"):
+    elif hasattr(generator, '__len__'):
       data_size = len(generator)
     else:
       data_size = None
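The two hunks above share the same size-inference pattern; restated as standalone code (my paraphrase of the branches shown, not the library's own code):

    def infer_data_size(x, n_samples=None):
      # An explicit n_samples always wins.
      if n_samples is not None:
        return n_samples
      # Arrays and tensors expose .shape; take the leading dimension.
      if hasattr(x, 'shape'):
        return x.shape[0]
      # Plain sequences fall back to len().
      if hasattr(x, '__len__'):
        return len(x)
      # Unknown size; the caller must then pass n_samples explicitly.
      return None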
@@ -238,7 +242,8 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
         the number of samples for each class
       num_classes: If class_weights is not None, then the number of
         classes.
-    Returns: class_weights as 1D tensor, to be passed to model's fit method.
+
+    Returns:
+      class_weights as 1D tensor, to be passed to model's fit method.
     """
     # Value checking
     class_keys = ['balanced']
@@ -246,14 +251,14 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
     if isinstance(class_weights, str):
       is_string = True
       if class_weights not in class_keys:
-        raise ValueError("Detected string class_weights with "
-                         "value: {0}, which is not one of {1}."
-                         "Please select a valid class_weight type"
-                         "or pass an array".format(class_weights,
+        raise ValueError('Detected string class_weights with '
+                         'value: {0}, which is not one of {1}.'
+                         'Please select a valid class_weight type'
+                         'or pass an array'.format(class_weights,
                                                    class_keys))
       if class_counts is None:
-        raise ValueError("Class counts must be provided if using "
-                         "class_weights=%s" % class_weights)
+        raise ValueError('Class counts must be provided if using '
+                         'class_weights=%s' % class_weights)
       class_counts_shape = tf.Variable(class_counts,
                                        trainable=False,
                                        dtype=self._dtype).shape
@@ -261,12 +266,12 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
         raise ValueError('class counts must be a 1D array.'
                          'Detected: {0}'.format(class_counts_shape))
       if num_classes is None:
-        raise ValueError("num_classes must be provided if using "
-                         "class_weights=%s" % class_weights)
+        raise ValueError('num_classes must be provided if using '
+                         'class_weights=%s' % class_weights)
     elif class_weights is not None:
       if num_classes is None:
-        raise ValueError("You must pass a value for num_classes if "
-                         "creating an array of class_weights")
+        raise ValueError('You must pass a value for num_classes if '
+                         'creating an array of class_weights')
     # performing class weight calculation
     if class_weights is None:
       class_weights = 1
@@ -280,11 +285,11 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
     else:
       class_weights = _ops.convert_to_tensor_v2(class_weights)
       if len(class_weights.shape) != 1:
-        raise ValueError("Detected class_weights shape: {0} instead of "
-                         "1D array".format(class_weights.shape))
+        raise ValueError('Detected class_weights shape: {0} instead of '
+                         '1D array'.format(class_weights.shape))
       if class_weights.shape[0] != num_classes:
         raise ValueError(
-            "Detected array length: {0} instead of: {1}".format(
+            'Detected array length: {0} instead of: {1}'.format(
                 class_weights.shape[0],
                 num_classes))
     return class_weights
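Taken together, these hunks validate three forms of class_weights: None, the string 'balanced' (which requires class_counts and num_classes), or a 1D array of length num_classes. A hedged usage sketch (the constructor arguments mirror the models.BoltonModel(1, 1) calls in this commit's tests; argument names follow the test parameterization, and the counts are illustrative):

    from privacy.bolton import models

    clf = models.BoltonModel(1, 1)
    # 'balanced' weights each class in inverse relation to its sample count and
    # returns a 1D tensor suitable for passing to fit().
    weights = clf.calculate_class_weights(class_weights='balanced',
                                          class_counts=[900, 100],
                                          num_classes=2)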

View file

@@ -17,17 +17,16 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import tensorflow as tf
-from tensorflow.python.keras import keras_parameterized
-from tensorflow.python.keras.optimizer_v2.optimizer_v2 import OptimizerV2
-from tensorflow.python.keras import losses
-from tensorflow.python.framework import ops as _ops
-from tensorflow.python.keras.regularizers import L1L2
 from absl.testing import parameterized
+import tensorflow as tf
+from tensorflow.python.framework import ops as _ops
+from tensorflow.python.keras import keras_parameterized
+from tensorflow.python.keras import losses
+from tensorflow.python.keras.optimizer_v2.optimizer_v2 import OptimizerV2
+from tensorflow.python.keras.regularizers import L1L2
 from privacy.bolton import models
-from privacy.bolton.optimizers import Bolton
 from privacy.bolton.losses import StrongConvexMixin
+from privacy.bolton.optimizers import Bolton


 class TestLoss(losses.Loss, StrongConvexMixin):
@@ -41,9 +40,11 @@ class TestLoss(losses.Loss, StrongConvexMixin):
   def radius(self):
     """Radius, R, of the hypothesis space W.

     W is a convex set that forms the hypothesis space.
-    Returns: radius
+
+    Returns:
+      radius
     """
     return _ops.convert_to_tensor_v2(1, dtype=tf.float32)
@ -69,7 +70,8 @@ class TestLoss(losses.Loss, StrongConvexMixin):
Args: Args:
class_weight: class weights used class_weight: class weights used
Returns: L Returns:
L
""" """
return _ops.convert_to_tensor_v2(1, dtype=tf.float32) return _ops.convert_to_tensor_v2(1, dtype=tf.float32)
@@ -81,11 +83,10 @@ class TestLoss(losses.Loss, StrongConvexMixin):
     )

   def max_class_weight(self, class_weight):
-    """the maximum weighting in class weights (max value) as a scalar tensor
+    """the maximum weighting in class weights (max value) as a scalar tensor.

     Args:
       class_weight: class weights used
-      dtype: the data type for tensor conversions.

     Returns:
       maximum class weighting as tensor scalar
@@ -104,7 +105,7 @@ class TestLoss(losses.Loss, StrongConvexMixin):

 class TestOptimizer(OptimizerV2):
-  """Test optimizer used for testing Bolton model"""
+  """Test optimizer used for testing Bolton model."""

   def __init__(self):
     super(TestOptimizer, self).__init__('test')
@@ -152,7 +153,7 @@ class InitTests(keras_parameterized.TestCase):
       },
   ])
   def test_bad_init_params(self, n_outputs):
-    """test bad initializations of BoltonModel that should raise errors
+    """test bad initializations of BoltonModel that should raise errors.

     Args:
       n_outputs: number of output neurons
@@ -174,7 +175,7 @@ class InitTests(keras_parameterized.TestCase):
       },
   ])
   def test_compile(self, n_outputs, loss, optimizer):
-    """test compilation of BoltonModel
+    """test compilation of BoltonModel.

     Args:
       n_outputs: number of output neurons
@@ -200,7 +201,7 @@ class InitTests(keras_parameterized.TestCase):
       }
   ])
   def test_bad_compile(self, n_outputs, loss, optimizer):
-    """test bad compilations of BoltonModel that should raise errors
+    """test bad compilations of BoltonModel that should raise errors.

     Args:
       n_outputs: number of output neurons
@@ -215,7 +216,8 @@ class InitTests(keras_parameterized.TestCase):

 def _cat_dataset(n_samples, input_dim, n_classes, generator=False):
-  """
+  """Creates a categorically encoded dataset.
+
   Creates a categorically encoded dataset (y is categorical).
   returns the specified dataset either as a static array or as a generator.
   Will have evenly split samples across each output class.
@@ -246,6 +248,7 @@ def _cat_dataset(n_samples, input_dim, n_classes, generator=False):
     return dataset
   return x_set, y_set

+
 def _do_fit(n_samples,
             input_dim,
             n_outputs,
class FitTests(keras_parameterized.TestCase): class FitTests(keras_parameterized.TestCase):
"""Test cases for keras model fitting""" """Test cases for keras model fitting."""
# @test_util.run_all_in_graph_and_eager_modes # @test_util.run_all_in_graph_and_eager_modes
@parameterized.named_parameters([ @parameterized.named_parameters([
@@ -323,7 +326,7 @@ class FitTests(keras_parameterized.TestCase):
       },
   ])
   def test_fit(self, generator, reset_n_samples):
-    """Tests fitting of BoltonModel
+    """Tests fitting of BoltonModel.

     Args:
       generator: True for generator test, False for iterator test.
@@ -355,7 +358,7 @@ class FitTests(keras_parameterized.TestCase):
       },
   ])
   def test_fit_gen(self, generator):
-    """Tests the fit_generator method of BoltonModel
+    """Tests the fit_generator method of BoltonModel.

     Args:
       generator: True to test with a generator dataset
@@ -392,7 +395,7 @@ class FitTests(keras_parameterized.TestCase):
       },
   ])
   def test_bad_fit(self, generator, reset_n_samples, distribution):
-    """Tests fitting with invalid parameters, which should raise an error
+    """Tests fitting with invalid parameters, which should raise an error.

     Args:
       generator: True to test with generator, False is iterator
@@ -442,9 +445,8 @@ class FitTests(keras_parameterized.TestCase):
                          class_weights,
                          class_counts,
                          num_classes,
-                         result
-                         ):
-    """Tests the BOltonModel calculate_class_weights method
+                         result):
+    """Tests the BoltonModel calculate_class_weights method.

     Args:
       class_weights: the class_weights to use
@@ -496,26 +498,28 @@ class FitTests(keras_parameterized.TestCase):
      'class_weights': [[1], [1]],
      'class_counts': None,
      'num_classes': 2,
-     'err_msg': "Detected class_weights shape"},
+     'err_msg': 'Detected class_weights shape'},
     {'testcase_name': 'class counts array, wrong number classes',
      'class_weights': [1, 1, 1],
      'class_counts': None,
      'num_classes': 2,
      'err_msg': 'Detected array length:'},
  ])
  def test_class_errors(self,
                        class_weights,
                        class_counts,
                        num_classes,
                        err_msg):
-    """Tests the BOltonModel calculate_class_weights method with invalid params
-    which should raise the expected errors.
+    """Tests the BoltonModel calculate_class_weights method.
+
+    This test passes invalid params which should raise the expected errors.
+
     Args:
       class_weights: the class_weights to use
       class_counts: count of number of samples for each class
       num_classes: number of outputs neurons
-      result: expected result
+      err_msg: the expected error message
     """
    clf = models.BoltonModel(1, 1)
    with self.assertRaisesRegexp(ValueError, err_msg):  # pylint: disable=deprecated-method

View file

@@ -108,8 +108,8 @@ class Bolton(optimizer_v2.OptimizerV2):
     Descent-based Analytics by Xi Wu et. al.
   """

   def __init__(self,  # pylint: disable=super-init-not-called
-               optimizer: optimizer_v2.OptimizerV2,
-               loss: StrongConvexMixin,
+               optimizer,
+               loss,
                dtype=tf.float32,
               ):
     """Constructor.

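The wrapper still expects a Keras OptimizerV2 and a StrongConvex loss even with the annotations dropped; a hedged construction sketch (illustrative values, with the loss class and import paths taken from earlier hunks in this commit):

    import tensorflow as tf
    from privacy.bolton.losses import StrongConvexHuber
    from privacy.bolton.optimizers import Bolton

    loss = StrongConvexHuber(1.0, 1.0, 1.0, 1.0)
    # Bolton wraps the internal optimizer and reroutes most attributes to it.
    optimizer = Bolton(tf.optimizers.SGD(), loss)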
View file

@@ -263,12 +263,7 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
   def test_project(self, r, shape, n_out, init_value, result):
     """test that a fn of Bolton optimizer is working as expected.

-    Args:
-      fn: method of Optimizer to test
-      args: args to optimizer fn
-      result: the expected result
-      test_attr: None if the fn returns the test result. Otherwise, this is
-        the attribute of Bolton to check against result with.
+    Missing args:
     """
     tf.random.set_seed(1)
@@ -455,8 +450,10 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
      'args': [1, 1]},
  ])
  def test_not_reroute_fn(self, fn, args):
-    """Test that a fn that should not be rerouted to the internal optimizer is
-    in face not rerouted.
+    """Test function is not rerouted.
+
+    Test that a fn that should not be rerouted to the internal optimizer is
+    in fact not rerouted.

     Args:
       fn: fn to test
@@ -492,12 +489,13 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
      'attr': '_iterations'}
  ])
  def test_reroute_attr(self, attr):
-    """ test that attribute of internal optimizer is correctly rerouted to
-    the internal optimizer
+    """Test a function is rerouted.
+
+    Test that attribute of internal optimizer is correctly rerouted to the
+    internal optimizer.

     Args:
       attr: attribute to test
-      result: result after checking attribute
     """
     loss = TestLoss(1, 1, 1)
     internal_optimizer = TestOptimizer()
@@ -510,12 +508,13 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
      'attr': '_not_valid'}
  ])
  def test_attribute_error(self, attr):
-    """Test that attribute of internal optimizer is correctly rerouted to
-    the internal optimizer
+    """Test rerouting of attributes.
+
+    Test that attribute of internal optimizer is correctly rerouted to the
+    internal optimizer.

     Args:
       attr: attribute to test
-      result: result after checking attribute
     """
     loss = TestLoss(1, 1, 1)
     internal_optimizer = TestOptimizer()
@@ -537,9 +536,7 @@ class SchedulerTest(keras_parameterized.TestCase):
     """ test that attribute of internal optimizer is correctly rerouted to
     the internal optimizer

-    Args:
-      attr: attribute to test
-      result: result after checking attribute
+    Missing args
     """
     scheduler = opt.GammaBetaDecreasingStep()
     with self.assertRaisesRegexp(Exception, err_msg):  # pylint: disable=deprecated-method
@@ -557,12 +554,12 @@ class SchedulerTest(keras_parameterized.TestCase):
      'res': 0.333333333},
  ])
  def test_call(self, step, res):
-    """ test that attribute of internal optimizer is correctly rerouted to
-    the internal optimizer
-
-    Args:
-      attr: attribute to test
-      result: result after checking attribute
+    """Test call.
+
+    Test that attribute of internal optimizer is correctly rerouted to the
+    internal optimizer.
+
+    Missing Args:
     """
     beta = _ops.convert_to_tensor_v2(2, dtype=tf.float32)
     gamma = _ops.convert_to_tensor_v2(1, dtype=tf.float32)
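The expected 'res' values in test_call are consistent with a decreasing step-size schedule of min(1/beta, 1/(gamma * step)); a small check of that reading (beta=2 and gamma=1 as converted in the test body above, and the formula is my inference from the fixtures, not quoted from the library):

    def expected_lr(step, beta=2.0, gamma=1.0):
      # Constant 1/beta early on, then the 1/(gamma * step) decay takes over.
      return min(1.0 / beta, 1.0 / (gamma * step))

    assert abs(expected_lr(3) - 0.333333333) < 1e-6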

View file

@@ -116,7 +116,7 @@ try:
              noise_distribution=noise_distribution,
              verbose=0)
 except ValueError as e:
-  print e
+  print(e)
 # -------
 # And now, re running with the parameter set.
 # -------
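The hunk above ends just before the tutorial retries the fit; a hedged sketch of that retry (the names bolt, x, and y are assumptions for illustration, only n_samples, noise_distribution, and verbose appear in the diff):

    # Hypothetical retry: passing n_samples explicitly avoids the ValueError above,
    # since fit() can no longer infer the dataset size on its own.
    bolt.fit(x, y,
             n_samples=x.shape[0],
             noise_distribution=noise_distribution,
             verbose=0)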