conflicts in opt test

npapernot 2019-07-29 21:27:54 +00:00
commit d10d7b0148
6 changed files with 105 additions and 60 deletions

privacy/bolton/losses.py

@@ -56,7 +56,7 @@ class StrongConvexMixin:
Args:
class_weight: the class weights as scalar or 1d tensor, where its
-dimensionality is equal to the number of outputs.
+dimensionality is equal to the number of outputs.
Returns:
Beta
@@ -115,7 +115,7 @@ class StrongConvexHuber(losses.Loss, StrongConvexMixin):
C: Penalty parameter C of the loss term
radius_constant: constant defining the length of the radius
delta: delta value in huber loss. When to switch from quadratic to
-absolute deviation.
+absolute deviation.
reduction: reduction type to use. See super class
name: Name of the loss instance
dtype: tf datatype to use for tensor conversions.
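For orientation, constructing the Huber loss documented above would look roughly like the sketch below. It is an assumption-laden sketch: reg_lambda as the leading parameter is inferred from sibling losses in this module, not from this hunk.

import tensorflow as tf
from privacy.bolton.losses import StrongConvexHuber

# Placeholder values; delta sets where the loss switches from
# quadratic to absolute deviation, per the docstring above.
huber = StrongConvexHuber(reg_lambda=1.0, C=1.0, radius_constant=1.0, delta=1.0)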

privacy/bolton/models.py

@@ -76,17 +76,12 @@ class BoltonModel(Model): # pylint: disable=abstract-method
def compile(self,
optimizer,
loss,
-metrics=None,
-loss_weights=None,
-sample_weight_mode=None,
-weighted_metrics=None,
-target_tensors=None,
-distribute=None,
kernel_initializer=tf.initializers.GlorotUniform,
**kwargs): # pylint: disable=arguments-differ
"""See super class. Default optimizer used in Bolton method is SGD.
Args:
+<<<<<<< HEAD
optimizer:
loss:
metrics:
@@ -96,6 +91,14 @@ class BoltonModel(Model): # pylint: disable=abstract-method
target_tensors:
distribute:
kernel_initializer:
+=======
+optimizer: The optimizer to use. This will be automatically wrapped
+with the Bolton Optimizer.
+loss: The loss function to use. Must be a StrongConvex loss (extend the
+StrongConvexMixin).
+kernel_initializer: The kernel initializer to use for the single layer.
+kwargs: kwargs to keras Model.compile. See super.
+>>>>>>> 71c4a11eb9ad66a78fb13428987366887ea20beb
"""
if not isinstance(loss, StrongConvexMixin):
raise ValueError('loss function must be a Strongly Convex and therefore '
@@ -112,15 +115,7 @@ class BoltonModel(Model): # pylint: disable=abstract-method
optimizer = optimizers.get(optimizer)
optimizer = Bolton(optimizer, loss)
-super(BoltonModel, self).compile(optimizer,
-loss=loss,
-metrics=metrics,
-loss_weights=loss_weights,
-sample_weight_mode=sample_weight_mode,
-weighted_metrics=weighted_metrics,
-target_tensors=target_tensors,
-distribute=distribute,
-**kwargs)
+super(BoltonModel, self).compile(optimizer, loss=loss, **kwargs)
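The one-line super().compile call above, together with the optimizers.get/Bolton lines earlier in the hunk, is the whole wrapping story: pass any Keras optimizer (or its string name) plus a StrongConvex loss. A minimal sketch of a valid call, with placeholder hyperparameter values (StrongConvexBinaryCrossentropy is the sibling loss from the losses module):

import tensorflow as tf
from privacy.bolton.losses import StrongConvexBinaryCrossentropy
from privacy.bolton.models import BoltonModel

model = BoltonModel(1)  # one output neuron
loss = StrongConvexBinaryCrossentropy(1.0, 1.0, 1.0)  # reg_lambda, C, radius_constant
model.compile(optimizer='sgd', loss=loss)  # 'sgd' is wrapped with Bolton internally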
def fit(self,
x=None,
@@ -142,6 +137,7 @@ class BoltonModel(Model): # pylint: disable=abstract-method
4. Use a strongly convex loss function (see compile)
See super implementation for more details.
+<<<<<<< HEAD
Args:
n_samples: the number of individual samples in x.
epsilon: privacy parameter, which trades off between utility and privacy.
@@ -149,8 +145,17 @@ class BoltonModel(Model): # pylint: disable=abstract-method
noise_distribution: the distribution to pull noise from.
class_weight: the class weights to be used. Can be a scalar or 1D tensor
whose dim == n_classes.
+=======
+Args:
+n_samples: the number of individual samples in x.
+epsilon: privacy parameter, which trades off between utility and privacy.
+See the bolton paper for more description.
+noise_distribution: the distribution to pull noise from.
+class_weight: the class weights to be used. Can be a scalar or 1D tensor
+whose dim == n_classes.
+>>>>>>> 71c4a11eb9ad66a78fb13428987366887ea20beb
-See the super method for descriptions on the rest of the arguments.
+See the super method for descriptions on the rest of the arguments.
"""
if class_weight is None:
class_weight_ = self.calculate_class_weights(class_weight)
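Putting the argument descriptions above together, a private fit call is sketched below; shapes and hyperparameter values are illustrative, and 'laplace' is assumed to be among the accepted noise distributions:

import numpy as np

x = np.random.normal(size=(100, 2)).astype(np.float32)
y = np.random.randint(2, size=(100, 1)).astype(np.float32)
model.fit(x, y,
          n_samples=100,                 # individual samples in x
          epsilon=2.0,                   # lower = more privacy, less utility
          noise_distribution='laplace',
          class_weight=None,             # defers to calculate_class_weights
          epochs=2)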
@@ -201,6 +206,7 @@ class BoltonModel(Model): # pylint: disable=abstract-method
This method is the same as fit except for when the passed dataset
is a generator. See super method and fit for more details.
+<<<<<<< HEAD
Args:
generator:
@@ -211,6 +217,18 @@ class BoltonModel(Model): # pylint: disable=abstract-method
Bolton paper for more description.
n_samples: number of individual samples in x
steps_per_epoch:
+=======
+Args:
+n_samples: number of individual samples in x
+noise_distribution: the distribution to get noise from.
+epsilon: privacy parameter, which trades off utility and privacy. See
+Bolton paper for more description.
+class_weight: the class weights to be used. Can be a scalar or 1D tensor
+whose dim == n_classes.
+See the super method for descriptions on the rest of the arguments.
+>>>>>>> 71c4a11eb9ad66a78fb13428987366887ea20beb
"""
if class_weight is None:
class_weight = self.calculate_class_weights(class_weight)
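fit_generator takes the same privacy arguments as fit; a sketch with a plain Python generator standing in for real input (reusing the x and y arrays from the previous sketch; steps_per_epoch and batch size are placeholders):

import numpy as np

def batches(batch_size=4):
  # Yields (x_batch, y_batch) tuples indefinitely.
  while True:
    idx = np.random.randint(0, 100, size=batch_size)
    yield x[idx], y[idx]

model.fit_generator(batches(),
                    steps_per_epoch=25,
                    n_samples=100,
                    epsilon=2.0,
                    noise_distribution='laplace')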
@@ -244,6 +262,7 @@ class BoltonModel(Model): # pylint: disable=abstract-method
num_classes=None):
"""Calculates class weighting to be used in training.
+<<<<<<< HEAD
Args:
class_weights: str specifying type, array giving weights, or None.
class_counts: If class_weights is not None, then an array of
@@ -252,6 +271,16 @@ class BoltonModel(Model): # pylint: disable=abstract-method
classes.
Returns:
class_weights as 1D tensor, to be passed to model's fit method.
+=======
+Args:
+class_weights: str specifying type, array giving weights, or None.
+class_counts: If class_weights is not None, then an array of
+the number of samples for each class
+num_classes: If class_weights is not None, then the number of
+classes.
+Returns:
+class_weights as 1D tensor, to be passed to model's fit method.
+>>>>>>> 71c4a11eb9ad66a78fb13428987366887ea20beb
"""
# Value checking
class_keys = ['balanced']
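The class_keys line above shows that 'balanced' is the accepted string form; per the docstring, usage would look roughly like this (the counts are placeholders, and the balanced formula in the comment is our assumption, not shown in this hunk):

weights = model.calculate_class_weights(class_weights='balanced',
                                        class_counts=[60, 40],
                                        num_classes=2)
# Presumably follows the usual n_samples / (num_classes * class_counts)
# balanced weighting; returns a 1D tensor to pass as fit's class_weight.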

privacy/bolton/models_test.py

@@ -175,12 +175,12 @@ class InitTests(keras_parameterized.TestCase):
},
])
def test_compile(self, n_outputs, loss, optimizer):
"""test compilation of BoltonModel.
"""Test compilation of BoltonModel.
Args:
n_outputs: number of output neurons
loss: instantiated TestLoss instance
-optimizer: instanced TestOptimizer instance
+optimizer: instantiated TestOptimizer instance
"""
# test compilation of valid tf.optimizer and tf.loss
with self.cached_session():
@@ -206,8 +206,13 @@ class InitTests(keras_parameterized.TestCase):
Args:
n_outputs: number of output neurons
loss: instantiated TestLoss instance
+<<<<<<< HEAD
optimizer: instanced TestOptimizer instance
"""
+=======
+optimizer: instantiated TestOptimizer instance
+"""
+>>>>>>> 71c4a11eb9ad66a78fb13428987366887ea20beb
# test compilation of invalid tf.optimizer and non instantiated loss.
with self.cached_session():
with self.assertRaises((ValueError, AttributeError)):
@@ -263,17 +268,17 @@ def _do_fit(n_samples,
"""Instantiate necessary components for fitting and perform a model fit.
Args:
-n_samples: number of samples in dataset
-input_dim: the sample dimensionality
-n_outputs: number of output neurons
-epsilon: privacy parameter
-generator: True to create a generator, False to use an iterator
-batch_size: batch_size to use
-reset_n_samples: True to set _samples to None prior to fitting.
-False does nothing
-optimizer: instance of TestOptimizer
-loss: instance of TestLoss
-distribution: distribution to get noise from.
+n_samples: number of samples in dataset
+input_dim: the sample dimensionality
+n_outputs: number of output neurons
+epsilon: privacy parameter
+generator: True to create a generator, False to use an iterator
+batch_size: batch_size to use
+reset_n_samples: True to set _samples to None prior to fitting.
+False does nothing
+optimizer: instance of TestOptimizer
+loss: instance of TestLoss
+distribution: distribution to get noise from.
Returns: BoltonModel instance
"""
@@ -330,8 +335,8 @@ class FitTests(keras_parameterized.TestCase):
"""Tests fitting of BoltonModel.
Args:
-generator: True for generator test, False for iterator test.
-reset_n_samples: True to reset the n_samples to None, False does nothing
+generator: True for generator test, False for iterator test.
+reset_n_samples: True to reset the n_samples to None, False does nothing
"""
loss = TestLoss(1, 1, 1)
optimizer = Bolton(TestOptimizer(), loss)
@@ -399,10 +404,10 @@ class FitTests(keras_parameterized.TestCase):
"""Tests fitting with invalid parameters, which should raise an error.
Args:
-generator: True to test with generator, False is iterator
-reset_n_samples: True to reset the n_samples param to None prior to
-passing it to fit
-distribution: distribution to get noise from.
+generator: True to test with generator, False is iterator
+reset_n_samples: True to reset the n_samples param to None prior to
+passing it to fit
+distribution: distribution to get noise from.
"""
with self.assertRaises(ValueError):
loss = TestLoss(1, 1, 1)
@@ -506,13 +511,13 @@ class FitTests(keras_parameterized.TestCase):
'num_classes': 2,
'err_msg': 'Detected array length:'},
])
def test_class_errors(self,
class_weights,
class_counts,
num_classes,
err_msg):
"""Tests the BOltonModel calculate_class_weights method.
+<<<<<<< HEAD
This test passes invalid params which should raise the expected errors.
@@ -522,6 +527,17 @@ class FitTests(keras_parameterized.TestCase):
num_classes: number of output neurons
err_msg:
"""
+=======
+This test passes invalid params which should raise the expected errors.
+Args:
+class_weights: the class_weights to use.
+class_counts: count of number of samples for each class.
+num_classes: number of output neurons.
+err_msg: The expected error message.
+"""
+>>>>>>> 71c4a11eb9ad66a78fb13428987366887ea20beb
clf = models.BoltonModel(1, 1)
with self.assertRaisesRegexp(ValueError, err_msg): # pylint: disable=deprecated-method
clf.calculate_class_weights(class_weights,

privacy/bolton/optimizers.py

@@ -310,12 +310,11 @@ class Bolton(optimizer_v2.OptimizerV2):
Args:
noise_distribution: the noise distribution to pick.
-see _accepted_distributions and get_noise for
-possible values.
+see _accepted_distributions and get_noise for possible values.
epsilon: privacy parameter. Lower gives more privacy but less utility.
layers: list of Keras/Tensorflow layers. Can be found as model.layers
class_weights: class_weights used, which may either be a scalar or 1D
-tensor with dim == n_classes.
+tensor with dim == n_classes.
n_samples: number of rows/individual samples in the training set
batch_size: batch size used.
"""

privacy/bolton/optimizers_test.py

@@ -208,7 +208,7 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
args: args to optimizer fn
result: the expected result
test_attr: None if the fn returns the test result. Otherwise, this is
-the attribute of Bolton to check against result with.
+the attribute of Bolton to check against result with.
"""
tf.random.set_seed(1)
@@ -263,11 +263,11 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
"""test that a fn of Bolton optimizer is working as expected.
Args:
-r:
-shape:
-n_out:
-init_value:
-result:
+r: Radius value for StrongConvex loss function.
+shape: input dimensionality
+n_out: output dimensionality
+init_value: the initial value for 'constant' kernel initializer
+result: the expected output after projection.
"""
tf.random.set_seed(1)
@tf.function
@@ -301,9 +301,9 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
"""Tests the context manager functionality of the optimizer.
Args:
-noise: noise distribution to pick
-epsilon: epsilon privacy parameter to use
-class_weights: class_weights to use
+noise: noise distribution to pick
+epsilon: epsilon privacy parameter to use
+class_weights: class_weights to use
"""
@tf.function
def test_run():
@@ -334,9 +334,9 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
"""Tests the context domains.
Args:
-noise: noise distribution to pick
-epsilon: epsilon privacy parameter to use
-err_msg: the expected error message
+noise: noise distribution to pick
+epsilon: epsilon privacy parameter to use
+err_msg: the expected error message
"""
@@ -454,7 +454,7 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
])
def test_not_reroute_fn(self, fn, args):
"""Test function is not rerouted.
Test that a fn that should not be rerouted to the internal optimizer is
in fact not rerouted.
@@ -493,7 +493,7 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
])
def test_reroute_attr(self, attr):
"""Test a function is rerouted.
Test that attribute of internal optimizer is correctly rerouted to the
internal optimizer.
@@ -512,7 +512,7 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
])
def test_attribute_error(self, attr):
"""Test rerouting of attributes.
Test that accessing an attribute present on neither the Bolton wrapper
nor the internal optimizer raises an AttributeError.
@@ -539,7 +539,7 @@ class SchedulerTest(keras_parameterized.TestCase):
"""Test attribute of internal opt correctly rerouted to the internal opt.
Args:
-err_msg:
+err_msg: The expected error message from the scheduler bad call.
"""
scheduler = opt.GammaBetaDecreasingStep()
with self.assertRaisesRegexp(Exception, err_msg): # pylint: disable=deprecated-method
@@ -558,13 +558,12 @@ class SchedulerTest(keras_parameterized.TestCase):
])
def test_call(self, step, res):
"""Test call.
Test that the scheduler returns the expected learning rate for the
given step.
Args:
-step:
-res:
+step: step number passed to the 'GammaBetaDecreasingStep' scheduler.
+res: the expected result of the 'GammaBetaDecreasingStep' call.
"""
beta = _ops.convert_to_tensor_v2(2, dtype=tf.float32)
gamma = _ops.convert_to_tensor_v2(1, dtype=tf.float32)
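For context, the schedule under test consumes the beta and gamma tensors above; on our reading of the Bolton method (not stated in this hunk) it should yield min(1/beta, 1/(gamma * step)). The test body continues roughly:

scheduler = opt.GammaBetaDecreasingStep()
scheduler.initialized = True  # assumption: bypasses the uninitialized error
scheduler.beta = beta
scheduler.gamma = gamma
learning_rate = scheduler(step)  # compare to res, e.g. self.assertAllClose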

tutorials/bolton_tutorial.py

@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tutorial for bolton module, the model and the optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=wrong-import-position
from privacy.bolton import losses # pylint: disable=wrong-import-position