conflicts in opt test

npapernot 2019-07-29 21:27:54 +00:00
commit d10d7b0148
6 changed files with 105 additions and 60 deletions

View file

@@ -76,17 +76,12 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
   def compile(self,
               optimizer,
               loss,
-              metrics=None,
-              loss_weights=None,
-              sample_weight_mode=None,
-              weighted_metrics=None,
-              target_tensors=None,
-              distribute=None,
               kernel_initializer=tf.initializers.GlorotUniform,
               **kwargs):  # pylint: disable=arguments-differ
     """See super class. Default optimizer used in Bolton method is SGD.
 
     Args:
+<<<<<<< HEAD
       optimizer:
       loss:
       metrics:
@@ -96,6 +91,14 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
       target_tensors:
       distribute:
       kernel_initializer:
+=======
+      optimizer: The optimizer to use. This will be automatically wrapped
+        with the Bolton Optimizer.
+      loss: The loss function to use. Must be a StrongConvex loss (extend the
+        StrongConvexMixin).
+      kernel_initializer: The kernel initializer to use for the single layer.
+      kwargs: kwargs to keras Model.compile. See super.
+>>>>>>> 71c4a11eb9ad66a78fb13428987366887ea20beb
     """
     if not isinstance(loss, StrongConvexMixin):
       raise ValueError('loss function must be a Strongly Convex and therefore '
@@ -112,15 +115,7 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
     optimizer = optimizers.get(optimizer)
     optimizer = Bolton(optimizer, loss)
-    super(BoltonModel, self).compile(optimizer,
-                                     loss=loss,
-                                     metrics=metrics,
-                                     loss_weights=loss_weights,
-                                     sample_weight_mode=sample_weight_mode,
-                                     weighted_metrics=weighted_metrics,
-                                     target_tensors=target_tensors,
-                                     distribute=distribute,
-                                     **kwargs)
+    super(BoltonModel, self).compile(optimizer, loss=loss, **kwargs)
 
   def fit(self,
           x=None,
@@ -142,6 +137,7 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
       4. Use a strongly convex loss function (see compile)
     See super implementation for more details.
 
+<<<<<<< HEAD
     Args:
       n_samples: the number of individual samples in x.
       epsilon: privacy parameter, which trades off between utility an privacy.
@@ -149,6 +145,15 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
       noise_distribution: the distribution to pull noise from.
       class_weight: the class weights to be used. Can be a scalar or 1D tensor
         whose dim == n_classes.
+=======
+    Args:
+      n_samples: the number of individual samples in x.
+      epsilon: privacy parameter, which trades off between utility an privacy.
+        See the bolton paper for more description.
+      noise_distribution: the distribution to pull noise from.
+      class_weight: the class weights to be used. Can be a scalar or 1D tensor
+        whose dim == n_classes.
+>>>>>>> 71c4a11eb9ad66a78fb13428987366887ea20beb
 
     See the super method for descriptions on the rest of the arguments.
     """
@@ -201,6 +206,7 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
     This method is the same as fit except for when the passed dataset
     is a generator. See super method and fit for more details.
 
+<<<<<<< HEAD
     Args:
       generator:
@@ -211,6 +217,18 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
         Bolton paper for more description.
       n_samples: number of individual samples in x
       steps_per_epoch:
+=======
+
+    Args:
+      n_samples: number of individual samples in x
+      noise_distribution: the distribution to get noise from.
+      epsilon: privacy parameter, which trades off utility and privacy. See
+        Bolton paper for more description.
+      class_weight: the class weights to be used. Can be a scalar or 1D tensor
+        whose dim == n_classes.
+
+    See the super method for descriptions on the rest of the arguments.
+>>>>>>> 71c4a11eb9ad66a78fb13428987366887ea20beb
     """
     if class_weight is None:
       class_weight = self.calculate_class_weights(class_weight)
@@ -244,6 +262,7 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
                               num_classes=None):
     """Calculates class weighting to be used in training.
 
+<<<<<<< HEAD
     Args:
       class_weights: str specifying type, array giving weights, or None.
       class_counts: If class_weights is not None, then an array of
@@ -252,6 +271,16 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
         classes.
     Returns:
       class_weights as 1D tensor, to be passed to model's fit method.
+=======
+    Args:
+      class_weights: str specifying type, array giving weights, or None.
+      class_counts: If class_weights is not None, then an array of
+        the number of samples for each class
+      num_classes: If class_weights is not None, then the number of
+        classes.
+    Returns:
+      class_weights as 1D tensor, to be passed to model's fit method.
+>>>>>>> 71c4a11eb9ad66a78fb13428987366887ea20beb
     """
     # Value checking
     class_keys = ['balanced']
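Pulled together, the compile and fit docstrings above amount to a short workflow: compile() wraps any Keras optimizer in Bolton and rejects any loss that does not extend StrongConvexMixin, and fit() takes the extra privacy arguments. A minimal sketch, assuming the privacy.bolton layout shown in this diff; the StrongConvexBinaryCrossentropy constructor arguments and the synthetic data are illustrative assumptions, not taken from this commit:

import tensorflow as tf
from privacy.bolton import losses, models

# Toy binary-classification data (illustrative only).
x_train = tf.random.uniform((100, 2))
y_train = tf.cast(tf.random.uniform((100, 1)) > 0.5, tf.float32)

# Single-output Bolton model. compile() wraps the optimizer with Bolton
# and raises ValueError unless the loss extends StrongConvexMixin.
model = models.BoltonModel(n_outputs=1)
model.compile(
    optimizer=tf.optimizers.SGD(),
    # Assumed constructor order: (reg_lambda, C, radius_constant);
    # check the losses module in your version.
    loss=losses.StrongConvexBinaryCrossentropy(1.0, 1.0, 1.0),
)

# fit() takes the privacy arguments documented in the docstrings above.
model.fit(x_train, y_train,
          n_samples=100,                 # number of individual samples in x
          epsilon=2.0,                   # lower epsilon -> more privacy
          noise_distribution='laplace')  # distribution to pull noise from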

View file

@@ -175,12 +175,12 @@ class InitTests(keras_parameterized.TestCase):
       },
   ])
   def test_compile(self, n_outputs, loss, optimizer):
-    """test compilation of BoltonModel.
+    """Test compilation of BoltonModel.
 
     Args:
       n_outputs: number of output neurons
       loss: instantiated TestLoss instance
-      optimizer: instanced TestOptimizer instance
+      optimizer: instantiated TestOptimizer instance
     """
     # test compilation of valid tf.optimizer and tf.loss
     with self.cached_session():
@@ -206,8 +206,13 @@ class InitTests(keras_parameterized.TestCase):
     Args:
       n_outputs: number of output neurons
       loss: instantiated TestLoss instance
+<<<<<<< HEAD
       optimizer: instanced TestOptimizer instance
     """
+=======
+      optimizer: instantiated TestOptimizer instance
+    """
+>>>>>>> 71c4a11eb9ad66a78fb13428987366887ea20beb
     # test compilaton of invalid tf.optimizer and non instantiated loss.
     with self.cached_session():
       with self.assertRaises((ValueError, AttributeError)):
@@ -506,13 +511,13 @@ class FitTests(keras_parameterized.TestCase):
       'num_classes': 2,
       'err_msg': 'Detected array length:'},
   ])
   def test_class_errors(self,
                         class_weights,
                         class_counts,
                         num_classes,
                         err_msg):
     """Tests the BOltonModel calculate_class_weights method.
+<<<<<<< HEAD
 
     This test passes invalid params which should raise the expected errors.
-
@@ -522,6 +527,17 @@ class FitTests(keras_parameterized.TestCase):
       num_classes: number of outputs neurons
       err_msg:
     """
+=======
+    This test passes invalid params which should raise the expected errors.
+
+    Args:
+      class_weights: the class_weights to use.
+      class_counts: count of number of samples for each class.
+      num_classes: number of outputs neurons.
+      err_msg: The expected error message.
+    """
+>>>>>>> 71c4a11eb9ad66a78fb13428987366887ea20beb
     clf = models.BoltonModel(1, 1)
     with self.assertRaisesRegexp(ValueError, err_msg):  # pylint: disable=deprecated-method
       clf.calculate_class_weights(class_weights,

View file

@@ -310,8 +310,7 @@ class Bolton(optimizer_v2.OptimizerV2):
 
     Args:
       noise_distribution: the noise distribution to pick.
-        see _accepted_distributions and get_noise for
-        possible values.
+        see _accepted_distributions and get_noise for possible values.
       epsilon: privacy parameter. Lower gives more privacy but less utility.
       layers: list of Keras/Tensorflow layers. Can be found as model.layers
       class_weights: class_weights used, which may either be a scalar or 1D
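These arguments (noise_distribution, epsilon, layers, class_weights) configure the Bolton optimizer's private training pass: weights are kept inside a radius-R ball derived from the strongly convex loss, and noise calibrated to epsilon is added at the end. A standalone sketch of the projection step only, my own illustration of the operation rather than the module's code:

import tensorflow as tf

def project_to_ball(weights, radius):
    # Rescale w back onto the R-ball when ||w|| > R; leave it unchanged otherwise.
    norm = tf.norm(weights, ord=2)
    return weights * tf.minimum(1.0, radius / norm)

w = tf.constant([3.0, 4.0])     # ||w|| = 5
print(project_to_ball(w, 1.0))  # -> [0.6, 0.8]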

View file

@@ -263,11 +263,11 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
     """test that a fn of Bolton optimizer is working as expected.
 
     Args:
-      r:
-      shape:
-      n_out:
-      init_value:
-      result:
+      r: Radius value for StrongConvex loss function.
+      shape: input_dimensionality
+      n_out: output dimensionality
+      init_value: the initial value for 'constant' kernel initializer
+      result: the expected output after projection.
     """
     tf.random.set_seed(1)
     @tf.function
@@ -539,7 +539,7 @@ class SchedulerTest(keras_parameterized.TestCase):
     """Test attribute of internal opt correctly rerouted to the internal opt.
 
     Args:
-      err_msg:
+      err_msg: The expected error message from the scheduler bad call.
     """
     scheduler = opt.GammaBetaDecreasingStep()
     with self.assertRaisesRegexp(Exception, err_msg):  # pylint: disable=deprecated-method
@@ -558,13 +558,12 @@ class SchedulerTest(keras_parameterized.TestCase):
   ])
   def test_call(self, step, res):
     """Test call.
 
     Test that attribute of internal optimizer is correctly rerouted to the
     internal optimizer
-
     Args:
-      step:
-      res:
+      step: step number to 'GammaBetaDecreasingStep' 'Scheduler'.
+      res: expected result from call to 'GammaBetaDecreasingStep' 'Scheduler'.
     """
     beta = _ops.convert_to_tensor_v2(2, dtype=tf.float32)
     gamma = _ops.convert_to_tensor_v2(1, dtype=tf.float32)
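The test_call parameterization above drives GammaBetaDecreasingStep with beta=2 and gamma=1. In the Bolton paper the step size decays as eta_t = min(1/beta, 1/(gamma*t)), which I assume is what the expected res values encode; a hedged sketch of that rule, not the scheduler's actual code:

import tensorflow as tf

def gamma_beta_decreasing_step(step, beta=2.0, gamma=1.0):
    # Flat at 1/beta early on, then decays as 1/(gamma * t).
    t = tf.cast(step, tf.float32)
    return tf.minimum(1.0 / beta, 1.0 / (gamma * t))

for t in (1, 2, 4, 8):
    print(t, float(gamma_beta_decreasing_step(t)))  # 0.5, 0.5, 0.25, 0.125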

View file

@@ -12,6 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """Tutorial for bolton module, the model and the optimizer."""
+from __future__ import absolute_import
+from __future__ import division
 from __future__ import print_function
 import tensorflow as tf  # pylint: disable=wrong-import-position
 from privacy.bolton import losses  # pylint: disable=wrong-import-position