forked from 626_privacy/tensorflow_privacy

format fixes

commit 19ce36777d (parent 0317ce8077)
6 changed files with 76 additions and 61 deletions
@@ -212,12 +212,12 @@ class StrongConvexBinaryCrossentropy(
   """Strongly Convex BinaryCrossentropy loss using l2 weight regularization."""

   def __init__(self,
-               reg_lambda: float,
-               C: float,
-               radius_constant: float,
-               from_logits: bool = True,
-               label_smoothing: float = 0,
-               reduction: str = losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE,
+               reg_lambda,
+               C,
+               radius_constant,
+               from_logits=True,
+               label_smoothing=0,
+               reduction=losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE,
                dtype=tf.float32):
     """
     Args:
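Aside: the hunk above only strips the Python type annotations; the constructor's behavior is unchanged. For orientation, a minimal construction sketch using the parameters visible in this hunk — the import path and all values are assumptions, not taken from this commit:

import tensorflow as tf
from privacy.bolton.losses import StrongConvexBinaryCrossentropy  # assumed path

# Placeholder values; the reduction argument is left at its documented default.
loss = StrongConvexBinaryCrossentropy(
    reg_lambda=1.0,        # strength of the l2 weight regularization
    C=1.0,                 # penalty constant from the Bolton formulation
    radius_constant=1.0,   # radius of the convex hypothesis space
    from_logits=True,
    label_smoothing=0,
    dtype=tf.float32)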
@@ -367,11 +367,12 @@ class HuberTests(keras_parameterized.TestCase):
       },
   ])
   def test_calculation(self, logits, y_true, delta, result):
-    """Test the call method to ensure it returns the correct value
+    """Test the call method to ensure it returns the correct value.
+
     Args:
       logits: unscaled output of model
       y_true: label
       delta:
       result: correct loss calculation value
     """
     logits = tf.Variable(logits, False, dtype=tf.float32)
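One detail in the final context line is easy to misread: the bare False passed to tf.Variable is the positional trainable flag, which keeps the test inputs out of gradient bookkeeping. A small equivalence sketch (values are placeholders):

import tensorflow as tf

logits = tf.Variable([2.0, -2.0], False, dtype=tf.float32)
# ...is the same as the more explicit:
logits = tf.Variable([2.0, -2.0], trainable=False, dtype=tf.float32)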
@@ -86,8 +86,16 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
               **kwargs):  # pylint: disable=arguments-differ
     """See super class. Default optimizer used in Bolton method is SGD.

+    Missing args.
+
+    Args:
+      optimizer:
+      loss:
+      metrics:
+      loss_weights:
+      sample_weight_mode:
+      weighted_metrics:
+      target_tensors:
+      distribute:
+      kernel_initializer:
     """
     if not isinstance(loss, StrongConvexMixin):
       raise ValueError('loss function must be a Strongly Convex and therefore '
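The isinstance check at the end of this hunk is the contract the docstring alludes to: compile only accepts losses that mix in StrongConvexMixin. A hedged sketch of both sides of that contract — the import paths and the BoltonModel constructor signature are assumptions:

import tensorflow as tf
from privacy.bolton.models import BoltonModel                     # assumed path
from privacy.bolton.losses import StrongConvexBinaryCrossentropy  # assumed path

model = BoltonModel(n_outputs=1)  # constructor argument is an assumption

# A strongly convex loss passes the isinstance check:
model.compile(optimizer=tf.optimizers.SGD(),
              loss=StrongConvexBinaryCrossentropy(1.0, 1.0, 1.0))

# A stock Keras loss does not, and triggers the ValueError raised above:
try:
  model.compile(optimizer=tf.optimizers.SGD(),
                loss=tf.keras.losses.BinaryCrossentropy())
except ValueError as err:
  print(err)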
@@ -189,20 +197,20 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
                     n_samples=None,
                     steps_per_epoch=None,
                     **kwargs):  # pylint: disable=arguments-differ
-    """Fit with a generator..
+    """Fit with a generator.

     This method is the same as fit except for when the passed dataset
     is a generator. See super method and fit for more details.

     Args:
-      n_samples: number of individual samples in x
+      generator:
       class_weight: the class weights to be used. Can be a scalar or 1D tensor
         whose dim == n_classes.
       noise_distribution: the distribution to get noise from.
       epsilon: privacy parameter, which trades off utility and privacy. See
         Bolton paper for more description.
-      class_weight: the class weights to be used. Can be a scalar or 1D tensor
-        whose dim == n_classes.
-
-      See the super method for descriptions on the rest of the arguments.
+      n_samples: number of individual samples in x
+      steps_per_epoch:
     """
     if class_weight is None:
       class_weight = self.calculate_class_weights(class_weight)
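Given the reordered Args list, a call might look like the sketch below, reusing the model from the earlier sketch; the generator body, the 'laplace' string, and every number are placeholders rather than values from this commit:

import tensorflow as tf

def data_gen():  # hypothetical generator of (features, labels) batches
  while True:
    yield tf.random.normal((32, 16)), tf.ones((32, 1))

model.fit_generator(data_gen(),
                    noise_distribution='laplace',
                    epsilon=2,
                    n_samples=1000,   # individual samples in x
                    steps_per_epoch=10)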
@@ -32,10 +32,10 @@ from privacy.bolton.optimizers import Bolton
 class TestLoss(losses.Loss, StrongConvexMixin):
   """Test loss function for testing Bolton model."""

-  def __init__(self, reg_lambda, C, radius_constant, name='test'):
+  def __init__(self, reg_lambda, C_arg, radius_constant, name='test'):
     super(TestLoss, self).__init__(name=name)
     self.reg_lambda = reg_lambda
-    self.C = C  # pylint: disable=invalid-name
+    self.C = C_arg  # pylint: disable=invalid-name
     self.radius_constant = radius_constant

   def radius(self):
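The reason for the C -> C_arg rename: pylint's invalid-name (C0103) check rejects a lone capital letter as an argument name, while an attribute assignment can keep the paper's C notation behind a line-scoped disable. Isolated, the pattern is:

def __init__(self, reg_lambda, C_arg, radius_constant, name='test'):
  super(TestLoss, self).__init__(name=name)
  # The disable below is line-scoped; only this attribute keeps the
  # single-letter notation.
  self.C = C_arg  # pylint: disable=invalid-name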
@@ -228,6 +228,7 @@ def _cat_dataset(n_samples, input_dim, n_classes, generator=False):
     input_dim: input dimensionality
     n_classes: output dimensionality
     generator: False for array, True for generator
+
   Returns:
     X as (n_samples, input_dim), Y as (n_samples, n_outputs)
   """
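The docstring now fully specifies the helper's contract. A minimal sketch of a function satisfying it — the body is illustrative, not the repo's actual implementation:

import tensorflow as tf

def _cat_dataset(n_samples, input_dim, n_classes, generator=False):
  # X as (n_samples, input_dim), Y as (n_samples, n_classes) one-hot labels.
  x = tf.random.normal((n_samples, input_dim))
  classes = tf.random.uniform((n_samples,), 0, n_classes, dtype=tf.int32)
  y = tf.one_hot(classes, n_classes)
  if generator:  # True: wrap the arrays as a tf.data dataset
    return tf.data.Dataset.from_tensor_slices((x, y))
  return x, y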
@@ -298,9 +298,9 @@ class Bolton(optimizer_v2.OptimizerV2):
     return self

   def __call__(self,
-               noise_distribution: str,
-               epsilon: float,
-               layers: list,
+               noise_distribution,
+               epsilon,
+               layers,
                class_weights,
                n_samples,
                batch_size
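The `return self` context line above hints at how this signature is consumed: the optimizer appears to be entered as a context manager after being called. A usage sketch under that assumption — the wrapped SGD, the loss and model from the earlier sketches, and all values are placeholders:

import tensorflow as tf
from privacy.bolton.optimizers import Bolton  # import shown in an earlier hunk

class_weights = tf.ones((2,))  # placeholder
optimizer = Bolton(tf.optimizers.SGD(0.5), loss)
with optimizer(noise_distribution='laplace',
               epsilon=2,
               layers=model.layers,
               class_weights=class_weights,
               n_samples=1000,
               batch_size=32) as private_opt:
  pass  # training steps would run inside this private context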
@@ -51,11 +51,8 @@ class TestModel(Model):  # pylint: disable=abstract-method

     Args:
       n_outputs: number of output neurons
-      epsilon: level of privacy guarantee
-      noise_distribution: distribution to pull weight perturbations from
-      weights_initializer: initializer for weights
-      seed: random seed to use
-      dtype: data type to use for tensors
+      input_shape:
+      init_value:
     """
     super(TestModel, self).__init__(name='bolton', dynamic=False)
     self.n_outputs = n_outputs
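The trimmed Args list matches what this test model actually accepts. A constructor sketch consistent with it — the super call and the n_outputs assignment are from the hunk; the dense layer and constant initializer are assumptions:

import tensorflow as tf
from tensorflow.python.keras.models import Model

class TestModel(Model):  # pylint: disable=abstract-method
  def __init__(self, n_outputs=2, input_shape=(16,), init_value=2):
    super(TestModel, self).__init__(name='bolton', dynamic=False)
    self.n_outputs = n_outputs
    self.layer1 = tf.keras.layers.Dense(
        n_outputs,
        input_shape=input_shape,
        kernel_initializer=tf.constant_initializer(init_value))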
@@ -71,10 +68,10 @@ class TestModel(Model):  # pylint: disable=abstract-method
 class TestLoss(losses.Loss, StrongConvexMixin):
   """Test loss function for testing Bolton model."""

-  def __init__(self, reg_lambda, C, radius_constant, name='test'):
+  def __init__(self, reg_lambda, C_arg, radius_constant, name='test'):
     super(TestLoss, self).__init__(name=name)
     self.reg_lambda = reg_lambda
-    self.C = C  # pylint: disable=invalid-name
+    self.C = C_arg  # pylint: disable=invalid-name
     self.radius_constant = radius_constant

   def radius(self):
@@ -82,7 +79,8 @@ class TestLoss(losses.Loss, StrongConvexMixin):

     W is a convex set that forms the hypothesis space.

-    Returns: radius
+    Returns:
+      radius
     """
     return _ops.convert_to_tensor_v2(self.radius_constant, dtype=tf.float32)
@@ -108,7 +106,8 @@ class TestLoss(losses.Loss, StrongConvexMixin):
     Args:
       class_weight: class weights used

-    Returns: L
+    Returns:
+      L
     """
     return _ops.convert_to_tensor_v2(1, dtype=tf.float32)
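Both Returns: fixes sit next to calls into TensorFlow's private ops module. For readers unfamiliar with that helper: _ops.convert_to_tensor_v2 is the internal tensorflow.python.framework.ops function; in the public TF 2.x API the closest spelling is tf.convert_to_tensor (a near-equivalence noted here, not something asserted by this commit):

import tensorflow as tf
from tensorflow.python.framework import ops as _ops

a = _ops.convert_to_tensor_v2(1, dtype=tf.float32)  # as used in these hunks
b = tf.convert_to_tensor(1, dtype=tf.float32)       # public-API counterpart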
@@ -143,7 +142,7 @@ class TestLoss(losses.Loss, StrongConvexMixin):


 class TestOptimizer(OptimizerV2):
-  """Optimizer used for testing the Bolton optimizer"""
+  """Optimizer used for testing the Bolton optimizer."""

   def __init__(self):
     super(TestOptimizer, self).__init__('test')
@@ -263,8 +262,12 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
   def test_project(self, r, shape, n_out, init_value, result):
     """test that a fn of Bolton optimizer is working as expected.

+    Missing args:
+
     Args:
       r:
       shape:
       n_out:
       init_value:
       result:
     """
     tf.random.set_seed(1)

     @tf.function
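For context on what test_project exercises: the Bolton method constrains weights to a radius-r ball, projecting them back after updates. A standalone sketch of that projection — the helper name is hypothetical and the formula is the standard l2-ball projection, not code from this commit:

import tensorflow as tf

def project_to_ball(weights, r):
  # Scale weights back onto the l2 ball of radius r when the norm exceeds r.
  norm = tf.norm(weights)
  return tf.cond(norm > r,
                 lambda: weights * (r / norm),
                 lambda: weights)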
@@ -524,7 +527,7 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):


 class SchedulerTest(keras_parameterized.TestCase):
-  """GammaBeta Scheduler tests"""
+  """GammaBeta Scheduler tests."""

   @parameterized.named_parameters([
       {'testcase_name': 'not in context',
|
@ -533,10 +536,10 @@ class SchedulerTest(keras_parameterized.TestCase):
|
|||
}
|
||||
])
|
||||
def test_bad_call(self, err_msg):
|
||||
""" test that attribute of internal optimizer is correctly rerouted to
|
||||
the internal optimizer
|
||||
"""Test attribute of internal opt correctly rerouted to the internal opt.
|
||||
|
||||
Missing args
|
||||
Args:
|
||||
err_msg:
|
||||
"""
|
||||
scheduler = opt.GammaBetaDecreasingStep()
|
||||
with self.assertRaisesRegexp(Exception, err_msg): # pylint: disable=deprecated-method
|
||||
|
@@ -559,7 +562,9 @@ class SchedulerTest(keras_parameterized.TestCase):
     Test that attribute of internal optimizer is correctly rerouted to the
     internal optimizer

+    Missing Args:
+
     Args:
       step:
       res:
     """
     beta = _ops.convert_to_tensor_v2(2, dtype=tf.float32)
     gamma = _ops.convert_to_tensor_v2(1, dtype=tf.float32)
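For reference on the quantities built in the last two context lines: in the BoltOn fitting procedure the step size at step t decays like min(1/beta, 1/(gamma * t)). A sketch under that assumption, reusing beta and gamma as constructed above:

import tensorflow as tf

def decayed_learning_rate(step):
  # min(1/beta, 1/(gamma * t)); the formula is a paper-level assumption.
  t = tf.cast(step, tf.float32)
  return tf.math.minimum(1.0 / beta, 1.0 / (gamma * t))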