forked from 626_privacy/tensorflow_privacy
many fixes
This commit is contained in: parent fe90e3c596, commit 8e6bcf9b4a
6 changed files with 135 additions and 144 deletions
@@ -20,7 +20,7 @@ if LooseVersion(tf.__version__) < LooseVersion('2.0.0'):
   raise ImportError("Please upgrade your version "
                     "of tensorflow from: {0} to at least 2.0.0 to "
                     "use privacy/bolton".format(LooseVersion(tf.__version__)))
-if hasattr(sys, 'skip_tf_privacy_import'):  # Useful for standalone scripts.
+if hasattr(sys, "skip_tf_privacy_import"):  # Useful for standalone scripts.
   pass
 else:
   from privacy.bolton.models import BoltonModel
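For orientation (not part of the diff): the guard above lets a standalone script opt out of the heavyweight privacy imports by creating a sentinel attribute on sys before importing the package. A minimal sketch of that escape hatch, assuming only what the hasattr check above implies:

import sys

# Hypothetical usage: create the sentinel so the hasattr(...) check above
# short-circuits and the privacy.bolton imports are skipped.
sys.skip_tf_privacy_import = True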
@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-"""Unit testing for losses.py"""
+"""Unit testing for losses."""
 
 from __future__ import absolute_import
 from __future__ import division
@@ -20,11 +20,11 @@ from __future__ import print_function
 from contextlib import contextmanager
 from io import StringIO
 import sys
-import tensorflow as tf
-from tensorflow.python.keras import keras_parameterized
-from tensorflow.python.framework import test_util
-from tensorflow.python.keras.regularizers import L1L2
 from absl.testing import parameterized
+import tensorflow as tf
+from tensorflow.python.framework import test_util
+from tensorflow.python.keras import keras_parameterized
+from tensorflow.python.keras.regularizers import L1L2
 from privacy.bolton.losses import StrongConvexBinaryCrossentropy
 from privacy.bolton.losses import StrongConvexHuber
 from privacy.bolton.losses import StrongConvexMixin
@@ -43,7 +43,7 @@ def captured_output():
 
 
 class StrongConvexMixinTests(keras_parameterized.TestCase):
-  """Tests for the StrongConvexMixin"""
+  """Tests for the StrongConvexMixin."""
   @parameterized.named_parameters([
       {'testcase_name': 'beta not implemented',
        'fn': 'beta',
@@ -58,6 +58,7 @@ class StrongConvexMixinTests(keras_parameterized.TestCase):
        'fn': 'radius',
        'args': []},
   ])
+
   def test_not_implemented(self, fn, args):
     """Test that the given fn's are not implemented on the mixin.
 
@@ -75,7 +76,7 @@ class StrongConvexMixinTests(keras_parameterized.TestCase):
        'args': []},
   ])
   def test_return_none(self, fn, args):
-    """Test that fn of Mixin returns None
+    """Test that fn of Mixin returns None.
 
     Args:
       fn: fn of Mixin to test
@@ -94,7 +95,7 @@ class BinaryCrossesntropyTests(keras_parameterized.TestCase):
        'reg_lambda': 1,
        'C': 1,
        'radius_constant': 1
-      },  # pylint: disable=invalid-name
+       },  # pylint: disable=invalid-name
   ])
   def test_init_params(self, reg_lambda, C, radius_constant):
     """Test initialization for given arguments.
@@ -113,20 +114,20 @@ class BinaryCrossesntropyTests(keras_parameterized.TestCase):
        'reg_lambda': 1,
        'C': -1,
        'radius_constant': 1
-      },
+       },
       {'testcase_name': 'negative radius',
        'reg_lambda': 1,
        'C': 1,
        'radius_constant': -1
-      },
+       },
       {'testcase_name': 'negative lambda',
        'reg_lambda': -1,
        'C': 1,
        'radius_constant': 1
-      },  # pylint: disable=invalid-name
+       },  # pylint: disable=invalid-name
   ])
   def test_bad_init_params(self, reg_lambda, C, radius_constant):
-    """Test invalid domain for given params. Should return ValueError
+    """Test invalid domain for given params. Should return ValueError.
 
     Args:
       reg_lambda: initialization value for reg_lambda arg
@@ -149,20 +150,20 @@ class BinaryCrossesntropyTests(keras_parameterized.TestCase):
        'logits': [-10000],
        'y_true': [1],
        'result': 10000,
-      },
+       },
       {'testcase_name': 'positivee gradient positive logits',
        'logits': [10000],
        'y_true': [0],
        'result': 10000,
-      },
+       },
       {'testcase_name': 'both negative',
        'logits': [-10000],
        'y_true': [0],
        'result': 0
-      },
+       },
   ])
   def test_calculation(self, logits, y_true, result):
-    """Test the call method to ensure it returns the correct value
+    """Test the call method to ensure it returns the correct value.
 
     Args:
       logits: unscaled output of model
@@ -181,28 +182,28 @@ class BinaryCrossesntropyTests(keras_parameterized.TestCase):
        'fn': 'beta',
        'args': [1],
        'result': tf.constant(2, dtype=tf.float32)
-      },
+       },
       {'testcase_name': 'gamma',
        'fn': 'gamma',
        'init_args': [1, 1, 1],
        'args': [],
        'result': tf.constant(1, dtype=tf.float32),
-      },
+       },
      {'testcase_name': 'lipchitz constant',
        'fn': 'lipchitz_constant',
        'init_args': [1, 1, 1],
        'args': [1],
        'result': tf.constant(2, dtype=tf.float32),
-      },
+       },
       {'testcase_name': 'kernel regularizer',
        'fn': 'kernel_regularizer',
        'init_args': [1, 1, 1],
        'args': [],
        'result': L1L2(l2=0.5),
-      },
+       },
   ])
   def test_fns(self, init_args, fn, args, result):
-    """Test that fn of BinaryCrossentropy loss returns the correct result
+    """Test that fn of BinaryCrossentropy loss returns the correct result.
 
     Args:
       init_args: init values for loss instance
@@ -226,7 +227,7 @@ class BinaryCrossesntropyTests(keras_parameterized.TestCase):
        'fn': None,
        'args': None,
        'print_res': 'The impact of label smoothing on privacy is unknown.'
-      },
+       },
   ])
   def test_prints(self, init_args, fn, args, print_res):
     """Test logger warning from StrongConvexBinaryCrossentropy.
@@ -245,7 +246,7 @@ class BinaryCrossesntropyTests(keras_parameterized.TestCase):
 
 
 class HuberTests(keras_parameterized.TestCase):
-  """tests for BinaryCrossesntropy StrongConvex loss"""
+  """tests for BinaryCrossesntropy StrongConvex loss."""
 
   @parameterized.named_parameters([
       {'testcase_name': 'normal',
@@ -253,10 +254,10 @@ class HuberTests(keras_parameterized.TestCase):
        'c': 1,
        'radius_constant': 1,
        'delta': 1,
-      },
+       },
   ])
   def test_init_params(self, reg_lambda, c, radius_constant, delta):
-    """Test initialization for given arguments
+    """Test initialization for given arguments.
 
     Args:
       reg_lambda: initialization value for reg_lambda arg
@@ -273,25 +274,25 @@ class HuberTests(keras_parameterized.TestCase):
        'c': -1,
        'radius_constant': 1,
        'delta': 1
-      },
+       },
       {'testcase_name': 'negative radius',
        'reg_lambda': 1,
        'c': 1,
        'radius_constant': -1,
        'delta': 1
-      },
+       },
       {'testcase_name': 'negative lambda',
        'reg_lambda': -1,
        'c': 1,
        'radius_constant': 1,
        'delta': 1
-      },
+       },
       {'testcase_name': 'negative delta',
        'reg_lambda': 1,
        'c': 1,
        'radius_constant': 1,
        'delta': -1
-      },
+       },
   ])
   def test_bad_init_params(self, reg_lambda, c, radius_constant, delta):
     """Test invalid domain for given params. Should return ValueError
@@ -320,49 +321,49 @@ class HuberTests(keras_parameterized.TestCase):
        'y_true': 1,
        'delta': 1,
        'result': 0.01*0.25,
-      },
+       },
       {'testcase_name': 'delta=1,y_true=1 1-z< h decision boundary',
        'logits': 0.1,
        'y_true': 1,
        'delta': 1,
        'result': 1.9**2 * 0.25,
-      },
+       },
       {'testcase_name': 'delta=1,y_true=1 z < 1-h decision boundary',
        'logits': -0.1,
        'y_true': 1,
        'delta': 1,
        'result': 1.1,
-      },
+       },
       {'testcase_name': 'delta=2,y_true=1 z>1+h decision boundary',
        'logits': 3.1,
        'y_true': 1,
        'delta': 2,
        'result': 0,
-      },
+       },
       {'testcase_name': 'delta=2,y_true=1 z<1+h decision boundary',
        'logits': 2.9,
        'y_true': 1,
        'delta': 2,
        'result': 0.01*0.125,
-      },
+       },
       {'testcase_name': 'delta=2,y_true=1 1-z < h decision boundary',
        'logits': 1.1,
        'y_true': 1,
        'delta': 2,
        'result': 1.9**2 * 0.125,
-      },
+       },
       {'testcase_name': 'delta=2,y_true=1 z < 1-h decision boundary',
        'logits': -1.1,
        'y_true': 1,
        'delta': 2,
        'result': 2.1,
-      },
+       },
       {'testcase_name': 'delta=1,y_true=-1 z>1+h decision boundary',
        'logits': -2.1,
        'y_true': -1,
        'delta': 1,
        'result': 0,
-      },
+       },
   ])
   def test_calculation(self, logits, y_true, delta, result):
     """Test the call method to ensure it returns the correct value
@@ -384,25 +385,25 @@ class HuberTests(keras_parameterized.TestCase):
        'fn': 'beta',
        'args': [1],
        'result': tf.Variable(1.5, dtype=tf.float32)
-      },
+       },
       {'testcase_name': 'gamma',
        'fn': 'gamma',
        'init_args': [1, 1, 1, 1],
        'args': [],
        'result': tf.Variable(1, dtype=tf.float32),
-      },
+       },
       {'testcase_name': 'lipchitz constant',
        'fn': 'lipchitz_constant',
        'init_args': [1, 1, 1, 1],
        'args': [1],
        'result': tf.Variable(2, dtype=tf.float32),
-      },
+       },
       {'testcase_name': 'kernel regularizer',
        'fn': 'kernel_regularizer',
        'init_args': [1, 1, 1, 1],
        'args': [],
        'result': L1L2(l2=0.5),
-      },
+       },
   ])
   def test_fns(self, init_args, fn, args, result):
     """Test that fn of BinaryCrossentropy loss returns the correct result
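For orientation, the parameterized cases above pin the losses to concrete analytic values. A minimal sketch (not part of the commit) of what the BinaryCrossentropy cases assert, with argument values taken from the test parameters:

import tensorflow as tf
from privacy.bolton.losses import StrongConvexBinaryCrossentropy

loss = StrongConvexBinaryCrossentropy(1, 1, 1)  # reg_lambda, C, radius_constant
loss.beta(1)               # expected: tf.constant(2, dtype=tf.float32)
loss.gamma()               # expected: tf.constant(1, dtype=tf.float32)
loss.lipchitz_constant(1)  # expected: tf.constant(2, dtype=tf.float32)
loss.kernel_regularizer()  # expected: equivalent to L1L2(l2=0.5)

The Huber test_calculation cases are consistent with a smoothed hinge: with z = y_true * logits and h = delta, the loss is 0 for z > 1 + h, 1 - z for z < 1 - h, and (1 + h - z)**2 / (4 * h) in between. A reference function inferred from those values (an assumption, not code from the diff):

def huber_reference(z, h):
  # Piecewise smoothed hinge implied by the test values above.
  if z > 1 + h:
    return 0.0
  if z < 1 - h:
    return 1.0 - z
  return (1 + h - z) ** 2 / (4 * h)

huber_reference(1.9, 1)  # 0.01 * 0.25, matching the first delta=1 case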
@@ -11,15 +11,15 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-"""Bolton model for bolton method of differentially private ML"""
+"""Bolton model for bolton method of differentially private ML."""
 
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 import tensorflow as tf
-from tensorflow.python.keras.models import Model
-from tensorflow.python.keras import optimizers
 from tensorflow.python.framework import ops as _ops
+from tensorflow.python.keras import optimizers
+from tensorflow.python.keras.models import Model
 from privacy.bolton.losses import StrongConvexMixin
 from privacy.bolton.optimizers import Bolton
 
@@ -44,9 +44,8 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
   def __init__(self,
                n_outputs,
                seed=1,
-               dtype=tf.float32
-               ):
-    """ private constructor.
+               dtype=tf.float32):
+    """Private constructor.
 
     Args:
       n_outputs: number of output classes to predict.
@@ -64,7 +63,7 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
     self._dtype = dtype
 
   def call(self, inputs):  # pylint: disable=arguments-differ
-    """Forward pass of network
+    """Forward pass of network.
 
     Args:
       inputs: inputs to neural network
@@ -111,8 +110,7 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
                                     weighted_metrics=weighted_metrics,
                                     target_tensors=target_tensors,
                                     distribute=distribute,
-                                    **kwargs
-                                    )
+                                    **kwargs)
 
   def fit(self,
           x=None,
@@ -158,8 +156,7 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
       data_size = None
     batch_size_ = self._validate_or_infer_batch_size(batch_size,
                                                      steps_per_epoch,
-                                                     x
-                                                     )
+                                                     x)
     # inferring batch_size to be passed to optimizer. batch_size must remain its
     # initial value when passed to super().fit()
     if batch_size_ is None:
@@ -173,15 +170,13 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
                         self.layers,
                         class_weight_,
                         data_size,
-                        batch_size_,
-                        ) as _:
+                        batch_size_) as _:
       out = super(BoltonModel, self).fit(x=x,
                                          y=y,
                                          batch_size=batch_size,
                                          class_weight=class_weight,
                                          steps_per_epoch=steps_per_epoch,
-                                         **kwargs
-                                         )
+                                         **kwargs)
     return out
 
   def fit_generator(self,
@@ -191,8 +186,7 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
                     epsilon=2,
                     n_samples=None,
                     steps_per_epoch=None,
-                    **kwargs
-                    ):  # pylint: disable=arguments-differ
+                    **kwargs):  # pylint: disable=arguments-differ
     """
     This method is the same as fit except for when the passed dataset
     is a generator. See super method and fit for more details.
@@ -218,28 +212,24 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
       data_size = None
     batch_size = self._validate_or_infer_batch_size(None,
                                                     steps_per_epoch,
-                                                    generator
-                                                    )
+                                                    generator)
     with self.optimizer(noise_distribution,
                         epsilon,
                         self.layers,
                         class_weight,
                         data_size,
-                        batch_size
-                        ) as _:
+                        batch_size) as _:
       out = super(BoltonModel, self).fit_generator(
           generator,
          class_weight=class_weight,
          steps_per_epoch=steps_per_epoch,
-          **kwargs
-          )
+          **kwargs)
     return out
 
   def calculate_class_weights(self,
                               class_weights=None,
                               class_counts=None,
-                              num_classes=None
-                              ):
+                              num_classes=None):
     """Calculates class weighting to be used in training.
 
     Args:
@@ -283,10 +273,8 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
     elif is_string and class_weights == 'balanced':
       num_samples = sum(class_counts)
       weighted_counts = tf.dtypes.cast(tf.math.multiply(num_classes,
-                                                        class_counts,
-                                                        ),
-                                       self._dtype
-                                       )
+                                                        class_counts),
+                                       self._dtype)
       class_weights = tf.Variable(num_samples, dtype=self._dtype) / \
                       tf.Variable(weighted_counts, dtype=self._dtype)
     else:
@@ -298,7 +286,5 @@ class BoltonModel(Model):  # pylint: disable=abstract-method
       raise ValueError(
           "Detected array length: {0} instead of: {1}".format(
               class_weights.shape[0],
-              num_classes
-          )
-      )
+              num_classes))
     return class_weights
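The 'balanced' branch above reduces to the formula num_samples / (num_classes * class_counts). Restated in plain Python as a sanity check (a sketch, not the module's code):

class_counts = [10.0, 30.0]      # samples observed per class
num_classes = 2
num_samples = sum(class_counts)  # 40.0
class_weights = [num_samples / (num_classes * n) for n in class_counts]
# -> [2.0, 0.666...]: the rarer class receives the larger weight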
@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-"""Unit testing for models.py"""
+"""Unit testing for models."""
 
 from __future__ import absolute_import
 from __future__ import division
@@ -29,8 +29,10 @@ from privacy.bolton import models
 from privacy.bolton.optimizers import Bolton
 from privacy.bolton.losses import StrongConvexMixin
+
+
 class TestLoss(losses.Loss, StrongConvexMixin):
-  """Test loss function for testing Bolton model"""
+  """Test loss function for testing Bolton model."""
 
   def __init__(self, reg_lambda, C, radius_constant, name='test'):
     super(TestLoss, self).__init__(name=name)
     self.reg_lambda = reg_lambda
@@ -103,6 +105,7 @@ class TestLoss(losses.Loss, StrongConvexMixin):
 
+
 class TestOptimizer(OptimizerV2):
   """Test optimizer used for testing Bolton model"""
 
   def __init__(self):
     super(TestOptimizer, self).__init__('test')
@@ -128,10 +131,10 @@ class InitTests(keras_parameterized.TestCase):
   @parameterized.named_parameters([
       {'testcase_name': 'normal',
        'n_outputs': 1,
-      },
+       },
       {'testcase_name': 'many outputs',
        'n_outputs': 100,
-      },
+       },
   ])
   def test_init_params(self, n_outputs):
     """Test initialization of BoltonModel.
@@ -146,7 +149,7 @@ class InitTests(keras_parameterized.TestCase):
   @parameterized.named_parameters([
       {'testcase_name': 'invalid n_outputs',
        'n_outputs': -1,
-      },
+       },
   ])
   def test_bad_init_params(self, n_outputs):
     """test bad initializations of BoltonModel that should raise errors
@@ -163,12 +166,12 @@ class InitTests(keras_parameterized.TestCase):
        'n_outputs': 1,
        'loss': TestLoss(1, 1, 1),
        'optimizer': 'adam',
-      },
+       },
       {'testcase_name': 'test compile',
        'n_outputs': 100,
        'loss': TestLoss(1, 1, 1),
        'optimizer': TestOptimizer(),
-      },
+       },
   ])
   def test_compile(self, n_outputs, loss, optimizer):
     """test compilation of BoltonModel
@@ -189,12 +192,12 @@ class InitTests(keras_parameterized.TestCase):
        'n_outputs': 1,
        'loss': losses.BinaryCrossentropy(),
        'optimizer': 'adam',
-      },
+       },
       {'testcase_name': 'Not valid optimizer',
        'n_outputs': 1,
        'loss': TestLoss(1, 1, 1),
        'optimizer': 'ada',
-      }
+       }
   ])
   def test_bad_compile(self, n_outputs, loss, optimizer):
     """test bad compilations of BoltonModel that should raise errors
@@ -293,8 +296,7 @@ def _do_fit(n_samples,
           batch_size=batch_size,
           n_samples=n_samples,
           noise_distribution=distribution,
-          epsilon=epsilon
-          )
+          epsilon=epsilon)
   return clf
 
 
@@ -306,19 +308,19 @@ class FitTests(keras_parameterized.TestCase):
       {'testcase_name': 'iterator fit',
        'generator': False,
        'reset_n_samples': True,
-      },
+       },
       {'testcase_name': 'iterator fit no samples',
        'generator': False,
        'reset_n_samples': True,
-      },
+       },
       {'testcase_name': 'generator fit',
        'generator': True,
        'reset_n_samples': False,
-      },
+       },
       {'testcase_name': 'with callbacks',
        'generator': True,
        'reset_n_samples': False,
-      },
+       },
   ])
   def test_fit(self, generator, reset_n_samples):
     """Tests fitting of BoltonModel
@@ -350,7 +352,7 @@ class FitTests(keras_parameterized.TestCase):
   @parameterized.named_parameters([
       {'testcase_name': 'generator fit',
        'generator': True,
-      },
+       },
   ])
   def test_fit_gen(self, generator):
     """Tests the fit_generator method of BoltonModel
@@ -382,12 +384,12 @@ class FitTests(keras_parameterized.TestCase):
        'generator': True,
        'reset_n_samples': True,
        'distribution': 'laplace'
-      },
+       },
       {'testcase_name': 'invalid distribution',
        'generator': True,
        'reset_n_samples': True,
        'distribution': 'not_valid'
-      },
+       },
   ])
   def test_bad_fit(self, generator, reset_n_samples, distribution):
     """Tests fitting with invalid parameters, which should raise an error
@@ -453,8 +455,7 @@ class FitTests(keras_parameterized.TestCase):
     clf = models.BoltonModel(1, 1)
     expected = clf.calculate_class_weights(class_weights,
                                            class_counts,
-                                           num_classes
-                                           )
+                                           num_classes)
 
     if hasattr(expected, 'numpy'):
       expected = expected.numpy()
@@ -467,13 +468,13 @@ class FitTests(keras_parameterized.TestCase):
        'class_weights': 'not_valid',
        'class_counts': 1,
        'num_classes': 1,
-       'err_msg': "Detected string class_weights with value: not_valid"},
+       'err_msg': 'Detected string class_weights with value: not_valid'},
       {'testcase_name': 'no class counts',
        'class_weights': 'balanced',
        'class_counts': None,
        'num_classes': 1,
-       'err_msg': "Class counts must be provided if "
-                  "using class_weights=balanced"},
+       'err_msg': 'Class counts must be provided if '
+                  'using class_weights=balanced'},
       {'testcase_name': 'no num classes',
        'class_weights': 'balanced',
        'class_counts': [1],
@@ -489,8 +490,8 @@ class FitTests(keras_parameterized.TestCase):
        'class_weights': [1],
        'class_counts': None,
        'num_classes': None,
-       'err_msg': "You must pass a value for num_classes if "
-                  "creating an array of class_weights"},
+       'err_msg': 'You must pass a value for num_classes if '
+                  'creating an array of class_weights'},
       {'testcase_name': 'class counts array, improper shape',
        'class_weights': [[1], [1]],
        'class_counts': None,
@@ -500,14 +501,13 @@ class FitTests(keras_parameterized.TestCase):
        'class_weights': [1, 1, 1],
        'class_counts': None,
        'num_classes': 2,
-       'err_msg': "Detected array length:"},
+       'err_msg': 'Detected array length:'},
   ])
   def test_class_errors(self,
                         class_weights,
                         class_counts,
                         num_classes,
-                        err_msg
-                        ):
+                        err_msg):
     """Tests the BOltonModel calculate_class_weights method with invalid params
     which should raise the expected errors.
 
@@ -521,8 +521,7 @@ class FitTests(keras_parameterized.TestCase):
     with self.assertRaisesRegexp(ValueError, err_msg):  # pylint: disable=deprecated-method
       clf.calculate_class_weights(class_weights,
                                   class_counts,
-                                  num_classes
-                                  )
+                                  num_classes)
 
 
 if __name__ == '__main__':
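The fit tests above funnel through the _do_fit helper, which compiles a BoltonModel and fits it with the privacy arguments. A condensed sketch of that flow on toy data, assuming the privacy.bolton package is importable (shapes and values are illustrative only):

import tensorflow as tf
from privacy.bolton import models
from privacy.bolton.losses import StrongConvexBinaryCrossentropy

x = tf.constant([[1.0], [2.0], [3.0], [4.0]])  # toy features
y = tf.constant([[0.0], [1.0], [0.0], [1.0]])  # toy labels
clf = models.BoltonModel(n_outputs=1)
clf.compile('adam', StrongConvexBinaryCrossentropy(1, 1, 1))
clf.fit(x, y,
        batch_size=2,
        n_samples=4,                   # dataset size, used for the privacy bound
        noise_distribution='laplace',  # distribution for output perturbation
        epsilon=2)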
@@ -11,29 +11,30 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-"""Unit testing for optimizers.py"""
+"""Unit testing for optimizers."""
 
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
-import tensorflow as tf
-from tensorflow.python.platform import test
-from tensorflow.python.keras.optimizer_v2.optimizer_v2 import OptimizerV2
-from tensorflow.python.keras import keras_parameterized
-from tensorflow.python.keras.regularizers import L1L2
-from tensorflow.python.keras.initializers import constant
-from tensorflow.python.keras import losses
-from tensorflow.python.keras.models import Model
-from tensorflow.python.framework import test_util
-from tensorflow.python import ops as _ops
 from absl.testing import parameterized
-from privacy.bolton.losses import StrongConvexMixin
+import tensorflow as tf
+from tensorflow.python import ops as _ops
+from tensorflow.python.framework import test_util
+from tensorflow.python.keras import keras_parameterized
+from tensorflow.python.keras import losses
+from tensorflow.python.keras.initializers import constant
+from tensorflow.python.keras.models import Model
+from tensorflow.python.keras.optimizer_v2.optimizer_v2 import OptimizerV2
+from tensorflow.python.keras.regularizers import L1L2
+from tensorflow.python.platform import test
 from privacy.bolton import optimizers as opt
+from privacy.bolton.losses import StrongConvexMixin
 
 
 class TestModel(Model):  # pylint: disable=abstract-method
   """Bolton episilon-delta model.
 
   Uses 4 key steps to achieve privacy guarantees:
   1. Adds noise to weights after training (output perturbation).
   2. Projects weights to R after each batch
@@ -68,7 +69,8 @@ class TestModel(Model):  # pylint: disable=abstract-method
 
 
 class TestLoss(losses.Loss, StrongConvexMixin):
-  """Test loss function for testing Bolton model"""
+  """Test loss function for testing Bolton model."""
+
   def __init__(self, reg_lambda, C, radius_constant, name='test'):
     super(TestLoss, self).__init__(name=name)
     self.reg_lambda = reg_lambda
@@ -77,6 +79,7 @@ class TestLoss(losses.Loss, StrongConvexMixin):
 
   def radius(self):
     """Radius, R, of the hypothesis space W.
+
     W is a convex set that forms the hypothesis space.
 
     Returns: radius
@@ -117,7 +120,7 @@ class TestLoss(losses.Loss, StrongConvexMixin):
     )
 
   def max_class_weight(self, class_weight, dtype=tf.float32):
-    """the maximum weighting in class weights (max value) as a scalar tensor
+    """the maximum weighting in class weights (max value) as a scalar tensor.
 
     Args:
       class_weight: class weights used
@@ -141,6 +144,7 @@ class TestLoss(losses.Loss, StrongConvexMixin):
 
 class TestOptimizer(OptimizerV2):
   """Optimizer used for testing the Bolton optimizer"""
+
   def __init__(self):
     super(TestOptimizer, self).__init__('test')
     self.not_private = 'test'
@@ -180,8 +184,9 @@ class TestOptimizer(OptimizerV2):
   def limit_learning_rate(self):
     return 'test'
 
+
 class BoltonOptimizerTest(keras_parameterized.TestCase):
-  """Bolton Optimizer tests"""
+  """Bolton Optimizer tests."""
   @test_util.run_all_in_graph_and_eager_modes
   @parameterized.named_parameters([
       {'testcase_name': 'getattr',
@@ -195,6 +200,7 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
        'result': None,
        'test_attr': ''},
   ])
+
   def test_fn(self, fn, args, result, test_attr):
     """test that a fn of Bolton optimizer is working as expected.
 
@@ -294,7 +300,7 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
        'class_weights': 1},
   ])
   def test_context_manager(self, noise, epsilon, class_weights):
-    """Tests the context manager functionality of the optimizer
+    """Tests the context manager functionality of the optimizer.
 
     Args:
       noise: noise distribution to pick
@@ -327,7 +333,7 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
        'err_msg': 'Detected epsilon: -1. Valid range is 0 < epsilon <inf'},
   ])
   def test_context_domains(self, noise, epsilon, err_msg):
-    """
+    """Tests the context domains.
 
     Args:
       noise: noise distribution to pick
@@ -408,7 +414,9 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
        'args': [1, 1]},
   ])
   def test_rerouted_function(self, fn, args):
-    """ tests that a method of the internal optimizer is correctly routed from
+    """Tests rerouted function.
+
+    Tests that a method of the internal optimizer is correctly routed from
     the Bolton instance to the internal optimizer instance (TestOptimizer,
     here).
 
@@ -495,15 +503,14 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
     internal_optimizer = TestOptimizer()
     optimizer = opt.Bolton(internal_optimizer, loss)
     self.assertEqual(getattr(optimizer, attr),
-                     getattr(internal_optimizer, attr)
-                     )
+                     getattr(internal_optimizer, attr))
 
   @parameterized.named_parameters([
       {'testcase_name': 'attr does not exist',
        'attr': '_not_valid'}
   ])
   def test_attribute_error(self, attr):
-    """ test that attribute of internal optimizer is correctly rerouted to
+    """Test that attribute of internal optimizer is correctly rerouted to
     the internal optimizer
 
     Args:
@@ -516,6 +523,7 @@ class BoltonOptimizerTest(keras_parameterized.TestCase):
     with self.assertRaises(AttributeError):
       getattr(optimizer, attr)
 
 
 class SchedulerTest(keras_parameterized.TestCase):
   """GammaBeta Scheduler tests"""
+
@@ -523,7 +531,7 @@ class SchedulerTest(keras_parameterized.TestCase):
       {'testcase_name': 'not in context',
        'err_msg': 'Please initialize the GammaBetaDecreasingStep Learning Rate'
                   ' Scheduler'
-      }
+       }
   ])
   def test_bad_call(self, err_msg):
     """ test that attribute of internal optimizer is correctly rerouted to
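The context-manager tests above exercise the same protocol that BoltonModel.fit uses internally (see the model hunks earlier in this diff): enter the Bolton optimizer as a context with the privacy parameters, train inside it, and let the exit step apply the weight perturbation. A rough sketch of that protocol; the constructor and argument order are copied from code in this diff, while the model and values are illustrative placeholders:

import tensorflow as tf
from privacy.bolton import optimizers as opt
from privacy.bolton.losses import StrongConvexBinaryCrossentropy

loss = StrongConvexBinaryCrossentropy(1, 1, 1)
model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
model.build(input_shape=(None, 1))
optimizer = opt.Bolton(tf.optimizers.SGD(), loss)  # wraps a normal optimizer
with optimizer('laplace',      # noise_distribution
               2,              # epsilon
               model.layers,   # layers to project and perturb
               1,              # class_weights
               100,            # data_size (number of samples)
               10) as _:       # batch_size
  pass  # training steps would run here; noise is added on context exit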
@@ -11,14 +11,11 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Tutorial for bolton module, the model and the optimizer."""
 import sys
 
 sys.path.append('..')
 import tensorflow as tf  # pylint: disable=wrong-import-position
 from privacy.bolton import losses  # pylint: disable=wrong-import-position
 from privacy.bolton import models  # pylint: disable=wrong-import-position
-from privacy.bolton.optimizers import Bolton  # pylint: disable=wrong-import-position
 # -------
 # First, we will create a binary classification dataset with a single output
 # dimension. The samples for each label are repeated data points at different
@@ -59,9 +56,9 @@ loss = losses.StrongConvexBinaryCrossentropy(reg_lambda, C, radius_constant)
 # For simplicity, we pick all parameters of the StrongConvexBinaryCrossentropy
 # to be 1; these are all tunable and their impact can be read in losses.
 # StrongConvexBinaryCrossentropy.We then compile the model with the chosen
-# optimizer and loss, which will automatically wrap the chosen optimizer with the
-# Bolton Optimizer, ensuring the required components function as required for
-# privacy guarantees.
+# optimizer and loss, which will automatically wrap the chosen optimizer with
+# the Bolton Optimizer, ensuring the required components function as required
+# for privacy guarantees.
 # -------
 bolt.compile(optimizer, loss)
 # -------
@@ -69,13 +66,13 @@ bolt.compile(optimizer, loss)
 # the dataset and model.These parameters are:
 # 1. the class_weights used
 # 2. the number of samples in the dataset
-# 3. the batch size which the model will try to infer, if possible. If not, you
-# will be required to pass these explicitly to the fit method.
+# 3. the batch size which the model will try to infer, if possible. If not,
+# you will be required to pass these explicitly to the fit method.
 #
 # As well, there are two privacy parameters than can be altered:
 # 1. epsilon, a float
-# 2. noise_distribution, a valid string indicating the distriution to use (must be
-# implemented)
+# 2. noise_distribution, a valid string indicating the distriution to use (must
+# be implemented)
 #
 # The BoltonModel offers a helper method,.calculate_class_weight to aid in
 # class_weight calculation.
@@ -117,8 +114,7 @@ try:
            batch_size=batch_size,
            n_samples=n_samples,
            noise_distribution=noise_distribution,
-           verbose=0
-           )
+           verbose=0)
 except ValueError as e:
   print(e)
 # -------
@@ -131,8 +127,7 @@ bolt.fit(generator,
          batch_size=batch_size,
          n_samples=n_samples,
         noise_distribution=noise_distribution,
-         verbose=0
-         )
+         verbose=0)
 # -------
 # You don't have to use the bolton model to use the Bolton method.
 # There are only a few requirements:
@@ -140,16 +135,18 @@ bolt.fit(generator,
 # 2. instantiate the optimizer and use it as a context around the fit operation.
 # -------
 # -------------------- Part 2, using the Optimizer
+from privacy.bolton.optimizers import Bolton  # pylint: disable=wrong-import-position
+
 # -------
 # Here, we create our own model and setup the Bolton optimizer.
 # -------
 
 
 class TestModel(tf.keras.Model):  # pylint: disable=abstract-method
+
   def __init__(self, reg_layer, number_of_outputs=1):
     super(TestModel, self).__init__(name='test')
     self.output_layer = tf.keras.layers.Dense(number_of_outputs,
-                                              kernel_regularizer=reg_layer
-                                              )
+                                              kernel_regularizer=reg_layer)
 
   def call(self, inputs):  # pylint: disable=arguments-differ
     return self.output_layer(inputs)