Update TensorFlow Privacy to use Python 3 super().

PiperOrigin-RevId: 424916118
Authored by Michael Reneer on 2022-01-28 11:26:05 -08:00; committed by A. Unique TensorFlower
parent 9050f18b59
commit 7396ad62da
22 changed files with 30 additions and 35 deletions
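
The change is mechanical throughout: every Python 2 compatible call of the form super(ClassName, self).method(...) becomes the Python 3 zero-argument form, which resolves the class and instance implicitly. A minimal before/after sketch (hypothetical class names, not from the diff):

    class Base:
      def __init__(self, neighboring_relation):
        self.neighboring_relation = neighboring_relation

    class Child(Base):
      def __init__(self, neighboring_relation):
        # Before this commit (Python 2 compatible form):
        #   super(Child, self).__init__(neighboring_relation)
        # After this commit: no repeated class name, and the call stays
        # correct if Child is renamed or the code is moved.
        super().__init__(neighboring_relation)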

@@ -527,7 +527,7 @@ class RdpAccountant(privacy_accountant.PrivacyAccountant):
       orders: Optional[Collection[float]] = None,
       neighboring_relation: NeighborRel = NeighborRel.ADD_OR_REMOVE_ONE,
   ):
-    super(RdpAccountant, self).__init__(neighboring_relation)
+    super().__init__(neighboring_relation)
     if orders is None:
       # Default orders chosen to give good coverage for Gaussian mechanism in
       # the privacy regime of interest. In the future, more orders might be

@@ -138,10 +138,7 @@ class StrongConvexHuber(losses.Loss, StrongConvexMixin):
     self.radius_constant = radius_constant
     self.dtype = dtype
     self.reg_lambda = tf.constant(reg_lambda, dtype=self.dtype)
-    super(StrongConvexHuber, self).__init__(
-        name="strongconvexhuber",
-        reduction=reduction,
-    )
+    super().__init__(reduction=reduction, name="strongconvexhuber")

   def call(self, y_true, y_pred):
     """Computes loss.
@@ -248,7 +245,7 @@ class StrongConvexBinaryCrossentropy(
     self.dtype = dtype
     self.C = c_arg  # pylint: disable=invalid-name
     self.reg_lambda = tf.constant(reg_lambda, dtype=self.dtype)
-    super(StrongConvexBinaryCrossentropy, self).__init__(
+    super().__init__(
         reduction=reduction,
         name="strongconvexbinarycrossentropy",
         from_logits=from_logits,

@@ -28,7 +28,7 @@ class TestLoss(losses.Loss, StrongConvexMixin):
   """Test loss function for testing BoltOn model."""

   def __init__(self, reg_lambda, c_arg, radius_constant, name='test'):
-    super(TestLoss, self).__init__(name=name)
+    super().__init__(name=name)
     self.reg_lambda = reg_lambda
     self.C = c_arg  # pylint: disable=invalid-name
     self.radius_constant = radius_constant
@@ -103,7 +103,7 @@ class TestOptimizer(OptimizerV2):
   """Test optimizer used for testing BoltOn model."""

   def __init__(self):
-    super(TestOptimizer, self).__init__('test')
+    super().__init__('test')

   def compute_gradients(self):
     return 0

@@ -51,7 +51,7 @@ class TestModel(Model):  # pylint: disable=abstract-method
       input_shape:
       init_value:
     """
-    super(TestModel, self).__init__(name='bolton', dynamic=False)
+    super().__init__(name='bolton', dynamic=False)
     self.n_outputs = n_outputs
     self.layer_input_shape = input_shape
     self.output_layer = tf.keras.layers.Dense(
@@ -142,7 +142,7 @@ class TestOptimizer(OptimizerV2):
   """Optimizer used for testing the BoltOn optimizer."""

   def __init__(self):
-    super(TestOptimizer, self).__init__('test')
+    super().__init__('test')
     self.not_private = 'test'
     self.iterations = tf.constant(1, dtype=tf.float32)
     self._iterations = tf.constant(1, dtype=tf.float32)

@@ -138,4 +138,4 @@ class NestedSumQuery(NestedQuery, dp_query.SumAggregationDPQuery):
     tree.map_structure(check, queries)
-    super(NestedSumQuery, self).__init__(queries)
+    super().__init__(queries)

@@ -45,8 +45,7 @@ class NoPrivacyAverageQuery(dp_query.SumAggregationDPQuery):
   def initial_sample_state(self, template):
     """Implements `tensorflow_privacy.DPQuery.initial_sample_state`."""
-    return (super(NoPrivacyAverageQuery,
-                  self).initial_sample_state(template), tf.constant(0.0))
+    return super().initial_sample_state(template), tf.constant(0.0)

   def preprocess_record(self, params, record, weight=1):
     """Implements `tensorflow_privacy.DPQuery.preprocess_record`.

@@ -198,7 +198,7 @@ class NoPrivacyQuantileEstimatorQuery(QuantileEstimatorQuery):
         updating is preferred for non-negative records like vector norms that
         could potentially be very large or very close to zero.
     """
-    super(NoPrivacyQuantileEstimatorQuery, self).__init__(
+    super().__init__(
         initial_estimate,
         target_quantile,
         learning_rate,

@@ -32,7 +32,7 @@ class DPBinaryClassHead(tf.estimator.BinaryClassHead):
                loss_reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
                loss_fn=None,
                name=None):
-    super(DPBinaryClassHead, self).__init__(
+    super().__init__(
         weight_column=weight_column,
         thresholds=thresholds,
         label_vocabulary=label_vocabulary,

@@ -61,7 +61,7 @@ class DNNClassifier(tf.estimator.Estimator):
         config=config,
         batch_norm=batch_norm)

-    super(DNNClassifier, self).__init__(
+    super().__init__(
         model_fn=_model_fn,
         model_dir=model_dir,
         config=config,

@@ -32,7 +32,7 @@ class DPMultiClassHead(tf.estimator.MultiClassHead):
                loss_reduction=losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE,
                loss_fn=None,
                name=None):
-    super(DPMultiClassHead, self).__init__(
+    super().__init__(
         n_classes=n_classes,
         weight_column=weight_column,
         label_vocabulary=label_vocabulary,

@@ -36,7 +36,7 @@ class DPMultiLabelHead(tf.estimator.MultiLabelHead):
                name=None):
     if loss_reduction == tf.keras.losses.Reduction.NONE:
       loss_reduction = tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE
-    super(DPMultiLabelHead, self).__init__(
+    super().__init__(
         n_classes=n_classes,
         weight_column=weight_column,
         thresholds=thresholds,

@@ -62,7 +62,7 @@ class DNNClassifier(tf.estimator.Estimator):
         config=config,
         batch_norm=batch_norm)

-    super(DNNClassifier, self).__init__(
+    super().__init__(
         model_fn=_model_fn,
         model_dir=model_dir,
         config=config,

@@ -76,7 +76,7 @@ def make_dp_model_class(cls):
         **kwargs: These will be passed on to the base class `__init__`
           method.
       """
-      super(DPModelClass, self).__init__(*args, **kwargs)
+      super().__init__(*args, **kwargs)
       self._l2_norm_clip = l2_norm_clip
       self._noise_multiplier = noise_multiplier
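
Note that zero-argument super() still works inside DPModelClass even though the class is created at runtime by a factory function: the methods are defined directly in the class body, so they capture the implicit __class__ cell. A runnable sketch of the pattern, with hypothetical names not taken from the diff:

    def make_loud_class(cls):
      """Builds a subclass of `cls` at runtime, like the DP wrappers do."""

      class LoudClass(cls):

        def __init__(self, *args, **kwargs):
          # Zero-argument super() is valid here: this method sits in the
          # class body, so it closes over the implicit __class__ cell even
          # though LoudClass only exists once the factory has run.
          super().__init__(*args, **kwargs)
          print('constructed', type(self).__name__)

      return LoudClass

    class Greeter:
      def __init__(self, name):
        self.name = name

    LoudGreeter = make_loud_class(Greeter)
    LoudGreeter('Ada')  # prints: constructed LoudClass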

@@ -115,7 +115,7 @@ def make_optimizer_class(cls):
         *args: These will be passed on to the base class `__init__` method.
         **kwargs: These will be passed on to the base class `__init__` method.
       """
-      super(DPOptimizerClass, self).__init__(*args, **kwargs)
+      super().__init__(*args, **kwargs)
       self._dp_sum_query = dp_sum_query
       self._num_microbatches = num_microbatches
       self._global_state = None

@@ -24,7 +24,7 @@ class DPOptimizerEagerTest(tf.test.TestCase, parameterized.TestCase):
   def setUp(self):
     tf.enable_eager_execution()
-    super(DPOptimizerEagerTest, self).setUp()
+    super().setUp()

   def _loss_fn(self, val0, val1):
     return 0.5 * tf.reduce_sum(

@@ -151,7 +151,7 @@ def make_keras_optimizer_class(cls):
         *args: These will be passed on to the base class `__init__` method.
         **kwargs: These will be passed on to the base class `__init__` method.
       """
-      super(DPOptimizerClass, self).__init__(*args, **kwargs)
+      super().__init__(*args, **kwargs)
       self.gradient_accumulation_steps = gradient_accumulation_steps
       self._l2_norm_clip = l2_norm_clip
       self._noise_multiplier = noise_multiplier
@@ -162,14 +162,13 @@ def make_keras_optimizer_class(cls):
       self._was_dp_gradients_called = False

     def _create_slots(self, var_list):
-      super(DPOptimizerClass, self)._create_slots(var_list)
+      super()._create_slots(var_list)
       if self.gradient_accumulation_steps > 1:
         for var in var_list:
           self.add_slot(var, 'grad_acc')

     def _prepare_local(self, var_device, var_dtype, apply_state):
-      super(DPOptimizerClass, self)._prepare_local(
-          var_device, var_dtype, apply_state)
+      super()._prepare_local(var_device, var_dtype, apply_state)
       if self.gradient_accumulation_steps > 1:
         apply_update = tf.math.equal(
             tf.math.floormod(self.iterations + 1,

@@ -117,7 +117,7 @@ def make_vectorized_keras_optimizer_class(cls):
         *args: These will be passed on to the base class `__init__` method.
         **kwargs: These will be passed on to the base class `__init__` method.
       """
-      super(DPOptimizerClass, self).__init__(*args, **kwargs)
+      super().__init__(*args, **kwargs)
       self._l2_norm_clip = l2_norm_clip
       self._noise_multiplier = noise_multiplier
       self._num_microbatches = num_microbatches
@@ -204,7 +204,7 @@ def make_vectorized_keras_optimizer_class(cls):
       def process_microbatch(microbatch_loss):
         """Compute clipped grads for one microbatch."""
         mean_loss = tf.reduce_mean(input_tensor=microbatch_loss)
-        grads = super(DPOptimizerClass, self).get_gradients(mean_loss, params)
+        grads = cls.get_gradients(self, mean_loss, params)
         grads_list = [
             g if g is not None else tf.zeros_like(v)
             for (g, v) in zip(list(grads), params)
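
This hunk is the one place where the zero-argument form cannot be used. Zero-argument super() reads its instance from the first argument of the frame it is called in, and the nested process_microbatch frame holds microbatch_loss rather than self; the commit therefore calls cls.get_gradients(self, ...) directly, which is equivalent here because cls is the direct base of DPOptimizerClass. A minimal sketch of the failure mode, with hypothetical names:

    class Base:
      def get_gradients(self):
        return 'base gradients'

    class Derived(Base):
      def get_gradients(self):
        def nested():
          # Fails at call time with "RuntimeError: super(): no arguments":
          # the nested frame has no argument for super() to bind to.
          return super().get_gradients()
        return nested()

    Derived().get_gradients()  # raises RuntimeError, not 'base gradients'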

@@ -100,7 +100,7 @@ def make_vectorized_optimizer_class(cls):
         *args: These will be passed on to the base class `__init__` method.
         **kwargs: These will be passed on to the base class `__init__` method.
       """
-      super(DPOptimizerClass, self).__init__(*args, **kwargs)
+      super().__init__(*args, **kwargs)
       self._l2_norm_clip = l2_norm_clip
       self._noise_multiplier = noise_multiplier
       self._num_microbatches = num_microbatches

@@ -404,7 +404,7 @@
     "               **kwargs):\n",
     "    if 'input_shape' not in kwargs and 'input_dim' in kwargs:\n",
     "      kwargs['input_shape'] = (kwargs.pop('input_dim'),)\n",
-    "    super(Attention, self).__init__(**kwargs)\n",
+    "    super().__init__(**kwargs)\n",
     "    self.units = units\n",
     "    self.activation = activations.get(activation)\n",
     "    self.use_bias = use_bias\n",
@@ -523,7 +523,7 @@
     "        'kernel_constraint': constraints.serialize(self.kernel_constraint),\n",
     "        'bias_constraint': constraints.serialize(self.bias_constraint)\n",
     "    }\n",
-    "    base_config = super(Attention, self).get_config()\n",
+    "    base_config = super().get_config()\n",
     "    return dict(list(base_config.items()) + list(config.items()))"
    ]
   },
@@ -553,7 +553,7 @@
     "               **kwargs):\n",
     "    if 'input_shape' not in kwargs and 'input_dim' in kwargs:\n",
     "      kwargs['input_shape'] = (kwargs.pop('input_dim'),)\n",
-    "    super(DenseTransposeTied, self).__init__(**kwargs)\n",
+    "    super().__init__(**kwargs)\n",
     "    self.units = units\n",
     "    # We add these two properties to save the tied weights\n",
     "    self.tied_to = tied_to\n",

@@ -244,7 +244,7 @@ class SingleMembershipProbabilityResultTest(absltest.TestCase):
 class AttackResultsCollectionTest(absltest.TestCase):

   def __init__(self, *args, **kwargs):
-    super(AttackResultsCollectionTest, self).__init__(*args, **kwargs)
+    super().__init__(*args, **kwargs)

     self.some_attack_result = SingleAttackResult(
         slice_spec=SingleSliceSpec(None),

@@ -29,7 +29,7 @@ from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_s
 class PrivacyReportTest(absltest.TestCase):

   def __init__(self, *args, **kwargs):
-    super(PrivacyReportTest, self).__init__(*args, **kwargs)
+    super().__init__(*args, **kwargs)

     # Classifier that achieves an AUC of 0.5.
     self.imperfect_classifier_result = SingleAttackResult(

@@ -144,7 +144,7 @@ bolt.fit_generator(generator,
 class TestModel(tf.keras.Model):  # pylint: disable=abstract-method

   def __init__(self, reg_layer, number_of_outputs=1):
-    super(TestModel, self).__init__(name='test')
+    super().__init__(name='test')
     self.output_layer = tf.keras.layers.Dense(number_of_outputs,
                                               kernel_regularizer=reg_layer)