forked from 626_privacy/tensorflow_privacy
Update TensorFlow Privacy to use Python 3 super()

PiperOrigin-RevId: 424916118
parent 9050f18b59
commit 7396ad62da
22 changed files with 30 additions and 35 deletions
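Every hunk below applies the same mechanical rewrite: the two-argument Python 2 form `super(Class, self)` becomes the zero-argument Python 3 form `super()`, which the interpreter resolves through the implicit `__class__` cell created for functions defined in a class body. A minimal sketch of the pattern, with stand-in names rather than the library's real classes:

```python
class Base:
  def __init__(self, relation):
    self.relation = relation

class Child(Base):
  def __init__(self, relation):
    # Python 2 style: class and instance spelled out explicitly.
    # super(Child, self).__init__(relation)
    # Python 3 style: both are inferred from the enclosing class body.
    super().__init__(relation)
```

The zero-argument form also keeps working if the class is renamed, which is why sweeps like this are usually safe to apply wholesale.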
```diff
@@ -527,7 +527,7 @@ class RdpAccountant(privacy_accountant.PrivacyAccountant):
       orders: Optional[Collection[float]] = None,
       neighboring_relation: NeighborRel = NeighborRel.ADD_OR_REMOVE_ONE,
   ):
-    super(RdpAccountant, self).__init__(neighboring_relation)
+    super().__init__(neighboring_relation)
     if orders is None:
       # Default orders chosen to give good coverage for Gaussian mechanism in
       # the privacy regime of interest. In the future, more orders might be
```
```diff
@@ -138,10 +138,7 @@ class StrongConvexHuber(losses.Loss, StrongConvexMixin):
     self.radius_constant = radius_constant
     self.dtype = dtype
     self.reg_lambda = tf.constant(reg_lambda, dtype=self.dtype)
-    super(StrongConvexHuber, self).__init__(
-        name="strongconvexhuber",
-        reduction=reduction,
-    )
+    super().__init__(reduction=reduction, name="strongconvexhuber")

   def call(self, y_true, y_pred):
     """Computes loss.
```
```diff
@@ -248,7 +245,7 @@ class StrongConvexBinaryCrossentropy(
     self.dtype = dtype
     self.C = c_arg  # pylint: disable=invalid-name
     self.reg_lambda = tf.constant(reg_lambda, dtype=self.dtype)
-    super(StrongConvexBinaryCrossentropy, self).__init__(
+    super().__init__(
         reduction=reduction,
         name="strongconvexbinarycrossentropy",
         from_logits=from_logits,
```
```diff
@@ -28,7 +28,7 @@ class TestLoss(losses.Loss, StrongConvexMixin):
   """Test loss function for testing BoltOn model."""

   def __init__(self, reg_lambda, c_arg, radius_constant, name='test'):
-    super(TestLoss, self).__init__(name=name)
+    super().__init__(name=name)
     self.reg_lambda = reg_lambda
     self.C = c_arg  # pylint: disable=invalid-name
     self.radius_constant = radius_constant
```
```diff
@@ -103,7 +103,7 @@ class TestOptimizer(OptimizerV2):
   """Test optimizer used for testing BoltOn model."""

   def __init__(self):
-    super(TestOptimizer, self).__init__('test')
+    super().__init__('test')

   def compute_gradients(self):
     return 0
```
```diff
@@ -51,7 +51,7 @@ class TestModel(Model):  # pylint: disable=abstract-method
       input_shape:
       init_value:
     """
-    super(TestModel, self).__init__(name='bolton', dynamic=False)
+    super().__init__(name='bolton', dynamic=False)
     self.n_outputs = n_outputs
     self.layer_input_shape = input_shape
     self.output_layer = tf.keras.layers.Dense(
```
```diff
@@ -142,7 +142,7 @@ class TestOptimizer(OptimizerV2):
   """Optimizer used for testing the BoltOn optimizer."""

   def __init__(self):
-    super(TestOptimizer, self).__init__('test')
+    super().__init__('test')
     self.not_private = 'test'
-    self.iterations = tf.constant(1, dtype=tf.float32)
+    self._iterations = tf.constant(1, dtype=tf.float32)

```
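Besides the `super()` cleanup, this hunk swaps the public `self.iterations` assignment for the private backing field `self._iterations`, presumably because `OptimizerV2` exposes `iterations` as a property whose setter has guards and side effects a test double does not want. A hedged illustration of that kind of conflict, with generic names rather than the Keras implementation:

```python
class Optimizer:
  def __init__(self):
    self._iterations = None

  @property
  def iterations(self):
    return self._iterations

  @iterations.setter
  def iterations(self, value):
    if self._iterations is not None:  # setter guards against reassignment
      raise RuntimeError('Cannot set `iterations` twice')
    self._iterations = value

class TestOptimizer(Optimizer):
  def __init__(self):
    super().__init__()
    # Writing the backing field directly bypasses the property's guard.
    self._iterations = 1
```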
```diff
@@ -138,4 +138,4 @@ class NestedSumQuery(NestedQuery, dp_query.SumAggregationDPQuery):

     tree.map_structure(check, queries)

-    super(NestedSumQuery, self).__init__(queries)
+    super().__init__(queries)
```
```diff
@@ -45,8 +45,7 @@ class NoPrivacyAverageQuery(dp_query.SumAggregationDPQuery):

   def initial_sample_state(self, template):
     """Implements `tensorflow_privacy.DPQuery.initial_sample_state`."""
-    return (super(NoPrivacyAverageQuery,
-                  self).initial_sample_state(template), tf.constant(0.0))
+    return super().initial_sample_state(template), tf.constant(0.0)

   def preprocess_record(self, params, record, weight=1):
     """Implements `tensorflow_privacy.DPQuery.preprocess_record`.
```
```diff
@@ -198,7 +198,7 @@ class NoPrivacyQuantileEstimatorQuery(QuantileEstimatorQuery):
         updating is preferred for non-negative records like vector norms that
         could potentially be very large or very close to zero.
     """
-    super(NoPrivacyQuantileEstimatorQuery, self).__init__(
+    super().__init__(
         initial_estimate,
         target_quantile,
         learning_rate,
```
```diff
@@ -32,7 +32,7 @@ class DPBinaryClassHead(tf.estimator.BinaryClassHead):
                loss_reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
                loss_fn=None,
                name=None):
-    super(DPBinaryClassHead, self).__init__(
+    super().__init__(
         weight_column=weight_column,
         thresholds=thresholds,
         label_vocabulary=label_vocabulary,
```
```diff
@@ -61,7 +61,7 @@ class DNNClassifier(tf.estimator.Estimator):
           config=config,
           batch_norm=batch_norm)

-    super(DNNClassifier, self).__init__(
+    super().__init__(
         model_fn=_model_fn,
         model_dir=model_dir,
         config=config,
```
```diff
@@ -32,7 +32,7 @@ class DPMultiClassHead(tf.estimator.MultiClassHead):
                loss_reduction=losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE,
                loss_fn=None,
                name=None):
-    super(DPMultiClassHead, self).__init__(
+    super().__init__(
         n_classes=n_classes,
         weight_column=weight_column,
         label_vocabulary=label_vocabulary,
```
```diff
@@ -36,7 +36,7 @@ class DPMultiLabelHead(tf.estimator.MultiLabelHead):
                name=None):
     if loss_reduction == tf.keras.losses.Reduction.NONE:
       loss_reduction = tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE
-    super(DPMultiLabelHead, self).__init__(
+    super().__init__(
         n_classes=n_classes,
         weight_column=weight_column,
         thresholds=thresholds,
```
```diff
@@ -62,7 +62,7 @@ class DNNClassifier(tf.estimator.Estimator):
           config=config,
           batch_norm=batch_norm)

-    super(DNNClassifier, self).__init__(
+    super().__init__(
         model_fn=_model_fn,
         model_dir=model_dir,
         config=config,
```
```diff
@@ -76,7 +76,7 @@ def make_dp_model_class(cls):
         **kwargs: These will be passed on to the base class `__init__`
           method.
       """
-      super(DPModelClass, self).__init__(*args, **kwargs)
+      super().__init__(*args, **kwargs)
       self._l2_norm_clip = l2_norm_clip
       self._noise_multiplier = noise_multiplier

```
```diff
@@ -115,7 +115,7 @@ def make_optimizer_class(cls):
         *args: These will be passed on to the base class `__init__` method.
         **kwargs: These will be passed on to the base class `__init__` method.
       """
-      super(DPOptimizerClass, self).__init__(*args, **kwargs)
+      super().__init__(*args, **kwargs)
       self._dp_sum_query = dp_sum_query
       self._num_microbatches = num_microbatches
       self._global_state = None
```
```diff
@@ -24,7 +24,7 @@ class DPOptimizerEagerTest(tf.test.TestCase, parameterized.TestCase):

   def setUp(self):
     tf.enable_eager_execution()
-    super(DPOptimizerEagerTest, self).setUp()
+    super().setUp()

   def _loss_fn(self, val0, val1):
     return 0.5 * tf.reduce_sum(
```
```diff
@@ -151,7 +151,7 @@ def make_keras_optimizer_class(cls):
         *args: These will be passed on to the base class `__init__` method.
         **kwargs: These will be passed on to the base class `__init__` method.
       """
-      super(DPOptimizerClass, self).__init__(*args, **kwargs)
+      super().__init__(*args, **kwargs)
       self.gradient_accumulation_steps = gradient_accumulation_steps
       self._l2_norm_clip = l2_norm_clip
       self._noise_multiplier = noise_multiplier
```
```diff
@@ -162,14 +162,13 @@ def make_keras_optimizer_class(cls):
      self._was_dp_gradients_called = False

    def _create_slots(self, var_list):
-      super(DPOptimizerClass, self)._create_slots(var_list)
+      super()._create_slots(var_list)
      if self.gradient_accumulation_steps > 1:
        for var in var_list:
          self.add_slot(var, 'grad_acc')

    def _prepare_local(self, var_device, var_dtype, apply_state):
-      super(DPOptimizerClass, self)._prepare_local(
-          var_device, var_dtype, apply_state)
+      super()._prepare_local(var_device, var_dtype, apply_state)
      if self.gradient_accumulation_steps > 1:
        apply_update = tf.math.equal(
            tf.math.floormod(self.iterations + 1,
```
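The context lines sketch how the Keras DP optimizer schedules gradient accumulation: each variable gets a `grad_acc` slot, and a modular test on the step counter decides when the accumulated update is applied. A small runnable sketch of that scheduling test, reconstructed from the visible context (the concrete constants are assumptions):

```python
import tensorflow as tf

gradient_accumulation_steps = 4
iterations = tf.constant(7)  # the optimizer's step counter

# True only on every `gradient_accumulation_steps`-th step.
apply_update = tf.math.equal(
    tf.math.floormod(iterations + 1, gradient_accumulation_steps), 0)
print(apply_update.numpy())  # True for iterations 3, 7, 11, ...
```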
```diff
@@ -117,7 +117,7 @@ def make_vectorized_keras_optimizer_class(cls):
         *args: These will be passed on to the base class `__init__` method.
         **kwargs: These will be passed on to the base class `__init__` method.
       """
-      super(DPOptimizerClass, self).__init__(*args, **kwargs)
+      super().__init__(*args, **kwargs)
       self._l2_norm_clip = l2_norm_clip
       self._noise_multiplier = noise_multiplier
       self._num_microbatches = num_microbatches
```
```diff
@@ -204,7 +204,7 @@ def make_vectorized_keras_optimizer_class(cls):
       def process_microbatch(microbatch_loss):
         """Compute clipped grads for one microbatch."""
         mean_loss = tf.reduce_mean(input_tensor=microbatch_loss)
-        grads = super(DPOptimizerClass, self).get_gradients(mean_loss, params)
+        grads = cls.get_gradients(self, mean_loss, params)
         grads_list = [
             g if g is not None else tf.zeros_like(v)
             for (g, v) in zip(list(grads), params)
```
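This is the one place the sweep cannot use zero-argument `super()`: `process_microbatch` is a nested function, not a method defined directly in the class body, so `super()` would be resolved against the nested function's own frame and try to bind its first argument (`microbatch_loss`) instead of `self`. The commit instead calls the parent implementation explicitly through `cls`, the base optimizer class captured by the `make_vectorized_keras_optimizer_class(cls)` factory. A minimal reproduction of the restriction with stand-in classes:

```python
class BaseOptimizer:
  def get_gradients(self, loss, params):
    return ['grad(%s, %s)' % (loss, p) for p in params]

def make_wrapper(cls):
  class Wrapped(cls):
    def get_gradients(self, loss, params):
      def process_microbatch(microbatch_loss):
        # Zero-argument super() is resolved against the *current* frame, so
        # here it would try to bind `microbatch_loss` rather than `self`.
        # Calling through the captured parent class works everywhere:
        return cls.get_gradients(self, microbatch_loss, params)
      return process_microbatch(loss)
  return Wrapped

opt = make_wrapper(BaseOptimizer)()
print(opt.get_gradients('L', ['w']))  # ['grad(L, w)']
```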
```diff
@@ -100,7 +100,7 @@ def make_vectorized_optimizer_class(cls):
         *args: These will be passed on to the base class `__init__` method.
         **kwargs: These will be passed on to the base class `__init__` method.
       """
-      super(DPOptimizerClass, self).__init__(*args, **kwargs)
+      super().__init__(*args, **kwargs)
       self._l2_norm_clip = l2_norm_clip
       self._noise_multiplier = noise_multiplier
       self._num_microbatches = num_microbatches
```
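The next three hunks edit code cells inside a Jupyter notebook, which is why every source line appears as a quoted JSON string ending in `\n`. For context, a minimal sketch of how a notebook stores a code cell (simplified, assumed structure):

```python
# A .ipynb file is JSON; each code cell keeps its source as a list of strings.
cell = {
    "cell_type": "code",
    "source": [
        "class Attention(Layer):\n",
        "  def __init__(self, units, **kwargs):\n",
        "    super().__init__(**kwargs)\n",
    ],
}
```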
```diff
@@ -404,7 +404,7 @@
     "            **kwargs):\n",
     "    if 'input_shape' not in kwargs and 'input_dim' in kwargs:\n",
     "      kwargs['input_shape'] = (kwargs.pop('input_dim'),)\n",
-    "    super(Attention, self).__init__(**kwargs)\n",
+    "    super().__init__(**kwargs)\n",
     "    self.units = units\n",
     "    self.activation = activations.get(activation)\n",
     "    self.use_bias = use_bias\n",
```
```diff
@@ -523,7 +523,7 @@
     "      'kernel_constraint': constraints.serialize(self.kernel_constraint),\n",
     "      'bias_constraint': constraints.serialize(self.bias_constraint)\n",
     "    }\n",
-    "    base_config = super(Attention, self).get_config()\n",
+    "    base_config = super().get_config()\n",
     "    return dict(list(base_config.items()) + list(config.items()))"
    ]
   },
```
```diff
@@ -553,7 +553,7 @@
     "            **kwargs):\n",
     "    if 'input_shape' not in kwargs and 'input_dim' in kwargs:\n",
     "      kwargs['input_shape'] = (kwargs.pop('input_dim'),)\n",
-    "    super(DenseTransposeTied, self).__init__(**kwargs)\n",
+    "    super().__init__(**kwargs)\n",
     "    self.units = units\n",
     "    # We add these two properties to save the tied weights\n",
     "    self.tied_to = tied_to\n",
```
```diff
@@ -244,7 +244,7 @@ class SingleMembershipProbabilityResultTest(absltest.TestCase):
 class AttackResultsCollectionTest(absltest.TestCase):

   def __init__(self, *args, **kwargs):
-    super(AttackResultsCollectionTest, self).__init__(*args, **kwargs)
+    super().__init__(*args, **kwargs)

     self.some_attack_result = SingleAttackResult(
         slice_spec=SingleSliceSpec(None),
```
```diff
@@ -29,7 +29,7 @@ from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_s
 class PrivacyReportTest(absltest.TestCase):

   def __init__(self, *args, **kwargs):
-    super(PrivacyReportTest, self).__init__(*args, **kwargs)
+    super().__init__(*args, **kwargs)

     # Classifier that achieves an AUC of 0.5.
     self.imperfect_classifier_result = SingleAttackResult(
```
```diff
@@ -144,7 +144,7 @@ bolt.fit_generator(generator,
 class TestModel(tf.keras.Model):  # pylint: disable=abstract-method

   def __init__(self, reg_layer, number_of_outputs=1):
-    super(TestModel, self).__init__(name='test')
+    super().__init__(name='test')
     self.output_layer = tf.keras.layers.Dense(number_of_outputs,
                                               kernel_regularizer=reg_layer)

```