From 230193172584dd2062184e22ef10ac74584943f3 Mon Sep 17 00:00:00 2001
From: "A. Unique TensorFlower"
Date: Tue, 10 Mar 2020 14:16:25 -0700
Subject: [PATCH] Fix issue with importing tensorflow.compat.v1.

PiperOrigin-RevId: 300175680
---
 .../privacy/analysis/privacy_ledger.py        |  8 +++----
 .../privacy/analysis/privacy_ledger_test.py   | 18 +++++++-------
 .../privacy/analysis/tensor_buffer.py         | 24 +++++++++----------
 .../analysis/tensor_buffer_test_eager.py      |  2 +-
 .../analysis/tensor_buffer_test_graph.py      |  4 ++--
 .../privacy/dp_query/gaussian_query.py        |  2 +-
 .../privacy/dp_query/gaussian_query_test.py   |  7 +++---
 .../quantile_adaptive_clip_sum_query_test.py  | 12 +++++-----
 .../privacy/optimizers/dp_optimizer.py        | 14 +++++------
 .../optimizers/dp_optimizer_eager_test.py     |  8 +++----
 .../privacy/optimizers/dp_optimizer_test.py   | 16 ++++++-------
 .../optimizers/dp_optimizer_vectorized.py     | 14 +++++------
 .../dp_optimizer_vectorized_test.py           | 14 +++++------
 tutorials/mnist_dpsgd_tutorial.py             | 12 +++++-----
 tutorials/mnist_dpsgd_tutorial_eager.py       |  4 ++--
 tutorials/mnist_dpsgd_tutorial_keras.py       |  4 ++--
 tutorials/mnist_dpsgd_tutorial_vectorized.py  | 12 +++++-----
 17 files changed, 87 insertions(+), 88 deletions(-)

diff --git a/tensorflow_privacy/privacy/analysis/privacy_ledger.py b/tensorflow_privacy/privacy/analysis/privacy_ledger.py
index 65feab0..28755f2 100644
--- a/tensorflow_privacy/privacy/analysis/privacy_ledger.py
+++ b/tensorflow_privacy/privacy/analysis/privacy_ledger.py
@@ -111,7 +111,7 @@ class PrivacyLedger(object):

     def _do_record_query():
       with tf.control_dependencies(
-          [tf.compat.v1.assign(self._query_count, self._query_count + 1)]):
+          [tf.assign(self._query_count, self._query_count + 1)]):
         return self._query_buffer.append(
             [self._sample_count, l2_norm_bound, noise_stddev])

@@ -120,14 +120,14 @@ class PrivacyLedger(object):
   def finalize_sample(self):
     """Finalizes sample and records sample ledger entry."""
     with tf.control_dependencies([
-        tf.compat.v1.assign(self._sample_var, [
+        tf.assign(self._sample_var, [
             self._population_size, self._selection_probability,
             self._query_count
         ])
     ]):
       with tf.control_dependencies([
-          tf.compat.v1.assign(self._sample_count, self._sample_count + 1),
-          tf.compat.v1.assign(self._query_count, 0)
+          tf.assign(self._sample_count, self._sample_count + 1),
+          tf.assign(self._query_count, 0)
       ]):
         return self._sample_buffer.append(self._sample_var)

diff --git a/tensorflow_privacy/privacy/analysis/privacy_ledger_test.py b/tensorflow_privacy/privacy/analysis/privacy_ledger_test.py
index 7165316..3a6b00f 100644
--- a/tensorflow_privacy/privacy/analysis/privacy_ledger_test.py
+++ b/tensorflow_privacy/privacy/analysis/privacy_ledger_test.py
@@ -25,7 +25,7 @@ from tensorflow_privacy.privacy.dp_query import gaussian_query
 from tensorflow_privacy.privacy.dp_query import nested_query
 from tensorflow_privacy.privacy.dp_query import test_utils

-tf.compat.v1.enable_eager_execution()
+tf.enable_eager_execution()


 class PrivacyLedgerTest(tf.test.TestCase):
@@ -63,8 +63,8 @@ class PrivacyLedgerTest(tf.test.TestCase):
         query, population_size, selection_probability)

     # First sample.
-    tf.compat.v1.assign(population_size, 10)
-    tf.compat.v1.assign(selection_probability, 0.1)
+    tf.assign(population_size, 10)
+    tf.assign(selection_probability, 0.1)
     test_utils.run_query(query, [record1, record2])

     expected_queries = [[10.0, 0.0]]
@@ -75,8 +75,8 @@ class PrivacyLedgerTest(tf.test.TestCase):
     self.assertAllClose(sample_1.queries, expected_queries)

     # Second sample.
-    tf.compat.v1.assign(population_size, 20)
-    tf.compat.v1.assign(selection_probability, 0.2)
+    tf.assign(population_size, 20)
+    tf.assign(selection_probability, 0.2)
     test_utils.run_query(query, [record1, record2])

     formatted = query.ledger.get_formatted_ledger_eager()
@@ -106,8 +106,8 @@ class PrivacyLedgerTest(tf.test.TestCase):
     record2 = [5.0, [1.0, 2.0]]

     # First sample.
-    tf.compat.v1.assign(population_size, 10)
-    tf.compat.v1.assign(selection_probability, 0.1)
+    tf.assign(population_size, 10)
+    tf.assign(selection_probability, 0.1)
     test_utils.run_query(query, [record1, record2])

     expected_queries = [[4.0, 2.0], [5.0, 1.0]]
@@ -118,8 +118,8 @@ class PrivacyLedgerTest(tf.test.TestCase):
     self.assertAllClose(sorted(sample_1.queries), sorted(expected_queries))

     # Second sample.
-    tf.compat.v1.assign(population_size, 20)
-    tf.compat.v1.assign(selection_probability, 0.2)
+    tf.assign(population_size, 20)
+    tf.assign(selection_probability, 0.2)
     test_utils.run_query(query, [record1, record2])

     formatted = query.ledger.get_formatted_ledger_eager()
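The pattern in all of these hunks is the same: the modules already do `import tensorflow.compat.v1 as tf`, so spelling `tf.compat.v1.assign` again relies on the v1 module exposing a nested `compat.v1` attribute on itself, which is apparently what broke. A minimal sketch of the idea (not from the patch; the variable names are made up):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # graph mode, as in privacy_ledger.py

# On releases where the nested attribute exists, both spellings name the
# same function object; on affected builds the long spelling is the one
# that fails, which is why the patch drops the prefix.
assert tf.assign is tf.compat.v1.assign

count = tf.Variable(0, name='query_count')
# The control_dependencies pattern used by PrivacyLedger: ops created in
# the block run only after the counter has been incremented.
with tf.control_dependencies([tf.assign(count, count + 1)]):
  incremented = tf.identity(count)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(incremented))  # 1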
diff --git a/tensorflow_privacy/privacy/analysis/tensor_buffer.py b/tensorflow_privacy/privacy/analysis/tensor_buffer.py
index 039be8b..92f93bc 100644
--- a/tensorflow_privacy/privacy/analysis/tensor_buffer.py
+++ b/tensorflow_privacy/privacy/analysis/tensor_buffer.py
@@ -50,10 +50,10 @@ class TensorBuffer(object):
       raise ValueError('Shape cannot be scalar.')
     shape = [capacity] + shape

-    with tf.compat.v1.variable_scope(self._name):
+    with tf.variable_scope(self._name):
       # We need to use a placeholder as the initial value to allow resizing.
-      self._buffer = tf.compat.v1.Variable(
-          initial_value=tf.compat.v1.placeholder_with_default(
+      self._buffer = tf.Variable(
+          initial_value=tf.placeholder_with_default(
               tf.zeros(shape, dtype), shape=None),
           trainable=False,
           name='buffer',
@@ -82,18 +82,18 @@ class TensorBuffer(object):
       padding = tf.zeros_like(self._buffer, self._buffer.dtype)
       new_buffer = tf.concat([self._buffer, padding], axis=0)
       if tf.executing_eagerly():
-        with tf.compat.v1.variable_scope(self._name, reuse=True):
-          self._buffer = tf.compat.v1.get_variable(
+        with tf.variable_scope(self._name, reuse=True):
+          self._buffer = tf.get_variable(
               name='buffer',
               dtype=self._dtype,
               initializer=new_buffer,
               trainable=False)
-        return self._buffer, tf.compat.v1.assign(
+        return self._buffer, tf.assign(
             self._capacity, tf.multiply(self._capacity, 2))
       else:
-        return tf.compat.v1.assign(
+        return tf.assign(
             self._buffer, new_buffer,
-            validate_shape=False), tf.compat.v1.assign(
+            validate_shape=False), tf.assign(
                 self._capacity, tf.multiply(self._capacity, 2))

     update_buffer, update_capacity = tf.cond(
@@ -103,18 +103,18 @@ class TensorBuffer(object):

     with tf.control_dependencies([update_buffer, update_capacity]):
       with tf.control_dependencies([
-          tf.compat.v1.assert_less(
+          tf.assert_less(
              self._current_size,
              self._capacity,
              message='Appending past end of TensorBuffer.'),
-          tf.compat.v1.assert_equal(
+          tf.assert_equal(
              tf.shape(input=value),
              tf.shape(input=self._buffer)[1:],
              message='Appending value of inconsistent shape.')
       ]):
         with tf.control_dependencies(
-            [tf.compat.v1.assign(self._buffer[self._current_size, :], value)]):
-          return tf.compat.v1.assign_add(self._current_size, 1)
+            [tf.assign(self._buffer[self._current_size, :], value)]):
+          return tf.assign_add(self._current_size, 1)

   @property
   def values(self):
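Context for the tensor_buffer.py hunks: the buffer is a variable initialized from a shape=None `placeholder_with_default`, which leaves its static shape undefined so an assign with `validate_shape=False` can grow it. A rough standalone sketch of that trick (my reconstruction, with made-up names and shapes, assuming graph mode under the compat.v1 alias):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# shape=None on the placeholder leaves the variable's static shape unknown,
# so validate_shape=False is needed at construction and at resize time.
buf = tf.Variable(
    initial_value=tf.placeholder_with_default(
        tf.zeros([4, 2], tf.float32), shape=None),
    trainable=False,
    validate_shape=False,
    name='buffer')

# Doubling the buffer when it fills up, as TensorBuffer.append() does.
padding = tf.zeros_like(buf)
grow = tf.assign(buf, tf.concat([buf, padding], axis=0),
                 validate_shape=False)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(tf.shape(buf)))  # [4 2]
  sess.run(grow)
  print(sess.run(tf.shape(buf)))  # [8 2]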
diff --git a/tensorflow_privacy/privacy/analysis/tensor_buffer_test_eager.py b/tensorflow_privacy/privacy/analysis/tensor_buffer_test_eager.py
index fd22284..12fadf9 100644
--- a/tensorflow_privacy/privacy/analysis/tensor_buffer_test_eager.py
+++ b/tensorflow_privacy/privacy/analysis/tensor_buffer_test_eager.py
@@ -21,7 +21,7 @@ import tensorflow.compat.v1 as tf

 from tensorflow_privacy.privacy.analysis import tensor_buffer

-tf.compat.v1.enable_eager_execution()
+tf.enable_eager_execution()


 class TensorBufferTest(tf.test.TestCase):

diff --git a/tensorflow_privacy/privacy/analysis/tensor_buffer_test_graph.py b/tensorflow_privacy/privacy/analysis/tensor_buffer_test_graph.py
index d2cd340..65265f4 100644
--- a/tensorflow_privacy/privacy/analysis/tensor_buffer_test_graph.py
+++ b/tensorflow_privacy/privacy/analysis/tensor_buffer_test_graph.py
@@ -38,7 +38,7 @@ class TensorBufferTest(tf.test.TestCase):
       values = my_buffer.values
       current_size = my_buffer.current_size
       capacity = my_buffer.capacity
-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())

       v, cs, cap = sess.run([values, current_size, capacity])
       self.assertAllEqual(v, [value1, value2])
@@ -60,7 +60,7 @@ class TensorBufferTest(tf.test.TestCase):
       values = my_buffer.values
       current_size = my_buffer.current_size
       capacity = my_buffer.capacity
-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())

       v, cs, cap = sess.run([values, current_size, capacity])
       self.assertAllEqual(v, [value1, value2, value3])

diff --git a/tensorflow_privacy/privacy/dp_query/gaussian_query.py b/tensorflow_privacy/privacy/dp_query/gaussian_query.py
index c119490..790015e 100644
--- a/tensorflow_privacy/privacy/dp_query/gaussian_query.py
+++ b/tensorflow_privacy/privacy/dp_query/gaussian_query.py
@@ -96,7 +96,7 @@ class GaussianSumQuery(dp_query.SumAggregationDPQuery):
       return v + tf.random.normal(
           tf.shape(input=v), stddev=global_state.stddev)
     else:
-      random_normal = tf.compat.v1.random_normal_initializer(
+      random_normal = tf.random_normal_initializer(
          stddev=global_state.stddev)

       def add_noise(v):
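The gaussian_query.py hunk touches the graph-mode noise path, where a `random_normal_initializer` doubles as a callable noise sampler. A small hedged sketch of the two equivalent spellings (stddev and shape values are illustrative, not from the library):

import tensorflow.compat.v1 as tf

stddev = 0.1
v = tf.zeros([3])

# Eager path in GaussianSumQuery: sample noise with v's shape directly.
noised_eager = v + tf.random.normal(tf.shape(v), stddev=stddev)

# Graph path, as in the hunk above: an initializer instance is callable
# on a shape and returns a tensor of N(0, stddev^2) samples.
random_normal = tf.random_normal_initializer(stddev=stddev)
noised_graph = v + random_normal(tf.shape(v))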
diff --git a/tensorflow_privacy/privacy/dp_query/gaussian_query_test.py b/tensorflow_privacy/privacy/dp_query/gaussian_query_test.py
index d330946..0e8e3db 100644
--- a/tensorflow_privacy/privacy/dp_query/gaussian_query_test.py
+++ b/tensorflow_privacy/privacy/dp_query/gaussian_query_test.py
@@ -59,14 +59,13 @@ class GaussianQueryTest(tf.test.TestCase, parameterized.TestCase):
       record2 = tf.constant([4.0, -3.0])  # Not clipped.

       l2_norm_clip = tf.Variable(5.0)
-      l2_norm_clip_placeholder = tf.compat.v1.placeholder(tf.float32)
-      assign_l2_norm_clip = tf.compat.v1.assign(l2_norm_clip,
-                                                l2_norm_clip_placeholder)
+      l2_norm_clip_placeholder = tf.placeholder(tf.float32)
+      assign_l2_norm_clip = tf.assign(l2_norm_clip, l2_norm_clip_placeholder)
       query = gaussian_query.GaussianSumQuery(
           l2_norm_clip=l2_norm_clip, stddev=0.0)
       query_result, _ = test_utils.run_query(query, [record1, record2])

-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())
       result = sess.run(query_result)
       expected = [1.0, 1.0]
       self.assertAllClose(result, expected)

diff --git a/tensorflow_privacy/privacy/dp_query/quantile_adaptive_clip_sum_query_test.py b/tensorflow_privacy/privacy/dp_query/quantile_adaptive_clip_sum_query_test.py
index fef4d88..ebab03a 100644
--- a/tensorflow_privacy/privacy/dp_query/quantile_adaptive_clip_sum_query_test.py
+++ b/tensorflow_privacy/privacy/dp_query/quantile_adaptive_clip_sum_query_test.py
@@ -27,7 +27,7 @@ from tensorflow_privacy.privacy.analysis import privacy_ledger
 from tensorflow_privacy.privacy.dp_query import quantile_adaptive_clip_sum_query
 from tensorflow_privacy.privacy.dp_query import test_utils

-tf.compat.v1.enable_eager_execution()
+tf.enable_eager_execution()


 class QuantileAdaptiveClipSumQueryTest(
@@ -323,7 +323,7 @@ class QuantileAdaptiveClipSumQueryTest(
     global_state = query.initial_global_state()

     for t in range(50):
-      tf.compat.v1.assign(learning_rate, 1.0 / np.sqrt(t + 1))
+      tf.assign(learning_rate, 1.0 / np.sqrt(t + 1))
       _, global_state = test_utils.run_query(query, records, global_state)

     actual_clip = global_state.sum_state.l2_norm_clip
@@ -350,8 +350,8 @@ class QuantileAdaptiveClipSumQueryTest(
         query, population_size, selection_probability)

     # First sample.
-    tf.compat.v1.assign(population_size, 10)
-    tf.compat.v1.assign(selection_probability, 0.1)
+    tf.assign(population_size, 10)
+    tf.assign(selection_probability, 0.1)
     _, global_state = test_utils.run_query(query, [record1, record2])

     expected_queries = [[10.0, 10.0], [0.5, 0.0]]
@@ -362,8 +362,8 @@ class QuantileAdaptiveClipSumQueryTest(
     self.assertAllClose(sample_1.queries, expected_queries)

     # Second sample.
-    tf.compat.v1.assign(population_size, 20)
-    tf.compat.v1.assign(selection_probability, 0.2)
+    tf.assign(population_size, 20)
+    tf.assign(selection_probability, 0.2)
     test_utils.run_query(query, [record1, record2], global_state)

     formatted = query.ledger.get_formatted_ledger_eager()
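The test hunks above repeatedly use the same placeholder-feed idiom to change a clip norm between `sess.run` calls. For reference, a self-contained sketch of just that idiom (values illustrative):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

l2_norm_clip = tf.Variable(5.0)
l2_norm_clip_placeholder = tf.placeholder(tf.float32)
assign_l2_norm_clip = tf.assign(l2_norm_clip, l2_norm_clip_placeholder)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(l2_norm_clip))  # 5.0
  # Feed a new value through the placeholder and run the assign op.
  sess.run(assign_l2_norm_clip, feed_dict={l2_norm_clip_placeholder: 0.5})
  print(sess.run(l2_norm_clip))  # 0.5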
diff --git a/tensorflow_privacy/privacy/optimizers/dp_optimizer.py b/tensorflow_privacy/privacy/optimizers/dp_optimizer.py
index 62641b4..e05839a 100644
--- a/tensorflow_privacy/privacy/optimizers/dp_optimizer.py
+++ b/tensorflow_privacy/privacy/optimizers/dp_optimizer.py
@@ -27,9 +27,9 @@ from tensorflow_privacy.privacy.dp_query import gaussian_query

 def make_optimizer_class(cls):
   """Constructs a DP optimizer class from an existing one."""
-  parent_code = tf.compat.v1.train.Optimizer.compute_gradients.__code__
+  parent_code = tf.train.Optimizer.compute_gradients.__code__
   child_code = cls.compute_gradients.__code__
-  GATE_OP = tf.compat.v1.train.Optimizer.GATE_OP  # pylint: disable=invalid-name
+  GATE_OP = tf.train.Optimizer.GATE_OP  # pylint: disable=invalid-name
   if child_code is not parent_code:
     logging.warning(
         'WARNING: Calling make_optimizer_class() on class %s that overrides '
@@ -146,8 +146,8 @@ def make_optimizer_class(cls):

       if var_list is None:
         var_list = (
-            tf.compat.v1.trainable_variables() + tf.compat.v1.get_collection(
-                tf.compat.v1.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
+            tf.trainable_variables() + tf.get_collection(
+                tf.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))

       sample_state = self._dp_sum_query.initial_sample_state(var_list)

@@ -213,9 +213,9 @@ def make_gaussian_optimizer_class(cls):

   return DPGaussianOptimizerClass

-AdagradOptimizer = tf.compat.v1.train.AdagradOptimizer
-AdamOptimizer = tf.compat.v1.train.AdamOptimizer
-GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
+AdagradOptimizer = tf.train.AdagradOptimizer
+AdamOptimizer = tf.train.AdamOptimizer
+GradientDescentOptimizer = tf.train.GradientDescentOptimizer

 DPAdagradOptimizer = make_optimizer_class(AdagradOptimizer)
 DPAdamOptimizer = make_optimizer_class(AdamOptimizer)

diff --git a/tensorflow_privacy/privacy/optimizers/dp_optimizer_eager_test.py b/tensorflow_privacy/privacy/optimizers/dp_optimizer_eager_test.py
index 489aebe..f64e6e3 100644
--- a/tensorflow_privacy/privacy/optimizers/dp_optimizer_eager_test.py
+++ b/tensorflow_privacy/privacy/optimizers/dp_optimizer_eager_test.py
@@ -30,7 +30,7 @@ from tensorflow_privacy.privacy.optimizers import dp_optimizer
 class DPOptimizerEagerTest(tf.test.TestCase, parameterized.TestCase):

   def setUp(self):
-    tf.compat.v1.enable_eager_execution()
+    tf.enable_eager_execution()
     super(DPOptimizerEagerTest, self).setUp()

   def _loss_fn(self, val0, val1):
@@ -64,7 +64,7 @@ class DPOptimizerEagerTest(tf.test.TestCase, parameterized.TestCase):
           num_microbatches=num_microbatches,
           learning_rate=2.0)

-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([1.0, 2.0], self.evaluate(var0))

@@ -89,7 +89,7 @@ class DPOptimizerEagerTest(tf.test.TestCase, parameterized.TestCase):

       opt = cls(dp_sum_query, num_microbatches=1, learning_rate=2.0)

-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([0.0, 0.0], self.evaluate(var0))

@@ -113,7 +113,7 @@ class DPOptimizerEagerTest(tf.test.TestCase, parameterized.TestCase):
       opt = cls(dp_sum_query, num_microbatches=1,
                 learning_rate=2.0)

-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([0.0], self.evaluate(var0))
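For reviewers who want to exercise the optimizers this file exports, here is a hedged usage sketch modeled on the accompanying tests (hyperparameters are illustrative; `noise_multiplier=0.` only to make the demo deterministic):

import tensorflow.compat.v1 as tf
from tensorflow_privacy.privacy.optimizers import dp_optimizer

tf.disable_eager_execution()

var0 = tf.Variable([1.0, 2.0])
data0 = tf.constant([[3.0, 4.0], [5.0, 6.0]])

opt = dp_optimizer.DPGradientDescentGaussianOptimizer(
    l2_norm_clip=1.0,
    noise_multiplier=0.,  # zero noise purely for a deterministic demo
    num_microbatches=2,
    learning_rate=0.1)

# DP optimizers take the *vector* of per-example losses, not its mean,
# so gradients can be clipped per microbatch before aggregation.
vector_loss = tf.squared_difference(data0, var0)
train_op = opt.minimize(loss=vector_loss)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(train_op)
  print(sess.run(var0))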
diff --git a/tensorflow_privacy/privacy/optimizers/dp_optimizer_test.py b/tensorflow_privacy/privacy/optimizers/dp_optimizer_test.py
index a3658f5..c9c214d 100644
--- a/tensorflow_privacy/privacy/optimizers/dp_optimizer_test.py
+++ b/tensorflow_privacy/privacy/optimizers/dp_optimizer_test.py
@@ -63,7 +63,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
           num_microbatches=num_microbatches,
           learning_rate=2.0)

-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([1.0, 2.0], self.evaluate(var0))

@@ -87,7 +87,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):

       opt = cls(dp_sum_query, num_microbatches=1, learning_rate=2.0)

-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([0.0, 0.0], self.evaluate(var0))

@@ -110,7 +110,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):

       opt = cls(dp_sum_query, num_microbatches=1, learning_rate=2.0)

-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([0.0], self.evaluate(var0))

@@ -126,7 +126,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
   @mock.patch('absl.logging.warning')
   def testComputeGradientsOverrideWarning(self, mock_logging):

-    class SimpleOptimizer(tf.compat.v1.train.Optimizer):
+    class SimpleOptimizer(tf.train.Optimizer):

       def compute_gradients(self):
         return 0
@@ -153,7 +153,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
           dp_sum_query, num_microbatches=1, learning_rate=1.0)
-      global_step = tf.compat.v1.train.get_global_step()
+      global_step = tf.train.get_global_step()
       train_op = optimizer.minimize(loss=vector_loss, global_step=global_step)
       return tf.estimator.EstimatorSpec(
           mode=mode, loss=scalar_loss, train_op=train_op)
@@ -167,7 +167,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
                            true_weights) + true_bias + np.random.normal(
                                scale=0.1, size=(200, 1)).astype(np.float32)

-    train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={'x': train_data},
        y=train_labels,
        batch_size=20,
@@ -200,7 +200,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
           learning_rate=2.0,
           unroll_microbatches=True)

-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([1.0, 2.0], self.evaluate(var0))

@@ -225,7 +225,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
           num_microbatches=1,
           learning_rate=2.0)

-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([0.0], self.evaluate(var0))
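The `SimpleOptimizer` test above exercises the `__code__` comparison that `make_optimizer_class` uses to detect an overridden `compute_gradients`. A standalone sketch of just that check (the subclass here is hypothetical, mirroring the test fixture):

import tensorflow.compat.v1 as tf


class SimpleOptimizer(tf.train.Optimizer):

  def compute_gradients(self):
    return 0


# An overriding subclass gets its own code object, while a class that
# merely inherits the method shares the parent's, so identity suffices.
parent_code = tf.train.Optimizer.compute_gradients.__code__
print(SimpleOptimizer.compute_gradients.__code__ is parent_code)  # False
print(tf.train.GradientDescentOptimizer.compute_gradients.__code__
      is parent_code)  # True: inherited, so no warning is triggered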
diff --git a/tensorflow_privacy/privacy/optimizers/dp_optimizer_vectorized.py b/tensorflow_privacy/privacy/optimizers/dp_optimizer_vectorized.py
index 2d3f7fd..6a2b257 100644
--- a/tensorflow_privacy/privacy/optimizers/dp_optimizer_vectorized.py
+++ b/tensorflow_privacy/privacy/optimizers/dp_optimizer_vectorized.py
@@ -21,11 +21,11 @@ from absl import logging
 import tensorflow.compat.v1 as tf

-AdagradOptimizer = tf.compat.v1.train.AdagradOptimizer
-AdamOptimizer = tf.compat.v1.train.AdamOptimizer
-GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
-parent_code = tf.compat.v1.train.Optimizer.compute_gradients.__code__
-GATE_OP = tf.compat.v1.train.Optimizer.GATE_OP  # pylint: disable=invalid-name
+AdagradOptimizer = tf.train.AdagradOptimizer
+AdamOptimizer = tf.train.AdamOptimizer
+GradientDescentOptimizer = tf.train.GradientDescentOptimizer
+parent_code = tf.train.Optimizer.compute_gradients.__code__
+GATE_OP = tf.train.Optimizer.GATE_OP  # pylint: disable=invalid-name


 def make_vectorized_optimizer_class(cls):
@@ -90,8 +90,8 @@ def make_vectorized_optimizer_class(cls):

       if var_list is None:
         var_list = (
-            tf.compat.v1.trainable_variables() + tf.compat.v1.get_collection(
-                tf.compat.v1.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
+            tf.trainable_variables() + tf.get_collection(
+                tf.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))

       def process_microbatch(microbatch_loss):
         """Compute clipped grads for one microbatch."""
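A hedged usage sketch for the vectorized variant, parallel to the earlier one (this assumes the module's `VectorizedDPSGD` alias built by `make_vectorized_optimizer_class`; hyperparameters are again illustrative, with zero noise for determinism):

import tensorflow.compat.v1 as tf
from tensorflow_privacy.privacy.optimizers import dp_optimizer_vectorized

tf.disable_eager_execution()

var0 = tf.Variable([1.0, 2.0])
data0 = tf.constant([[3.0, 4.0], [5.0, 6.0]])

# Same shape of usage as the non-vectorized optimizer; internally this
# class clips per-microbatch gradients with a vectorized map instead of
# a while loop.
opt = dp_optimizer_vectorized.VectorizedDPSGD(
    l2_norm_clip=1.0,
    noise_multiplier=0.,
    num_microbatches=2,
    learning_rate=2.0)

vector_loss = tf.squared_difference(data0, var0)
train_op = opt.minimize(loss=vector_loss)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(train_op)
  print(sess.run(var0))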
diff --git a/tensorflow_privacy/privacy/optimizers/dp_optimizer_vectorized_test.py b/tensorflow_privacy/privacy/optimizers/dp_optimizer_vectorized_test.py
index d254817..fb56095 100644
--- a/tensorflow_privacy/privacy/optimizers/dp_optimizer_vectorized_test.py
+++ b/tensorflow_privacy/privacy/optimizers/dp_optimizer_vectorized_test.py
@@ -58,7 +58,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
           num_microbatches=num_microbatches,
           learning_rate=2.0)

-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([1.0, 2.0], self.evaluate(var0))

@@ -82,7 +82,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
           num_microbatches=1,
           learning_rate=2.0)

-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([0.0, 0.0], self.evaluate(var0))

@@ -105,7 +105,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
           num_microbatches=1,
           learning_rate=2.0)

-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([0.0], self.evaluate(var0))

@@ -121,7 +121,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
   @mock.patch('absl.logging.warning')
   def testComputeGradientsOverrideWarning(self, mock_logging):

-    class SimpleOptimizer(tf.compat.v1.train.Optimizer):
+    class SimpleOptimizer(tf.train.Optimizer):

       def compute_gradients(self):
         return 0
@@ -147,7 +147,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
           noise_multiplier=0.,
           num_microbatches=1,
           learning_rate=1.0)
-      global_step = tf.compat.v1.train.get_global_step()
+      global_step = tf.train.get_global_step()
       train_op = optimizer.minimize(loss=vector_loss, global_step=global_step)
       return tf.estimator.EstimatorSpec(
           mode=mode, loss=scalar_loss, train_op=train_op)
@@ -161,7 +161,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
                            true_weights) + true_bias + np.random.normal(
                                scale=0.1, size=(200, 1)).astype(np.float32)

-    train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={'x': train_data},
        y=train_labels,
        batch_size=20,
@@ -188,7 +188,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
           num_microbatches=1,
           learning_rate=2.0)

-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([0.0], self.evaluate(var0))

diff --git a/tutorials/mnist_dpsgd_tutorial.py b/tutorials/mnist_dpsgd_tutorial.py
index 7b96434..0e884a5 100644
--- a/tutorials/mnist_dpsgd_tutorial.py
+++ b/tutorials/mnist_dpsgd_tutorial.py
@@ -29,7 +29,7 @@ from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp_from_ledger
 from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent
 from tensorflow_privacy.privacy.optimizers import dp_optimizer

-GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
+GradientDescentOptimizer = tf.train.GradientDescentOptimizer

 FLAGS = flags.FLAGS

@@ -130,7 +130,7 @@ def cnn_model_fn(features, labels, mode):
     optimizer = GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)
     training_hooks = []
     opt_loss = scalar_loss
-    global_step = tf.compat.v1.train.get_global_step()
+    global_step = tf.train.get_global_step()
     train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
     # In the following, we pass the mean of the loss (scalar_loss) rather than
     # the vector_loss because tf.estimator requires a scalar loss. This is only
@@ -145,7 +145,7 @@ def cnn_model_fn(features, labels, mode):
   elif mode == tf.estimator.ModeKeys.EVAL:
     eval_metric_ops = {
         'accuracy':
-            tf.compat.v1.metrics.accuracy(
+            tf.metrics.accuracy(
                 labels=labels,
                 predictions=tf.argmax(input=logits, axis=1))
     }
@@ -178,7 +178,7 @@ def load_mnist():

 def main(unused_argv):
-  tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
+  tf.logging.set_verbosity(tf.logging.INFO)
   if FLAGS.dpsgd and FLAGS.batch_size % FLAGS.microbatches != 0:
     raise ValueError('Number of microbatches should divide evenly batch_size')

@@ -190,13 +190,13 @@ def main(unused_argv):
       model_dir=FLAGS.model_dir)

   # Create tf.Estimator input functions for the training and test data.
-  train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  train_input_fn = tf.estimator.inputs.numpy_input_fn(
       x={'x': train_data},
       y=train_labels,
       batch_size=FLAGS.batch_size,
       num_epochs=FLAGS.epochs,
       shuffle=True)
-  eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  eval_input_fn = tf.estimator.inputs.numpy_input_fn(
       x={'x': test_data},
       y=test_labels,
       num_epochs=1,
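The tutorial's privacy report boils down to two rdp_accountant calls. A minimal sketch of that accounting, with illustrative MNIST-like numbers (batch size 250, noise multiplier 1.1, 15 epochs, delta 1e-5; none of these values come from this patch):

from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp
from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent

n = 60000          # MNIST training set size
batch_size = 250
epochs = 15
orders = [1 + x / 10. for x in range(1, 100)] + list(range(12, 64))

# RDP of the sampled Gaussian mechanism, accumulated over all steps.
rdp = compute_rdp(
    q=batch_size / n,               # sampling probability per step
    noise_multiplier=1.1,
    steps=epochs * n // batch_size,
    orders=orders)
# Convert to an (epsilon, delta) guarantee at the best RDP order.
eps, _, opt_order = get_privacy_spent(orders, rdp, target_delta=1e-5)
print('DP-SGD gives (%.2f, 1e-5)-DP at order %.1f' % (eps, opt_order))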
diff --git a/tutorials/mnist_dpsgd_tutorial_eager.py b/tutorials/mnist_dpsgd_tutorial_eager.py
index 7857731..1c4060f 100644
--- a/tutorials/mnist_dpsgd_tutorial_eager.py
+++ b/tutorials/mnist_dpsgd_tutorial_eager.py
@@ -26,8 +26,8 @@ from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp
 from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent
 from tensorflow_privacy.privacy.optimizers.dp_optimizer import DPGradientDescentGaussianOptimizer

-GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
-tf.compat.v1.enable_eager_execution()
+GradientDescentOptimizer = tf.train.GradientDescentOptimizer
+tf.enable_eager_execution()

 flags.DEFINE_boolean('dpsgd', True, 'If True, train with DP-SGD. If False, '
                      'train with vanilla SGD.')

diff --git a/tutorials/mnist_dpsgd_tutorial_keras.py b/tutorials/mnist_dpsgd_tutorial_keras.py
index 8726017..927cb07 100644
--- a/tutorials/mnist_dpsgd_tutorial_keras.py
+++ b/tutorials/mnist_dpsgd_tutorial_keras.py
@@ -28,7 +28,7 @@ from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp
 from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent
 from tensorflow_privacy.privacy.optimizers.dp_optimizer import DPGradientDescentGaussianOptimizer

-GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
+GradientDescentOptimizer = tf.train.GradientDescentOptimizer

 flags.DEFINE_boolean(
     'dpsgd', True, 'If True, train with DP-SGD. If False, '
@@ -121,7 +121,7 @@ def main(unused_argv):
         learning_rate=FLAGS.learning_rate)
     # Compute vector of per-example loss rather than its mean over a minibatch.
     loss = tf.keras.losses.CategoricalCrossentropy(
-        from_logits=True, reduction=tf.compat.v1.losses.Reduction.NONE)
+        from_logits=True, reduction=tf.losses.Reduction.NONE)
   else:
     optimizer = GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)
     loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)

diff --git a/tutorials/mnist_dpsgd_tutorial_vectorized.py b/tutorials/mnist_dpsgd_tutorial_vectorized.py
index 8c1347f..a4bc78b 100644
--- a/tutorials/mnist_dpsgd_tutorial_vectorized.py
+++ b/tutorials/mnist_dpsgd_tutorial_vectorized.py
@@ -47,7 +47,7 @@ FLAGS = flags.FLAGS

 NUM_TRAIN_EXAMPLES = 60000

-GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
+GradientDescentOptimizer = tf.train.GradientDescentOptimizer


 def compute_epsilon(steps):
@@ -106,7 +106,7 @@ def cnn_model_fn(features, labels, mode):
   else:
     optimizer = GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)
   opt_loss = scalar_loss
-  global_step = tf.compat.v1.train.get_global_step()
+  global_step = tf.train.get_global_step()
   train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
   # In the following, we pass the mean of the loss (scalar_loss) rather than
   # the vector_loss because tf.estimator requires a scalar loss. This is only
@@ -120,7 +120,7 @@ def cnn_model_fn(features, labels, mode):
   elif mode == tf.estimator.ModeKeys.EVAL:
     eval_metric_ops = {
         'accuracy':
-            tf.compat.v1.metrics.accuracy(
+            tf.metrics.accuracy(
                 labels=labels,
                 predictions=tf.argmax(input=logits, axis=1))
     }
@@ -153,7 +153,7 @@ def load_mnist():

 def main(unused_argv):
-  tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
+  tf.logging.set_verbosity(tf.logging.INFO)
   if FLAGS.dpsgd and FLAGS.batch_size % FLAGS.microbatches != 0:
     raise ValueError('Number of microbatches should divide evenly batch_size')

@@ -165,13 +165,13 @@ def main(unused_argv):
       model_dir=FLAGS.model_dir)

   # Create tf.Estimator input functions for the training and test data.
-  train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  train_input_fn = tf.estimator.inputs.numpy_input_fn(
       x={'x': train_data},
       y=train_labels,
       batch_size=FLAGS.batch_size,
       num_epochs=FLAGS.epochs,
       shuffle=True)
-  eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  eval_input_fn = tf.estimator.inputs.numpy_input_fn(
       x={'x': test_data},
       y=test_labels,
       num_epochs=1,
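Finally, the Keras hunk above depends on per-example losses: `Reduction.NONE` is what keeps the loss a vector so the DP optimizer can clip per microbatch. A tiny sketch with toy labels and logits (values are illustrative):

import tensorflow.compat.v1 as tf

# With the compat.v1 alias, tf.losses.Reduction.NONE names the same
# constant as tf.compat.v1.losses.Reduction.NONE.
loss_fn = tf.keras.losses.CategoricalCrossentropy(
    from_logits=True, reduction=tf.losses.Reduction.NONE)

labels = tf.constant([[1.0, 0.0], [0.0, 1.0]])
logits = tf.constant([[2.0, 0.5], [0.3, 1.7]])

# reduction=NONE yields one loss per example (shape [2]) instead of a
# scalar mean, which is what the DP optimizer needs for clipping.
per_example_loss = loss_fn(labels, logits)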