Reference the TF 1.x API explicitly at each call site in TensorFlow Privacy.

Spelling out `tf.compat.v1` at every call site makes usage of the TF 1.x API easy to search for, and it lets the TF imports across TFP be written consistently as plain `import tensorflow as tf`.

PiperOrigin-RevId: 427043028
parent 5dc3475e17
commit 28db674240
22 changed files with 122 additions and 128 deletions (file names are not preserved in this view; a blank line separates one file's hunks from the next)
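Every edit below instantiates the same mechanical pattern. The sketch that follows is illustrative only (not part of the diff) and assumes a TF 2.x runtime:

```python
# Before this change, files aliased the v1 API at import time, so v1-only
# calls were indistinguishable from TF 2.x calls at the call site:
#
#   import tensorflow.compat.v1 as tf
#   init = tf.global_variables_initializer()
#
# After this change, files import TF 2.x directly and name the v1 endpoint
# explicitly, so `tf.compat.v1` usage can be found with a plain grep:
import tensorflow as tf

init = tf.compat.v1.global_variables_initializer()
```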
@@ -46,7 +46,7 @@ class TensorBuffer(object):
       raise ValueError('Shape cannot be scalar.')
     shape = [capacity] + shape
 
-    with tf.variable_scope(self._name):
+    with tf.compat.v1.variable_scope(self._name):
       # We need to use a placeholder as the initial value to allow resizing.
       self._buffer = tf.Variable(
           initial_value=tf.placeholder_with_default(
@@ -78,19 +78,19 @@ class TensorBuffer(object):
       padding = tf.zeros_like(self._buffer, self._buffer.dtype)
       new_buffer = tf.concat([self._buffer, padding], axis=0)
       if tf.executing_eagerly():
-        with tf.variable_scope(self._name, reuse=True):
+        with tf.compat.v1.variable_scope(self._name, reuse=True):
           self._buffer = tf.get_variable(
               name='buffer',
               dtype=self._dtype,
               initializer=new_buffer,
               trainable=False)
-          return self._buffer, tf.assign(self._capacity,
-                                         tf.multiply(self._capacity, 2))
+          return self._buffer, tf.compat.v1.assign(
+              self._capacity, tf.multiply(self._capacity, 2))
       else:
-        return tf.assign(
+        return tf.compat.v1.assign(
             self._buffer, new_buffer,
-            validate_shape=False), tf.assign(self._capacity,
-                                             tf.multiply(self._capacity, 2))
+            validate_shape=False), tf.compat.v1.assign(
+                self._capacity, tf.multiply(self._capacity, 2))
 
     update_buffer, update_capacity = tf.cond(
         pred=tf.equal(self._current_size, self._capacity),
@@ -109,8 +109,8 @@ class TensorBuffer(object):
             message='Appending value of inconsistent shape.')
     ]):
       with tf.control_dependencies(
-          [tf.assign(self._buffer[self._current_size, :], value)]):
-        return tf.assign_add(self._current_size, 1)
+          [tf.compat.v1.assign(self._buffer[self._current_size, :], value)]):
+        return tf.compat.v1.assign_add(self._current_size, 1)
 
   @property
   def values(self):
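For context on the `tf.assign` call sites above: `tf.assign` has no top-level TF 2.x endpoint, so it must be reached through `tf.compat.v1`. A minimal sketch (illustrative, not from the commit; the variable name `v` is made up) of the graph-mode behavior the `else` branch relies on:

```python
import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # graph mode, as in TensorBuffer's else branch

v = tf.compat.v1.get_variable('v', initializer=tf.constant([0.0, 0.0]))
# tf.compat.v1.assign returns an op, so it composes with control_dependencies.
update = tf.compat.v1.assign(v, [1.0, 2.0])

with tf.compat.v1.Session() as sess:
  sess.run(tf.compat.v1.global_variables_initializer())
  sess.run(update)
  print(sess.run(v))  # [1. 2.]
```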
@@ -12,11 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import tensorflow.compat.v1 as tf
+import tensorflow as tf
 
 from tensorflow_privacy.privacy.analysis import tensor_buffer
 
-tf.enable_eager_execution()
+tf.compat.v1.enable_eager_execution()
 
 
 class TensorBufferTest(tf.test.TestCase):
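The test keeps working on both runtimes because `tf.compat.v1.enable_eager_execution()` does nothing where eager execution is already the default. A sketch (illustrative, assuming TF 2.x):

```python
import tensorflow as tf

# Harmless under TF 2.x (eager is already on); needed to get eager
# semantics when running under a TF 1.x-style runtime.
tf.compat.v1.enable_eager_execution()

assert tf.executing_eagerly()
```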
@@ -12,8 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import tensorflow.compat.v1 as tf
+import tensorflow as tf
 
 from tensorflow_privacy.privacy.analysis import tensor_buffer
 
-
@@ -33,7 +32,7 @@ class TensorBufferTest(tf.test.TestCase):
       values = my_buffer.values
       current_size = my_buffer.current_size
       capacity = my_buffer.capacity
-      self.evaluate(tf.global_variables_initializer())
+      self.evaluate(tf.compat.v1.global_variables_initializer())
 
       v, cs, cap = sess.run([values, current_size, capacity])
       self.assertAllEqual(v, [value1, value2])
@@ -55,7 +54,7 @@ class TensorBufferTest(tf.test.TestCase):
       values = my_buffer.values
       current_size = my_buffer.current_size
       capacity = my_buffer.capacity
-      self.evaluate(tf.global_variables_initializer())
+      self.evaluate(tf.compat.v1.global_variables_initializer())
 
       v, cs, cap = sess.run([values, current_size, capacity])
       self.assertAllEqual(v, [value1, value2, value3])
@@ -14,8 +14,7 @@
 
 from absl.testing import parameterized
 import numpy as np
-import tensorflow.compat.v1 as tf
+import tensorflow as tf
 
 from tensorflow_privacy.privacy.dp_query import gaussian_query
 from tensorflow_privacy.privacy.dp_query import test_utils
-
@@ -50,13 +49,14 @@ class GaussianQueryTest(tf.test.TestCase, parameterized.TestCase):
       record2 = tf.constant([4.0, -3.0])  # Not clipped.
 
       l2_norm_clip = tf.Variable(5.0)
-      l2_norm_clip_placeholder = tf.placeholder(tf.float32)
-      assign_l2_norm_clip = tf.assign(l2_norm_clip, l2_norm_clip_placeholder)
+      l2_norm_clip_placeholder = tf.compat.v1.placeholder(tf.float32)
+      assign_l2_norm_clip = tf.compat.v1.assign(l2_norm_clip,
+                                                l2_norm_clip_placeholder)
       query = gaussian_query.GaussianSumQuery(
           l2_norm_clip=l2_norm_clip, stddev=0.0)
       query_result, _ = test_utils.run_query(query, [record1, record2])
 
-      self.evaluate(tf.global_variables_initializer())
+      self.evaluate(tf.compat.v1.global_variables_initializer())
       result = sess.run(query_result)
       expected = [1.0, 1.0]
       self.assertAllClose(result, expected)
@@ -15,12 +15,12 @@
 from absl.testing import parameterized
 
 import numpy as np
-import tensorflow.compat.v1 as tf
+import tensorflow as tf
 
 from tensorflow_privacy.privacy.dp_query import quantile_adaptive_clip_sum_query
 from tensorflow_privacy.privacy.dp_query import test_utils
 
-tf.enable_eager_execution()
+tf.compat.v1.enable_eager_execution()
 
 
 class QuantileAdaptiveClipSumQueryTest(tf.test.TestCase,
@@ -278,7 +278,7 @@ class QuantileAdaptiveClipSumQueryTest(tf.test.TestCase,
     global_state = query.initial_global_state()
 
     for t in range(50):
-      tf.assign(learning_rate, 1.0 / np.sqrt(t + 1))
+      tf.compat.v1.assign(learning_rate, 1.0 / np.sqrt(t + 1))
       _, global_state = test_utils.run_query(query, records, global_state)
 
     actual_clip = global_state.sum_state.l2_norm_clip
@@ -14,11 +14,11 @@
 
 from absl.testing import parameterized
 import numpy as np
-import tensorflow.compat.v1 as tf
+import tensorflow as tf
 from tensorflow_privacy.privacy.dp_query import quantile_estimator_query
 from tensorflow_privacy.privacy.dp_query import test_utils
 
-tf.enable_eager_execution()
+tf.compat.v1.enable_eager_execution()
 
 
 def _make_quantile_estimator_query(initial_estimate,
@@ -254,7 +254,7 @@ class QuantileEstimatorQueryTest(tf.test.TestCase, parameterized.TestCase):
     global_state = query.initial_global_state()
 
     for t in range(50):
-      tf.assign(learning_rate, 1.0 / np.sqrt(t + 1))
+      tf.compat.v1.assign(learning_rate, 1.0 / np.sqrt(t + 1))
       _, global_state = test_utils.run_query(query, records, global_state)
 
     actual_estimate = global_state.current_estimate
@@ -14,9 +14,7 @@
 """Differentially private optimizers for TensorFlow."""
 
 from absl import logging
-
-import tensorflow.compat.v1 as tf
+import tensorflow as tf
 
-
 from tensorflow_privacy.privacy.dp_query import gaussian_query
 
@@ -30,12 +28,12 @@ def make_optimizer_class(cls):
   Returns:
     A DP-SGD subclass of `cls`.
   """
-  parent_code = tf.train.Optimizer.compute_gradients.__code__
+  parent_code = tf.compat.v1.train.Optimizer.compute_gradients.__code__
 
   has_compute_gradients = hasattr(cls, 'compute_gradients')
   if has_compute_gradients:
     child_code = cls.compute_gradients.__code__
-  GATE_OP = tf.train.Optimizer.GATE_OP  # pylint: disable=invalid-name
+  GATE_OP = tf.compat.v1.train.Optimizer.GATE_OP  # pylint: disable=invalid-name
   if has_compute_gradients and child_code is not parent_code:
     logging.warning(
         'WARNING: Calling make_optimizer_class() on class %s that overrides '
@@ -220,8 +218,8 @@ def make_optimizer_class(cls):
 
       if var_list is None:
         var_list = (
-            tf.trainable_variables() +
-            tf.get_collection(tf.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
+            tf.compat.v1.trainable_variables() + tf.compat.v1.get_collection(
+                tf.compat.v1.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
 
       sample_state = self._dp_sum_query.initial_sample_state(var_list)
 
@@ -283,7 +281,7 @@ def make_gaussian_optimizer_class(cls):
   class DPGaussianOptimizerClass(make_optimizer_class(cls)):  # pylint: disable=empty-docstring
     __doc__ = ("""DP subclass of `{}`.
 
   You can use this as a differentially private replacement for
-  `tf.train.{}`. This optimizer implements DP-SGD using
+  `tf.compat.v1.train.{}`. This optimizer implements DP-SGD using
   the standard Gaussian mechanism.
 
@@ -295,7 +293,7 @@ def make_gaussian_optimizer_class(cls):
 
   ```python
   # Create optimizer.
   opt = {}(l2_norm_clip=1.0, noise_multiplier=0.5, num_microbatches=1,
            <standard arguments>)
   ```
 
@@ -372,10 +370,10 @@ def make_gaussian_optimizer_class(cls):
   return DPGaussianOptimizerClass
 
 
-AdagradOptimizer = tf.train.AdagradOptimizer
-AdamOptimizer = tf.train.AdamOptimizer
-GradientDescentOptimizer = tf.train.GradientDescentOptimizer
-RMSPropOptimizer = tf.train.RMSPropOptimizer
+AdagradOptimizer = tf.compat.v1.train.AdagradOptimizer
+AdamOptimizer = tf.compat.v1.train.AdamOptimizer
+GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
+RMSPropOptimizer = tf.compat.v1.train.RMSPropOptimizer
 
 DPAdagradOptimizer = make_optimizer_class(AdagradOptimizer)
 DPAdamOptimizer = make_optimizer_class(AdamOptimizer)
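After the last hunk above, the module-level aliases name the v1 optimizers explicitly. A usage sketch (illustrative; the constructor arguments mirror those used by the tutorials later in this diff):

```python
import tensorflow as tf
from tensorflow_privacy.privacy.optimizers import dp_optimizer

# The alias is now literally the v1 symbol:
assert dp_optimizer.GradientDescentOptimizer is tf.compat.v1.train.GradientDescentOptimizer

# DP wrapper construction, as in the tutorials below:
opt = dp_optimizer.DPGradientDescentGaussianOptimizer(
    l2_norm_clip=1.0,
    noise_multiplier=1.1,
    num_microbatches=1,
    learning_rate=0.15)
```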
@@ -14,8 +14,7 @@
 
 from absl.testing import parameterized
 import numpy as np
-import tensorflow.compat.v1 as tf
+import tensorflow as tf
 
 from tensorflow_privacy.privacy.dp_query import gaussian_query
 from tensorflow_privacy.privacy.optimizers import dp_optimizer
-
@@ -23,7 +22,7 @@ from tensorflow_privacy.privacy.optimizers import dp_optimizer
 class DPOptimizerEagerTest(tf.test.TestCase, parameterized.TestCase):
 
   def setUp(self):
-    tf.enable_eager_execution()
+    tf.compat.v1.enable_eager_execution()
     super().setUp()
 
   def _loss_fn(self, val0, val1):
@@ -53,7 +52,7 @@ class DPOptimizerEagerTest(tf.test.TestCase, parameterized.TestCase):
       opt = cls(
           dp_sum_query, num_microbatches=num_microbatches, learning_rate=2.0)
 
-      self.evaluate(tf.global_variables_initializer())
+      self.evaluate(tf.compat.v1.global_variables_initializer())
       # Fetch params to validate initial values
      self.assertAllClose([1.0, 2.0], self.evaluate(var0))
 
@@ -77,7 +76,7 @@ class DPOptimizerEagerTest(tf.test.TestCase, parameterized.TestCase):
 
       opt = cls(dp_sum_query, num_microbatches=1, learning_rate=2.0)
 
-      self.evaluate(tf.global_variables_initializer())
+      self.evaluate(tf.compat.v1.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([0.0, 0.0], self.evaluate(var0))
 
@@ -100,7 +99,7 @@ class DPOptimizerEagerTest(tf.test.TestCase, parameterized.TestCase):
 
       opt = cls(dp_sum_query, num_microbatches=1, learning_rate=2.0)
 
-      self.evaluate(tf.global_variables_initializer())
+      self.evaluate(tf.compat.v1.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([0.0], self.evaluate(var0))
 
@@ -17,7 +17,7 @@ import unittest
 
 from absl.testing import parameterized
 import numpy as np
-import tensorflow.compat.v1 as tf
+import tensorflow as tf
 from tensorflow_privacy.privacy.dp_query import gaussian_query
 from tensorflow_privacy.privacy.optimizers import dp_optimizer
 
@@ -77,7 +77,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
       opt = cls(
           dp_sum_query, num_microbatches=num_microbatches, learning_rate=2.0)
 
-      self.evaluate(tf.global_variables_initializer())
+      self.evaluate(tf.compat.v1.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([1.0, 2.0], self.evaluate(var0))
 
@@ -101,7 +101,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
 
       opt = cls(dp_sum_query, num_microbatches=1, learning_rate=2.0)
 
-      self.evaluate(tf.global_variables_initializer())
+      self.evaluate(tf.compat.v1.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([0.0, 0.0], self.evaluate(var0))
 
@@ -127,7 +127,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
       opt = cls(
           dp_sum_query, num_microbatches=num_microbatches, learning_rate=2.0)
 
-      self.evaluate(tf.global_variables_initializer())
+      self.evaluate(tf.compat.v1.global_variables_initializer())
       # Fetch params to validate initial values
       var_np = self.evaluate(var0)
       self.assertAllClose([0.0, 0.0], var_np)
@@ -162,7 +162,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
       opt = cls(
           dp_sum_query, num_microbatches=num_microbatches, learning_rate=2.0)
 
-      self.evaluate(tf.global_variables_initializer())
+      self.evaluate(tf.compat.v1.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([0.0], self.evaluate(var0))
 
@@ -178,7 +178,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
   @unittest.mock.patch('absl.logging.warning')
   def testComputeGradientsOverrideWarning(self, mock_logging):
 
-    class SimpleOptimizer(tf.train.Optimizer):
+    class SimpleOptimizer(tf.compat.v1.train.Optimizer):
 
       def compute_gradients(self):
         return 0
@@ -202,7 +202,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
       dp_sum_query = gaussian_query.GaussianSumQuery(1.0, 0.0)
       optimizer = dp_optimizer.DPGradientDescentOptimizer(
           dp_sum_query, num_microbatches=1, learning_rate=1.0)
-      global_step = tf.train.get_global_step()
+      global_step = tf.compat.v1.train.get_global_step()
       train_op = optimizer.minimize(loss=vector_loss, global_step=global_step)
       return tf.estimator.EstimatorSpec(
           mode=mode, loss=scalar_loss, train_op=train_op)
@@ -216,7 +216,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
         true_weights) + true_bias + np.random.normal(
             scale=0.1, size=(200, 1)).astype(np.float32)
 
-    train_input_fn = tf.estimator.inputs.numpy_input_fn(
+    train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
        x={'x': train_data},
         y=train_labels,
         batch_size=20,
@@ -248,7 +248,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
           learning_rate=2.0,
           unroll_microbatches=True)
 
-      self.evaluate(tf.global_variables_initializer())
+      self.evaluate(tf.compat.v1.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([1.0, 2.0], self.evaluate(var0))
 
@@ -274,7 +274,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
           num_microbatches=1,
           learning_rate=2.0)
 
-      self.evaluate(tf.global_variables_initializer())
+      self.evaluate(tf.compat.v1.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([0.0], self.evaluate(var0))
 
@@ -327,7 +327,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
       opt = cls(
           dp_sum_query, num_microbatches=num_microbatches, learning_rate=2.0)
 
-      self.evaluate(tf.global_variables_initializer())
+      self.evaluate(tf.compat.v1.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([1.0, 2.0], self.evaluate(var0))
 
@@ -14,14 +14,13 @@
 """Vectorized differentially private optimizers for TensorFlow."""
 
 from absl import logging
+import tensorflow as tf
 
-import tensorflow.compat.v1 as tf
-
-AdagradOptimizer = tf.train.AdagradOptimizer
-AdamOptimizer = tf.train.AdamOptimizer
-GradientDescentOptimizer = tf.train.GradientDescentOptimizer
-parent_code = tf.train.Optimizer.compute_gradients.__code__
-GATE_OP = tf.train.Optimizer.GATE_OP  # pylint: disable=invalid-name
+AdagradOptimizer = tf.compat.v1.train.AdagradOptimizer
+AdamOptimizer = tf.compat.v1.train.AdamOptimizer
+GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
+parent_code = tf.compat.v1.train.Optimizer.compute_gradients.__code__
+GATE_OP = tf.compat.v1.train.Optimizer.GATE_OP  # pylint: disable=invalid-name
 
 
 def make_vectorized_optimizer_class(cls):
@@ -134,8 +133,8 @@ def make_vectorized_optimizer_class(cls):
 
       if var_list is None:
         var_list = (
-            tf.trainable_variables() +
-            tf.get_collection(tf.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
+            tf.compat.v1.trainable_variables() + tf.compat.v1.get_collection(
+                tf.compat.v1.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
 
       def process_microbatch(microbatch_loss):
         """Compute clipped grads for one microbatch."""
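Both optimizer modules now spell out the v1 collection lookups in the default `var_list`. A minimal graph-mode sketch of what that expression gathers (illustrative; `w` is a made-up variable):

```python
import tensorflow as tf

tf.compat.v1.disable_eager_execution()
w = tf.compat.v1.get_variable('w', shape=[2])

# Same expression as in the diff: trainable variables plus trainable
# resource variables tracked in the v1 graph collections.
var_list = (
    tf.compat.v1.trainable_variables() +
    tf.compat.v1.get_collection(
        tf.compat.v1.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
```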
@@ -16,7 +16,7 @@ import unittest
 
 from absl.testing import parameterized
 import numpy as np
-import tensorflow.compat.v1 as tf
+import tensorflow as tf
 from tensorflow_privacy.privacy.optimizers import dp_optimizer_vectorized
 from tensorflow_privacy.privacy.optimizers.dp_optimizer_vectorized import VectorizedDPAdagrad
 from tensorflow_privacy.privacy.optimizers.dp_optimizer_vectorized import VectorizedDPAdam
@@ -52,7 +52,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
           num_microbatches=num_microbatches,
           learning_rate=2.0)
 
-      self.evaluate(tf.global_variables_initializer())
+      self.evaluate(tf.compat.v1.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([1.0, 2.0], self.evaluate(var0))
 
@@ -76,7 +76,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
           num_microbatches=1,
           learning_rate=2.0)
 
-      self.evaluate(tf.global_variables_initializer())
+      self.evaluate(tf.compat.v1.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([0.0, 0.0], self.evaluate(var0))
 
@@ -99,7 +99,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
           num_microbatches=1,
           learning_rate=2.0)
 
-      self.evaluate(tf.global_variables_initializer())
+      self.evaluate(tf.compat.v1.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([0.0], self.evaluate(var0))
 
@@ -115,7 +115,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
   @unittest.mock.patch('absl.logging.warning')
   def testComputeGradientsOverrideWarning(self, mock_logging):
 
-    class SimpleOptimizer(tf.train.Optimizer):
+    class SimpleOptimizer(tf.compat.v1.train.Optimizer):
 
       def compute_gradients(self):
         return 0
@@ -141,7 +141,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
           noise_multiplier=0.,
           num_microbatches=1,
           learning_rate=1.0)
-      global_step = tf.train.get_global_step()
+      global_step = tf.compat.v1.train.get_global_step()
       train_op = optimizer.minimize(loss=vector_loss, global_step=global_step)
       return tf.estimator.EstimatorSpec(
           mode=mode, loss=scalar_loss, train_op=train_op)
@@ -155,7 +155,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
         true_weights) + true_bias + np.random.normal(
             scale=0.1, size=(200, 1)).astype(np.float32)
 
-    train_input_fn = tf.estimator.inputs.numpy_input_fn(
+    train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
         x={'x': train_data},
         y=train_labels,
         batch_size=20,
@@ -181,7 +181,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
           num_microbatches=1,
           learning_rate=2.0)
 
-      self.evaluate(tf.global_variables_initializer())
+      self.evaluate(tf.compat.v1.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([0.0], self.evaluate(var0))
 
@@ -18,7 +18,6 @@ from absl import flags
 from absl import logging
 import numpy as np
 import tensorflow as tf
-
 from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import AttackType
 from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import get_flattened_attack_metrics
 from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import SlicingSpec
@@ -56,7 +55,7 @@ def small_cnn_fn(features, labels, mode):
   if mode == tf.estimator.ModeKeys.TRAIN:
     optimizer = tf.train.MomentumOptimizer(
         learning_rate=FLAGS.learning_rate, momentum=0.9)
-    global_step = tf.train.get_global_step()
+    global_step = tf.compat.v1.train.get_global_step()
     train_op = optimizer.minimize(loss=scalar_loss, global_step=global_step)
     return tf.estimator.EstimatorSpec(
         mode=mode, loss=scalar_loss, train_op=train_op)
@@ -104,7 +103,8 @@ def main(unused_argv):
   # A function to construct input_fn given (data, label), to be used by the
   # membership inference training hook.
   def input_fn_constructor(x, y):
-    return tf.estimator.inputs.numpy_input_fn(x={'x': x}, y=y, shuffle=False)
+    return tf.compat.v1.estimator.inputs.numpy_input_fn(
+        x={'x': x}, y=y, shuffle=False)
 
   # Get hook for membership inference attack.
   mia_hook = MembershipInferenceTrainingHook(
@@ -118,13 +118,13 @@ def main(unused_argv):
       tensorboard_merge_classifiers=FLAGS.tensorboard_merge_classifiers)
 
   # Create tf.Estimator input functions for the training and test data.
-  train_input_fn = tf.estimator.inputs.numpy_input_fn(
+  train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
       x={'x': x_train},
       y=y_train,
       batch_size=FLAGS.batch_size,
      num_epochs=FLAGS.epochs,
       shuffle=True)
-  eval_input_fn = tf.estimator.inputs.numpy_input_fn(
+  eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
       x={'x': x_test}, y=y_test, num_epochs=1, shuffle=False)
 
   # Training loop.
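`tf.estimator.inputs` only exists under the v1 namespace in TF 2.x, hence the rewrites above. A self-contained sketch (illustrative data; assumes the tf.estimator API is available):

```python
import numpy as np
import tensorflow as tf

x = np.random.rand(8, 4).astype(np.float32)
y = np.zeros(8, dtype=np.int32)

# v1 endpoint for feeding numpy arrays to a tf.estimator.Estimator.
input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
    x={'x': x}, y=y, batch_size=4, num_epochs=1, shuffle=False)
```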
@@ -14,11 +14,9 @@
 
 from absl.testing import absltest
 import numpy as np
-import tensorflow.compat.v1 as tf
+import tensorflow as tf
+from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack import data_structures
 from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack import tf_estimator_evaluation
-from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import AttackResults
-from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import AttackType
-from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import get_flattened_attack_metrics
 
 
 class UtilsTest(absltest.TestCase):
@@ -52,12 +50,12 @@ class UtilsTest(absltest.TestCase):
 
     # Define the classifier, input_fn for training and test data
     self.classifier = tf.estimator.Estimator(model_fn=model_fn)
-    self.input_fn_train = tf.estimator.inputs.numpy_input_fn(
+    self.input_fn_train = tf.compat.v1.estimator.inputs.numpy_input_fn(
        x={'x': self.train_data},
         y=self.train_labels,
         num_epochs=1,
         shuffle=False)
-    self.input_fn_test = tf.estimator.inputs.numpy_input_fn(
+    self.input_fn_test = tf.compat.v1.estimator.inputs.numpy_input_fn(
        x={'x': self.test_data},
         y=self.test_labels,
         num_epochs=1,
@@ -83,9 +81,9 @@ class UtilsTest(absltest.TestCase):
         self.input_fn_test,
         self.train_labels,
         self.test_labels,
-        attack_types=[AttackType.THRESHOLD_ATTACK])
-    self.assertIsInstance(results, AttackResults)
-    att_types, att_slices, att_metrics, att_values = get_flattened_attack_metrics(
+        attack_types=[data_structures.AttackType.THRESHOLD_ATTACK])
+    self.assertIsInstance(results, data_structures.AttackResults)
+    att_types, att_slices, att_metrics, att_values = data_structures.get_flattened_attack_metrics(
        results)
     self.assertLen(att_types, 2)
     self.assertLen(att_slices, 2)
@@ -96,15 +94,16 @@ class UtilsTest(absltest.TestCase):
     """Test the attack on the final models."""
 
     def input_fn_constructor(x, y):
-      return tf.estimator.inputs.numpy_input_fn(x={'x': x}, y=y, shuffle=False)
+      return tf.compat.v1.estimator.inputs.numpy_input_fn(
+          x={'x': x}, y=y, shuffle=False)
 
     results = tf_estimator_evaluation.run_attack_on_tf_estimator_model(
         self.classifier, (self.train_data, self.train_labels),
         (self.test_data, self.test_labels),
        input_fn_constructor,
-        attack_types=[AttackType.THRESHOLD_ATTACK])
-    self.assertIsInstance(results, AttackResults)
-    att_types, att_slices, att_metrics, att_values = get_flattened_attack_metrics(
+        attack_types=[data_structures.AttackType.THRESHOLD_ATTACK])
+    self.assertIsInstance(results, data_structures.AttackResults)
+    att_types, att_slices, att_metrics, att_values = data_structures.get_flattened_attack_metrics(
        results)
     self.assertLen(att_types, 2)
     self.assertLen(att_slices, 2)
@@ -13,10 +13,11 @@
 # limitations under the License.
 """Tutorial for bolt_on module, the model and the optimizer."""
 
-import tensorflow.compat.v1 as tf  # pylint: disable=wrong-import-position
+import tensorflow as tf
 from tensorflow_privacy.privacy.bolt_on import losses  # pylint: disable=wrong-import-position
 from tensorflow_privacy.privacy.bolt_on import models  # pylint: disable=wrong-import-position
 from tensorflow_privacy.privacy.bolt_on.optimizers import BoltOn  # pylint: disable=wrong-import-position
+
 # -------
 # First, we will create a binary classification dataset with a single output
 # dimension. The samples for each label are repeated data points at different
@@ -93,9 +93,10 @@ def rnn_model_fn(features, labels, mode):  # pylint: disable=unused-argument
         unroll_microbatches=True)
     opt_loss = vector_loss
   else:
-    optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
+    optimizer = tf.compat.v1.train.AdamOptimizer(
+        learning_rate=FLAGS.learning_rate)
     opt_loss = scalar_loss
-  global_step = tf.train.get_global_step()
+  global_step = tf.compat.v1.train.get_global_step()
   train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
   return tf.estimator.EstimatorSpec(
       mode=mode, loss=scalar_loss, train_op=train_op)
@@ -175,12 +176,12 @@ def main(unused_argv):
   batch_len = FLAGS.batch_size * SEQ_LEN
   train_data_end = len(train_data) - len(train_data) % batch_len
   test_data_end = len(test_data) - len(test_data) % batch_len
-  train_input_fn = tf.estimator.inputs.numpy_input_fn(
+  train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
      x={'x': train_data[:train_data_end]},
       batch_size=batch_len,
       num_epochs=FLAGS.epochs,
       shuffle=False)
-  eval_input_fn = tf.estimator.inputs.numpy_input_fn(
+  eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
      x={'x': test_data[:test_data_end]},
       batch_size=batch_len,
       num_epochs=1,
@@ -18,8 +18,7 @@ import time
 from absl import app
 from absl import flags
 from absl import logging
-import tensorflow.compat.v1 as tf
-
+import tensorflow as tf
 from tensorflow_privacy.privacy.analysis import compute_dp_sgd_privacy_lib
 from tensorflow_privacy.privacy.optimizers import dp_optimizer
 import mnist_dpsgd_tutorial_common as common
@@ -58,8 +57,8 @@ def cnn_model_fn(features, labels, mode, params):  # pylint: disable=unused-argu
   if FLAGS.dpsgd:
     # Use DP version of GradientDescentOptimizer. Other optimizers are
     # available in dp_optimizer. Most optimizers inheriting from
-    # tf.train.Optimizer should be wrappable in differentially private
-    # counterparts by calling dp_optimizer.optimizer_from_args().
+    # tf.compat.v1.train.Optimizer should be wrappable in differentially
+    # private counterparts by calling dp_optimizer.optimizer_from_args().
     optimizer = dp_optimizer.DPGradientDescentGaussianOptimizer(
         l2_norm_clip=FLAGS.l2_norm_clip,
         noise_multiplier=FLAGS.noise_multiplier,
@@ -67,11 +66,11 @@ def cnn_model_fn(features, labels, mode, params):  # pylint: disable=unused-argu
         learning_rate=FLAGS.learning_rate)
     opt_loss = vector_loss
   else:
-    optimizer = tf.train.GradientDescentOptimizer(
+    optimizer = tf.compat.v1.train.GradientDescentOptimizer(
        learning_rate=FLAGS.learning_rate)
     opt_loss = scalar_loss
 
-  global_step = tf.train.get_global_step()
+  global_step = tf.compat.v1.train.get_global_step()
   train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
 
   # In the following, we pass the mean of the loss (scalar_loss) rather than
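`tf.train.get_global_step` likewise moved under `tf.compat.v1`. Outside an Estimator, `get_global_step()` returns None, so this illustrative sketch substitutes `get_or_create_global_step()` (a deliberate swap, not what the tutorials call):

```python
import tensorflow as tf

tf.compat.v1.disable_eager_execution()

global_step = tf.compat.v1.train.get_or_create_global_step()
loss = tf.compat.v1.get_variable('w', initializer=1.0) ** 2
opt = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.1)

# minimize() increments global_step each time train_op runs.
train_op = opt.minimize(loss=loss, global_step=global_step)
```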
@@ -16,14 +16,14 @@
 from absl import app
 from absl import flags
 import numpy as np
-import tensorflow.compat.v1 as tf
+import tensorflow as tf
 
 from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp
 from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent
 from tensorflow_privacy.privacy.optimizers.dp_optimizer import DPGradientDescentGaussianOptimizer
 
-GradientDescentOptimizer = tf.train.GradientDescentOptimizer
-tf.enable_eager_execution()
+GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
+tf.compat.v1.enable_eager_execution()
 
 flags.DEFINE_boolean(
     'dpsgd', True, 'If True, train with DP-SGD. If False, '
@@ -19,7 +19,7 @@ import time
 from absl import app
 from absl import flags
 from absl import logging
-import tensorflow.compat.v1 as tf
+import tensorflow as tf
 from tensorflow_privacy.privacy.analysis import compute_dp_sgd_privacy_lib
 from tensorflow_privacy.privacy.optimizers import dp_optimizer
 import mnist_dpsgd_tutorial_common as common
@@ -60,8 +60,8 @@ def cnn_model_fn(features, labels, mode, params):  # pylint: disable=unused-argu
   if FLAGS.dpsgd:
     # Use DP version of GradientDescentOptimizer. Other optimizers are
     # available in dp_optimizer. Most optimizers inheriting from
-    # tf.train.Optimizer should be wrappable in differentially private
-    # counterparts by calling dp_optimizer.optimizer_from_args().
+    # tf.compat.v1.train.Optimizer should be wrappable in differentially
+    # private counterparts by calling dp_optimizer.optimizer_from_args().
     optimizer = dp_optimizer.DPGradientDescentGaussianOptimizer(
         l2_norm_clip=FLAGS.l2_norm_clip,
         noise_multiplier=FLAGS.noise_multiplier,
@@ -69,7 +69,7 @@ def cnn_model_fn(features, labels, mode, params):  # pylint: disable=unused-argu
         learning_rate=FLAGS.learning_rate)
     opt_loss = vector_loss
   else:
-    optimizer = tf.train.GradientDescentOptimizer(
+    optimizer = tf.compat.v1.train.GradientDescentOptimizer(
        learning_rate=FLAGS.learning_rate)
     opt_loss = scalar_loss
 
@@ -77,7 +77,7 @@ def cnn_model_fn(features, labels, mode, params):  # pylint: disable=unused-argu
   # CrossShardOptimizer.
   optimizer = tf.tpu.CrossShardOptimizer(optimizer)
 
-  global_step = tf.train.get_global_step()
+  global_step = tf.compat.v1.train.get_global_step()
   train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
 
   # In the following, we pass the mean of the loss (scalar_loss) rather than
@@ -40,7 +40,7 @@ FLAGS = flags.FLAGS
 
 NUM_TRAIN_EXAMPLES = 60000
 
-GradientDescentOptimizer = tf.train.GradientDescentOptimizer
+GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
 
 
 def compute_epsilon(steps):
@@ -85,8 +85,8 @@ def cnn_model_fn(features, labels, mode):
   if FLAGS.dpsgd:
     # Use DP version of GradientDescentOptimizer. Other optimizers are
     # available in dp_optimizer. Most optimizers inheriting from
-    # tf.train.Optimizer should be wrappable in differentially private
-    # counterparts by calling dp_optimizer.optimizer_from_args().
+    # tf.compat.v1.train.Optimizer should be wrappable in differentially
+    # private counterparts by calling dp_optimizer.optimizer_from_args().
     optimizer = dp_optimizer_vectorized.VectorizedDPSGD(
         l2_norm_clip=FLAGS.l2_norm_clip,
         noise_multiplier=FLAGS.noise_multiplier,
@@ -96,7 +96,7 @@ def cnn_model_fn(features, labels, mode):
   else:
     optimizer = GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)
     opt_loss = scalar_loss
-  global_step = tf.train.get_global_step()
+  global_step = tf.compat.v1.train.get_global_step()
   train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
   # In the following, we pass the mean of the loss (scalar_loss) rather than
   # the vector_loss because tf.estimator requires a scalar loss. This is only
@@ -154,13 +154,13 @@ def main(unused_argv):
       model_fn=cnn_model_fn, model_dir=FLAGS.model_dir)
 
   # Create tf.Estimator input functions for the training and test data.
-  train_input_fn = tf.estimator.inputs.numpy_input_fn(
+  train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
      x={'x': train_data},
       y=train_labels,
       batch_size=FLAGS.batch_size,
       num_epochs=FLAGS.epochs,
       shuffle=True)
-  eval_input_fn = tf.estimator.inputs.numpy_input_fn(
+  eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
      x={'x': test_data}, y=test_labels, num_epochs=1, shuffle=False)
 
   # Training loop.
@@ -27,12 +27,12 @@ from absl import app
 from absl import flags
 from absl import logging
 import numpy as np
-import tensorflow.compat.v1 as tf
+import tensorflow as tf
 from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp
 from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent
 from tensorflow_privacy.privacy.optimizers import dp_optimizer
 
-GradientDescentOptimizer = tf.train.GradientDescentOptimizer
+GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
 
 FLAGS = flags.FLAGS
 
@@ -80,7 +80,7 @@ def lr_model_fn(features, labels, mode, nclasses, dim):
   else:
     optimizer = GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)
     opt_loss = scalar_loss
-  global_step = tf.train.get_global_step()
+  global_step = tf.compat.v1.train.get_global_step()
   train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
   # In the following, we pass the mean of the loss (scalar_loss) rather than
   # the vector_loss because tf.estimator requires a scalar loss. This is only
@@ -205,13 +205,13 @@ def main(unused_argv):
   # Create tf.Estimator input functions for the training and test data.
   # To analyze the per-user privacy loss, we keep the same orders of samples in
   # each epoch by setting shuffle=False.
-  train_input_fn = tf.estimator.inputs.numpy_input_fn(
+  train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
      x={'x': train_data},
       y=train_labels,
       batch_size=FLAGS.batch_size,
       num_epochs=FLAGS.epochs,
       shuffle=False)
-  eval_input_fn = tf.estimator.inputs.numpy_input_fn(
+  eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
      x={'x': test_data}, y=test_labels, num_epochs=1, shuffle=False)
 
   # Train the model.
@@ -21,7 +21,6 @@ import pandas as pd
 from scipy import stats
 from sklearn.model_selection import train_test_split
 import tensorflow as tf
-
 from tensorflow_privacy.privacy.analysis.gdp_accountant import compute_eps_poisson
 from tensorflow_privacy.privacy.analysis.gdp_accountant import compute_mu_poisson
 from tensorflow_privacy.privacy.optimizers import dp_optimizer
@@ -92,8 +91,8 @@ def nn_model_fn(features, labels, mode):
   if FLAGS.dpsgd:
     # Use DP version of GradientDescentOptimizer. Other optimizers are
     # available in dp_optimizer. Most optimizers inheriting from
-    # tf.train.Optimizer should be wrappable in differentially private
-    # counterparts by calling dp_optimizer.optimizer_from_args().
+    # tf.compat.v1.train.Optimizer should be wrappable in differentially
+    # private counterparts by calling dp_optimizer.optimizer_from_args().
     optimizer = dp_optimizer.DPAdamGaussianOptimizer(
         l2_norm_clip=FLAGS.l2_norm_clip,
         noise_multiplier=FLAGS.noise_multiplier,
@@ -46,9 +46,9 @@ def cnn_model_fn(features, labels, mode):
 
   # Configure the training op (for TRAIN mode).
   if mode == tf.estimator.ModeKeys.TRAIN:
-    optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
+    optimizer = tf.compat.v1.train.GradientDescentOptimizer(FLAGS.learning_rate)
     opt_loss = scalar_loss
-    global_step = tf.train.get_global_step()
+    global_step = tf.compat.v1.train.get_global_step()
     train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
     return tf.estimator.EstimatorSpec(
         mode=mode, loss=scalar_loss, train_op=train_op)
@@ -97,13 +97,13 @@ def main(unused_argv):
   mnist_classifier = tf.estimator.Estimator(model_fn=cnn_model_fn)
 
   # Create tf.Estimator input functions for the training and test data.
-  train_input_fn = tf.estimator.inputs.numpy_input_fn(
+  train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
      x={'x': train_data},
       y=train_labels,
       batch_size=FLAGS.batch_size,
       num_epochs=FLAGS.epochs,
       shuffle=True)
-  eval_input_fn = tf.estimator.inputs.numpy_input_fn(
+  eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
      x={'x': test_data}, y=test_labels, num_epochs=1, shuffle=False)
 
   # Training loop.