forked from 626_privacy/tensorflow_privacy

Fix issue with importing tensorflow.compat.v1.

PiperOrigin-RevId: 300175680

parent 6541960e79
commit 2301931725

17 changed files with 87 additions and 88 deletions
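Every hunk below follows the same pattern: each of these modules already does `import tensorflow.compat.v1 as tf`, so a call spelled `tf.compat.v1.assign` resolves through `tensorflow.compat.v1.compat.v1`, a doubled path that is not guaranteed to exist in every TensorFlow build and, for the module-level statements, presumably made the import itself fail. The commit drops the now-redundant prefix. A minimal sketch of the assumed failure mode, with an illustrative variable name not taken from the diff:

    import tensorflow.compat.v1 as tf  # `tf` is already the v1 API surface

    v = tf.Variable(0)
    update = tf.assign(v, 1)  # fine: assign lives directly on the v1 surface
    # update = tf.compat.v1.assign(v, 1)  # resolves through
    #     tensorflow.compat.v1.compat.v1, which some TF builds do not provide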
@@ -111,7 +111,7 @@ class PrivacyLedger(object):
     def _do_record_query():
       with tf.control_dependencies(
-          [tf.compat.v1.assign(self._query_count, self._query_count + 1)]):
+          [tf.assign(self._query_count, self._query_count + 1)]):
         return self._query_buffer.append(
             [self._sample_count, l2_norm_bound, noise_stddev])


@@ -120,14 +120,14 @@ class PrivacyLedger(object):
   def finalize_sample(self):
     """Finalizes sample and records sample ledger entry."""
     with tf.control_dependencies([
-        tf.compat.v1.assign(self._sample_var, [
+        tf.assign(self._sample_var, [
             self._population_size, self._selection_probability,
             self._query_count
         ])
     ]):
       with tf.control_dependencies([
-          tf.compat.v1.assign(self._sample_count, self._sample_count + 1),
-          tf.compat.v1.assign(self._query_count, 0)
+          tf.assign(self._sample_count, self._sample_count + 1),
+          tf.assign(self._query_count, 0)
       ]):
         return self._sample_buffer.append(self._sample_var)

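The row layouts of the two buffers touched above can be read straight off the hunks: a query-buffer row is [sample_count, l2_norm_bound, noise_stddev] and the sample variable is [population_size, selection_probability, query_count]. A plain-Python paraphrase of the control flow, not the library's API (the TF version sequences the same steps with control dependencies):

    query_count = 0
    sample_count = 0

    def record_query(l2_norm_bound, noise_stddev):
        global query_count
        query_count += 1                    # tf.assign(self._query_count, ...)
        return [sample_count, l2_norm_bound, noise_stddev]

    def finalize_sample(population_size, selection_probability):
        global sample_count, query_count
        entry = [population_size, selection_probability, query_count]
        sample_count += 1                   # tf.assign(self._sample_count, ...)
        query_count = 0                     # tf.assign(self._query_count, 0)
        return entry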
@@ -25,7 +25,7 @@ from tensorflow_privacy.privacy.dp_query import gaussian_query
 from tensorflow_privacy.privacy.dp_query import nested_query
 from tensorflow_privacy.privacy.dp_query import test_utils

-tf.compat.v1.enable_eager_execution()
+tf.enable_eager_execution()


 class PrivacyLedgerTest(tf.test.TestCase):
@@ -63,8 +63,8 @@ class PrivacyLedgerTest(tf.test.TestCase):
         query, population_size, selection_probability)

     # First sample.
-    tf.compat.v1.assign(population_size, 10)
-    tf.compat.v1.assign(selection_probability, 0.1)
+    tf.assign(population_size, 10)
+    tf.assign(selection_probability, 0.1)
     test_utils.run_query(query, [record1, record2])

     expected_queries = [[10.0, 0.0]]
@@ -75,8 +75,8 @@ class PrivacyLedgerTest(tf.test.TestCase):
     self.assertAllClose(sample_1.queries, expected_queries)

     # Second sample.
-    tf.compat.v1.assign(population_size, 20)
-    tf.compat.v1.assign(selection_probability, 0.2)
+    tf.assign(population_size, 20)
+    tf.assign(selection_probability, 0.2)
     test_utils.run_query(query, [record1, record2])

     formatted = query.ledger.get_formatted_ledger_eager()
@@ -106,8 +106,8 @@ class PrivacyLedgerTest(tf.test.TestCase):
     record2 = [5.0, [1.0, 2.0]]

     # First sample.
-    tf.compat.v1.assign(population_size, 10)
-    tf.compat.v1.assign(selection_probability, 0.1)
+    tf.assign(population_size, 10)
+    tf.assign(selection_probability, 0.1)
     test_utils.run_query(query, [record1, record2])

     expected_queries = [[4.0, 2.0], [5.0, 1.0]]
@@ -118,8 +118,8 @@ class PrivacyLedgerTest(tf.test.TestCase):
     self.assertAllClose(sorted(sample_1.queries), sorted(expected_queries))

     # Second sample.
-    tf.compat.v1.assign(population_size, 20)
-    tf.compat.v1.assign(selection_probability, 0.2)
+    tf.assign(population_size, 20)
+    tf.assign(selection_probability, 0.2)
     test_utils.run_query(query, [record1, record2])

     formatted = query.ledger.get_formatted_ledger_eager()
@@ -50,10 +50,10 @@ class TensorBuffer(object):
       raise ValueError('Shape cannot be scalar.')
     shape = [capacity] + shape

-    with tf.compat.v1.variable_scope(self._name):
+    with tf.variable_scope(self._name):
       # We need to use a placeholder as the initial value to allow resizing.
-      self._buffer = tf.compat.v1.Variable(
-          initial_value=tf.compat.v1.placeholder_with_default(
+      self._buffer = tf.Variable(
+          initial_value=tf.placeholder_with_default(
               tf.zeros(shape, dtype), shape=None),
           trainable=False,
           name='buffer',
@@ -82,18 +82,18 @@ class TensorBuffer(object):
       padding = tf.zeros_like(self._buffer, self._buffer.dtype)
       new_buffer = tf.concat([self._buffer, padding], axis=0)
       if tf.executing_eagerly():
-        with tf.compat.v1.variable_scope(self._name, reuse=True):
-          self._buffer = tf.compat.v1.get_variable(
+        with tf.variable_scope(self._name, reuse=True):
+          self._buffer = tf.get_variable(
               name='buffer',
               dtype=self._dtype,
               initializer=new_buffer,
               trainable=False)
-          return self._buffer, tf.compat.v1.assign(
+          return self._buffer, tf.assign(
              self._capacity, tf.multiply(self._capacity, 2))
       else:
-        return tf.compat.v1.assign(
+        return tf.assign(
            self._buffer, new_buffer,
-            validate_shape=False), tf.compat.v1.assign(
+            validate_shape=False), tf.assign(
                self._capacity, tf.multiply(self._capacity, 2))

     update_buffer, update_capacity = tf.cond(
@@ -103,18 +103,18 @@ class TensorBuffer(object):

     with tf.control_dependencies([update_buffer, update_capacity]):
       with tf.control_dependencies([
-          tf.compat.v1.assert_less(
+          tf.assert_less(
               self._current_size,
               self._capacity,
               message='Appending past end of TensorBuffer.'),
-          tf.compat.v1.assert_equal(
+          tf.assert_equal(
               tf.shape(input=value),
               tf.shape(input=self._buffer)[1:],
               message='Appending value of inconsistent shape.')
       ]):
         with tf.control_dependencies(
-            [tf.compat.v1.assign(self._buffer[self._current_size, :], value)]):
-          return tf.compat.v1.assign_add(self._current_size, 1)
+            [tf.assign(self._buffer[self._current_size, :], value)]):
+          return tf.assign_add(self._current_size, 1)

   @property
   def values(self):
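The three TensorBuffer hunks above all belong to its append path: when current_size reaches capacity, the buffer is concatenated with an equally sized block of zeros and the capacity doubles (the eager branch apparently rebuilds the variable through get_variable because a shape-changing assign is not available there). A plain-Python paraphrase of that amortized-doubling scheme, illustrative only:

    class GrowableBuffer:
        """Append-only buffer that doubles its storage when full."""

        def __init__(self, capacity, width):
            self.capacity = capacity
            self.current_size = 0
            self.rows = [[0.0] * width for _ in range(capacity)]

        def append(self, value):
            if self.current_size == self.capacity:  # the tf.cond branch above
                self.rows += [[0.0] * len(value) for _ in range(self.capacity)]
                self.capacity *= 2          # tf.assign(capacity, 2 * capacity)
            assert self.current_size < self.capacity, (
                'Appending past end of TensorBuffer.')
            assert len(value) == len(self.rows[0]), (
                'Appending value of inconsistent shape.')
            self.rows[self.current_size] = list(value)
            self.current_size += 1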
@@ -21,7 +21,7 @@ import tensorflow.compat.v1 as tf

 from tensorflow_privacy.privacy.analysis import tensor_buffer

-tf.compat.v1.enable_eager_execution()
+tf.enable_eager_execution()


 class TensorBufferTest(tf.test.TestCase):
@@ -38,7 +38,7 @@ class TensorBufferTest(tf.test.TestCase):
       values = my_buffer.values
       current_size = my_buffer.current_size
       capacity = my_buffer.capacity
-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())

       v, cs, cap = sess.run([values, current_size, capacity])
       self.assertAllEqual(v, [value1, value2])
@@ -60,7 +60,7 @@ class TensorBufferTest(tf.test.TestCase):
       values = my_buffer.values
       current_size = my_buffer.current_size
       capacity = my_buffer.capacity
-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())

       v, cs, cap = sess.run([values, current_size, capacity])
       self.assertAllEqual(v, [value1, value2, value3])
@@ -96,7 +96,7 @@ class GaussianSumQuery(dp_query.SumAggregationDPQuery):
       return v + tf.random.normal(
           tf.shape(input=v), stddev=global_state.stddev)
     else:
-      random_normal = tf.compat.v1.random_normal_initializer(
+      random_normal = tf.random_normal_initializer(
           stddev=global_state.stddev)

       def add_noise(v):
@@ -59,14 +59,13 @@ class GaussianQueryTest(tf.test.TestCase, parameterized.TestCase):
     record2 = tf.constant([4.0, -3.0])  # Not clipped.

     l2_norm_clip = tf.Variable(5.0)
-    l2_norm_clip_placeholder = tf.compat.v1.placeholder(tf.float32)
-    assign_l2_norm_clip = tf.compat.v1.assign(l2_norm_clip,
-                                              l2_norm_clip_placeholder)
+    l2_norm_clip_placeholder = tf.placeholder(tf.float32)
+    assign_l2_norm_clip = tf.assign(l2_norm_clip, l2_norm_clip_placeholder)
     query = gaussian_query.GaussianSumQuery(
         l2_norm_clip=l2_norm_clip, stddev=0.0)
     query_result, _ = test_utils.run_query(query, [record1, record2])

-    self.evaluate(tf.compat.v1.global_variables_initializer())
+    self.evaluate(tf.global_variables_initializer())
     result = sess.run(query_result)
     expected = [1.0, 1.0]
     self.assertAllClose(result, expected)
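This is the only hunk in the commit whose line count changes (14 old lines against 13 new), which accounts for the 88-deletions/87-additions imbalance in the header: dropping the long prefix lets the wrapped tf.assign call fit on one line. For context, a hedged sketch of how such a placeholder-backed assign is driven from a session; the names and the feed value are illustrative, not from the diff:

    import tensorflow.compat.v1 as tf

    tf.disable_eager_execution()  # needed under TF2 for placeholders

    clip = tf.Variable(5.0)
    clip_ph = tf.placeholder(tf.float32)
    assign_clip = tf.assign(clip, clip_ph)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(assign_clip, feed_dict={clip_ph: 0.0})  # retarget the clip
        print(sess.run(clip))  # 0.0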
@@ -27,7 +27,7 @@ from tensorflow_privacy.privacy.analysis import privacy_ledger
 from tensorflow_privacy.privacy.dp_query import quantile_adaptive_clip_sum_query
 from tensorflow_privacy.privacy.dp_query import test_utils

-tf.compat.v1.enable_eager_execution()
+tf.enable_eager_execution()


 class QuantileAdaptiveClipSumQueryTest(
@@ -323,7 +323,7 @@ class QuantileAdaptiveClipSumQueryTest(
     global_state = query.initial_global_state()

     for t in range(50):
-      tf.compat.v1.assign(learning_rate, 1.0 / np.sqrt(t + 1))
+      tf.assign(learning_rate, 1.0 / np.sqrt(t + 1))
       _, global_state = test_utils.run_query(query, records, global_state)

     actual_clip = global_state.sum_state.l2_norm_clip
@@ -350,8 +350,8 @@ class QuantileAdaptiveClipSumQueryTest(
         query, population_size, selection_probability)

     # First sample.
-    tf.compat.v1.assign(population_size, 10)
-    tf.compat.v1.assign(selection_probability, 0.1)
+    tf.assign(population_size, 10)
+    tf.assign(selection_probability, 0.1)
     _, global_state = test_utils.run_query(query, [record1, record2])

     expected_queries = [[10.0, 10.0], [0.5, 0.0]]
@@ -362,8 +362,8 @@ class QuantileAdaptiveClipSumQueryTest(
     self.assertAllClose(sample_1.queries, expected_queries)

     # Second sample.
-    tf.compat.v1.assign(population_size, 20)
-    tf.compat.v1.assign(selection_probability, 0.2)
+    tf.assign(population_size, 20)
+    tf.assign(selection_probability, 0.2)
     test_utils.run_query(query, [record1, record2], global_state)

     formatted = query.ledger.get_formatted_ledger_eager()
@@ -27,9 +27,9 @@ from tensorflow_privacy.privacy.dp_query import gaussian_query

 def make_optimizer_class(cls):
   """Constructs a DP optimizer class from an existing one."""
-  parent_code = tf.compat.v1.train.Optimizer.compute_gradients.__code__
+  parent_code = tf.train.Optimizer.compute_gradients.__code__
   child_code = cls.compute_gradients.__code__
-  GATE_OP = tf.compat.v1.train.Optimizer.GATE_OP  # pylint: disable=invalid-name
+  GATE_OP = tf.train.Optimizer.GATE_OP  # pylint: disable=invalid-name
   if child_code is not parent_code:
     logging.warning(
         'WARNING: Calling make_optimizer_class() on class %s that overrides '
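The two `__code__` lines above implement override detection: if a subclass redefines compute_gradients, its function carries a different code object than the base Optimizer's, and the factory logs a warning because the DP wrapper is about to replace that override (this is what testComputeGradientsOverrideWarning exercises later in the diff). A self-contained illustration of the mechanism:

    class Base:
        def compute_gradients(self):
            return 0

    class Inheriting(Base):
        pass  # inherits compute_gradients unchanged

    class Overriding(Base):
        def compute_gradients(self):
            return 1

    print(Inheriting.compute_gradients.__code__
          is Base.compute_gradients.__code__)  # True: no override
    print(Overriding.compute_gradients.__code__
          is Base.compute_gradients.__code__)  # False: override detected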
@@ -146,8 +146,8 @@ def make_optimizer_class(cls):

       if var_list is None:
         var_list = (
-            tf.compat.v1.trainable_variables() + tf.compat.v1.get_collection(
-                tf.compat.v1.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
+            tf.trainable_variables() + tf.get_collection(
+                tf.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))

       sample_state = self._dp_sum_query.initial_sample_state(var_list)

@@ -213,9 +213,9 @@ def make_gaussian_optimizer_class(cls):

   return DPGaussianOptimizerClass

-AdagradOptimizer = tf.compat.v1.train.AdagradOptimizer
-AdamOptimizer = tf.compat.v1.train.AdamOptimizer
-GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
+AdagradOptimizer = tf.train.AdagradOptimizer
+AdamOptimizer = tf.train.AdamOptimizer
+GradientDescentOptimizer = tf.train.GradientDescentOptimizer

 DPAdagradOptimizer = make_optimizer_class(AdagradOptimizer)
 DPAdamOptimizer = make_optimizer_class(AdamOptimizer)
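Wiring it together, as the tests later in this diff do: build a DP sum query, let the factory wrap a stock optimizer class, then instantiate the wrapped class. The constructor arguments below are copied from the test hunks; the clip and stddev values are illustrative:

    from tensorflow_privacy.privacy.dp_query import gaussian_query
    from tensorflow_privacy.privacy.optimizers import dp_optimizer

    dp_sum_query = gaussian_query.GaussianSumQuery(l2_norm_clip=1.0, stddev=0.0)
    DPGradientDescentOptimizer = dp_optimizer.make_optimizer_class(
        dp_optimizer.GradientDescentOptimizer)
    opt = DPGradientDescentOptimizer(
        dp_sum_query, num_microbatches=1, learning_rate=2.0)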
@@ -30,7 +30,7 @@ from tensorflow_privacy.privacy.optimizers import dp_optimizer
 class DPOptimizerEagerTest(tf.test.TestCase, parameterized.TestCase):

   def setUp(self):
-    tf.compat.v1.enable_eager_execution()
+    tf.enable_eager_execution()
     super(DPOptimizerEagerTest, self).setUp()

   def _loss_fn(self, val0, val1):
@@ -64,7 +64,7 @@ class DPOptimizerEagerTest(tf.test.TestCase, parameterized.TestCase):
           num_microbatches=num_microbatches,
           learning_rate=2.0)

-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([1.0, 2.0], self.evaluate(var0))

@@ -89,7 +89,7 @@ class DPOptimizerEagerTest(tf.test.TestCase, parameterized.TestCase):

       opt = cls(dp_sum_query, num_microbatches=1, learning_rate=2.0)

-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([0.0, 0.0], self.evaluate(var0))

@@ -113,7 +113,7 @@ class DPOptimizerEagerTest(tf.test.TestCase, parameterized.TestCase):

       opt = cls(dp_sum_query, num_microbatches=1, learning_rate=2.0)

-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([0.0], self.evaluate(var0))

@@ -63,7 +63,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
           num_microbatches=num_microbatches,
           learning_rate=2.0)

-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([1.0, 2.0], self.evaluate(var0))

@@ -87,7 +87,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):

       opt = cls(dp_sum_query, num_microbatches=1, learning_rate=2.0)

-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([0.0, 0.0], self.evaluate(var0))

@@ -110,7 +110,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):

       opt = cls(dp_sum_query, num_microbatches=1, learning_rate=2.0)

-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([0.0], self.evaluate(var0))

@@ -126,7 +126,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
   @mock.patch('absl.logging.warning')
   def testComputeGradientsOverrideWarning(self, mock_logging):

-    class SimpleOptimizer(tf.compat.v1.train.Optimizer):
+    class SimpleOptimizer(tf.train.Optimizer):

       def compute_gradients(self):
         return 0
@@ -153,7 +153,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
           dp_sum_query,
           num_microbatches=1,
           learning_rate=1.0)
-      global_step = tf.compat.v1.train.get_global_step()
+      global_step = tf.train.get_global_step()
       train_op = optimizer.minimize(loss=vector_loss, global_step=global_step)
       return tf.estimator.EstimatorSpec(
           mode=mode, loss=scalar_loss, train_op=train_op)
@@ -167,7 +167,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
         true_weights) + true_bias + np.random.normal(
             scale=0.1, size=(200, 1)).astype(np.float32)

-    train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+    train_input_fn = tf.estimator.inputs.numpy_input_fn(
         x={'x': train_data},
         y=train_labels,
         batch_size=20,
@@ -200,7 +200,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
           learning_rate=2.0,
           unroll_microbatches=True)

-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([1.0, 2.0], self.evaluate(var0))

@@ -225,7 +225,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
           num_microbatches=1,
           learning_rate=2.0)

-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([0.0], self.evaluate(var0))

@@ -21,11 +21,11 @@ from absl import logging

 import tensorflow.compat.v1 as tf

-AdagradOptimizer = tf.compat.v1.train.AdagradOptimizer
-AdamOptimizer = tf.compat.v1.train.AdamOptimizer
-GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
-parent_code = tf.compat.v1.train.Optimizer.compute_gradients.__code__
-GATE_OP = tf.compat.v1.train.Optimizer.GATE_OP  # pylint: disable=invalid-name
+AdagradOptimizer = tf.train.AdagradOptimizer
+AdamOptimizer = tf.train.AdamOptimizer
+GradientDescentOptimizer = tf.train.GradientDescentOptimizer
+parent_code = tf.train.Optimizer.compute_gradients.__code__
+GATE_OP = tf.train.Optimizer.GATE_OP  # pylint: disable=invalid-name


 def make_vectorized_optimizer_class(cls):
@@ -90,8 +90,8 @@ def make_vectorized_optimizer_class(cls):

       if var_list is None:
         var_list = (
-            tf.compat.v1.trainable_variables() + tf.compat.v1.get_collection(
-                tf.compat.v1.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
+            tf.trainable_variables() + tf.get_collection(
+                tf.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))

       def process_microbatch(microbatch_loss):
         """Compute clipped grads for one microbatch."""
@@ -58,7 +58,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
           num_microbatches=num_microbatches,
           learning_rate=2.0)

-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([1.0, 2.0], self.evaluate(var0))

@@ -82,7 +82,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
           num_microbatches=1,
           learning_rate=2.0)

-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([0.0, 0.0], self.evaluate(var0))

@@ -105,7 +105,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
           num_microbatches=1,
           learning_rate=2.0)

-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([0.0], self.evaluate(var0))

@@ -121,7 +121,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
   @mock.patch('absl.logging.warning')
   def testComputeGradientsOverrideWarning(self, mock_logging):

-    class SimpleOptimizer(tf.compat.v1.train.Optimizer):
+    class SimpleOptimizer(tf.train.Optimizer):

       def compute_gradients(self):
         return 0
@@ -147,7 +147,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
           noise_multiplier=0.,
           num_microbatches=1,
           learning_rate=1.0)
-      global_step = tf.compat.v1.train.get_global_step()
+      global_step = tf.train.get_global_step()
       train_op = optimizer.minimize(loss=vector_loss, global_step=global_step)
       return tf.estimator.EstimatorSpec(
           mode=mode, loss=scalar_loss, train_op=train_op)
@@ -161,7 +161,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
         true_weights) + true_bias + np.random.normal(
             scale=0.1, size=(200, 1)).astype(np.float32)

-    train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+    train_input_fn = tf.estimator.inputs.numpy_input_fn(
         x={'x': train_data},
         y=train_labels,
         batch_size=20,
@@ -188,7 +188,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
           num_microbatches=1,
           learning_rate=2.0)

-      self.evaluate(tf.compat.v1.global_variables_initializer())
+      self.evaluate(tf.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([0.0], self.evaluate(var0))

@@ -29,7 +29,7 @@ from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp_from_
 from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent
 from tensorflow_privacy.privacy.optimizers import dp_optimizer

-GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
+GradientDescentOptimizer = tf.train.GradientDescentOptimizer

 FLAGS = flags.FLAGS

@@ -130,7 +130,7 @@ def cnn_model_fn(features, labels, mode):
     optimizer = GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)
     training_hooks = []
     opt_loss = scalar_loss
-  global_step = tf.compat.v1.train.get_global_step()
+  global_step = tf.train.get_global_step()
   train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
   # In the following, we pass the mean of the loss (scalar_loss) rather than
   # the vector_loss because tf.estimator requires a scalar loss. This is only
@@ -145,7 +145,7 @@ def cnn_model_fn(features, labels, mode):
   elif mode == tf.estimator.ModeKeys.EVAL:
     eval_metric_ops = {
         'accuracy':
-            tf.compat.v1.metrics.accuracy(
+            tf.metrics.accuracy(
                 labels=labels,
                 predictions=tf.argmax(input=logits, axis=1))
     }
@@ -178,7 +178,7 @@ def load_mnist():


 def main(unused_argv):
-  tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
+  tf.logging.set_verbosity(tf.logging.INFO)
   if FLAGS.dpsgd and FLAGS.batch_size % FLAGS.microbatches != 0:
     raise ValueError('Number of microbatches should divide evenly batch_size')

@@ -190,13 +190,13 @@ def main(unused_argv):
       model_dir=FLAGS.model_dir)

   # Create tf.Estimator input functions for the training and test data.
-  train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  train_input_fn = tf.estimator.inputs.numpy_input_fn(
       x={'x': train_data},
       y=train_labels,
       batch_size=FLAGS.batch_size,
       num_epochs=FLAGS.epochs,
       shuffle=True)
-  eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  eval_input_fn = tf.estimator.inputs.numpy_input_fn(
       x={'x': test_data},
       y=test_labels,
       num_epochs=1,
@@ -26,8 +26,8 @@ from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp
 from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent
 from tensorflow_privacy.privacy.optimizers.dp_optimizer import DPGradientDescentGaussianOptimizer

-GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
-tf.compat.v1.enable_eager_execution()
+GradientDescentOptimizer = tf.train.GradientDescentOptimizer
+tf.enable_eager_execution()

 flags.DEFINE_boolean('dpsgd', True, 'If True, train with DP-SGD. If False, '
                      'train with vanilla SGD.')
@@ -28,7 +28,7 @@ from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp
 from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent
 from tensorflow_privacy.privacy.optimizers.dp_optimizer import DPGradientDescentGaussianOptimizer

-GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
+GradientDescentOptimizer = tf.train.GradientDescentOptimizer

 flags.DEFINE_boolean(
     'dpsgd', True, 'If True, train with DP-SGD. If False, '
@@ -121,7 +121,7 @@ def main(unused_argv):
         learning_rate=FLAGS.learning_rate)
     # Compute vector of per-example loss rather than its mean over a minibatch.
     loss = tf.keras.losses.CategoricalCrossentropy(
-        from_logits=True, reduction=tf.compat.v1.losses.Reduction.NONE)
+        from_logits=True, reduction=tf.losses.Reduction.NONE)
   else:
     optimizer = GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)
     loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
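The Reduction.NONE argument is what makes DP-SGD's microbatching possible: the optimizer needs one loss value per example so it can clip each microbatch's gradient before adding noise, whereas the vanilla branch can use the default mean reduction. A small self-contained sketch of the difference (tensor values are illustrative):

    import tensorflow.compat.v1 as tf

    loss_fn = tf.keras.losses.CategoricalCrossentropy(
        from_logits=True, reduction=tf.losses.Reduction.NONE)

    labels = tf.constant([[1.0, 0.0], [0.0, 1.0]])
    logits = tf.constant([[2.0, 0.5], [0.3, 1.7]])
    vector_loss = loss_fn(labels, logits)      # shape [2]: one loss per example
    scalar_loss = tf.reduce_mean(vector_loss)  # what a vanilla optimizer uses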
@@ -47,7 +47,7 @@ FLAGS = flags.FLAGS

 NUM_TRAIN_EXAMPLES = 60000

-GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
+GradientDescentOptimizer = tf.train.GradientDescentOptimizer


 def compute_epsilon(steps):
@@ -106,7 +106,7 @@ def cnn_model_fn(features, labels, mode):
   else:
     optimizer = GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)
   opt_loss = scalar_loss
-  global_step = tf.compat.v1.train.get_global_step()
+  global_step = tf.train.get_global_step()
   train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
   # In the following, we pass the mean of the loss (scalar_loss) rather than
   # the vector_loss because tf.estimator requires a scalar loss. This is only
@@ -120,7 +120,7 @@ def cnn_model_fn(features, labels, mode):
   elif mode == tf.estimator.ModeKeys.EVAL:
     eval_metric_ops = {
         'accuracy':
-            tf.compat.v1.metrics.accuracy(
+            tf.metrics.accuracy(
                 labels=labels,
                 predictions=tf.argmax(input=logits, axis=1))
     }
@@ -153,7 +153,7 @@ def load_mnist():


 def main(unused_argv):
-  tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
+  tf.logging.set_verbosity(tf.logging.INFO)
   if FLAGS.dpsgd and FLAGS.batch_size % FLAGS.microbatches != 0:
     raise ValueError('Number of microbatches should divide evenly batch_size')

@@ -165,13 +165,13 @@ def main(unused_argv):
       model_dir=FLAGS.model_dir)

   # Create tf.Estimator input functions for the training and test data.
-  train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  train_input_fn = tf.estimator.inputs.numpy_input_fn(
       x={'x': train_data},
       y=train_labels,
       batch_size=FLAGS.batch_size,
       num_epochs=FLAGS.epochs,
       shuffle=True)
-  eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  eval_input_fn = tf.estimator.inputs.numpy_input_fn(
       x={'x': test_data},
       y=test_labels,
       num_epochs=1,