Fix issue with importing tensorflow.compat.v1.

PiperOrigin-RevId: 300175680
A. Unique TensorFlower 2020-03-10 14:16:25 -07:00
parent 6541960e79
commit 2301931725
17 changed files with 87 additions and 88 deletions

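The change is mechanical: every touched file already imports the v1 API at the top (import tensorflow.compat.v1 as tf), so the extra tf.compat.v1. prefix was redundant and, depending on the TensorFlow build, could fail to resolve on the compat module itself. A minimal sketch of the pattern, using a hypothetical counter variable:

    import tensorflow.compat.v1 as tf

    counter = tf.Variable(0)  # hypothetical variable for illustration

    # Before: double-qualified. Here tf already *is* tensorflow.compat.v1,
    # so this relies on the compat module exposing its own .compat.v1.
    update = tf.compat.v1.assign(counter, counter + 1)

    # After: the v1 symbol resolves directly off the aliased import.
    update = tf.assign(counter, counter + 1)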
View file

@@ -111,7 +111,7 @@ class PrivacyLedger(object):
def _do_record_query():
with tf.control_dependencies(
- [tf.compat.v1.assign(self._query_count, self._query_count + 1)]):
+ [tf.assign(self._query_count, self._query_count + 1)]):
return self._query_buffer.append(
[self._sample_count, l2_norm_bound, noise_stddev])
@@ -120,14 +120,14 @@ class PrivacyLedger(object):
def finalize_sample(self):
"""Finalizes sample and records sample ledger entry."""
with tf.control_dependencies([
- tf.compat.v1.assign(self._sample_var, [
+ tf.assign(self._sample_var, [
self._population_size, self._selection_probability,
self._query_count
])
]):
with tf.control_dependencies([
- tf.compat.v1.assign(self._sample_count, self._sample_count + 1),
- tf.compat.v1.assign(self._query_count, 0)
+ tf.assign(self._sample_count, self._sample_count + 1),
+ tf.assign(self._query_count, 0)
]):
return self._sample_buffer.append(self._sample_var)

View file

@@ -25,7 +25,7 @@ from tensorflow_privacy.privacy.dp_query import gaussian_query
from tensorflow_privacy.privacy.dp_query import nested_query
from tensorflow_privacy.privacy.dp_query import test_utils
- tf.compat.v1.enable_eager_execution()
+ tf.enable_eager_execution()
class PrivacyLedgerTest(tf.test.TestCase):
@@ -63,8 +63,8 @@ class PrivacyLedgerTest(tf.test.TestCase):
query, population_size, selection_probability)
# First sample.
- tf.compat.v1.assign(population_size, 10)
- tf.compat.v1.assign(selection_probability, 0.1)
+ tf.assign(population_size, 10)
+ tf.assign(selection_probability, 0.1)
test_utils.run_query(query, [record1, record2])
expected_queries = [[10.0, 0.0]]
@@ -75,8 +75,8 @@ class PrivacyLedgerTest(tf.test.TestCase):
self.assertAllClose(sample_1.queries, expected_queries)
# Second sample.
- tf.compat.v1.assign(population_size, 20)
- tf.compat.v1.assign(selection_probability, 0.2)
+ tf.assign(population_size, 20)
+ tf.assign(selection_probability, 0.2)
test_utils.run_query(query, [record1, record2])
formatted = query.ledger.get_formatted_ledger_eager()
@@ -106,8 +106,8 @@ class PrivacyLedgerTest(tf.test.TestCase):
record2 = [5.0, [1.0, 2.0]]
# First sample.
- tf.compat.v1.assign(population_size, 10)
- tf.compat.v1.assign(selection_probability, 0.1)
+ tf.assign(population_size, 10)
+ tf.assign(selection_probability, 0.1)
test_utils.run_query(query, [record1, record2])
expected_queries = [[4.0, 2.0], [5.0, 1.0]]
@@ -118,8 +118,8 @@ class PrivacyLedgerTest(tf.test.TestCase):
self.assertAllClose(sorted(sample_1.queries), sorted(expected_queries))
# Second sample.
- tf.compat.v1.assign(population_size, 20)
- tf.compat.v1.assign(selection_probability, 0.2)
+ tf.assign(population_size, 20)
+ tf.assign(selection_probability, 0.2)
test_utils.run_query(query, [record1, record2])
formatted = query.ledger.get_formatted_ledger_eager()

View file

@@ -50,10 +50,10 @@ class TensorBuffer(object):
raise ValueError('Shape cannot be scalar.')
shape = [capacity] + shape
- with tf.compat.v1.variable_scope(self._name):
+ with tf.variable_scope(self._name):
# We need to use a placeholder as the initial value to allow resizing.
- self._buffer = tf.compat.v1.Variable(
- initial_value=tf.compat.v1.placeholder_with_default(
+ self._buffer = tf.Variable(
+ initial_value=tf.placeholder_with_default(
tf.zeros(shape, dtype), shape=None),
trainable=False,
name='buffer',
@@ -82,18 +82,18 @@ class TensorBuffer(object):
padding = tf.zeros_like(self._buffer, self._buffer.dtype)
new_buffer = tf.concat([self._buffer, padding], axis=0)
if tf.executing_eagerly():
- with tf.compat.v1.variable_scope(self._name, reuse=True):
- self._buffer = tf.compat.v1.get_variable(
+ with tf.variable_scope(self._name, reuse=True):
+ self._buffer = tf.get_variable(
name='buffer',
dtype=self._dtype,
initializer=new_buffer,
trainable=False)
- return self._buffer, tf.compat.v1.assign(
+ return self._buffer, tf.assign(
self._capacity, tf.multiply(self._capacity, 2))
else:
- return tf.compat.v1.assign(
+ return tf.assign(
self._buffer, new_buffer,
- validate_shape=False), tf.compat.v1.assign(
+ validate_shape=False), tf.assign(
self._capacity, tf.multiply(self._capacity, 2))
update_buffer, update_capacity = tf.cond(
@@ -103,18 +103,18 @@ class TensorBuffer(object):
with tf.control_dependencies([update_buffer, update_capacity]):
with tf.control_dependencies([
- tf.compat.v1.assert_less(
+ tf.assert_less(
self._current_size,
self._capacity,
message='Appending past end of TensorBuffer.'),
- tf.compat.v1.assert_equal(
+ tf.assert_equal(
tf.shape(input=value),
tf.shape(input=self._buffer)[1:],
message='Appending value of inconsistent shape.')
]):
with tf.control_dependencies(
- [tf.compat.v1.assign(self._buffer[self._current_size, :], value)]):
- return tf.compat.v1.assign_add(self._current_size, 1)
+ [tf.assign(self._buffer[self._current_size, :], value)]):
+ return tf.assign_add(self._current_size, 1)
@property
def values(self):

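For context, a hedged sketch of how TensorBuffer is typically driven in eager mode. The constructor arguments are assumed from the snippet above, the name 'demo' and the data are illustrative only:

    import tensorflow.compat.v1 as tf
    from tensorflow_privacy.privacy.analysis import tensor_buffer

    tf.enable_eager_execution()

    # Assumed signature: TensorBuffer(capacity, shape, dtype, name).
    buf = tensor_buffer.TensorBuffer(capacity=2, shape=[2], dtype=tf.float32, name='demo')
    buf.append(tf.constant([1.0, 2.0]))
    buf.append(tf.constant([3.0, 4.0]))
    buf.append(tf.constant([5.0, 6.0]))  # past capacity: the buffer doubles itself

    print(buf.current_size)  # 3
    print(buf.values)        # the three appended rows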
View file

@@ -21,7 +21,7 @@ import tensorflow.compat.v1 as tf
from tensorflow_privacy.privacy.analysis import tensor_buffer
- tf.compat.v1.enable_eager_execution()
+ tf.enable_eager_execution()
class TensorBufferTest(tf.test.TestCase):

View file

@@ -38,7 +38,7 @@ class TensorBufferTest(tf.test.TestCase):
values = my_buffer.values
current_size = my_buffer.current_size
capacity = my_buffer.capacity
- self.evaluate(tf.compat.v1.global_variables_initializer())
+ self.evaluate(tf.global_variables_initializer())
v, cs, cap = sess.run([values, current_size, capacity])
self.assertAllEqual(v, [value1, value2])
@@ -60,7 +60,7 @@ class TensorBufferTest(tf.test.TestCase):
values = my_buffer.values
current_size = my_buffer.current_size
capacity = my_buffer.capacity
- self.evaluate(tf.compat.v1.global_variables_initializer())
+ self.evaluate(tf.global_variables_initializer())
v, cs, cap = sess.run([values, current_size, capacity])
self.assertAllEqual(v, [value1, value2, value3])

View file

@@ -96,7 +96,7 @@ class GaussianSumQuery(dp_query.SumAggregationDPQuery):
return v + tf.random.normal(
tf.shape(input=v), stddev=global_state.stddev)
else:
- random_normal = tf.compat.v1.random_normal_initializer(
+ random_normal = tf.random_normal_initializer(
stddev=global_state.stddev)
def add_noise(v):

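Both branches above add Gaussian noise with the same stddev; only the API differs between eager and graph mode. A hedged sketch of the renamed graph-mode path (again assuming import tensorflow.compat.v1 as tf; the stddev value is illustrative, in the query it comes from global_state.stddev):

    import tensorflow.compat.v1 as tf

    stddev = 0.5  # illustrative value

    # The initializer acts as a reusable noise sampler shaped like its argument.
    random_normal = tf.random_normal_initializer(stddev=stddev)

    def add_noise(v):
      return v + random_normal(tf.shape(input=v))

    noised = add_noise(tf.constant([1.0, 2.0]))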
View file

@@ -59,14 +59,13 @@ class GaussianQueryTest(tf.test.TestCase, parameterized.TestCase):
record2 = tf.constant([4.0, -3.0]) # Not clipped.
l2_norm_clip = tf.Variable(5.0)
- l2_norm_clip_placeholder = tf.compat.v1.placeholder(tf.float32)
- assign_l2_norm_clip = tf.compat.v1.assign(l2_norm_clip,
- l2_norm_clip_placeholder)
+ l2_norm_clip_placeholder = tf.placeholder(tf.float32)
+ assign_l2_norm_clip = tf.assign(l2_norm_clip, l2_norm_clip_placeholder)
query = gaussian_query.GaussianSumQuery(
l2_norm_clip=l2_norm_clip, stddev=0.0)
query_result, _ = test_utils.run_query(query, [record1, record2])
- self.evaluate(tf.compat.v1.global_variables_initializer())
+ self.evaluate(tf.global_variables_initializer())
result = sess.run(query_result)
expected = [1.0, 1.0]
self.assertAllClose(result, expected)

View file

@@ -27,7 +27,7 @@ from tensorflow_privacy.privacy.analysis import privacy_ledger
from tensorflow_privacy.privacy.dp_query import quantile_adaptive_clip_sum_query
from tensorflow_privacy.privacy.dp_query import test_utils
- tf.compat.v1.enable_eager_execution()
+ tf.enable_eager_execution()
class QuantileAdaptiveClipSumQueryTest(
@@ -323,7 +323,7 @@ class QuantileAdaptiveClipSumQueryTest(
global_state = query.initial_global_state()
for t in range(50):
- tf.compat.v1.assign(learning_rate, 1.0 / np.sqrt(t + 1))
+ tf.assign(learning_rate, 1.0 / np.sqrt(t + 1))
_, global_state = test_utils.run_query(query, records, global_state)
actual_clip = global_state.sum_state.l2_norm_clip
@@ -350,8 +350,8 @@ class QuantileAdaptiveClipSumQueryTest(
query, population_size, selection_probability)
# First sample.
- tf.compat.v1.assign(population_size, 10)
- tf.compat.v1.assign(selection_probability, 0.1)
+ tf.assign(population_size, 10)
+ tf.assign(selection_probability, 0.1)
_, global_state = test_utils.run_query(query, [record1, record2])
expected_queries = [[10.0, 10.0], [0.5, 0.0]]
@@ -362,8 +362,8 @@ class QuantileAdaptiveClipSumQueryTest(
self.assertAllClose(sample_1.queries, expected_queries)
# Second sample.
- tf.compat.v1.assign(population_size, 20)
- tf.compat.v1.assign(selection_probability, 0.2)
+ tf.assign(population_size, 20)
+ tf.assign(selection_probability, 0.2)
test_utils.run_query(query, [record1, record2], global_state)
formatted = query.ledger.get_formatted_ledger_eager()

View file

@@ -27,9 +27,9 @@ from tensorflow_privacy.privacy.dp_query import gaussian_query
def make_optimizer_class(cls):
"""Constructs a DP optimizer class from an existing one."""
- parent_code = tf.compat.v1.train.Optimizer.compute_gradients.__code__
+ parent_code = tf.train.Optimizer.compute_gradients.__code__
child_code = cls.compute_gradients.__code__
- GATE_OP = tf.compat.v1.train.Optimizer.GATE_OP # pylint: disable=invalid-name
+ GATE_OP = tf.train.Optimizer.GATE_OP # pylint: disable=invalid-name
if child_code is not parent_code:
logging.warning(
'WARNING: Calling make_optimizer_class() on class %s that overrides '
@@ -146,8 +146,8 @@ def make_optimizer_class(cls):
if var_list is None:
var_list = (
- tf.compat.v1.trainable_variables() + tf.compat.v1.get_collection(
- tf.compat.v1.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
+ tf.trainable_variables() + tf.get_collection(
+ tf.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
sample_state = self._dp_sum_query.initial_sample_state(var_list)
@@ -213,9 +213,9 @@ def make_gaussian_optimizer_class(cls):
return DPGaussianOptimizerClass
- AdagradOptimizer = tf.compat.v1.train.AdagradOptimizer
- AdamOptimizer = tf.compat.v1.train.AdamOptimizer
- GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
+ AdagradOptimizer = tf.train.AdagradOptimizer
+ AdamOptimizer = tf.train.AdamOptimizer
+ GradientDescentOptimizer = tf.train.GradientDescentOptimizer
DPAdagradOptimizer = make_optimizer_class(AdagradOptimizer)
DPAdamOptimizer = make_optimizer_class(AdamOptimizer)

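These aliases are what make_optimizer_class() and make_gaussian_optimizer_class() wrap: the resulting DP optimizer clips and noises each microbatch gradient before averaging. A hedged usage sketch (hyperparameter values are illustrative, not prescriptive; vector_loss is a hypothetical per-example loss tensor):

    import tensorflow.compat.v1 as tf
    from tensorflow_privacy.privacy.optimizers import dp_optimizer

    opt = dp_optimizer.DPGradientDescentGaussianOptimizer(
        l2_norm_clip=1.0,
        noise_multiplier=1.1,
        num_microbatches=1,
        learning_rate=0.15)

    # Used like any v1 optimizer, except minimize() should receive the
    # per-example vector loss rather than its mean:
    # train_op = opt.minimize(loss=vector_loss,
    #                         global_step=tf.train.get_global_step())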
View file

@@ -30,7 +30,7 @@ from tensorflow_privacy.privacy.optimizers import dp_optimizer
class DPOptimizerEagerTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
- tf.compat.v1.enable_eager_execution()
+ tf.enable_eager_execution()
super(DPOptimizerEagerTest, self).setUp()
def _loss_fn(self, val0, val1):
@@ -64,7 +64,7 @@ class DPOptimizerEagerTest(tf.test.TestCase, parameterized.TestCase):
num_microbatches=num_microbatches,
learning_rate=2.0)
- self.evaluate(tf.compat.v1.global_variables_initializer())
+ self.evaluate(tf.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
@@ -89,7 +89,7 @@ class DPOptimizerEagerTest(tf.test.TestCase, parameterized.TestCase):
opt = cls(dp_sum_query, num_microbatches=1, learning_rate=2.0)
- self.evaluate(tf.compat.v1.global_variables_initializer())
+ self.evaluate(tf.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([0.0, 0.0], self.evaluate(var0))
@@ -113,7 +113,7 @@ class DPOptimizerEagerTest(tf.test.TestCase, parameterized.TestCase):
opt = cls(dp_sum_query, num_microbatches=1, learning_rate=2.0)
- self.evaluate(tf.compat.v1.global_variables_initializer())
+ self.evaluate(tf.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([0.0], self.evaluate(var0))

View file

@@ -63,7 +63,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
num_microbatches=num_microbatches,
learning_rate=2.0)
- self.evaluate(tf.compat.v1.global_variables_initializer())
+ self.evaluate(tf.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
@@ -87,7 +87,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
opt = cls(dp_sum_query, num_microbatches=1, learning_rate=2.0)
- self.evaluate(tf.compat.v1.global_variables_initializer())
+ self.evaluate(tf.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([0.0, 0.0], self.evaluate(var0))
@@ -110,7 +110,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
opt = cls(dp_sum_query, num_microbatches=1, learning_rate=2.0)
- self.evaluate(tf.compat.v1.global_variables_initializer())
+ self.evaluate(tf.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([0.0], self.evaluate(var0))
@@ -126,7 +126,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
@mock.patch('absl.logging.warning')
def testComputeGradientsOverrideWarning(self, mock_logging):
- class SimpleOptimizer(tf.compat.v1.train.Optimizer):
+ class SimpleOptimizer(tf.train.Optimizer):
def compute_gradients(self):
return 0
@@ -153,7 +153,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
dp_sum_query,
num_microbatches=1,
learning_rate=1.0)
- global_step = tf.compat.v1.train.get_global_step()
+ global_step = tf.train.get_global_step()
train_op = optimizer.minimize(loss=vector_loss, global_step=global_step)
return tf.estimator.EstimatorSpec(
mode=mode, loss=scalar_loss, train_op=train_op)
@@ -167,7 +167,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
true_weights) + true_bias + np.random.normal(
scale=0.1, size=(200, 1)).astype(np.float32)
- train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+ train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'x': train_data},
y=train_labels,
batch_size=20,
@@ -200,7 +200,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
learning_rate=2.0,
unroll_microbatches=True)
- self.evaluate(tf.compat.v1.global_variables_initializer())
+ self.evaluate(tf.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
@@ -225,7 +225,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
num_microbatches=1,
learning_rate=2.0)
- self.evaluate(tf.compat.v1.global_variables_initializer())
+ self.evaluate(tf.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([0.0], self.evaluate(var0))

View file

@@ -21,11 +21,11 @@ from absl import logging
import tensorflow.compat.v1 as tf
- AdagradOptimizer = tf.compat.v1.train.AdagradOptimizer
- AdamOptimizer = tf.compat.v1.train.AdamOptimizer
- GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
- parent_code = tf.compat.v1.train.Optimizer.compute_gradients.__code__
- GATE_OP = tf.compat.v1.train.Optimizer.GATE_OP # pylint: disable=invalid-name
+ AdagradOptimizer = tf.train.AdagradOptimizer
+ AdamOptimizer = tf.train.AdamOptimizer
+ GradientDescentOptimizer = tf.train.GradientDescentOptimizer
+ parent_code = tf.train.Optimizer.compute_gradients.__code__
+ GATE_OP = tf.train.Optimizer.GATE_OP # pylint: disable=invalid-name
def make_vectorized_optimizer_class(cls):
@@ -90,8 +90,8 @@ def make_vectorized_optimizer_class(cls):
if var_list is None:
var_list = (
- tf.compat.v1.trainable_variables() + tf.compat.v1.get_collection(
- tf.compat.v1.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
+ tf.trainable_variables() + tf.get_collection(
+ tf.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
def process_microbatch(microbatch_loss):
"""Compute clipped grads for one microbatch."""

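The vectorized variant maps the per-microbatch clipping over a batch of tensors instead of a Python loop. A hedged sketch of the clipping idea with toy per-microbatch gradients (not the file's exact code, and assuming tf.vectorized_map is available through the compat import):

    import tensorflow.compat.v1 as tf

    l2_norm_clip = 1.0
    per_microbatch_grads = tf.constant([[3.0, 4.0],    # norm 5.0 -> scaled down
                                        [0.3, 0.4]])   # norm 0.5 -> unchanged

    def clip_one(g):
      divisor = tf.maximum(tf.norm(g) / l2_norm_clip, 1.0)
      return g / divisor

    clipped = tf.vectorized_map(clip_one, per_microbatch_grads)
    summed = tf.reduce_sum(clipped, axis=0)  # noise is added to this sum in DP-SGD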
View file

@@ -58,7 +58,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
num_microbatches=num_microbatches,
learning_rate=2.0)
- self.evaluate(tf.compat.v1.global_variables_initializer())
+ self.evaluate(tf.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
@@ -82,7 +82,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
num_microbatches=1,
learning_rate=2.0)
- self.evaluate(tf.compat.v1.global_variables_initializer())
+ self.evaluate(tf.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([0.0, 0.0], self.evaluate(var0))
@@ -105,7 +105,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
num_microbatches=1,
learning_rate=2.0)
- self.evaluate(tf.compat.v1.global_variables_initializer())
+ self.evaluate(tf.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([0.0], self.evaluate(var0))
@@ -121,7 +121,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
@mock.patch('absl.logging.warning')
def testComputeGradientsOverrideWarning(self, mock_logging):
- class SimpleOptimizer(tf.compat.v1.train.Optimizer):
+ class SimpleOptimizer(tf.train.Optimizer):
def compute_gradients(self):
return 0
@@ -147,7 +147,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
noise_multiplier=0.,
num_microbatches=1,
learning_rate=1.0)
- global_step = tf.compat.v1.train.get_global_step()
+ global_step = tf.train.get_global_step()
train_op = optimizer.minimize(loss=vector_loss, global_step=global_step)
return tf.estimator.EstimatorSpec(
mode=mode, loss=scalar_loss, train_op=train_op)
@@ -161,7 +161,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
true_weights) + true_bias + np.random.normal(
scale=0.1, size=(200, 1)).astype(np.float32)
- train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+ train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'x': train_data},
y=train_labels,
batch_size=20,
@@ -188,7 +188,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
num_microbatches=1,
learning_rate=2.0)
- self.evaluate(tf.compat.v1.global_variables_initializer())
+ self.evaluate(tf.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([0.0], self.evaluate(var0))

View file

@@ -29,7 +29,7 @@ from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp_from_
from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent
from tensorflow_privacy.privacy.optimizers import dp_optimizer
- GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
+ GradientDescentOptimizer = tf.train.GradientDescentOptimizer
FLAGS = flags.FLAGS
@@ -130,7 +130,7 @@ def cnn_model_fn(features, labels, mode):
optimizer = GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)
training_hooks = []
opt_loss = scalar_loss
- global_step = tf.compat.v1.train.get_global_step()
+ global_step = tf.train.get_global_step()
train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
# In the following, we pass the mean of the loss (scalar_loss) rather than
# the vector_loss because tf.estimator requires a scalar loss. This is only
@@ -145,7 +145,7 @@ def cnn_model_fn(features, labels, mode):
elif mode == tf.estimator.ModeKeys.EVAL:
eval_metric_ops = {
'accuracy':
- tf.compat.v1.metrics.accuracy(
+ tf.metrics.accuracy(
labels=labels,
predictions=tf.argmax(input=logits, axis=1))
}
@@ -178,7 +178,7 @@ def load_mnist():
def main(unused_argv):
- tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
+ tf.logging.set_verbosity(tf.logging.INFO)
if FLAGS.dpsgd and FLAGS.batch_size % FLAGS.microbatches != 0:
raise ValueError('Number of microbatches should divide evenly batch_size')
@@ -190,13 +190,13 @@ def main(unused_argv):
model_dir=FLAGS.model_dir)
# Create tf.Estimator input functions for the training and test data.
- train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+ train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'x': train_data},
y=train_labels,
batch_size=FLAGS.batch_size,
num_epochs=FLAGS.epochs,
shuffle=True)
- eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+ eval_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'x': test_data},
y=test_labels,
num_epochs=1,

View file

@@ -26,8 +26,8 @@ from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp
from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent
from tensorflow_privacy.privacy.optimizers.dp_optimizer import DPGradientDescentGaussianOptimizer
- GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
- tf.compat.v1.enable_eager_execution()
+ GradientDescentOptimizer = tf.train.GradientDescentOptimizer
+ tf.enable_eager_execution()
flags.DEFINE_boolean('dpsgd', True, 'If True, train with DP-SGD. If False, '
'train with vanilla SGD.')

View file

@@ -28,7 +28,7 @@ from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp
from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent
from tensorflow_privacy.privacy.optimizers.dp_optimizer import DPGradientDescentGaussianOptimizer
- GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
+ GradientDescentOptimizer = tf.train.GradientDescentOptimizer
flags.DEFINE_boolean(
'dpsgd', True, 'If True, train with DP-SGD. If False, '
@@ -121,7 +121,7 @@ def main(unused_argv):
learning_rate=FLAGS.learning_rate)
# Compute vector of per-example loss rather than its mean over a minibatch.
loss = tf.keras.losses.CategoricalCrossentropy(
- from_logits=True, reduction=tf.compat.v1.losses.Reduction.NONE)
+ from_logits=True, reduction=tf.losses.Reduction.NONE)
else:
optimizer = GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)
loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)

View file

@@ -47,7 +47,7 @@ FLAGS = flags.FLAGS
NUM_TRAIN_EXAMPLES = 60000
- GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
+ GradientDescentOptimizer = tf.train.GradientDescentOptimizer
def compute_epsilon(steps):
@@ -106,7 +106,7 @@ def cnn_model_fn(features, labels, mode):
else:
optimizer = GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)
opt_loss = scalar_loss
- global_step = tf.compat.v1.train.get_global_step()
+ global_step = tf.train.get_global_step()
train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
# In the following, we pass the mean of the loss (scalar_loss) rather than
# the vector_loss because tf.estimator requires a scalar loss. This is only
@@ -120,7 +120,7 @@ def cnn_model_fn(features, labels, mode):
elif mode == tf.estimator.ModeKeys.EVAL:
eval_metric_ops = {
'accuracy':
- tf.compat.v1.metrics.accuracy(
+ tf.metrics.accuracy(
labels=labels,
predictions=tf.argmax(input=logits, axis=1))
}
@@ -153,7 +153,7 @@ def load_mnist():
def main(unused_argv):
- tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
+ tf.logging.set_verbosity(tf.logging.INFO)
if FLAGS.dpsgd and FLAGS.batch_size % FLAGS.microbatches != 0:
raise ValueError('Number of microbatches should divide evenly batch_size')
@@ -165,13 +165,13 @@ def main(unused_argv):
model_dir=FLAGS.model_dir)
# Create tf.Estimator input functions for the training and test data.
- train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+ train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'x': train_data},
y=train_labels,
batch_size=FLAGS.batch_size,
num_epochs=FLAGS.epochs,
shuffle=True)
- eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+ eval_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'x': test_data},
y=test_labels,
num_epochs=1,