diff --git a/research/GDP_2019/adult_tutorial.py b/research/GDP_2019/adult_tutorial.py
index 437770b..36741a0 100644
--- a/research/GDP_2019/adult_tutorial.py
+++ b/research/GDP_2019/adult_tutorial.py
@@ -26,6 +26,8 @@ import pandas as pd
 from sklearn.model_selection import KFold
 import tensorflow as tf
+from tensorflow import estimator as tf_estimator
+from tensorflow.compat.v1 import estimator as tf_compat_v1_estimator
 from tensorflow_privacy.privacy.analysis.gdp_accountant import compute_eps_poisson
 from tensorflow_privacy.privacy.analysis.gdp_accountant import compute_mu_poisson
 from tensorflow_privacy.privacy.optimizers import dp_optimizer
@@ -61,7 +63,7 @@ def nn_model_fn(features, labels, mode):
   scalar_loss = tf.reduce_mean(vector_loss)
 
   # Configure the training op (for TRAIN mode).
-  if mode == tf.estimator.ModeKeys.TRAIN:
+  if mode == tf_estimator.ModeKeys.TRAIN:
     if FLAGS.dpsgd:
       # Use DP version of GradientDescentOptimizer. Other optimizers are
       # available in dp_optimizer. Most optimizers inheriting from
@@ -83,17 +85,17 @@ def nn_model_fn(features, labels, mode):
     # the vector_loss because tf.estimator requires a scalar loss. This is only
     # used for evaluation and debugging by tf.estimator. The actual loss being
     # minimized is opt_loss defined above and passed to optimizer.minimize().
-    return tf.estimator.EstimatorSpec(
+    return tf_estimator.EstimatorSpec(
         mode=mode, loss=scalar_loss, train_op=train_op)
 
   # Add evaluation metrics (for EVAL mode).
-  if mode == tf.estimator.ModeKeys.EVAL:
+  if mode == tf_estimator.ModeKeys.EVAL:
     eval_metric_ops = {
         'accuracy':
             tf.compat.v1.metrics.accuracy(
                 labels=labels,
                 predictions=tf.argmax(input=logits, axis=1))
     }
-    return tf.estimator.EstimatorSpec(
+    return tf_estimator.EstimatorSpec(
        mode=mode, loss=scalar_loss, eval_metric_ops=eval_metric_ops)
   return None
@@ -123,11 +125,11 @@ def main(unused_argv):
   train_data, train_labels, test_data, test_labels = load_adult()
 
   # Instantiate the tf.Estimator.
-  adult_classifier = tf.compat.v1.estimator.Estimator(
+  adult_classifier = tf_compat_v1_estimator.Estimator(
       model_fn=nn_model_fn, model_dir=FLAGS.model_dir)
 
   # Create tf.Estimator input functions for the training and test data.
-  eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  eval_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
      x={'x': test_data}, y=test_labels, num_epochs=1, shuffle=False)
 
   # Training loop.
@@ -141,7 +143,7 @@ def main(unused_argv):
     global microbatches
     microbatches = len(subsampling)
 
-    train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+    train_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
        x={'x': train_data[subsampling]},
        y=train_labels[subsampling],
        batch_size=len(subsampling),
diff --git a/research/GDP_2019/imdb_tutorial.py b/research/GDP_2019/imdb_tutorial.py
index 4281439..348321a 100644
--- a/research/GDP_2019/imdb_tutorial.py
+++ b/research/GDP_2019/imdb_tutorial.py
@@ -25,6 +25,8 @@ from keras.preprocessing import sequence
 import numpy as np
 import tensorflow as tf
+from tensorflow import estimator as tf_estimator
+from tensorflow.compat.v1 import estimator as tf_compat_v1_estimator
 from tensorflow_privacy.privacy.analysis.gdp_accountant import compute_eps_poisson
 from tensorflow_privacy.privacy.analysis.gdp_accountant import compute_mu_poisson
 from tensorflow_privacy.privacy.optimizers import dp_optimizer
@@ -65,7 +67,7 @@ def nn_model_fn(features, labels, mode):
   scalar_loss = tf.reduce_mean(vector_loss)
 
   # Configure the training op (for TRAIN mode).
-  if mode == tf.estimator.ModeKeys.TRAIN:
+  if mode == tf_estimator.ModeKeys.TRAIN:
     if FLAGS.dpsgd:
       # Use DP version of GradientDescentOptimizer. Other optimizers are
       # available in dp_optimizer. Most optimizers inheriting from
@@ -88,17 +90,17 @@ def nn_model_fn(features, labels, mode):
     # the vector_loss because tf.estimator requires a scalar loss. This is only
     # used for evaluation and debugging by tf.estimator. The actual loss being
     # minimized is opt_loss defined above and passed to optimizer.minimize().
-    return tf.estimator.EstimatorSpec(
+    return tf_estimator.EstimatorSpec(
         mode=mode, loss=scalar_loss, train_op=train_op)
 
   # Add evaluation metrics (for EVAL mode).
-  if mode == tf.estimator.ModeKeys.EVAL:
+  if mode == tf_estimator.ModeKeys.EVAL:
     eval_metric_ops = {
         'accuracy':
             tf.compat.v1.metrics.accuracy(
                 labels=labels,
                 predictions=tf.argmax(input=logits, axis=1))
     }
-    return tf.estimator.EstimatorSpec(
+    return tf_estimator.EstimatorSpec(
        mode=mode, loss=scalar_loss, eval_metric_ops=eval_metric_ops)
   return None
@@ -122,11 +124,11 @@ def main(unused_argv):
   train_data, train_labels, test_data, test_labels = load_imdb()
 
   # Instantiate the tf.Estimator.
-  imdb_classifier = tf.estimator.Estimator(
+  imdb_classifier = tf_estimator.Estimator(
       model_fn=nn_model_fn, model_dir=FLAGS.model_dir)
 
   # Create tf.Estimator input functions for the training and test data.
-  eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  eval_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
      x={'x': test_data}, y=test_labels, num_epochs=1, shuffle=False)
 
   # Training loop.
@@ -141,7 +143,7 @@ def main(unused_argv):
     global microbatches
     microbatches = len(subsampling)
 
-    train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+    train_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
        x={'x': train_data[subsampling]},
        y=train_labels[subsampling],
        batch_size=len(subsampling),
diff --git a/tensorflow_privacy/privacy/estimators/v1/BUILD b/tensorflow_privacy/privacy/estimators/v1/BUILD
index 0522c75..3195cd8 100644
--- a/tensorflow_privacy/privacy/estimators/v1/BUILD
+++ b/tensorflow_privacy/privacy/estimators/v1/BUILD
@@ -23,7 +23,10 @@ py_library(
         "dnn.py",
     ],
     srcs_version = "PY3",
-    deps = [":head"],
+    deps = [
+        ":head",
+        "//third_party/py/tensorflow:tensorflow_estimator",
+    ],
 )
 
 py_test(
@@ -36,6 +39,7 @@ py_test(
         ":head",
         "//tensorflow_privacy/privacy/estimators:test_utils",
         "//tensorflow_privacy/privacy/optimizers:dp_optimizer",
+        "//third_party/py/tensorflow:tensorflow_estimator",
     ],
 )
diff --git a/tensorflow_privacy/privacy/estimators/v1/dnn.py b/tensorflow_privacy/privacy/estimators/v1/dnn.py
index 6cc09f6..143d793 100644
--- a/tensorflow_privacy/privacy/estimators/v1/dnn.py
+++ b/tensorflow_privacy/privacy/estimators/v1/dnn.py
@@ -16,12 +16,13 @@
 
 import tensorflow as tf
+from tensorflow import estimator as tf_estimator
 from tensorflow_privacy.privacy.estimators.v1 import head as head_lib
 from tensorflow_estimator.python.estimator import estimator
 from tensorflow_estimator.python.estimator.canned import dnn
 
 
-class DNNClassifier(tf.estimator.Estimator):
+class DNNClassifier(tf_estimator.Estimator):
   """DP version of `tf.compat.v1.estimator.DNNClassifier`."""
 
   def __init__(
diff --git a/tensorflow_privacy/privacy/estimators/v1/head_test.py b/tensorflow_privacy/privacy/estimators/v1/head_test.py
index 5238912..3536e09 100644
--- a/tensorflow_privacy/privacy/estimators/v1/head_test.py
+++ b/tensorflow_privacy/privacy/estimators/v1/head_test.py
@@ -15,6 +15,7 @@ from absl.testing import parameterized
 import tensorflow as tf
+from tensorflow import estimator as tf_estimator
 from tensorflow_privacy.privacy.estimators import test_utils
 from tensorflow_privacy.privacy.estimators.v1 import head as head_lib
 from tensorflow_privacy.privacy.optimizers.dp_optimizer import DPGradientDescentGaussianOptimizer
@@ -69,7 +70,7 @@ class DPHeadTest(tf.test.TestCase, parameterized.TestCase):
         noise_multiplier=0.0,
         num_microbatches=2)
     model_fn = make_model_fn(head, optimizer, feature_columns)
-    classifier = tf.estimator.Estimator(model_fn=model_fn)
+    classifier = tf_estimator.Estimator(model_fn=model_fn)
 
     classifier.train(
         input_fn=test_utils.make_input_fn(train_features, train_labels, True),
diff --git a/tensorflow_privacy/privacy/optimizers/BUILD b/tensorflow_privacy/privacy/optimizers/BUILD
index e6ca5ce..17d6742 100644
--- a/tensorflow_privacy/privacy/optimizers/BUILD
+++ b/tensorflow_privacy/privacy/optimizers/BUILD
@@ -53,6 +53,8 @@ py_test(
     deps = [
         ":dp_optimizer",
         "//tensorflow_privacy/privacy/dp_query:gaussian_query",
+        "//third_party/py/tensorflow:tensorflow_compat_v1_estimator",
+        "//third_party/py/tensorflow:tensorflow_estimator",
     ],
 )
 
@@ -62,7 +64,11 @@ py_test(
     srcs = ["dp_optimizer_vectorized_test.py"],
     python_version = "PY3",
     srcs_version = "PY3",
-    deps = [":dp_optimizer_vectorized"],
+    deps = [
+        ":dp_optimizer_vectorized",
+        "//third_party/py/tensorflow:tensorflow_compat_v1_estimator",
+        "//third_party/py/tensorflow:tensorflow_estimator",
+    ],
 )
 
 py_test(
@@ -86,5 +92,6 @@ py_test(
     deps = [
         "//tensorflow_privacy/privacy/optimizers:dp_optimizer_keras",
         "//tensorflow_privacy/privacy/optimizers:dp_optimizer_keras_vectorized",
+        "//third_party/py/tensorflow:tensorflow_estimator",
     ],
 )
diff --git a/tensorflow_privacy/privacy/optimizers/dp_optimizer_keras_test.py b/tensorflow_privacy/privacy/optimizers/dp_optimizer_keras_test.py
index 718684b..8417750 100644
--- a/tensorflow_privacy/privacy/optimizers/dp_optimizer_keras_test.py
+++ b/tensorflow_privacy/privacy/optimizers/dp_optimizer_keras_test.py
@@ -15,6 +15,7 @@ from absl.testing import parameterized
 import numpy as np
 import tensorflow as tf
+from tensorflow import estimator as tf_estimator
 from tensorflow_privacy.privacy.optimizers import dp_optimizer_keras
 from tensorflow_privacy.privacy.optimizers import dp_optimizer_keras_vectorized
@@ -227,7 +228,7 @@ class DPOptimizerGetGradientsTest(tf.test.TestCase, parameterized.TestCase):
       train_op = tf.group(
           optimizer.get_updates(loss=vector_loss, params=params),
           [tf.compat.v1.assign_add(global_step, 1)])
-      return tf.estimator.EstimatorSpec(
+      return tf_estimator.EstimatorSpec(
           mode=mode, loss=scalar_loss, train_op=train_op)
 
     return linear_model_fn
@@ -249,7 +250,7 @@ class DPOptimizerGetGradientsTest(tf.test.TestCase, parameterized.TestCase):
   def testBaseline(self, cls, num_microbatches):
     """Tests that DP optimizers work with tf.estimator."""
 
-    linear_regressor = tf.estimator.Estimator(
+    linear_regressor = tf_estimator.Estimator(
         model_fn=self._make_linear_model_fn(cls, 100.0, 0.0, num_microbatches,
                                             0.05))
 
@@ -293,7 +294,7 @@ class DPOptimizerGetGradientsTest(tf.test.TestCase, parameterized.TestCase):
       return tf.data.Dataset.from_tensor_slices(
          (train_data, train_labels)).batch(1)
 
-    unclipped_linear_regressor = tf.estimator.Estimator(
+    unclipped_linear_regressor = tf_estimator.Estimator(
         model_fn=self._make_linear_model_fn(cls, 1.0e9, 0.0, num_microbatches,
                                             1.0))
     unclipped_linear_regressor.train(input_fn=train_input_fn, steps=1)
 
@@ -302,7 +303,7 @@ class DPOptimizerGetGradientsTest(tf.test.TestCase, parameterized.TestCase):
     bias_value = unclipped_linear_regressor.get_variable_value('dense/bias')
     global_norm = np.linalg.norm(np.concatenate((kernel_value, [bias_value])))
 
-    clipped_linear_regressor = tf.estimator.Estimator(
+    clipped_linear_regressor = tf_estimator.Estimator(
         model_fn=self._make_linear_model_fn(cls, 1.0, 0.0, num_microbatches,
                                             1.0))
     clipped_linear_regressor.train(input_fn=train_input_fn, steps=1)
 
@@ -339,7 +340,7 @@ class DPOptimizerGetGradientsTest(tf.test.TestCase, parameterized.TestCase):
                      num_microbatches):
     """Tests that DP optimizers work with tf.estimator."""
 
-    linear_regressor = tf.estimator.Estimator(
+    linear_regressor = tf_estimator.Estimator(
         model_fn=self._make_linear_model_fn(
             cls,
             l2_norm_clip,
diff --git a/tensorflow_privacy/privacy/optimizers/dp_optimizer_test.py b/tensorflow_privacy/privacy/optimizers/dp_optimizer_test.py
index cfa9b89..3b1c04f 100644
--- a/tensorflow_privacy/privacy/optimizers/dp_optimizer_test.py
+++ b/tensorflow_privacy/privacy/optimizers/dp_optimizer_test.py
@@ -18,6 +18,8 @@ import unittest
 from absl.testing import parameterized
 import numpy as np
 import tensorflow as tf
+from tensorflow import estimator as tf_estimator
+from tensorflow.compat.v1 import estimator as tf_compat_v1_estimator
 from tensorflow_privacy.privacy.dp_query import gaussian_query
 from tensorflow_privacy.privacy.optimizers import dp_optimizer
@@ -205,10 +207,10 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
           dp_sum_query, num_microbatches=1, learning_rate=1.0)
       global_step = tf.compat.v1.train.get_global_step()
       train_op = optimizer.minimize(loss=vector_loss, global_step=global_step)
-      return tf.estimator.EstimatorSpec(
+      return tf_estimator.EstimatorSpec(
           mode=mode, loss=scalar_loss, train_op=train_op)
 
-    linear_regressor = tf.estimator.Estimator(model_fn=linear_model_fn)
+    linear_regressor = tf_estimator.Estimator(model_fn=linear_model_fn)
     true_weights = np.array([[-5], [4], [3], [2]]).astype(np.float32)
     true_bias = 6.0
     train_data = np.random.normal(scale=3.0, size=(200, 4)).astype(np.float32)
@@ -217,7 +219,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
                                true_weights) + true_bias + np.random.normal(
                                    scale=0.1, size=(200, 1)).astype(np.float32)
 
-    train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+    train_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
         x={'x': train_data},
         y=train_labels,
         batch_size=20,
diff --git a/tensorflow_privacy/privacy/optimizers/dp_optimizer_vectorized_test.py b/tensorflow_privacy/privacy/optimizers/dp_optimizer_vectorized_test.py
index 1e6ab9e..17119f7 100644
--- a/tensorflow_privacy/privacy/optimizers/dp_optimizer_vectorized_test.py
+++ b/tensorflow_privacy/privacy/optimizers/dp_optimizer_vectorized_test.py
@@ -17,6 +17,8 @@ import unittest
 from absl.testing import parameterized
 import numpy as np
 import tensorflow as tf
+from tensorflow import estimator as tf_estimator
+from tensorflow.compat.v1 import estimator as tf_compat_v1_estimator
 from tensorflow_privacy.privacy.optimizers import dp_optimizer_vectorized
 from tensorflow_privacy.privacy.optimizers.dp_optimizer_vectorized import VectorizedDPAdagrad
 from tensorflow_privacy.privacy.optimizers.dp_optimizer_vectorized import VectorizedDPAdam
@@ -144,10 +146,10 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
           learning_rate=1.0)
       global_step = tf.compat.v1.train.get_global_step()
       train_op = optimizer.minimize(loss=vector_loss, global_step=global_step)
-      return tf.estimator.EstimatorSpec(
+      return tf_estimator.EstimatorSpec(
           mode=mode, loss=scalar_loss, train_op=train_op)
 
-    linear_regressor = tf.estimator.Estimator(model_fn=linear_model_fn)
+    linear_regressor = tf_estimator.Estimator(model_fn=linear_model_fn)
     true_weights = np.array([[-5], [4], [3], [2]]).astype(np.float32)
     true_bias = 6.0
     train_data = np.random.normal(scale=3.0, size=(200, 4)).astype(np.float32)
@@ -156,7 +158,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
                                true_weights) + true_bias + np.random.normal(
                                    scale=0.1, size=(200, 1)).astype(np.float32)
 
-    train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+    train_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
         x={'x': train_data},
         y=train_labels,
         batch_size=20,
diff --git a/tensorflow_privacy/privacy/privacy_tests/membership_inference_attack/BUILD b/tensorflow_privacy/privacy/privacy_tests/membership_inference_attack/BUILD
index 1044742..6f4ffbf 100644
--- a/tensorflow_privacy/privacy/privacy_tests/membership_inference_attack/BUILD
+++ b/tensorflow_privacy/privacy/privacy_tests/membership_inference_attack/BUILD
@@ -113,6 +113,8 @@ py_test(
     deps = [
         ":membership_inference_attack",
         ":tf_estimator_evaluation",
+        "//third_party/py/tensorflow:tensorflow_compat_v1_estimator",
+        "//third_party/py/tensorflow:tensorflow_estimator",
     ],
 )
 
@@ -124,6 +126,7 @@ py_library(
     deps = [
         ":membership_inference_attack",
         ":utils",
         ":utils_tensorboard",
+        "//third_party/py/tensorflow:tensorflow_estimator",
     ],
 )
 
@@ -135,6 +138,8 @@ py_binary(
     deps = [
         ":membership_inference_attack",
         ":tf_estimator_evaluation",
+        "//third_party/py/tensorflow:tensorflow_compat_v1_estimator",
+        "//third_party/py/tensorflow:tensorflow_estimator",
     ],
 )
diff --git a/tensorflow_privacy/privacy/privacy_tests/membership_inference_attack/tf_estimator_evaluation.py b/tensorflow_privacy/privacy/privacy_tests/membership_inference_attack/tf_estimator_evaluation.py
index 1b72086..bf13558 100644
--- a/tensorflow_privacy/privacy/privacy_tests/membership_inference_attack/tf_estimator_evaluation.py
+++ b/tensorflow_privacy/privacy/privacy_tests/membership_inference_attack/tf_estimator_evaluation.py
@@ -19,6 +19,7 @@ from typing import Iterable
 from absl import logging
 import numpy as np
 import tensorflow as tf
+from tensorflow import estimator as tf_estimator
 from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack import data_structures
 from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack import membership_inference_attack as mia
 from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack import utils
@@ -47,7 +48,7 @@ def calculate_losses(estimator, input_fn, labels):
   return pred, loss
 
 
-class MembershipInferenceTrainingHook(tf.estimator.SessionRunHook):
+class MembershipInferenceTrainingHook(tf_estimator.SessionRunHook):
   """Training hook to perform membership inference attack on epoch end."""
 
   def __init__(self,
diff --git a/tensorflow_privacy/privacy/privacy_tests/membership_inference_attack/tf_estimator_evaluation_example.py b/tensorflow_privacy/privacy/privacy_tests/membership_inference_attack/tf_estimator_evaluation_example.py
index 0763e91..1481af5 100644
--- a/tensorflow_privacy/privacy/privacy_tests/membership_inference_attack/tf_estimator_evaluation_example.py
+++ b/tensorflow_privacy/privacy/privacy_tests/membership_inference_attack/tf_estimator_evaluation_example.py
@@ -18,6 +18,8 @@ from absl import flags
 from absl import logging
 import numpy as np
 import tensorflow as tf
+from tensorflow import estimator as tf_estimator
+from tensorflow.compat.v1 import estimator as tf_compat_v1_estimator
 from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import AttackType
 from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import get_flattened_attack_metrics
 from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import SlicingSpec
@@ -46,34 +48,34 @@ def small_cnn_fn(features, labels, mode):
   y = tf.keras.layers.Dense(64, activation='relu')(y)
   logits = tf.keras.layers.Dense(10)(y)
 
-  if mode != tf.estimator.ModeKeys.PREDICT:
+  if mode != tf_estimator.ModeKeys.PREDICT:
     vector_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
         labels=labels, logits=logits)
     scalar_loss = tf.reduce_mean(input_tensor=vector_loss)
 
   # Configure the training op (for TRAIN mode).
-  if mode == tf.estimator.ModeKeys.TRAIN:
+  if mode == tf_estimator.ModeKeys.TRAIN:
     optimizer = tf.train.MomentumOptimizer(
         learning_rate=FLAGS.learning_rate, momentum=0.9)
     global_step = tf.compat.v1.train.get_global_step()
     train_op = optimizer.minimize(loss=scalar_loss, global_step=global_step)
-    return tf.estimator.EstimatorSpec(
+    return tf_estimator.EstimatorSpec(
        mode=mode, loss=scalar_loss, train_op=train_op)
 
   # Add evaluation metrics (for EVAL mode).
-  elif mode == tf.estimator.ModeKeys.EVAL:
+  elif mode == tf_estimator.ModeKeys.EVAL:
     eval_metric_ops = {
         'accuracy':
             tf.metrics.accuracy(
                 labels=labels, predictions=tf.argmax(input=logits, axis=1))
     }
-    return tf.estimator.EstimatorSpec(
+    return tf_estimator.EstimatorSpec(
        mode=mode, loss=scalar_loss, eval_metric_ops=eval_metric_ops)
 
   # Output the prediction probability (for PREDICT mode).
-  elif mode == tf.estimator.ModeKeys.PREDICT:
+  elif mode == tf_estimator.ModeKeys.PREDICT:
     predictions = tf.nn.softmax(logits)
-    return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
+    return tf_estimator.EstimatorSpec(mode=mode, predictions=predictions)
 
 
 def load_cifar10():
@@ -97,13 +99,13 @@ def main(unused_argv):
   x_train, y_train, x_test, y_test = load_cifar10()
 
   # Instantiate the tf.Estimator.
-  classifier = tf.estimator.Estimator(
+  classifier = tf_estimator.Estimator(
       model_fn=small_cnn_fn, model_dir=FLAGS.model_dir)
 
   # A function to construct input_fn given (data, label), to be used by the
   # membership inference training hook.
   def input_fn_constructor(x, y):
-    return tf.compat.v1.estimator.inputs.numpy_input_fn(
+    return tf_compat_v1_estimator.inputs.numpy_input_fn(
        x={'x': x}, y=y, shuffle=False)
 
   # Get hook for membership inference attack.
@@ -118,13 +120,13 @@ def main(unused_argv):
       tensorboard_merge_classifiers=FLAGS.tensorboard_merge_classifiers)
 
   # Create tf.Estimator input functions for the training and test data.
-  train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  train_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
      x={'x': x_train},
      y=y_train,
      batch_size=FLAGS.batch_size,
      num_epochs=FLAGS.epochs,
      shuffle=True)
-  eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  eval_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
      x={'x': x_test}, y=y_test, num_epochs=1, shuffle=False)
 
   # Training loop.
diff --git a/tensorflow_privacy/privacy/privacy_tests/membership_inference_attack/tf_estimator_evaluation_test.py b/tensorflow_privacy/privacy/privacy_tests/membership_inference_attack/tf_estimator_evaluation_test.py
index 515ba12..66d337e 100644
--- a/tensorflow_privacy/privacy/privacy_tests/membership_inference_attack/tf_estimator_evaluation_test.py
+++ b/tensorflow_privacy/privacy/privacy_tests/membership_inference_attack/tf_estimator_evaluation_test.py
@@ -15,6 +15,8 @@ from absl.testing import absltest
 import numpy as np
 import tensorflow as tf
+from tensorflow import estimator as tf_estimator
+from tensorflow.compat.v1 import estimator as tf_compat_v1_estimator
 from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack import data_structures
 from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack import tf_estimator_evaluation
@@ -44,18 +46,18 @@ class UtilsTest(absltest.TestCase):
       logits = tf.keras.layers.Dense(self.nclass)(input_layer)
 
       # Define the PREDICT mode becasue we only need that
-      if mode == tf.estimator.ModeKeys.PREDICT:
+      if mode == tf_estimator.ModeKeys.PREDICT:
         predictions = tf.nn.softmax(logits)
-        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
+        return tf_estimator.EstimatorSpec(mode=mode, predictions=predictions)
 
     # Define the classifier, input_fn for training and test data
-    self.classifier = tf.estimator.Estimator(model_fn=model_fn)
-    self.input_fn_train = tf.compat.v1.estimator.inputs.numpy_input_fn(
+    self.classifier = tf_estimator.Estimator(model_fn=model_fn)
+    self.input_fn_train = tf_compat_v1_estimator.inputs.numpy_input_fn(
        x={'x': self.train_data},
        y=self.train_labels,
        num_epochs=1,
        shuffle=False)
-    self.input_fn_test = tf.compat.v1.estimator.inputs.numpy_input_fn(
+    self.input_fn_test = tf_compat_v1_estimator.inputs.numpy_input_fn(
        x={'x': self.test_data},
        y=self.test_labels,
        num_epochs=1,
@@ -94,7 +96,7 @@ class UtilsTest(absltest.TestCase):
     """Test the attack on the final models."""
 
     def input_fn_constructor(x, y):
-      return tf.compat.v1.estimator.inputs.numpy_input_fn(
+      return tf_compat_v1_estimator.inputs.numpy_input_fn(
          x={'x': x}, y=y, shuffle=False)
 
     results = tf_estimator_evaluation.run_attack_on_tf_estimator_model(
diff --git a/tutorials/BUILD b/tutorials/BUILD
index ae8ff2f..4ab0c98 100644
--- a/tutorials/BUILD
+++ b/tutorials/BUILD
@@ -17,6 +17,7 @@ py_binary(
         ":mnist_dpsgd_tutorial_common",
         "//tensorflow_privacy/privacy/analysis:compute_dp_sgd_privacy_lib",
         "//tensorflow_privacy/privacy/optimizers:dp_optimizer",
+        "//third_party/py/tensorflow:tensorflow_estimator",
     ],
 )
 
@@ -61,6 +62,8 @@ py_binary(
     deps = [
         "//tensorflow_privacy/privacy/analysis:rdp_accountant",
         "//tensorflow_privacy/privacy/optimizers:dp_optimizer_vectorized",
+        "//third_party/py/tensorflow:tensorflow_compat_v1_estimator",
+        "//third_party/py/tensorflow:tensorflow_estimator",
     ],
 )
 
@@ -73,6 +76,7 @@ py_binary(
         ":mnist_dpsgd_tutorial_common",
         "//tensorflow_privacy/privacy/analysis:compute_dp_sgd_privacy_lib",
         "//tensorflow_privacy/privacy/optimizers:dp_optimizer",
+        "//third_party/py/tensorflow:tensorflow_estimator",
     ],
 )
 
@@ -84,6 +88,8 @@ py_binary(
     deps = [
         "//tensorflow_privacy/privacy/analysis:rdp_accountant",
         "//tensorflow_privacy/privacy/optimizers:dp_optimizer",
+        "//third_party/py/tensorflow:tensorflow_compat_v1_estimator",
+        "//third_party/py/tensorflow:tensorflow_estimator",
     ],
 )
 
@@ -95,6 +101,8 @@ py_binary(
     deps = [
         "//tensorflow_privacy/privacy/analysis:rdp_accountant",
"//tensorflow_privacy/privacy/optimizers:dp_optimizer", + "//third_party/py/tensorflow:tensorflow_compat_v1_estimator", + "//third_party/py/tensorflow:tensorflow_estimator", ], ) @@ -104,6 +112,8 @@ py_binary( deps = [ "//tensorflow_privacy/privacy/analysis:gdp_accountant", "//tensorflow_privacy/privacy/optimizers:dp_optimizer", + "//third_party/py/tensorflow:tensorflow_compat_v1_estimator", + "//third_party/py/tensorflow:tensorflow_estimator", ], ) diff --git a/tutorials/lm_dpsgd_tutorial.py b/tutorials/lm_dpsgd_tutorial.py index 448c70e..038b42b 100644 --- a/tutorials/lm_dpsgd_tutorial.py +++ b/tutorials/lm_dpsgd_tutorial.py @@ -37,6 +37,8 @@ from absl import flags from absl import logging import numpy as np import tensorflow as tf +from tensorflow import estimator as tf_estimator +from tensorflow.compat.v1 import estimator as tf_compat_v1_estimator import tensorflow_datasets as tfds from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent @@ -82,7 +84,7 @@ def rnn_model_fn(features, labels, mode): # pylint: disable=unused-argument scalar_loss = tf.reduce_mean(vector_loss) # Configure the training op (for TRAIN mode). - if mode == tf.estimator.ModeKeys.TRAIN: + if mode == tf_estimator.ModeKeys.TRAIN: if FLAGS.dpsgd: optimizer = dp_optimizer.DPAdamGaussianOptimizer( @@ -98,18 +100,18 @@ def rnn_model_fn(features, labels, mode): # pylint: disable=unused-argument opt_loss = scalar_loss global_step = tf.compat.v1.train.get_global_step() train_op = optimizer.minimize(loss=opt_loss, global_step=global_step) - return tf.estimator.EstimatorSpec( + return tf_estimator.EstimatorSpec( mode=mode, loss=scalar_loss, train_op=train_op) # Add evaluation metrics (for EVAL mode). - elif mode == tf.estimator.ModeKeys.EVAL: + elif mode == tf_estimator.ModeKeys.EVAL: eval_metric_ops = { 'accuracy': tf.metrics.accuracy( labels=tf.cast(x[:, 1:], dtype=tf.int32), predictions=tf.argmax(input=logits, axis=2)) } - return tf.estimator.EstimatorSpec( + return tf_estimator.EstimatorSpec( mode=mode, loss=scalar_loss, eval_metric_ops=eval_metric_ops) @@ -168,20 +170,20 @@ def main(unused_argv): train_data, test_data = load_data() # Instantiate the tf.Estimator. - conf = tf.estimator.RunConfig(save_summary_steps=1000) - lm_classifier = tf.estimator.Estimator( + conf = tf_estimator.RunConfig(save_summary_steps=1000) + lm_classifier = tf_estimator.Estimator( model_fn=rnn_model_fn, model_dir=FLAGS.model_dir, config=conf) # Create tf.Estimator input functions for the training and test data. 
   batch_len = FLAGS.batch_size * SEQ_LEN
   train_data_end = len(train_data) - len(train_data) % batch_len
   test_data_end = len(test_data) - len(test_data) % batch_len
-  train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  train_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
      x={'x': train_data[:train_data_end]},
      batch_size=batch_len,
      num_epochs=FLAGS.epochs,
      shuffle=False)
-  eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  eval_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
      x={'x': test_data[:test_data_end]},
      batch_size=batch_len,
      num_epochs=1,
diff --git a/tutorials/mnist_dpsgd_tutorial.py b/tutorials/mnist_dpsgd_tutorial.py
index 31ab5fa..3ba706c 100644
--- a/tutorials/mnist_dpsgd_tutorial.py
+++ b/tutorials/mnist_dpsgd_tutorial.py
@@ -19,6 +19,7 @@ from absl import app
 from absl import flags
 from absl import logging
 import tensorflow as tf
+from tensorflow import estimator as tf_estimator
 from tensorflow_privacy.privacy.analysis import compute_dp_sgd_privacy_lib
 from tensorflow_privacy.privacy.optimizers import dp_optimizer
 import mnist_dpsgd_tutorial_common as common
@@ -53,7 +54,7 @@ def cnn_model_fn(features, labels, mode, params):  # pylint: disable=unused-argu
   scalar_loss = tf.reduce_mean(input_tensor=vector_loss)
 
   # Configure the training op (for TRAIN mode).
-  if mode == tf.estimator.ModeKeys.TRAIN:
+  if mode == tf_estimator.ModeKeys.TRAIN:
     if FLAGS.dpsgd:
       # Use DP version of GradientDescentOptimizer. Other optimizers are
       # available in dp_optimizer. Most optimizers inheriting from
@@ -77,17 +78,17 @@ def cnn_model_fn(features, labels, mode, params):  # pylint: disable=unused-argu
     # the vector_loss because tf.estimator requires a scalar loss. This is only
     # used for evaluation and debugging by tf.estimator. The actual loss being
     # minimized is opt_loss defined above and passed to optimizer.minimize().
-    return tf.estimator.EstimatorSpec(
+    return tf_estimator.EstimatorSpec(
        mode=mode, loss=scalar_loss, train_op=train_op)
 
   # Add evaluation metrics (for EVAL mode).
-  elif mode == tf.estimator.ModeKeys.EVAL:
+  elif mode == tf_estimator.ModeKeys.EVAL:
     eval_metric_ops = {
         'accuracy':
             tf.metrics.accuracy(
                 labels=labels, predictions=tf.argmax(input=logits, axis=1))
     }
-    return tf.estimator.EstimatorSpec(
+    return tf_estimator.EstimatorSpec(
        mode=mode, loss=scalar_loss, eval_metric_ops=eval_metric_ops)
 
@@ -97,7 +98,7 @@ def main(unused_argv):
     raise ValueError('Number of microbatches should divide evenly batch_size')
 
   # Instantiate the tf.Estimator.
-  mnist_classifier = tf.estimator.Estimator(
+  mnist_classifier = tf_estimator.Estimator(
      model_fn=cnn_model_fn, model_dir=FLAGS.model_dir)
 
   # Training loop.
diff --git a/tutorials/mnist_dpsgd_tutorial_tpu.py b/tutorials/mnist_dpsgd_tutorial_tpu.py
index 5a5a0c6..74d2cd0 100644
--- a/tutorials/mnist_dpsgd_tutorial_tpu.py
+++ b/tutorials/mnist_dpsgd_tutorial_tpu.py
@@ -20,6 +20,7 @@ from absl import app
 from absl import flags
 from absl import logging
 import tensorflow as tf
+from tensorflow import estimator as tf_estimator
 from tensorflow_privacy.privacy.analysis import compute_dp_sgd_privacy_lib
 from tensorflow_privacy.privacy.optimizers import dp_optimizer
 import mnist_dpsgd_tutorial_common as common
@@ -56,7 +57,7 @@ def cnn_model_fn(features, labels, mode, params):  # pylint: disable=unused-argu
   scalar_loss = tf.reduce_mean(input_tensor=vector_loss)
 
   # Configure the training op (for TRAIN mode).
-  if mode == tf.estimator.ModeKeys.TRAIN:
+  if mode == tf_estimator.ModeKeys.TRAIN:
     if FLAGS.dpsgd:
       # Use DP version of GradientDescentOptimizer. Other optimizers are
       # available in dp_optimizer. Most optimizers inheriting from
@@ -84,11 +85,11 @@ def cnn_model_fn(features, labels, mode, params):  # pylint: disable=unused-argu
     # the vector_loss because tf.estimator requires a scalar loss. This is only
     # used for evaluation and debugging by tf.estimator. The actual loss being
     # minimized is opt_loss defined above and passed to optimizer.minimize().
-    return tf.estimator.tpu.TPUEstimatorSpec(
+    return tf_estimator.tpu.TPUEstimatorSpec(
        mode=mode, loss=scalar_loss, train_op=train_op)
 
   # Add evaluation metrics (for EVAL mode).
-  elif mode == tf.estimator.ModeKeys.EVAL:
+  elif mode == tf_estimator.ModeKeys.EVAL:
 
     def metric_fn(labels, logits):
       predictions = tf.argmax(logits, 1)
@@ -97,7 +98,7 @@ def cnn_model_fn(features, labels, mode, params):  # pylint: disable=unused-argu
              tf.metrics.accuracy(labels=labels, predictions=predictions),
       }
 
-    return tf.estimator.tpu.TPUEstimatorSpec(
+    return tf_estimator.tpu.TPUEstimatorSpec(
        mode=mode,
        loss=scalar_loss,
        eval_metrics=(metric_fn, {
@@ -112,8 +113,8 @@ def main(unused_argv):
     raise ValueError('Number of microbatches should divide evenly batch_size')
 
   # Instantiate the tf.Estimator.
-  run_config = tf.estimator.tpu.RunConfig(master=FLAGS.master)
-  mnist_classifier = tf.estimator.tpu.TPUEstimator(
+  run_config = tf_estimator.tpu.RunConfig(master=FLAGS.master)
+  mnist_classifier = tf_estimator.tpu.TPUEstimator(
      train_batch_size=FLAGS.batch_size,
      eval_batch_size=FLAGS.batch_size,
      model_fn=cnn_model_fn,
diff --git a/tutorials/mnist_dpsgd_tutorial_vectorized.py b/tutorials/mnist_dpsgd_tutorial_vectorized.py
index a12c7c5..cc15d82 100644
--- a/tutorials/mnist_dpsgd_tutorial_vectorized.py
+++ b/tutorials/mnist_dpsgd_tutorial_vectorized.py
@@ -18,6 +18,8 @@ from absl import flags
 from absl import logging
 import numpy as np
 import tensorflow as tf
+from tensorflow import estimator as tf_estimator
+from tensorflow.compat.v1 import estimator as tf_compat_v1_estimator
 from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp
 from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent
 from tensorflow_privacy.privacy.optimizers import dp_optimizer_vectorized
@@ -80,7 +82,7 @@ def cnn_model_fn(features, labels, mode):
   scalar_loss = tf.reduce_mean(input_tensor=vector_loss)
 
   # Configure the training op (for TRAIN mode).
-  if mode == tf.estimator.ModeKeys.TRAIN:
+  if mode == tf_estimator.ModeKeys.TRAIN:
 
     if FLAGS.dpsgd:
       # Use DP version of GradientDescentOptimizer. Other optimizers are
@@ -102,18 +104,18 @@ def cnn_model_fn(features, labels, mode):
     # the vector_loss because tf.estimator requires a scalar loss. This is only
     # used for evaluation and debugging by tf.estimator. The actual loss being
     # minimized is opt_loss defined above and passed to optimizer.minimize().
-    return tf.estimator.EstimatorSpec(
+    return tf_estimator.EstimatorSpec(
        mode=mode, loss=scalar_loss, train_op=train_op)
 
   # Add evaluation metrics (for EVAL mode).
-  elif mode == tf.estimator.ModeKeys.EVAL:
+  elif mode == tf_estimator.ModeKeys.EVAL:
     eval_metric_ops = {
         'accuracy':
             tf.metrics.accuracy(
                 labels=labels, predictions=tf.argmax(input=logits, axis=1))
     }
-    return tf.estimator.EstimatorSpec(
+    return tf_estimator.EstimatorSpec(
        mode=mode, loss=scalar_loss, eval_metric_ops=eval_metric_ops)
 
@@ -150,17 +152,17 @@ def main(unused_argv):
   train_data, train_labels, test_data, test_labels = load_mnist()
 
   # Instantiate the tf.Estimator.
-  mnist_classifier = tf.estimator.Estimator(
+  mnist_classifier = tf_estimator.Estimator(
      model_fn=cnn_model_fn, model_dir=FLAGS.model_dir)
 
   # Create tf.Estimator input functions for the training and test data.
-  train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  train_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
      x={'x': train_data},
      y=train_labels,
      batch_size=FLAGS.batch_size,
      num_epochs=FLAGS.epochs,
      shuffle=True)
-  eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  eval_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
      x={'x': test_data}, y=test_labels, num_epochs=1, shuffle=False)
 
   # Training loop.
diff --git a/tutorials/mnist_lr_tutorial.py b/tutorials/mnist_lr_tutorial.py
index fff9718..557bf21 100644
--- a/tutorials/mnist_lr_tutorial.py
+++ b/tutorials/mnist_lr_tutorial.py
@@ -28,6 +28,8 @@ from absl import flags
 from absl import logging
 import numpy as np
 import tensorflow as tf
+from tensorflow import estimator as tf_estimator
+from tensorflow.compat.v1 import estimator as tf_compat_v1_estimator
 from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp
 from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent
 from tensorflow_privacy.privacy.optimizers import dp_optimizer
@@ -65,7 +67,7 @@ def lr_model_fn(features, labels, mode, nclasses, dim):
   scalar_loss = tf.reduce_mean(vector_loss)
 
   # Configure the training op (for TRAIN mode).
-  if mode == tf.estimator.ModeKeys.TRAIN:
+  if mode == tf_estimator.ModeKeys.TRAIN:
     if FLAGS.dpsgd:
       # The loss function is L-Lipschitz with L = sqrt(2*(||x||^2 + 1)) where
      # ||x|| is the norm of the data.
@@ -86,17 +88,17 @@ def lr_model_fn(features, labels, mode, nclasses, dim):
     # the vector_loss because tf.estimator requires a scalar loss. This is only
     # used for evaluation and debugging by tf.estimator. The actual loss being
     # minimized is opt_loss defined above and passed to optimizer.minimize().
-    return tf.estimator.EstimatorSpec(
+    return tf_estimator.EstimatorSpec(
        mode=mode, loss=scalar_loss, train_op=train_op)
 
   # Add evaluation metrics (for EVAL mode).
-  elif mode == tf.estimator.ModeKeys.EVAL:
+  elif mode == tf_estimator.ModeKeys.EVAL:
     eval_metric_ops = {
         'accuracy':
             tf.metrics.accuracy(
                 labels=labels, predictions=tf.argmax(input=logits, axis=1))
     }
-    return tf.estimator.EstimatorSpec(
+    return tf_estimator.EstimatorSpec(
        mode=mode, loss=scalar_loss, eval_metric_ops=eval_metric_ops)
 
@@ -199,19 +201,19 @@ def main(unused_argv):
   # pylint: disable=g-long-lambda
   model_fn = lambda features, labels, mode: lr_model_fn(
       features, labels, mode, nclasses=10, dim=train_data.shape[1:])
-  mnist_classifier = tf.estimator.Estimator(
+  mnist_classifier = tf_estimator.Estimator(
      model_fn=model_fn, model_dir=FLAGS.model_dir)
 
   # Create tf.Estimator input functions for the training and test data.
   # To analyze the per-user privacy loss, we keep the same orders of samples in
   # each epoch by setting shuffle=False.
-  train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  train_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
      x={'x': train_data},
      y=train_labels,
      batch_size=FLAGS.batch_size,
      num_epochs=FLAGS.epochs,
      shuffle=False)
-  eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  eval_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
      x={'x': test_data}, y=test_labels, num_epochs=1, shuffle=False)
 
   # Train the model.
diff --git a/tutorials/movielens_tutorial.py b/tutorials/movielens_tutorial.py
index 2321abc..f39ec50 100644
--- a/tutorials/movielens_tutorial.py
+++ b/tutorials/movielens_tutorial.py
@@ -21,6 +21,8 @@ import pandas as pd
 from scipy import stats
 from sklearn.model_selection import train_test_split
 import tensorflow as tf
+from tensorflow import estimator as tf_estimator
+from tensorflow.compat.v1 import estimator as tf_compat_v1_estimator
 from tensorflow_privacy.privacy.analysis.gdp_accountant import compute_eps_poisson
 from tensorflow_privacy.privacy.analysis.gdp_accountant import compute_mu_poisson
 from tensorflow_privacy.privacy.optimizers import dp_optimizer
@@ -87,7 +89,7 @@ def nn_model_fn(features, labels, mode):
   scalar_loss = tf.reduce_mean(vector_loss)
 
   # Configure the training op (for TRAIN mode).
-  if mode == tf.estimator.ModeKeys.TRAIN:
+  if mode == tf_estimator.ModeKeys.TRAIN:
     if FLAGS.dpsgd:
       # Use DP version of GradientDescentOptimizer. Other optimizers are
       # available in dp_optimizer. Most optimizers inheriting from
@@ -110,11 +112,11 @@ def nn_model_fn(features, labels, mode):
     # the vector_loss because tf.estimator requires a scalar loss. This is only
     # used for evaluation and debugging by tf.estimator. The actual loss being
     # minimized is opt_loss defined above and passed to optimizer.minimize().
-    return tf.estimator.EstimatorSpec(
+    return tf_estimator.EstimatorSpec(
        mode=mode, loss=scalar_loss, train_op=train_op)
 
   # Add evaluation metrics (for EVAL mode).
-  if mode == tf.estimator.ModeKeys.EVAL:
+  if mode == tf_estimator.ModeKeys.EVAL:
     eval_metric_ops = {
         'rmse':
             tf.compat.v1.metrics.root_mean_squared_error(
@@ -124,7 +126,7 @@ def nn_model_fn(features, labels, mode):
                     b=tf.constant(np.array([0, 1, 2, 3, 4]), dtype=tf.float32),
                     axes=1))
     }
-    return tf.estimator.EstimatorSpec(
+    return tf_estimator.EstimatorSpec(
        mode=mode, loss=scalar_loss, eval_metric_ops=eval_metric_ops)
   return None
@@ -161,11 +163,11 @@ def main(unused_argv):
   train_data, test_data, _ = load_movielens()
 
   # Instantiate the tf.Estimator.
-  ml_classifier = tf.estimator.Estimator(
+  ml_classifier = tf_estimator.Estimator(
      model_fn=nn_model_fn, model_dir=FLAGS.model_dir)
 
   # Create tf.Estimator input functions for the training and test data.
-  eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  eval_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
      x={
          'user': test_data[:, 0],
          'movie': test_data[:, 4]
@@ -185,7 +187,7 @@ def main(unused_argv):
     global microbatches
     microbatches = len(subsampling)
 
-    train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+    train_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
        x={
            'user': train_data[subsampling, 0],
            'movie': train_data[subsampling, 4]
diff --git a/tutorials/walkthrough/BUILD b/tutorials/walkthrough/BUILD
index e1a6791..2d6e382 100644
--- a/tutorials/walkthrough/BUILD
+++ b/tutorials/walkthrough/BUILD
@@ -7,4 +7,8 @@ py_binary(
     srcs = ["mnist_scratch.py"],
     python_version = "PY3",
     srcs_version = "PY3",
+    deps = [
+        "//third_party/py/tensorflow:tensorflow_compat_v1_estimator",
+        "//third_party/py/tensorflow:tensorflow_estimator",
+    ],
 )
diff --git a/tutorials/walkthrough/mnist_scratch.py b/tutorials/walkthrough/mnist_scratch.py
index f387ddc..9cf8341 100644
--- a/tutorials/walkthrough/mnist_scratch.py
+++ b/tutorials/walkthrough/mnist_scratch.py
@@ -16,6 +16,8 @@ from absl import logging
 import numpy as np
 import tensorflow as tf
+from tensorflow import estimator as tf_estimator
+from tensorflow.compat.v1 import estimator as tf_compat_v1_estimator
 
 tf.flags.DEFINE_float('learning_rate', .15, 'Learning rate for training')
 tf.flags.DEFINE_integer('batch_size', 256, 'Batch size')
@@ -45,22 +47,22 @@ def cnn_model_fn(features, labels, mode):
   scalar_loss = tf.reduce_mean(vector_loss)
 
   # Configure the training op (for TRAIN mode).
-  if mode == tf.estimator.ModeKeys.TRAIN:
+  if mode == tf_estimator.ModeKeys.TRAIN:
     optimizer = tf.compat.v1.train.GradientDescentOptimizer(FLAGS.learning_rate)
     opt_loss = scalar_loss
     global_step = tf.compat.v1.train.get_global_step()
     train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
-    return tf.estimator.EstimatorSpec(
+    return tf_estimator.EstimatorSpec(
        mode=mode, loss=scalar_loss, train_op=train_op)
 
   # Add evaluation metrics (for EVAL mode).
-  elif mode == tf.estimator.ModeKeys.EVAL:
+  elif mode == tf_estimator.ModeKeys.EVAL:
     eval_metric_ops = {
         'accuracy':
            tf.metrics.accuracy(
                labels=labels, predictions=tf.argmax(input=logits, axis=1))
     }
-    return tf.estimator.EstimatorSpec(
+    return tf_estimator.EstimatorSpec(
        mode=mode, loss=scalar_loss, eval_metric_ops=eval_metric_ops)
 
@@ -94,16 +96,16 @@ def main(unused_argv):
   train_data, train_labels, test_data, test_labels = load_mnist()
 
   # Instantiate the tf.Estimator.
-  mnist_classifier = tf.estimator.Estimator(model_fn=cnn_model_fn)
+  mnist_classifier = tf_estimator.Estimator(model_fn=cnn_model_fn)
 
   # Create tf.Estimator input functions for the training and test data.
-  train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  train_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
      x={'x': train_data},
      y=train_labels,
      batch_size=FLAGS.batch_size,
      num_epochs=FLAGS.epochs,
      shuffle=True)
-  eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  eval_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
      x={'x': test_data}, y=test_labels, num_epochs=1, shuffle=False)
 
   # Training loop.