Explicitly import estimator from tensorflow as a separate import instead of
accessing it via tf.estimator, and depend on the tensorflow estimator target.

PiperOrigin-RevId: 438419860

This commit is contained in:
parent fc2c15ab21
commit 5493a3baf0

22 changed files with 152 additions and 94 deletions
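The migration is mechanical in every file touched below: anything previously reached by attribute access through tf.estimator (mode keys, specs, hooks, estimator classes) now goes through `from tensorflow import estimator as tf_estimator`, the v1-only pieces (`Estimator` in compat mode, `inputs.numpy_input_fn`) go through `from tensorflow.compat.v1 import estimator as tf_compat_v1_estimator`, and each Bazel target declares the estimator dependency it was previously picking up implicitly. A minimal sketch of the before/after pattern; the toy model_fn and data here are illustrative, not taken from this commit:

import numpy as np
import tensorflow as tf
from tensorflow import estimator as tf_estimator  # was: reached via tf.estimator.*
from tensorflow.compat.v1 import estimator as tf_compat_v1_estimator  # was: tf.compat.v1.estimator.*


def model_fn(features, labels, mode):
  # A trivial linear classifier, purely illustrative.
  logits = tf.keras.layers.Dense(2)(features['x'])
  loss = tf.reduce_mean(
      tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))
  if mode == tf_estimator.ModeKeys.TRAIN:  # was: tf.estimator.ModeKeys.TRAIN
    optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1)
    train_op = optimizer.minimize(
        loss, global_step=tf.compat.v1.train.get_global_step())
    return tf_estimator.EstimatorSpec(  # was: tf.estimator.EstimatorSpec
        mode=mode, loss=loss, train_op=train_op)
  return tf_estimator.EstimatorSpec(mode=mode, loss=loss)


classifier = tf_estimator.Estimator(model_fn=model_fn)  # was: tf.estimator.Estimator
train_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(  # was: tf.compat.v1.estimator.inputs.numpy_input_fn
    x={'x': np.random.normal(size=(8, 4)).astype(np.float32)},
    y=np.zeros(8, dtype=np.int32),
    batch_size=4,
    num_epochs=1,
    shuffle=False)
classifier.train(input_fn=train_input_fn)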
@@ -26,6 +26,8 @@ import pandas as pd
 from sklearn.model_selection import KFold
 import tensorflow as tf
 
+from tensorflow import estimator as tf_estimator
+from tensorflow.compat.v1 import estimator as tf_compat_v1_estimator
 from tensorflow_privacy.privacy.analysis.gdp_accountant import compute_eps_poisson
 from tensorflow_privacy.privacy.analysis.gdp_accountant import compute_mu_poisson
 from tensorflow_privacy.privacy.optimizers import dp_optimizer
@@ -61,7 +63,7 @@ def nn_model_fn(features, labels, mode):
   scalar_loss = tf.reduce_mean(vector_loss)
 
   # Configure the training op (for TRAIN mode).
-  if mode == tf.estimator.ModeKeys.TRAIN:
+  if mode == tf_estimator.ModeKeys.TRAIN:
     if FLAGS.dpsgd:
       # Use DP version of GradientDescentOptimizer. Other optimizers are
       # available in dp_optimizer. Most optimizers inheriting from
@@ -83,17 +85,17 @@ def nn_model_fn(features, labels, mode):
     # the vector_loss because tf.estimator requires a scalar loss. This is only
     # used for evaluation and debugging by tf.estimator. The actual loss being
     # minimized is opt_loss defined above and passed to optimizer.minimize().
-    return tf.estimator.EstimatorSpec(
+    return tf_estimator.EstimatorSpec(
         mode=mode, loss=scalar_loss, train_op=train_op)
 
   # Add evaluation metrics (for EVAL mode).
-  if mode == tf.estimator.ModeKeys.EVAL:
+  if mode == tf_estimator.ModeKeys.EVAL:
     eval_metric_ops = {
         'accuracy':
            tf.compat.v1.metrics.accuracy(
                labels=labels, predictions=tf.argmax(input=logits, axis=1))
     }
-    return tf.estimator.EstimatorSpec(
+    return tf_estimator.EstimatorSpec(
         mode=mode, loss=scalar_loss, eval_metric_ops=eval_metric_ops)
 
   return None
@@ -123,11 +125,11 @@ def main(unused_argv):
   train_data, train_labels, test_data, test_labels = load_adult()
 
   # Instantiate the tf.Estimator.
-  adult_classifier = tf.compat.v1.estimator.Estimator(
+  adult_classifier = tf_compat_v1_estimator.Estimator(
       model_fn=nn_model_fn, model_dir=FLAGS.model_dir)
 
   # Create tf.Estimator input functions for the training and test data.
-  eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  eval_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
       x={'x': test_data}, y=test_labels, num_epochs=1, shuffle=False)
 
   # Training loop.
@@ -141,7 +143,7 @@ def main(unused_argv):
     global microbatches
     microbatches = len(subsampling)
 
-    train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+    train_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
        x={'x': train_data[subsampling]},
        y=train_labels[subsampling],
        batch_size=len(subsampling),
@@ -25,6 +25,8 @@ from keras.preprocessing import sequence
 import numpy as np
 import tensorflow as tf
 
+from tensorflow import estimator as tf_estimator
+from tensorflow.compat.v1 import estimator as tf_compat_v1_estimator
 from tensorflow_privacy.privacy.analysis.gdp_accountant import compute_eps_poisson
 from tensorflow_privacy.privacy.analysis.gdp_accountant import compute_mu_poisson
 from tensorflow_privacy.privacy.optimizers import dp_optimizer
@@ -65,7 +67,7 @@ def nn_model_fn(features, labels, mode):
   scalar_loss = tf.reduce_mean(vector_loss)
 
   # Configure the training op (for TRAIN mode).
-  if mode == tf.estimator.ModeKeys.TRAIN:
+  if mode == tf_estimator.ModeKeys.TRAIN:
     if FLAGS.dpsgd:
       # Use DP version of GradientDescentOptimizer. Other optimizers are
       # available in dp_optimizer. Most optimizers inheriting from
@@ -88,17 +90,17 @@ def nn_model_fn(features, labels, mode):
     # the vector_loss because tf.estimator requires a scalar loss. This is only
     # used for evaluation and debugging by tf.estimator. The actual loss being
     # minimized is opt_loss defined above and passed to optimizer.minimize().
-    return tf.estimator.EstimatorSpec(
+    return tf_estimator.EstimatorSpec(
         mode=mode, loss=scalar_loss, train_op=train_op)
 
   # Add evaluation metrics (for EVAL mode).
-  if mode == tf.estimator.ModeKeys.EVAL:
+  if mode == tf_estimator.ModeKeys.EVAL:
     eval_metric_ops = {
         'accuracy':
            tf.compat.v1.metrics.accuracy(
                labels=labels, predictions=tf.argmax(input=logits, axis=1))
     }
-    return tf.estimator.EstimatorSpec(
+    return tf_estimator.EstimatorSpec(
         mode=mode, loss=scalar_loss, eval_metric_ops=eval_metric_ops)
   return None
 
@@ -122,11 +124,11 @@ def main(unused_argv):
   train_data, train_labels, test_data, test_labels = load_imdb()
 
   # Instantiate the tf.Estimator.
-  imdb_classifier = tf.estimator.Estimator(
+  imdb_classifier = tf_estimator.Estimator(
      model_fn=nn_model_fn, model_dir=FLAGS.model_dir)
 
   # Create tf.Estimator input functions for the training and test data.
-  eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  eval_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
      x={'x': test_data}, y=test_labels, num_epochs=1, shuffle=False)
 
   # Training loop.
@@ -141,7 +143,7 @@ def main(unused_argv):
     global microbatches
     microbatches = len(subsampling)
 
-    train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+    train_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
        x={'x': train_data[subsampling]},
        y=train_labels[subsampling],
        batch_size=len(subsampling),
@@ -23,7 +23,10 @@ py_library(
         "dnn.py",
     ],
     srcs_version = "PY3",
-    deps = [":head"],
+    deps = [
+        ":head",
+        "//third_party/py/tensorflow:tensorflow_estimator",
+    ],
 )
 
 py_test(
@@ -36,6 +39,7 @@ py_test(
         ":head",
         "//tensorflow_privacy/privacy/estimators:test_utils",
         "//tensorflow_privacy/privacy/optimizers:dp_optimizer",
+        "//third_party/py/tensorflow:tensorflow_estimator",
     ],
 )
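At the Bazel level the change is purely additive: every target whose sources now import estimator directly also declares the dependency. Reassembling the py_library above after this hunk gives the following sketch (the rule name is assumed from the srcs; the //third_party/py/tensorflow:... labels reflect the internal layout referenced by this commit and are not present in the public GitHub BUILD files):

py_library(
    name = "dnn",  # assumed from srcs; not shown in the hunk
    srcs = [
        "dnn.py",
    ],
    srcs_version = "PY3",
    deps = [
        ":head",
        "//third_party/py/tensorflow:tensorflow_estimator",
    ],
)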
@@ -16,12 +16,13 @@
 
 import tensorflow as tf
 
+from tensorflow import estimator as tf_estimator
 from tensorflow_privacy.privacy.estimators.v1 import head as head_lib
 from tensorflow_estimator.python.estimator import estimator
 from tensorflow_estimator.python.estimator.canned import dnn
 
 
-class DNNClassifier(tf.estimator.Estimator):
+class DNNClassifier(tf_estimator.Estimator):
   """DP version of `tf.compat.v1.estimator.DNNClassifier`."""
 
   def __init__(
@@ -15,6 +15,7 @@
 from absl.testing import parameterized
 
 import tensorflow as tf
+from tensorflow import estimator as tf_estimator
 from tensorflow_privacy.privacy.estimators import test_utils
 from tensorflow_privacy.privacy.estimators.v1 import head as head_lib
 from tensorflow_privacy.privacy.optimizers.dp_optimizer import DPGradientDescentGaussianOptimizer
@@ -69,7 +70,7 @@ class DPHeadTest(tf.test.TestCase, parameterized.TestCase):
         noise_multiplier=0.0,
         num_microbatches=2)
     model_fn = make_model_fn(head, optimizer, feature_columns)
-    classifier = tf.estimator.Estimator(model_fn=model_fn)
+    classifier = tf_estimator.Estimator(model_fn=model_fn)
 
     classifier.train(
         input_fn=test_utils.make_input_fn(train_features, train_labels, True),
@@ -53,6 +53,8 @@ py_test(
     deps = [
         ":dp_optimizer",
         "//tensorflow_privacy/privacy/dp_query:gaussian_query",
+        "//third_party/py/tensorflow:tensorflow_compat_v1_estimator",
+        "//third_party/py/tensorflow:tensorflow_estimator",
     ],
 )
 
@@ -62,7 +64,11 @@ py_test(
     srcs = ["dp_optimizer_vectorized_test.py"],
     python_version = "PY3",
     srcs_version = "PY3",
-    deps = [":dp_optimizer_vectorized"],
+    deps = [
+        ":dp_optimizer_vectorized",
+        "//third_party/py/tensorflow:tensorflow_compat_v1_estimator",
+        "//third_party/py/tensorflow:tensorflow_estimator",
+    ],
 )
 
 py_test(
@@ -86,5 +92,6 @@ py_test(
     deps = [
         "//tensorflow_privacy/privacy/optimizers:dp_optimizer_keras",
         "//tensorflow_privacy/privacy/optimizers:dp_optimizer_keras_vectorized",
+        "//third_party/py/tensorflow:tensorflow_estimator",
     ],
 )
@@ -15,6 +15,7 @@
 from absl.testing import parameterized
 import numpy as np
 import tensorflow as tf
+from tensorflow import estimator as tf_estimator
 from tensorflow_privacy.privacy.optimizers import dp_optimizer_keras
 from tensorflow_privacy.privacy.optimizers import dp_optimizer_keras_vectorized
 
@@ -227,7 +228,7 @@ class DPOptimizerGetGradientsTest(tf.test.TestCase, parameterized.TestCase):
       train_op = tf.group(
           optimizer.get_updates(loss=vector_loss, params=params),
           [tf.compat.v1.assign_add(global_step, 1)])
-      return tf.estimator.EstimatorSpec(
+      return tf_estimator.EstimatorSpec(
          mode=mode, loss=scalar_loss, train_op=train_op)
 
     return linear_model_fn
@@ -249,7 +250,7 @@ class DPOptimizerGetGradientsTest(tf.test.TestCase, parameterized.TestCase):
   def testBaseline(self, cls, num_microbatches):
     """Tests that DP optimizers work with tf.estimator."""
 
-    linear_regressor = tf.estimator.Estimator(
+    linear_regressor = tf_estimator.Estimator(
         model_fn=self._make_linear_model_fn(cls, 100.0, 0.0, num_microbatches,
                                             0.05))
 
@@ -293,7 +294,7 @@ class DPOptimizerGetGradientsTest(tf.test.TestCase, parameterized.TestCase):
       return tf.data.Dataset.from_tensor_slices(
          (train_data, train_labels)).batch(1)
 
-    unclipped_linear_regressor = tf.estimator.Estimator(
+    unclipped_linear_regressor = tf_estimator.Estimator(
        model_fn=self._make_linear_model_fn(cls, 1.0e9, 0.0, num_microbatches,
                                            1.0))
     unclipped_linear_regressor.train(input_fn=train_input_fn, steps=1)
@@ -302,7 +303,7 @@ class DPOptimizerGetGradientsTest(tf.test.TestCase, parameterized.TestCase):
     bias_value = unclipped_linear_regressor.get_variable_value('dense/bias')
     global_norm = np.linalg.norm(np.concatenate((kernel_value, [bias_value])))
 
-    clipped_linear_regressor = tf.estimator.Estimator(
+    clipped_linear_regressor = tf_estimator.Estimator(
        model_fn=self._make_linear_model_fn(cls, 1.0, 0.0, num_microbatches,
                                            1.0))
     clipped_linear_regressor.train(input_fn=train_input_fn, steps=1)
@@ -339,7 +340,7 @@ class DPOptimizerGetGradientsTest(tf.test.TestCase, parameterized.TestCase):
                            num_microbatches):
     """Tests that DP optimizers work with tf.estimator."""
 
-    linear_regressor = tf.estimator.Estimator(
+    linear_regressor = tf_estimator.Estimator(
         model_fn=self._make_linear_model_fn(
             cls,
             l2_norm_clip,
@@ -18,6 +18,8 @@ import unittest
 from absl.testing import parameterized
 import numpy as np
 import tensorflow as tf
+from tensorflow import estimator as tf_estimator
+from tensorflow.compat.v1 import estimator as tf_compat_v1_estimator
 from tensorflow_privacy.privacy.dp_query import gaussian_query
 from tensorflow_privacy.privacy.optimizers import dp_optimizer
 
@@ -205,10 +207,10 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
          dp_sum_query, num_microbatches=1, learning_rate=1.0)
       global_step = tf.compat.v1.train.get_global_step()
       train_op = optimizer.minimize(loss=vector_loss, global_step=global_step)
-      return tf.estimator.EstimatorSpec(
+      return tf_estimator.EstimatorSpec(
          mode=mode, loss=scalar_loss, train_op=train_op)
 
-    linear_regressor = tf.estimator.Estimator(model_fn=linear_model_fn)
+    linear_regressor = tf_estimator.Estimator(model_fn=linear_model_fn)
     true_weights = np.array([[-5], [4], [3], [2]]).astype(np.float32)
     true_bias = 6.0
     train_data = np.random.normal(scale=3.0, size=(200, 4)).astype(np.float32)
@@ -217,7 +219,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
                            true_weights) + true_bias + np.random.normal(
                                scale=0.1, size=(200, 1)).astype(np.float32)
 
-    train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+    train_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
        x={'x': train_data},
        y=train_labels,
        batch_size=20,
@@ -17,6 +17,8 @@ import unittest
 from absl.testing import parameterized
 import numpy as np
 import tensorflow as tf
+from tensorflow import estimator as tf_estimator
+from tensorflow.compat.v1 import estimator as tf_compat_v1_estimator
 from tensorflow_privacy.privacy.optimizers import dp_optimizer_vectorized
 from tensorflow_privacy.privacy.optimizers.dp_optimizer_vectorized import VectorizedDPAdagrad
 from tensorflow_privacy.privacy.optimizers.dp_optimizer_vectorized import VectorizedDPAdam
@@ -144,10 +146,10 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
          learning_rate=1.0)
       global_step = tf.compat.v1.train.get_global_step()
       train_op = optimizer.minimize(loss=vector_loss, global_step=global_step)
-      return tf.estimator.EstimatorSpec(
+      return tf_estimator.EstimatorSpec(
          mode=mode, loss=scalar_loss, train_op=train_op)
 
-    linear_regressor = tf.estimator.Estimator(model_fn=linear_model_fn)
+    linear_regressor = tf_estimator.Estimator(model_fn=linear_model_fn)
     true_weights = np.array([[-5], [4], [3], [2]]).astype(np.float32)
     true_bias = 6.0
     train_data = np.random.normal(scale=3.0, size=(200, 4)).astype(np.float32)
@@ -156,7 +158,7 @@ class DPOptimizerTest(tf.test.TestCase, parameterized.TestCase):
                            true_weights) + true_bias + np.random.normal(
                                scale=0.1, size=(200, 1)).astype(np.float32)
 
-    train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+    train_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
        x={'x': train_data},
        y=train_labels,
        batch_size=20,
@@ -113,6 +113,8 @@ py_test(
     deps = [
         ":membership_inference_attack",
         ":tf_estimator_evaluation",
+        "//third_party/py/tensorflow:tensorflow_compat_v1_estimator",
+        "//third_party/py/tensorflow:tensorflow_estimator",
     ],
 )
 
@@ -124,6 +126,7 @@ py_library(
         ":membership_inference_attack",
         ":utils",
         ":utils_tensorboard",
+        "//third_party/py/tensorflow:tensorflow_estimator",
     ],
 )
 
@@ -135,6 +138,8 @@ py_binary(
     deps = [
         ":membership_inference_attack",
         ":tf_estimator_evaluation",
+        "//third_party/py/tensorflow:tensorflow_compat_v1_estimator",
+        "//third_party/py/tensorflow:tensorflow_estimator",
     ],
 )
@@ -19,6 +19,7 @@ from typing import Iterable
 from absl import logging
 import numpy as np
 import tensorflow as tf
+from tensorflow import estimator as tf_estimator
 from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack import data_structures
 from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack import membership_inference_attack as mia
 from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack import utils
@@ -47,7 +48,7 @@ def calculate_losses(estimator, input_fn, labels):
   return pred, loss
 
 
-class MembershipInferenceTrainingHook(tf.estimator.SessionRunHook):
+class MembershipInferenceTrainingHook(tf_estimator.SessionRunHook):
   """Training hook to perform membership inference attack on epoch end."""
 
   def __init__(self,
@@ -18,6 +18,8 @@ from absl import flags
 from absl import logging
 import numpy as np
 import tensorflow as tf
+from tensorflow import estimator as tf_estimator
+from tensorflow.compat.v1 import estimator as tf_compat_v1_estimator
 from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import AttackType
 from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import get_flattened_attack_metrics
 from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import SlicingSpec
@@ -46,34 +48,34 @@ def small_cnn_fn(features, labels, mode):
   y = tf.keras.layers.Dense(64, activation='relu')(y)
   logits = tf.keras.layers.Dense(10)(y)
 
-  if mode != tf.estimator.ModeKeys.PREDICT:
+  if mode != tf_estimator.ModeKeys.PREDICT:
     vector_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits)
     scalar_loss = tf.reduce_mean(input_tensor=vector_loss)
 
   # Configure the training op (for TRAIN mode).
-  if mode == tf.estimator.ModeKeys.TRAIN:
+  if mode == tf_estimator.ModeKeys.TRAIN:
     optimizer = tf.train.MomentumOptimizer(
        learning_rate=FLAGS.learning_rate, momentum=0.9)
     global_step = tf.compat.v1.train.get_global_step()
     train_op = optimizer.minimize(loss=scalar_loss, global_step=global_step)
-    return tf.estimator.EstimatorSpec(
+    return tf_estimator.EstimatorSpec(
        mode=mode, loss=scalar_loss, train_op=train_op)
 
   # Add evaluation metrics (for EVAL mode).
-  elif mode == tf.estimator.ModeKeys.EVAL:
+  elif mode == tf_estimator.ModeKeys.EVAL:
     eval_metric_ops = {
        'accuracy':
            tf.metrics.accuracy(
                labels=labels, predictions=tf.argmax(input=logits, axis=1))
     }
-    return tf.estimator.EstimatorSpec(
+    return tf_estimator.EstimatorSpec(
        mode=mode, loss=scalar_loss, eval_metric_ops=eval_metric_ops)
 
   # Output the prediction probability (for PREDICT mode).
-  elif mode == tf.estimator.ModeKeys.PREDICT:
+  elif mode == tf_estimator.ModeKeys.PREDICT:
     predictions = tf.nn.softmax(logits)
-    return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
+    return tf_estimator.EstimatorSpec(mode=mode, predictions=predictions)
 
 
 def load_cifar10():
@@ -97,13 +99,13 @@ def main(unused_argv):
   x_train, y_train, x_test, y_test = load_cifar10()
 
   # Instantiate the tf.Estimator.
-  classifier = tf.estimator.Estimator(
+  classifier = tf_estimator.Estimator(
      model_fn=small_cnn_fn, model_dir=FLAGS.model_dir)
 
   # A function to construct input_fn given (data, label), to be used by the
   # membership inference training hook.
   def input_fn_constructor(x, y):
-    return tf.compat.v1.estimator.inputs.numpy_input_fn(
+    return tf_compat_v1_estimator.inputs.numpy_input_fn(
        x={'x': x}, y=y, shuffle=False)
 
   # Get hook for membership inference attack.
@@ -118,13 +120,13 @@ def main(unused_argv):
      tensorboard_merge_classifiers=FLAGS.tensorboard_merge_classifiers)
 
   # Create tf.Estimator input functions for the training and test data.
-  train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  train_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
      x={'x': x_train},
      y=y_train,
      batch_size=FLAGS.batch_size,
     num_epochs=FLAGS.epochs,
      shuffle=True)
-  eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  eval_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
      x={'x': x_test}, y=y_test, num_epochs=1, shuffle=False)
 
   # Training loop.
@@ -15,6 +15,8 @@
 from absl.testing import absltest
 import numpy as np
 import tensorflow as tf
+from tensorflow import estimator as tf_estimator
+from tensorflow.compat.v1 import estimator as tf_compat_v1_estimator
 from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack import data_structures
 from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack import tf_estimator_evaluation
 
@@ -44,18 +46,18 @@ class UtilsTest(absltest.TestCase):
       logits = tf.keras.layers.Dense(self.nclass)(input_layer)
 
       # Define the PREDICT mode becasue we only need that
-      if mode == tf.estimator.ModeKeys.PREDICT:
+      if mode == tf_estimator.ModeKeys.PREDICT:
         predictions = tf.nn.softmax(logits)
-        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
+        return tf_estimator.EstimatorSpec(mode=mode, predictions=predictions)
 
     # Define the classifier, input_fn for training and test data
-    self.classifier = tf.estimator.Estimator(model_fn=model_fn)
-    self.input_fn_train = tf.compat.v1.estimator.inputs.numpy_input_fn(
+    self.classifier = tf_estimator.Estimator(model_fn=model_fn)
+    self.input_fn_train = tf_compat_v1_estimator.inputs.numpy_input_fn(
        x={'x': self.train_data},
        y=self.train_labels,
        num_epochs=1,
        shuffle=False)
-    self.input_fn_test = tf.compat.v1.estimator.inputs.numpy_input_fn(
+    self.input_fn_test = tf_compat_v1_estimator.inputs.numpy_input_fn(
        x={'x': self.test_data},
        y=self.test_labels,
        num_epochs=1,
@@ -94,7 +96,7 @@ class UtilsTest(absltest.TestCase):
     """Test the attack on the final models."""
 
     def input_fn_constructor(x, y):
-      return tf.compat.v1.estimator.inputs.numpy_input_fn(
+      return tf_compat_v1_estimator.inputs.numpy_input_fn(
          x={'x': x}, y=y, shuffle=False)
 
     results = tf_estimator_evaluation.run_attack_on_tf_estimator_model(
@@ -17,6 +17,7 @@ py_binary(
         ":mnist_dpsgd_tutorial_common",
         "//tensorflow_privacy/privacy/analysis:compute_dp_sgd_privacy_lib",
         "//tensorflow_privacy/privacy/optimizers:dp_optimizer",
+        "//third_party/py/tensorflow:tensorflow_estimator",
     ],
 )
 
@@ -61,6 +62,8 @@ py_binary(
     deps = [
         "//tensorflow_privacy/privacy/analysis:rdp_accountant",
        "//tensorflow_privacy/privacy/optimizers:dp_optimizer_vectorized",
+        "//third_party/py/tensorflow:tensorflow_compat_v1_estimator",
+        "//third_party/py/tensorflow:tensorflow_estimator",
     ],
 )
 
@@ -73,6 +76,7 @@ py_binary(
         ":mnist_dpsgd_tutorial_common",
         "//tensorflow_privacy/privacy/analysis:compute_dp_sgd_privacy_lib",
        "//tensorflow_privacy/privacy/optimizers:dp_optimizer",
+        "//third_party/py/tensorflow:tensorflow_estimator",
     ],
 )
 
@@ -84,6 +88,8 @@ py_binary(
     deps = [
         "//tensorflow_privacy/privacy/analysis:rdp_accountant",
        "//tensorflow_privacy/privacy/optimizers:dp_optimizer",
+        "//third_party/py/tensorflow:tensorflow_compat_v1_estimator",
+        "//third_party/py/tensorflow:tensorflow_estimator",
     ],
 )
 
@@ -95,6 +101,8 @@ py_binary(
     deps = [
         "//tensorflow_privacy/privacy/analysis:rdp_accountant",
        "//tensorflow_privacy/privacy/optimizers:dp_optimizer",
+        "//third_party/py/tensorflow:tensorflow_compat_v1_estimator",
+        "//third_party/py/tensorflow:tensorflow_estimator",
     ],
 )
 
@@ -104,6 +112,8 @@ py_binary(
     deps = [
         "//tensorflow_privacy/privacy/analysis:gdp_accountant",
        "//tensorflow_privacy/privacy/optimizers:dp_optimizer",
+        "//third_party/py/tensorflow:tensorflow_compat_v1_estimator",
+        "//third_party/py/tensorflow:tensorflow_estimator",
     ],
 )
@@ -37,6 +37,8 @@ from absl import flags
 from absl import logging
 import numpy as np
 import tensorflow as tf
+from tensorflow import estimator as tf_estimator
+from tensorflow.compat.v1 import estimator as tf_compat_v1_estimator
 import tensorflow_datasets as tfds
 from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp
 from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent
@@ -82,7 +84,7 @@ def rnn_model_fn(features, labels, mode):  # pylint: disable=unused-argument
   scalar_loss = tf.reduce_mean(vector_loss)
 
   # Configure the training op (for TRAIN mode).
-  if mode == tf.estimator.ModeKeys.TRAIN:
+  if mode == tf_estimator.ModeKeys.TRAIN:
     if FLAGS.dpsgd:
 
       optimizer = dp_optimizer.DPAdamGaussianOptimizer(
@@ -98,18 +100,18 @@ def rnn_model_fn(features, labels, mode):  # pylint: disable=unused-argument
       opt_loss = scalar_loss
     global_step = tf.compat.v1.train.get_global_step()
     train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
-    return tf.estimator.EstimatorSpec(
+    return tf_estimator.EstimatorSpec(
        mode=mode, loss=scalar_loss, train_op=train_op)
 
   # Add evaluation metrics (for EVAL mode).
-  elif mode == tf.estimator.ModeKeys.EVAL:
+  elif mode == tf_estimator.ModeKeys.EVAL:
     eval_metric_ops = {
        'accuracy':
            tf.metrics.accuracy(
                labels=tf.cast(x[:, 1:], dtype=tf.int32),
                predictions=tf.argmax(input=logits, axis=2))
     }
-    return tf.estimator.EstimatorSpec(
+    return tf_estimator.EstimatorSpec(
        mode=mode, loss=scalar_loss, eval_metric_ops=eval_metric_ops)
 
 
@@ -168,20 +170,20 @@ def main(unused_argv):
   train_data, test_data = load_data()
 
   # Instantiate the tf.Estimator.
-  conf = tf.estimator.RunConfig(save_summary_steps=1000)
-  lm_classifier = tf.estimator.Estimator(
+  conf = tf_estimator.RunConfig(save_summary_steps=1000)
+  lm_classifier = tf_estimator.Estimator(
      model_fn=rnn_model_fn, model_dir=FLAGS.model_dir, config=conf)
 
   # Create tf.Estimator input functions for the training and test data.
   batch_len = FLAGS.batch_size * SEQ_LEN
   train_data_end = len(train_data) - len(train_data) % batch_len
   test_data_end = len(test_data) - len(test_data) % batch_len
-  train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  train_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
      x={'x': train_data[:train_data_end]},
      batch_size=batch_len,
      num_epochs=FLAGS.epochs,
      shuffle=False)
-  eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  eval_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
      x={'x': test_data[:test_data_end]},
      batch_size=batch_len,
      num_epochs=1,
@@ -19,6 +19,7 @@ from absl import app
 from absl import flags
 from absl import logging
 import tensorflow as tf
+from tensorflow import estimator as tf_estimator
 from tensorflow_privacy.privacy.analysis import compute_dp_sgd_privacy_lib
 from tensorflow_privacy.privacy.optimizers import dp_optimizer
 import mnist_dpsgd_tutorial_common as common
@@ -53,7 +54,7 @@ def cnn_model_fn(features, labels, mode, params):  # pylint: disable=unused-argu
   scalar_loss = tf.reduce_mean(input_tensor=vector_loss)
 
   # Configure the training op (for TRAIN mode).
-  if mode == tf.estimator.ModeKeys.TRAIN:
+  if mode == tf_estimator.ModeKeys.TRAIN:
     if FLAGS.dpsgd:
       # Use DP version of GradientDescentOptimizer. Other optimizers are
       # available in dp_optimizer. Most optimizers inheriting from
@@ -77,17 +78,17 @@ def cnn_model_fn(features, labels, mode, params):  # pylint: disable=unused-argu
     # the vector_loss because tf.estimator requires a scalar loss. This is only
     # used for evaluation and debugging by tf.estimator. The actual loss being
     # minimized is opt_loss defined above and passed to optimizer.minimize().
-    return tf.estimator.EstimatorSpec(
+    return tf_estimator.EstimatorSpec(
        mode=mode, loss=scalar_loss, train_op=train_op)
 
   # Add evaluation metrics (for EVAL mode).
-  elif mode == tf.estimator.ModeKeys.EVAL:
+  elif mode == tf_estimator.ModeKeys.EVAL:
     eval_metric_ops = {
        'accuracy':
            tf.metrics.accuracy(
                labels=labels, predictions=tf.argmax(input=logits, axis=1))
     }
-    return tf.estimator.EstimatorSpec(
+    return tf_estimator.EstimatorSpec(
        mode=mode, loss=scalar_loss, eval_metric_ops=eval_metric_ops)
 
 
@@ -97,7 +98,7 @@ def main(unused_argv):
     raise ValueError('Number of microbatches should divide evenly batch_size')
 
   # Instantiate the tf.Estimator.
-  mnist_classifier = tf.estimator.Estimator(
+  mnist_classifier = tf_estimator.Estimator(
      model_fn=cnn_model_fn, model_dir=FLAGS.model_dir)
 
   # Training loop.
@@ -20,6 +20,7 @@ from absl import app
 from absl import flags
 from absl import logging
 import tensorflow as tf
+from tensorflow import estimator as tf_estimator
 from tensorflow_privacy.privacy.analysis import compute_dp_sgd_privacy_lib
 from tensorflow_privacy.privacy.optimizers import dp_optimizer
 import mnist_dpsgd_tutorial_common as common
@@ -56,7 +57,7 @@ def cnn_model_fn(features, labels, mode, params):  # pylint: disable=unused-argu
   scalar_loss = tf.reduce_mean(input_tensor=vector_loss)
 
   # Configure the training op (for TRAIN mode).
-  if mode == tf.estimator.ModeKeys.TRAIN:
+  if mode == tf_estimator.ModeKeys.TRAIN:
     if FLAGS.dpsgd:
       # Use DP version of GradientDescentOptimizer. Other optimizers are
       # available in dp_optimizer. Most optimizers inheriting from
@@ -84,11 +85,11 @@ def cnn_model_fn(features, labels, mode, params):  # pylint: disable=unused-argu
     # the vector_loss because tf.estimator requires a scalar loss. This is only
     # used for evaluation and debugging by tf.estimator. The actual loss being
     # minimized is opt_loss defined above and passed to optimizer.minimize().
-    return tf.estimator.tpu.TPUEstimatorSpec(
+    return tf_estimator.tpu.TPUEstimatorSpec(
        mode=mode, loss=scalar_loss, train_op=train_op)
 
   # Add evaluation metrics (for EVAL mode).
-  elif mode == tf.estimator.ModeKeys.EVAL:
+  elif mode == tf_estimator.ModeKeys.EVAL:
 
     def metric_fn(labels, logits):
       predictions = tf.argmax(logits, 1)
@@ -97,7 +98,7 @@ def cnn_model_fn(features, labels, mode, params):  # pylint: disable=unused-argu
              tf.metrics.accuracy(labels=labels, predictions=predictions),
       }
 
-    return tf.estimator.tpu.TPUEstimatorSpec(
+    return tf_estimator.tpu.TPUEstimatorSpec(
        mode=mode,
        loss=scalar_loss,
        eval_metrics=(metric_fn, {
@@ -112,8 +113,8 @@ def main(unused_argv):
     raise ValueError('Number of microbatches should divide evenly batch_size')
 
   # Instantiate the tf.Estimator.
-  run_config = tf.estimator.tpu.RunConfig(master=FLAGS.master)
-  mnist_classifier = tf.estimator.tpu.TPUEstimator(
+  run_config = tf_estimator.tpu.RunConfig(master=FLAGS.master)
+  mnist_classifier = tf_estimator.tpu.TPUEstimator(
      train_batch_size=FLAGS.batch_size,
      eval_batch_size=FLAGS.batch_size,
      model_fn=cnn_model_fn,
@@ -18,6 +18,8 @@ from absl import flags
 from absl import logging
 import numpy as np
 import tensorflow as tf
+from tensorflow import estimator as tf_estimator
+from tensorflow.compat.v1 import estimator as tf_compat_v1_estimator
 from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp
 from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent
 from tensorflow_privacy.privacy.optimizers import dp_optimizer_vectorized
@@ -80,7 +82,7 @@ def cnn_model_fn(features, labels, mode):
   scalar_loss = tf.reduce_mean(input_tensor=vector_loss)
 
   # Configure the training op (for TRAIN mode).
-  if mode == tf.estimator.ModeKeys.TRAIN:
+  if mode == tf_estimator.ModeKeys.TRAIN:
 
     if FLAGS.dpsgd:
       # Use DP version of GradientDescentOptimizer. Other optimizers are
@@ -102,18 +104,18 @@ def cnn_model_fn(features, labels, mode):
     # the vector_loss because tf.estimator requires a scalar loss. This is only
     # used for evaluation and debugging by tf.estimator. The actual loss being
     # minimized is opt_loss defined above and passed to optimizer.minimize().
-    return tf.estimator.EstimatorSpec(
+    return tf_estimator.EstimatorSpec(
        mode=mode, loss=scalar_loss, train_op=train_op)
 
   # Add evaluation metrics (for EVAL mode).
-  elif mode == tf.estimator.ModeKeys.EVAL:
+  elif mode == tf_estimator.ModeKeys.EVAL:
     eval_metric_ops = {
        'accuracy':
            tf.metrics.accuracy(
                labels=labels, predictions=tf.argmax(input=logits, axis=1))
     }
 
-    return tf.estimator.EstimatorSpec(
+    return tf_estimator.EstimatorSpec(
        mode=mode, loss=scalar_loss, eval_metric_ops=eval_metric_ops)
 
 
@@ -150,17 +152,17 @@ def main(unused_argv):
   train_data, train_labels, test_data, test_labels = load_mnist()
 
   # Instantiate the tf.Estimator.
-  mnist_classifier = tf.estimator.Estimator(
+  mnist_classifier = tf_estimator.Estimator(
      model_fn=cnn_model_fn, model_dir=FLAGS.model_dir)
 
   # Create tf.Estimator input functions for the training and test data.
-  train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  train_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
      x={'x': train_data},
      y=train_labels,
      batch_size=FLAGS.batch_size,
      num_epochs=FLAGS.epochs,
      shuffle=True)
-  eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  eval_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
      x={'x': test_data}, y=test_labels, num_epochs=1, shuffle=False)
 
   # Training loop.
@ -28,6 +28,8 @@ from absl import flags
|
||||||
from absl import logging
|
from absl import logging
|
||||||
import numpy as np
|
import numpy as np
|
||||||
import tensorflow as tf
|
import tensorflow as tf
|
||||||
|
from tensorflow import estimator as tf_estimator
|
||||||
|
from tensorflow.compat.v1 import estimator as tf_compat_v1_estimator
|
||||||
from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp
|
from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp
|
||||||
from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent
|
from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent
|
||||||
from tensorflow_privacy.privacy.optimizers import dp_optimizer
|
from tensorflow_privacy.privacy.optimizers import dp_optimizer
|
||||||
|
@ -65,7 +67,7 @@ def lr_model_fn(features, labels, mode, nclasses, dim):
|
||||||
scalar_loss = tf.reduce_mean(vector_loss)
|
scalar_loss = tf.reduce_mean(vector_loss)
|
||||||
|
|
||||||
# Configure the training op (for TRAIN mode).
|
# Configure the training op (for TRAIN mode).
|
||||||
if mode == tf.estimator.ModeKeys.TRAIN:
|
if mode == tf_estimator.ModeKeys.TRAIN:
|
||||||
if FLAGS.dpsgd:
|
if FLAGS.dpsgd:
|
||||||
# The loss function is L-Lipschitz with L = sqrt(2*(||x||^2 + 1)) where
|
# The loss function is L-Lipschitz with L = sqrt(2*(||x||^2 + 1)) where
|
||||||
# ||x|| is the norm of the data.
|
# ||x|| is the norm of the data.
|
||||||
|
@ -86,17 +88,17 @@ def lr_model_fn(features, labels, mode, nclasses, dim):
|
||||||
# the vector_loss because tf.estimator requires a scalar loss. This is only
|
# the vector_loss because tf.estimator requires a scalar loss. This is only
|
||||||
# used for evaluation and debugging by tf.estimator. The actual loss being
|
# used for evaluation and debugging by tf.estimator. The actual loss being
|
||||||
# minimized is opt_loss defined above and passed to optimizer.minimize().
|
# minimized is opt_loss defined above and passed to optimizer.minimize().
|
||||||
return tf.estimator.EstimatorSpec(
|
return tf_estimator.EstimatorSpec(
|
||||||
mode=mode, loss=scalar_loss, train_op=train_op)
|
mode=mode, loss=scalar_loss, train_op=train_op)
|
||||||
|
|
||||||
# Add evaluation metrics (for EVAL mode).
|
# Add evaluation metrics (for EVAL mode).
|
||||||
elif mode == tf.estimator.ModeKeys.EVAL:
|
elif mode == tf_estimator.ModeKeys.EVAL:
|
||||||
eval_metric_ops = {
|
eval_metric_ops = {
|
||||||
'accuracy':
|
'accuracy':
|
||||||
tf.metrics.accuracy(
|
tf.metrics.accuracy(
|
||||||
labels=labels, predictions=tf.argmax(input=logits, axis=1))
|
labels=labels, predictions=tf.argmax(input=logits, axis=1))
|
||||||
}
|
}
|
||||||
return tf.estimator.EstimatorSpec(
|
return tf_estimator.EstimatorSpec(
|
||||||
mode=mode, loss=scalar_loss, eval_metric_ops=eval_metric_ops)
|
mode=mode, loss=scalar_loss, eval_metric_ops=eval_metric_ops)
|
||||||
|
|
||||||
|
|
||||||
|
@@ -199,19 +201,19 @@ def main(unused_argv):
   # pylint: disable=g-long-lambda
   model_fn = lambda features, labels, mode: lr_model_fn(
       features, labels, mode, nclasses=10, dim=train_data.shape[1:])
-  mnist_classifier = tf.estimator.Estimator(
+  mnist_classifier = tf_estimator.Estimator(
       model_fn=model_fn, model_dir=FLAGS.model_dir)

   # Create tf.Estimator input functions for the training and test data.
   # To analyze the per-user privacy loss, we keep the same orders of samples in
   # each epoch by setting shuffle=False.
-  train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  train_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
       x={'x': train_data},
       y=train_labels,
       batch_size=FLAGS.batch_size,
       num_epochs=FLAGS.epochs,
       shuffle=False)
-  eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  eval_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
       x={'x': test_data}, y=test_labels, num_epochs=1, shuffle=False)

   # Train the model.
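The shuffle=False setting above is the load-bearing detail for the per-user analysis: with a fixed order, example i always lands in batch i // batch_size, so each user's participation pattern is identical across epochs. A minimal illustration with hypothetical sizes (not from the diff):

    import numpy as np
    batch_size = 256                                # stand-in for FLAGS.batch_size
    n_examples = 60000                              # hypothetical dataset size
    batch_of = np.arange(n_examples) // batch_size  # the same mapping in every epoch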
@@ -21,6 +21,8 @@ import pandas as pd
 from scipy import stats
 from sklearn.model_selection import train_test_split
 import tensorflow as tf
+from tensorflow import estimator as tf_estimator
+from tensorflow.compat.v1 import estimator as tf_compat_v1_estimator
 from tensorflow_privacy.privacy.analysis.gdp_accountant import compute_eps_poisson
 from tensorflow_privacy.privacy.analysis.gdp_accountant import compute_mu_poisson
 from tensorflow_privacy.privacy.optimizers import dp_optimizer
@@ -87,7 +89,7 @@ def nn_model_fn(features, labels, mode):
   scalar_loss = tf.reduce_mean(vector_loss)

   # Configure the training op (for TRAIN mode).
-  if mode == tf.estimator.ModeKeys.TRAIN:
+  if mode == tf_estimator.ModeKeys.TRAIN:
     if FLAGS.dpsgd:
       # Use DP version of GradientDescentOptimizer. Other optimizers are
       # available in dp_optimizer. Most optimizers inheriting from
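The comment above is truncated by the hunk boundary. For orientation, the dpsgd branch in these tutorials typically instantiates the optimizer along these lines (a sketch based on the public dp_optimizer API; the exact flag names and constructor signature may differ by version):

    optimizer = dp_optimizer.DPGradientDescentGaussianOptimizer(
        l2_norm_clip=FLAGS.l2_norm_clip,          # assumed flag names
        noise_multiplier=FLAGS.noise_multiplier,
        num_microbatches=microbatches,
        learning_rate=FLAGS.learning_rate)
    opt_loss = vector_loss  # DP optimizers clip per example, so they take the vector loss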
@@ -110,11 +112,11 @@ def nn_model_fn(features, labels, mode):
     # the vector_loss because tf.estimator requires a scalar loss. This is only
     # used for evaluation and debugging by tf.estimator. The actual loss being
     # minimized is opt_loss defined above and passed to optimizer.minimize().
-    return tf.estimator.EstimatorSpec(
+    return tf_estimator.EstimatorSpec(
         mode=mode, loss=scalar_loss, train_op=train_op)

   # Add evaluation metrics (for EVAL mode).
-  if mode == tf.estimator.ModeKeys.EVAL:
+  if mode == tf_estimator.ModeKeys.EVAL:
     eval_metric_ops = {
         'rmse':
             tf.compat.v1.metrics.root_mean_squared_error(
@@ -124,7 +126,7 @@ def nn_model_fn(features, labels, mode):
                     b=tf.constant(np.array([0, 1, 2, 3, 4]), dtype=tf.float32),
                     axes=1))
     }
-    return tf.estimator.EstimatorSpec(
+    return tf_estimator.EstimatorSpec(
         mode=mode, loss=scalar_loss, eval_metric_ops=eval_metric_ops)
   return None
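The tensordot with b = [0, 1, 2, 3, 4] above collapses the five class probabilities into an expected rating before the RMSE is computed. A NumPy sketch of that reduction (the array values are made up):

    import numpy as np
    probs = np.array([[0.1, 0.2, 0.4, 0.2, 0.1]])                  # hypothetical softmax output
    expected_rating = np.tensordot(probs, np.arange(5.0), axes=1)  # -> [2.0]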
@@ -161,11 +163,11 @@ def main(unused_argv):
   train_data, test_data, _ = load_movielens()

   # Instantiate the tf.Estimator.
-  ml_classifier = tf.estimator.Estimator(
+  ml_classifier = tf_estimator.Estimator(
       model_fn=nn_model_fn, model_dir=FLAGS.model_dir)

   # Create tf.Estimator input functions for the training and test data.
-  eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  eval_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
       x={
           'user': test_data[:, 0],
           'movie': test_data[:, 4]
@@ -185,7 +187,7 @@ def main(unused_argv):
     global microbatches
     microbatches = len(subsampling)

-    train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+    train_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
         x={
             'user': train_data[subsampling, 0],
             'movie': train_data[subsampling, 4]
@@ -7,4 +7,8 @@ py_binary(
     srcs = ["mnist_scratch.py"],
     python_version = "PY3",
     srcs_version = "PY3",
+    deps = [
+        "//third_party/py/tensorflow:tensorflow_compat_v1_estimator",
+        "//third_party/py/tensorflow:tensorflow_estimator",
+    ],
 )
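The two deps mirror the two new imports; any target whose sources use the explicit aliases presumably needs both. A sketch of the pattern for another such target (target and file names invented for illustration):

    py_binary(
        name = "lr_tutorial",  # hypothetical target
        srcs = ["lr_tutorial.py"],
        python_version = "PY3",
        srcs_version = "PY3",
        deps = [
            "//third_party/py/tensorflow:tensorflow_compat_v1_estimator",
            "//third_party/py/tensorflow:tensorflow_estimator",
        ],
    )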
@@ -16,6 +16,8 @@
 from absl import logging
 import numpy as np
 import tensorflow as tf
+from tensorflow import estimator as tf_estimator
+from tensorflow.compat.v1 import estimator as tf_compat_v1_estimator

 tf.flags.DEFINE_float('learning_rate', .15, 'Learning rate for training')
 tf.flags.DEFINE_integer('batch_size', 256, 'Batch size')
@@ -45,22 +47,22 @@ def cnn_model_fn(features, labels, mode):
   scalar_loss = tf.reduce_mean(vector_loss)

   # Configure the training op (for TRAIN mode).
-  if mode == tf.estimator.ModeKeys.TRAIN:
+  if mode == tf_estimator.ModeKeys.TRAIN:
     optimizer = tf.compat.v1.train.GradientDescentOptimizer(FLAGS.learning_rate)
     opt_loss = scalar_loss
     global_step = tf.compat.v1.train.get_global_step()
     train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
-    return tf.estimator.EstimatorSpec(
+    return tf_estimator.EstimatorSpec(
         mode=mode, loss=scalar_loss, train_op=train_op)

   # Add evaluation metrics (for EVAL mode).
-  elif mode == tf.estimator.ModeKeys.EVAL:
+  elif mode == tf_estimator.ModeKeys.EVAL:
     eval_metric_ops = {
         'accuracy':
             tf.metrics.accuracy(
                 labels=labels, predictions=tf.argmax(input=logits, axis=1))
     }
-    return tf.estimator.EstimatorSpec(
+    return tf_estimator.EstimatorSpec(
         mode=mode, loss=scalar_loss, eval_metric_ops=eval_metric_ops)
@@ -94,16 +96,16 @@ def main(unused_argv):
   train_data, train_labels, test_data, test_labels = load_mnist()

   # Instantiate the tf.Estimator.
-  mnist_classifier = tf.estimator.Estimator(model_fn=cnn_model_fn)
+  mnist_classifier = tf_estimator.Estimator(model_fn=cnn_model_fn)

   # Create tf.Estimator input functions for the training and test data.
-  train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  train_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
       x={'x': train_data},
       y=train_labels,
       batch_size=FLAGS.batch_size,
       num_epochs=FLAGS.epochs,
       shuffle=True)
-  eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
+  eval_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
       x={'x': test_data}, y=test_labels, num_epochs=1, shuffle=False)

   # Training loop.
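The loop body itself sits outside this hunk. A minimal driver consistent with the estimator and input functions above might look like this (the 60000 in the steps arithmetic assumes the standard MNIST training set; not part of the diff):

    steps_per_epoch = 60000 // FLAGS.batch_size
    for epoch in range(1, FLAGS.epochs + 1):
      mnist_classifier.train(input_fn=train_input_fn, steps=steps_per_epoch)
      eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
      print('Epoch %d test accuracy: %.4f' % (epoch, eval_results['accuracy']))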