diff --git a/tensorflow_privacy/privacy/analysis/BUILD b/tensorflow_privacy/privacy/analysis/BUILD
index 6e72cb5..be92179 100644
--- a/tensorflow_privacy/privacy/analysis/BUILD
+++ b/tensorflow_privacy/privacy/analysis/BUILD
@@ -13,7 +13,10 @@ py_library(
     name = "compute_dp_sgd_privacy_lib",
     srcs = ["compute_dp_sgd_privacy_lib.py"],
     srcs_version = "PY3",
-    deps = [":rdp_accountant"],
+    deps = [
+        "@com_google_differential_py//python/dp_accounting:dp_event",
+        "@com_google_differential_py//python/dp_accounting/rdp:rdp_privacy_accountant",
+    ],
 )
 
 py_binary(
diff --git a/tensorflow_privacy/privacy/analysis/compute_dp_sgd_privacy_lib.py b/tensorflow_privacy/privacy/analysis/compute_dp_sgd_privacy_lib.py
index 3d8752e..f884a32 100644
--- a/tensorflow_privacy/privacy/analysis/compute_dp_sgd_privacy_lib.py
+++ b/tensorflow_privacy/privacy/analysis/compute_dp_sgd_privacy_lib.py
@@ -17,20 +17,22 @@
 import math
 
 from absl import app
 
-from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp  # pylint: disable=g-import-not-at-top
-from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent
+
+from com_google_differential_py.python.dp_accounting import dp_event
+from com_google_differential_py.python.dp_accounting.rdp import rdp_privacy_accountant
 
 
 def apply_dp_sgd_analysis(q, sigma, steps, orders, delta):
   """Compute and print results of DP-SGD analysis."""
-  # compute_rdp requires that sigma be the ratio of the standard deviation of
-  # the Gaussian noise to the l2-sensitivity of the function to which it is
-  # added. Hence, sigma here corresponds to the `noise_multiplier` parameter
-  # in the DP-SGD implementation found in privacy.optimizers.dp_optimizer
-  rdp = compute_rdp(q, sigma, steps, orders)
+  accountant = rdp_privacy_accountant.RdpAccountant(orders)
 
-  eps, _, opt_order = get_privacy_spent(orders, rdp, target_delta=delta)
+  event = dp_event.SelfComposedDpEvent(
+      dp_event.PoissonSampledDpEvent(q, dp_event.GaussianDpEvent(sigma)), steps)
+
+  accountant.compose(event)
+
+  eps, opt_order = accountant.get_epsilon_and_optimal_order(delta)
 
   print(
       'DP-SGD with sampling rate = {:.3g}% and noise_multiplier = {} iterated'
diff --git a/tutorials/BUILD b/tutorials/BUILD
index 4ab0c98..e398d3b 100644
--- a/tutorials/BUILD
+++ b/tutorials/BUILD
@@ -27,8 +27,9 @@ py_binary(
     python_version = "PY3",
     srcs_version = "PY3",
     deps = [
-        "//tensorflow_privacy/privacy/analysis:rdp_accountant",
         "//tensorflow_privacy/privacy/optimizers:dp_optimizer",
+        "@com_google_differential_py//python/dp_accounting:dp_event",
+        "@com_google_differential_py//python/dp_accounting/rdp:rdp_privacy_accountant",
     ],
 )
 
@@ -38,8 +39,9 @@ py_binary(
     python_version = "PY3",
     srcs_version = "PY3",
     deps = [
-        "//tensorflow_privacy/privacy/analysis:rdp_accountant",
         "//tensorflow_privacy/privacy/optimizers:dp_optimizer_keras",
+        "@com_google_differential_py//python/dp_accounting:dp_event",
+        "@com_google_differential_py//python/dp_accounting/rdp:rdp_privacy_accountant",
     ],
 )
 
@@ -49,8 +51,9 @@ py_binary(
     python_version = "PY3",
     srcs_version = "PY3",
     deps = [
-        "//tensorflow_privacy/privacy/analysis:rdp_accountant",
         "//tensorflow_privacy/privacy/keras_models:dp_keras_model",
+        "@com_google_differential_py//python/dp_accounting:dp_event",
+        "@com_google_differential_py//python/dp_accounting/rdp:rdp_privacy_accountant",
     ],
 )
 
@@ -60,10 +63,11 @@ py_binary(
     python_version = "PY3",
     srcs_version = "PY3",
    deps = [
-        "//tensorflow_privacy/privacy/analysis:rdp_accountant",
         "//tensorflow_privacy/privacy/optimizers:dp_optimizer_vectorized",
         "//third_party/py/tensorflow:tensorflow_compat_v1_estimator",
         "//third_party/py/tensorflow:tensorflow_estimator",
+        "@com_google_differential_py//python/dp_accounting:dp_event",
+        "@com_google_differential_py//python/dp_accounting/rdp:rdp_privacy_accountant",
     ],
 )
 
diff --git a/tutorials/mnist_dpsgd_tutorial_eager.py b/tutorials/mnist_dpsgd_tutorial_eager.py
index 07ff3e8..9d306d7 100644
--- a/tutorials/mnist_dpsgd_tutorial_eager.py
+++ b/tutorials/mnist_dpsgd_tutorial_eager.py
@@ -17,11 +17,12 @@ from absl import app
 from absl import flags
 import numpy as np
 import tensorflow as tf
-
-from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp
-from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent
 from tensorflow_privacy.privacy.optimizers.dp_optimizer import DPGradientDescentGaussianOptimizer
 
+from com_google_differential_py.python.dp_accounting import dp_event
+from com_google_differential_py.python.dp_accounting.rdp import rdp_privacy_accountant
+
+
 GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
 
 tf.compat.v1.enable_eager_execution()
@@ -46,14 +47,18 @@ def compute_epsilon(steps):
   if FLAGS.noise_multiplier == 0.0:
     return float('inf')
   orders = [1 + x / 10. for x in range(1, 100)] + list(range(12, 64))
+  accountant = rdp_privacy_accountant.RdpAccountant(orders)
+
   sampling_probability = FLAGS.batch_size / 60000
-  rdp = compute_rdp(
-      q=sampling_probability,
-      noise_multiplier=FLAGS.noise_multiplier,
-      steps=steps,
-      orders=orders)
+  event = dp_event.SelfComposedDpEvent(
+      dp_event.PoissonSampledDpEvent(
+          sampling_probability,
+          dp_event.GaussianDpEvent(FLAGS.noise_multiplier)), steps)
+
+  accountant.compose(event)
+
   # Delta is set to 1e-5 because MNIST has 60000 training points.
-  return get_privacy_spent(orders, rdp, target_delta=1e-5)[0]
+  return accountant.get_epsilon(target_delta=1e-5)
 
 
 def main(_):
diff --git a/tutorials/mnist_dpsgd_tutorial_keras.py b/tutorials/mnist_dpsgd_tutorial_keras.py
index 8c63c82..2d57bcd 100644
--- a/tutorials/mnist_dpsgd_tutorial_keras.py
+++ b/tutorials/mnist_dpsgd_tutorial_keras.py
@@ -20,10 +20,11 @@
 from absl import logging
 import numpy as np
 import tensorflow as tf
-from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp
-from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent
 from tensorflow_privacy.privacy.optimizers.dp_optimizer_keras import DPKerasSGDOptimizer
 
+from com_google_differential_py.python.dp_accounting import dp_event
+from com_google_differential_py.python.dp_accounting.rdp import rdp_privacy_accountant
+
 flags.DEFINE_boolean(
     'dpsgd', True, 'If True, train with DP-SGD. If False, '
     'train with vanilla SGD.')
@@ -46,14 +47,18 @@ def compute_epsilon(steps):
   if FLAGS.noise_multiplier == 0.0:
     return float('inf')
   orders = [1 + x / 10. for x in range(1, 100)] + list(range(12, 64))
+  accountant = rdp_privacy_accountant.RdpAccountant(orders)
+
   sampling_probability = FLAGS.batch_size / 60000
-  rdp = compute_rdp(
-      q=sampling_probability,
-      noise_multiplier=FLAGS.noise_multiplier,
-      steps=steps,
-      orders=orders)
+  event = dp_event.SelfComposedDpEvent(
+      dp_event.PoissonSampledDpEvent(
+          sampling_probability,
+          dp_event.GaussianDpEvent(FLAGS.noise_multiplier)), steps)
+
+  accountant.compose(event)
+
   # Delta is set to 1e-5 because MNIST has 60000 training points.
-  return get_privacy_spent(orders, rdp, target_delta=1e-5)[0]
+  return accountant.get_epsilon(target_delta=1e-5)
 
 
 def load_mnist():
diff --git a/tutorials/mnist_dpsgd_tutorial_keras_model.py b/tutorials/mnist_dpsgd_tutorial_keras_model.py
index 4cbfe1b..0212f6b 100644
--- a/tutorials/mnist_dpsgd_tutorial_keras_model.py
+++ b/tutorials/mnist_dpsgd_tutorial_keras_model.py
@@ -19,10 +19,11 @@
 from absl import logging
 import numpy as np
 import tensorflow as tf
-from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp
-from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent
 from tensorflow_privacy.privacy.keras_models.dp_keras_model import DPSequential
 
+from com_google_differential_py.python.dp_accounting import dp_event
+from com_google_differential_py.python.dp_accounting.rdp import rdp_privacy_accountant
+
 flags.DEFINE_boolean(
     'dpsgd', True, 'If True, train with DP-SGD. If False, '
     'train with vanilla SGD.')
@@ -45,14 +46,18 @@ def compute_epsilon(steps):
   if FLAGS.noise_multiplier == 0.0:
     return float('inf')
   orders = [1 + x / 10. for x in range(1, 100)] + list(range(12, 64))
+  accountant = rdp_privacy_accountant.RdpAccountant(orders)
+
   sampling_probability = FLAGS.batch_size / 60000
-  rdp = compute_rdp(
-      q=sampling_probability,
-      noise_multiplier=FLAGS.noise_multiplier,
-      steps=steps,
-      orders=orders)
+  event = dp_event.SelfComposedDpEvent(
+      dp_event.PoissonSampledDpEvent(
+          sampling_probability,
+          dp_event.GaussianDpEvent(FLAGS.noise_multiplier)), steps)
+
+  accountant.compose(event)
+
   # Delta is set to 1e-5 because MNIST has 60000 training points.
-  return get_privacy_spent(orders, rdp, target_delta=1e-5)[0]
+  return accountant.get_epsilon(target_delta=1e-5)
 
 
 def load_mnist():
diff --git a/tutorials/mnist_dpsgd_tutorial_vectorized.py b/tutorials/mnist_dpsgd_tutorial_vectorized.py
index cc15d82..142d279 100644
--- a/tutorials/mnist_dpsgd_tutorial_vectorized.py
+++ b/tutorials/mnist_dpsgd_tutorial_vectorized.py
@@ -20,10 +20,11 @@ import numpy as np
 import tensorflow as tf
 from tensorflow import estimator as tf_estimator
 from tensorflow.compat.v1 import estimator as tf_compat_v1_estimator
-from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp
-from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent
 from tensorflow_privacy.privacy.optimizers import dp_optimizer_vectorized
 
+from com_google_differential_py.python.dp_accounting import dp_event
+from com_google_differential_py.python.dp_accounting.rdp import rdp_privacy_accountant
+
 flags.DEFINE_boolean(
     'dpsgd', True, 'If True, train with DP-SGD. If False, '
     'train with vanilla SGD.')
@@ -50,14 +51,18 @@ def compute_epsilon(steps):
   if FLAGS.noise_multiplier == 0.0:
     return float('inf')
   orders = [1 + x / 10. for x in range(1, 100)] + list(range(12, 64))
-  sampling_probability = FLAGS.batch_size / NUM_TRAIN_EXAMPLES
-  rdp = compute_rdp(
-      q=sampling_probability,
-      noise_multiplier=FLAGS.noise_multiplier,
-      steps=steps,
-      orders=orders)
-  # Delta is set to approximate 1 / (number of training points).
-  return get_privacy_spent(orders, rdp, target_delta=1e-5)[0]
+  accountant = rdp_privacy_accountant.RdpAccountant(orders)
+
+  sampling_probability = FLAGS.batch_size / 60000
+  event = dp_event.SelfComposedDpEvent(
+      dp_event.PoissonSampledDpEvent(
+          sampling_probability,
+          dp_event.GaussianDpEvent(FLAGS.noise_multiplier)), steps)
+
+  accountant.compose(event)
+
+  # Delta is set to 1e-5 because MNIST has 60000 training points.
+  return accountant.get_epsilon(target_delta=1e-5)
 
 
 def cnn_model_fn(features, labels, mode):
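Usage note (not part of the patch): every hunk above applies the same replacement pattern, so a consolidated sketch may help when porting other callers. The package paths and accountant/event calls below are exactly the ones introduced by the patch; the standalone helper and its parameters (sampling_probability, steps, delta) are illustrative only.

    from com_google_differential_py.python.dp_accounting import dp_event
    from com_google_differential_py.python.dp_accounting.rdp import rdp_privacy_accountant


    def compute_epsilon(noise_multiplier, sampling_probability, steps, delta):
      """Sketch: epsilon spent by `steps` DP-SGD iterations, via RDP accounting."""
      if noise_multiplier == 0.0:
        return float('inf')  # Without noise there is no finite DP guarantee.
      orders = [1 + x / 10. for x in range(1, 100)] + list(range(12, 64))
      accountant = rdp_privacy_accountant.RdpAccountant(orders)

      # One DP-SGD step is Poisson subsampling followed by Gaussian noise with
      # the given noise multiplier; a full run is `steps` self-compositions.
      event = dp_event.SelfComposedDpEvent(
          dp_event.PoissonSampledDpEvent(
              sampling_probability,
              dp_event.GaussianDpEvent(noise_multiplier)), steps)

      accountant.compose(event)
      return accountant.get_epsilon(target_delta=delta)

As in the patched compute_dp_sgd_privacy_lib.py, accountant.get_epsilon_and_optimal_order(delta) can be used instead when the RDP order that gives the tightest epsilon is also wanted.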