From afe676135eccaf91a2ceb9d1f1d36afb979c081b Mon Sep 17 00:00:00 2001
From: Galen Andrew
Date: Thu, 5 May 2022 16:30:57 -0700
Subject: [PATCH] Migrate lm_dpsgd_tutorial from deprecated TFP rdp_accountant
 to Google DP.

PiperOrigin-RevId: 446846972
---
 tutorials/BUILD                |  3 ++-
 tutorials/lm_dpsgd_tutorial.py | 21 +++++++++++++--------
 2 files changed, 15 insertions(+), 9 deletions(-)

diff --git a/tutorials/BUILD b/tutorials/BUILD
index 8c0bb1a..a732962 100644
--- a/tutorials/BUILD
+++ b/tutorials/BUILD
@@ -105,10 +105,11 @@ py_binary(
     python_version = "PY3",
     srcs_version = "PY3",
     deps = [
-        "//tensorflow_privacy/privacy/analysis:rdp_accountant",
         "//tensorflow_privacy/privacy/optimizers:dp_optimizer",
         "//third_party/py/tensorflow:tensorflow_compat_v1_estimator",
         "//third_party/py/tensorflow:tensorflow_estimator",
+        "@com_google_differential_py//python/dp_accounting:dp_event",
+        "@com_google_differential_py//python/dp_accounting/rdp:rdp_privacy_accountant",
     ],
 )

diff --git a/tutorials/lm_dpsgd_tutorial.py b/tutorials/lm_dpsgd_tutorial.py
index 038b42b..10448f0 100644
--- a/tutorials/lm_dpsgd_tutorial.py
+++ b/tutorials/lm_dpsgd_tutorial.py
@@ -40,9 +40,11 @@ import tensorflow as tf
 from tensorflow import estimator as tf_estimator
 from tensorflow.compat.v1 import estimator as tf_compat_v1_estimator
 import tensorflow_datasets as tfds
-from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp
-from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent
+
 from tensorflow_privacy.privacy.optimizers import dp_optimizer
+from com_google_differential_py.python.dp_accounting import dp_event
+from com_google_differential_py.python.dp_accounting.rdp import rdp_privacy_accountant
+

 flags.DEFINE_boolean(
     'dpsgd', True, 'If True, train with DP-SGD. If False, '
@@ -150,13 +152,16 @@ def compute_epsilon(steps):
     return float('inf')
   orders = [1 + x / 10. for x in range(1, 100)] + list(range(12, 64))
   sampling_probability = FLAGS.batch_size / NB_TRAIN
-  rdp = compute_rdp(
-      q=sampling_probability,
-      noise_multiplier=FLAGS.noise_multiplier,
-      steps=steps,
-      orders=orders)
+
+  accountant = rdp_privacy_accountant.RdpAccountant(orders)
+  event = dp_event.SelfComposedDpEvent(
+      dp_event.PoissonSampledDpEvent(
+          sampling_probability,
+          dp_event.GaussianDpEvent(FLAGS.noise_multiplier)), steps)
+  accountant.compose(event)
+
   # Delta is set to 1e-5 because Penn TreeBank has 60000 training points.
-  return get_privacy_spent(orders, rdp, target_delta=1e-5)[0]
+  return accountant.get_epsilon(target_delta=1e-5)


 def main(unused_argv):
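
For reference, a minimal standalone sketch of the new accounting call path used in
compute_epsilon above, assuming the pip-installed `dp-accounting` package (whose
`dp_accounting` modules mirror the `com_google_differential_py` imports in the patch)
and illustrative values in place of the tutorial's flags:

    # Sketch only: standalone use of the dp_accounting RDP accountant.
    # Assumes the pip package `dp-accounting`; batch size, noise multiplier and
    # step count below are illustrative, not taken from the tutorial's flags.
    from dp_accounting import dp_event
    from dp_accounting.rdp import rdp_privacy_accountant

    orders = [1 + x / 10. for x in range(1, 100)] + list(range(12, 64))
    sampling_probability = 256 / 60000  # assumed batch_size / training set size
    noise_multiplier = 1.1              # assumed
    steps = 10000                       # assumed

    # One DP-SGD step is Poisson subsampling followed by a Gaussian mechanism;
    # SelfComposedDpEvent accounts for repeating that step `steps` times.
    accountant = rdp_privacy_accountant.RdpAccountant(orders)
    accountant.compose(
        dp_event.SelfComposedDpEvent(
            dp_event.PoissonSampledDpEvent(
                sampling_probability, dp_event.GaussianDpEvent(noise_multiplier)),
            steps))
    print('epsilon at delta=1e-5:', accountant.get_epsilon(target_delta=1e-5))

An equivalent formulation passes the per-step event directly to
`accountant.compose(step_event, steps)`, since `compose` accepts a repetition count.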