forked from 626_privacy/tensorflow_privacy
Migrate lm_dpsgd_tutorial from deprecated TFP rdp_accountant to Google DP.
PiperOrigin-RevId: 446846972
parent 65eadd3a02 · commit afe676135e · 2 changed files with 15 additions and 9 deletions
BUILD

@@ -105,10 +105,11 @@ py_binary(
     python_version = "PY3",
     srcs_version = "PY3",
     deps = [
-        "//tensorflow_privacy/privacy/analysis:rdp_accountant",
         "//tensorflow_privacy/privacy/optimizers:dp_optimizer",
         "//third_party/py/tensorflow:tensorflow_compat_v1_estimator",
         "//third_party/py/tensorflow:tensorflow_estimator",
+        "@com_google_differential_py//python/dp_accounting:dp_event",
+        "@com_google_differential_py//python/dp_accounting/rdp:rdp_privacy_accountant",
     ],
 )
lm_dpsgd_tutorial.py

@@ -40,9 +40,11 @@ import tensorflow as tf
 from tensorflow import estimator as tf_estimator
 from tensorflow.compat.v1 import estimator as tf_compat_v1_estimator
 import tensorflow_datasets as tfds
-from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp
-from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent
 from tensorflow_privacy.privacy.optimizers import dp_optimizer
+from com_google_differential_py.python.dp_accounting import dp_event
+from com_google_differential_py.python.dp_accounting.rdp import rdp_privacy_accountant


 flags.DEFINE_boolean(
     'dpsgd', True, 'If True, train with DP-SGD. If False, '
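The `com_google_differential_py` paths above are Bazel workspace imports that only resolve inside Google's build. For readers reproducing this outside Bazel, the same modules ship in the `dp-accounting` package on PyPI; a hedged equivalent of the two new imports (package name and module layout assumed from the open-source release of Google's differential-privacy library):

    # Assumed pip-installable equivalents of the Bazel-path imports above
    # (pip install dp-accounting); not part of the commit itself.
    from dp_accounting import dp_event
    from dp_accounting.rdp import rdp_privacy_accountant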
@@ -150,13 +152,16 @@ def compute_epsilon(steps):
     return float('inf')
   orders = [1 + x / 10. for x in range(1, 100)] + list(range(12, 64))
   sampling_probability = FLAGS.batch_size / NB_TRAIN
-  rdp = compute_rdp(
-      q=sampling_probability,
-      noise_multiplier=FLAGS.noise_multiplier,
-      steps=steps,
-      orders=orders)
+  accountant = rdp_privacy_accountant.RdpAccountant(orders)
+  event = dp_event.SelfComposedDpEvent(
+      dp_event.PoissonSampledDpEvent(
+          sampling_probability,
+          dp_event.GaussianDpEvent(FLAGS.noise_multiplier)), steps)
+  accountant.compose(event)
+
   # Delta is set to 1e-5 because Penn TreeBank has 60000 training points.
-  return get_privacy_spent(orders, rdp, target_delta=1e-5)[0]
+  return accountant.get_epsilon(target_delta=1e-5)


 def main(unused_argv):
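Pulled out of the diff, the new accounting flow is: describe one training step as a Gaussian mechanism applied to a Poisson-subsampled batch, self-compose that event over all steps, feed it to an RDP accountant, and convert to epsilon at a fixed delta. A minimal runnable sketch under the pip-style imports above, with the tutorial's `FLAGS` and `NB_TRAIN` globals replaced by ordinary parameters (the parameter names are illustrative, not from the commit):

    from dp_accounting import dp_event
    from dp_accounting.rdp import rdp_privacy_accountant


    def compute_epsilon(steps, batch_size, num_examples, noise_multiplier,
                        target_delta=1e-5):
      """Sketch of the migrated compute_epsilon from the tutorial."""
      if noise_multiplier == 0.0:
        return float('inf')  # No noise means no meaningful DP guarantee.
      # RDP orders to track; same grid as the tutorial.
      orders = [1 + x / 10. for x in range(1, 100)] + list(range(12, 64))
      sampling_probability = batch_size / num_examples

      accountant = rdp_privacy_accountant.RdpAccountant(orders)
      # One step = Gaussian mechanism on a Poisson-subsampled batch;
      # SelfComposedDpEvent repeats that event once per training step.
      event = dp_event.SelfComposedDpEvent(
          dp_event.PoissonSampledDpEvent(
              sampling_probability,
              dp_event.GaussianDpEvent(noise_multiplier)), steps)
      accountant.compose(event)
      return accountant.get_epsilon(target_delta=target_delta)

For example, `compute_epsilon(steps=10000, batch_size=256, num_examples=60000, noise_multiplier=1.1)` returns the epsilon spent after 10000 steps at delta = 1e-5, the delta the tutorial justifies by Penn TreeBank's 60000 training points; the other numbers here are illustrative. The event-based API separates what mechanism ran (the DpEvent tree) from how it is accounted (the RdpAccountant), which is why the deprecated compute_rdp/get_privacy_spent pair collapses into compose plus get_epsilon.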