Use DP library's calibrate_dp_mechanism function to compute noise multiplier.

calibrate_dp_mechanism is among dp_accounting's top-level symbols, so after this change we can replace all of the dependencies on DP accounting subpackages with a single dependency on dp_accounting.

Tested by verifying that the resulting noise multiplier is the same before and after the change when running all tests in multinomial_logistic_test.

PiperOrigin-RevId: 458235745
This commit is contained in:
Galen Andrew 2022-06-30 08:47:23 -07:00 committed by A. Unique TensorFlower
parent 9383743252
commit 77d962e0fc
2 changed files with 21 additions and 11 deletions

View file

@ -11,9 +11,10 @@ py_library(
deps = [
":datasets",
":single_layer_softmax",
"//tensorflow_privacy/privacy/analysis:compute_dp_sgd_privacy_lib",
"//tensorflow_privacy/privacy/optimizers:dp_optimizer_keras",
"@com_google_differential_py//python/dp_accounting:common",
"@com_google_differential_py//python/dp_accounting:dp_event",
"@com_google_differential_py//python/dp_accounting:mechanism_calibration",
"@com_google_differential_py//python/dp_accounting/rdp:rdp_privacy_accountant",
],
)

View file

@ -30,12 +30,13 @@ from typing import List, Optional, Tuple
import numpy as np
import tensorflow as tf
from tensorflow_privacy.privacy.analysis import compute_dp_sgd_privacy_lib
from tensorflow_privacy.privacy.logistic_regression import datasets
from tensorflow_privacy.privacy.logistic_regression import single_layer_softmax
from tensorflow_privacy.privacy.optimizers import dp_optimizer_keras
from com_google_differential_py.python.dp_accounting import common
from com_google_differential_py.python.dp_accounting import dp_event
from com_google_differential_py.python.dp_accounting import mechanism_calibration
from com_google_differential_py.python.dp_accounting.rdp import rdp_privacy_accountant
@tf.keras.utils.register_keras_serializable(package='Custom', name='Kifer')
@ -170,15 +171,23 @@ def compute_dpsgd_noise_multiplier(num_train: int,
the given tolerance) for which using DPKerasAdamOptimizer will result in an
(epsilon, delta)-differentially private trained model.
"""
search_parameters = common.BinarySearchParameters(
lower_bound=0, upper_bound=math.inf, initial_guess=1, tolerance=tolerance)
orders = ([1.25, 1.5, 1.75, 2., 2.25, 2.5, 3., 3.5, 4., 4.5] +
list(range(5, 64)) + [128, 256, 512])
steps = int(math.ceil(epochs * num_train / batch_size))
def _func(x):
result = compute_dp_sgd_privacy_lib.compute_dp_sgd_privacy(
num_train, batch_size, x, epochs, delta)
return result[0]
def make_event_from_param(noise_multiplier):
return dp_event.SelfComposedDpEvent(
dp_event.PoissonSampledDpEvent(
sampling_probability=batch_size / num_train,
event=dp_event.GaussianDpEvent(noise_multiplier)), steps)
return common.inverse_monotone_function(_func, epsilon, search_parameters)
return mechanism_calibration.calibrate_dp_mechanism(
lambda: rdp_privacy_accountant.RdpAccountant(orders),
make_event_from_param,
epsilon,
delta,
mechanism_calibration.LowerEndpointAndGuess(0, 1),
tol=tolerance)
def logistic_dpsgd(train_dataset: datasets.RegressionDataset,