forked from 626_privacy/tensorflow_privacy
Minor fix to tutorials.
PiperOrigin-RevId: 463145196
parent d16f020329
commit 44dc40454b
5 changed files with 27 additions and 23 deletions
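This change makes three mechanical updates across the tutorials, all visible in the hunks below: deprecated `Layer.apply(...)` invocations become direct Keras calls `layer(...)`, TF1-only symbols (`tf.metrics.accuracy`, `tf.losses.get_regularization_loss`) move under the `tf.compat.v1` namespace, and one call site adapts to `dp_accounting.rdp.compute_epsilon` returning a tuple.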
@@ -73,8 +73,8 @@ def rnn_model_fn(features, labels, mode):  # pylint: disable=unused-argument
   x = tf.reshape(x, [-1, SEQ_LEN])
   input_layer = x[:, :-1]
   input_one_hot = tf.one_hot(input_layer, 256)
-  lstm = tf.keras.layers.LSTM(256, return_sequences=True).apply(input_one_hot)
-  logits = tf.keras.layers.Dense(256).apply(lstm)
+  lstm = tf.keras.layers.LSTM(256, return_sequences=True)(input_one_hot)
+  logits = tf.keras.layers.Dense(256)(lstm)

   # Calculate loss as a vector (to support microbatches in DP-SGD).
   vector_loss = tf.nn.softmax_cross_entropy_with_logits(
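Note: `Layer.apply` was a deprecated alias for calling a Keras layer directly and has since been removed from TensorFlow, which is what breaks the old tutorial code. A minimal sketch of the two spellings (the input tensor here is illustrative, not from the tutorial):

    import tensorflow as tf

    x = tf.one_hot([[1, 2, 3]], 256)  # illustrative batch of token ids

    # Old, removed spelling:
    #   lstm_out = tf.keras.layers.LSTM(256, return_sequences=True).apply(x)
    # Supported spelling: a Keras layer instance is callable.
    lstm_out = tf.keras.layers.LSTM(256, return_sequences=True)(x)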
@@ -85,7 +85,7 @@ def cnn_model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
   elif mode == tf_estimator.ModeKeys.EVAL:
     eval_metric_ops = {
         'accuracy':
-            tf.metrics.accuracy(
+            tf.compat.v1.metrics.accuracy(
                 labels=labels, predictions=tf.argmax(input=logits, axis=1))
     }
     return tf_estimator.EstimatorSpec(
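Note: `tf.metrics.accuracy` exists only in TF1, where it returns the `(value, update_op)` pair that `EstimatorSpec.eval_metric_ops` expects; in TF2, `tf.metrics` aliases `tf.keras.metrics`, which has no such function. Routing through `tf.compat.v1.metrics.accuracy` keeps the Estimator-based tutorials working under TF2. The same one-line fix recurs in two later hunks.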
@@ -21,14 +21,16 @@ def get_cnn_model(features):
   """Given input features, returns the logits from a simple CNN model."""
   input_layer = tf.reshape(features, [-1, 28, 28, 1])
   y = tf.keras.layers.Conv2D(
-      16, 8, strides=2, padding='same', activation='relu').apply(input_layer)
-  y = tf.keras.layers.MaxPool2D(2, 1).apply(y)
+      16, 8, strides=2, padding='same', activation='relu')(
+          input_layer)
+  y = tf.keras.layers.MaxPool2D(2, 1)(y)
   y = tf.keras.layers.Conv2D(
-      32, 4, strides=2, padding='valid', activation='relu').apply(y)
-  y = tf.keras.layers.MaxPool2D(2, 1).apply(y)
-  y = tf.keras.layers.Flatten().apply(y)
-  y = tf.keras.layers.Dense(32, activation='relu').apply(y)
-  logits = tf.keras.layers.Dense(10).apply(y)
+      32, 4, strides=2, padding='valid', activation='relu')(
+          y)
+  y = tf.keras.layers.MaxPool2D(2, 1)(y)
+  y = tf.keras.layers.Flatten()(y)
+  y = tf.keras.layers.Dense(32, activation='relu')(y)
+  logits = tf.keras.layers.Dense(10)(y)

   return logits

@@ -70,14 +70,16 @@ def cnn_model_fn(features, labels, mode):
   # Define CNN architecture using tf.keras.layers.
   input_layer = tf.reshape(features['x'], [-1, 28, 28, 1])
   y = tf.keras.layers.Conv2D(
-      16, 8, strides=2, padding='same', activation='relu').apply(input_layer)
-  y = tf.keras.layers.MaxPool2D(2, 1).apply(y)
+      16, 8, strides=2, padding='same', activation='relu')(
+          input_layer)
+  y = tf.keras.layers.MaxPool2D(2, 1)(y)
   y = tf.keras.layers.Conv2D(
-      32, 4, strides=2, padding='valid', activation='relu').apply(y)
-  y = tf.keras.layers.MaxPool2D(2, 1).apply(y)
-  y = tf.keras.layers.Flatten().apply(y)
-  y = tf.keras.layers.Dense(32, activation='relu').apply(y)
-  logits = tf.keras.layers.Dense(10).apply(y)
+      32, 4, strides=2, padding='valid', activation='relu')(
+          y)
+  y = tf.keras.layers.MaxPool2D(2, 1)(y)
+  y = tf.keras.layers.Flatten()(y)
+  y = tf.keras.layers.Dense(32, activation='relu')(y)
+  logits = tf.keras.layers.Dense(10)(y)

   # Calculate loss as a vector (to support microbatches in DP-SGD).
   vector_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
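Note: the two hunks above apply the same `.apply()` migration to both CNN tutorials. The awkward-looking wraps such as `activation='relu')(` followed by `input_layer)` on the next line are just the auto-formatter splitting the now-longer call expression; there is no semantic change beyond the call syntax.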
@@ -115,7 +117,7 @@ def cnn_model_fn(features, labels, mode):
   elif mode == tf_estimator.ModeKeys.EVAL:
     eval_metric_ops = {
         'accuracy':
-            tf.metrics.accuracy(
+            tf.compat.v1.metrics.accuracy(
                 labels=labels, predictions=tf.argmax(input=logits, axis=1))
     }

@@ -56,12 +56,12 @@ def lr_model_fn(features, labels, mode, nclasses, dim):
   logits = tf.keras.layers.Dense(
       units=nclasses,
       kernel_regularizer=tf.keras.regularizers.L2(l2=FLAGS.regularizer),
-      bias_regularizer=tf.keras.regularizers.L2(
-          l2=FLAGS.regularizer)).apply(input_layer)
+      bias_regularizer=tf.keras.regularizers.L2(l2=FLAGS.regularizer))(
+          input_layer)

   # Calculate loss as a vector (to support microbatches in DP-SGD).
   vector_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
-      labels=labels, logits=logits) + tf.losses.get_regularization_loss()
+      labels, logits) + tf.compat.v1.losses.get_regularization_loss()
   # Define mean of loss across minibatch (for reporting through tf.Estimator).
   scalar_loss = tf.reduce_mean(vector_loss)

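Note: `tf.losses.get_regularization_loss` is likewise TF1-only, so it moves under `tf.compat.v1`; it totals the penalty terms that layers register via their `kernel_regularizer`/`bias_regularizer` arguments, as the `Dense` layer above does. For reference, a minimal TF2-native sketch of the same idea (names and shapes illustrative, not from the tutorial):

    import tensorflow as tf

    dense = tf.keras.layers.Dense(
        4, kernel_regularizer=tf.keras.regularizers.L2(l2=0.01))
    y = dense(tf.ones([2, 3]))  # calling the layer registers its penalty term
    reg_loss = tf.add_n(dense.losses)  # sum of this layer's regularization terms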
@@ -94,7 +94,7 @@ def lr_model_fn(features, labels, mode, nclasses, dim):
   elif mode == tf_estimator.ModeKeys.EVAL:
     eval_metric_ops = {
         'accuracy':
-            tf.metrics.accuracy(
+            tf.compat.v1.metrics.accuracy(
                 labels=labels, predictions=tf.argmax(input=logits, axis=1))
     }
     return tf_estimator.EstimatorSpec(
@@ -165,7 +165,7 @@ def print_privacy_guarantees(epochs, batch_size, samples, noise_multiplier):
   # Using RDP accountant to compute eps. Doing computation analytically is
   # an option.
   rdp = [order * coef for order in orders]
-  eps = dp_accounting.rdp.compute_epsilon(orders, rdp, delta)
+  eps, _ = dp_accounting.rdp.compute_epsilon(orders, rdp, delta)
   print('\t{:g}% enjoy at least ({:.2f}, {})-DP'.format(p * 100, eps, delta))

   accountant = dp_accounting.rdp.RdpAccountant(orders)
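Note: this is the one behavioral (rather than namespace) fix in the commit: `dp_accounting.rdp.compute_epsilon` returns a pair, so the call site unpacks it and keeps only the epsilon (the discarded second value is presumably the optimal RDP order). A hedged sketch with illustrative inputs, assuming the tutorial's existing import makes `dp_accounting.rdp` available:

    import dp_accounting  # as assumed available in the tutorial

    orders = [1.25, 1.5, 2.0, 4.0, 8.0, 16.0, 32.0]
    coef = 0.5  # illustrative RDP coefficient
    rdp = [order * coef for order in orders]

    eps, _ = dp_accounting.rdp.compute_epsilon(orders, rdp, 1e-5)
    print('epsilon = {:.2f}'.format(eps))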