Adds an end-to-end codelab that trains a model and evaluates it for privacy risks.

PiperOrigin-RevId: 318032025
This commit is contained in:
A. Unique TensorFlower 2020-06-24 02:59:54 -07:00
parent 88dd8771bf
commit c07e87d0b5
2 changed files with 334 additions and 1 deletion

View file

@@ -0,0 +1,333 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "1eiwVljWpzM7"
},
"source": [
"##### Copyright 2020 The TensorFlow Authors.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"colab": {},
"colab_type": "code",
"id": "4rmwPgXeptiS"
},
"outputs": [],
"source": [
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"# you may not use this file except in compliance with the License.\n",
"# You may obtain a copy of the License at\n",
"#\n",
"# https://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"# See the License for the specific language governing permissions and\n",
"# limitations under the License."
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "YM2gRaJMqvMi"
},
"source": [
"# Assess privacy risks with TensorFlow Privacy Membership Inference Attacks"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "-B5ZvlSqqLaR"
},
"source": [
"\u003ctable class=\"tfo-notebook-buttons\" align=\"left\"\u003e\n",
" \u003ctd\u003e\n",
" \u003ca target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/privacy/blob/master/tensorflow_privacy/privacy/membership_inference_attack/codelab.ipynb\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" /\u003eRun in Google Colab\u003c/a\u003e\n",
" \u003c/td\u003e\n",
" \u003ctd\u003e\n",
" \u003ca target=\"_blank\" href=\"https://github.com/tensorflow/privacy/blob/master/tensorflow_privacy/privacy/membership_inference_attack/codelab.ipynb\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" /\u003eView source on GitHub\u003c/a\u003e\n",
" \u003c/td\u003e\n",
"\u003c/table\u003e"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "9rMuytY7Nn8P"
},
"source": [
"##Overview\n",
"In this codelab we'll train a simple image classification model on the CIFAR10 dataset, and then use the \"membership inference attack\" against this model to assess if the attacker is able to \"guess\" whether a particular sample was present in the training set."
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "FUWqArj_q8vs"
},
"source": [
"## Setup\n",
"First, set this notebook's runtime to use a GPU, under Runtime \u003e Change runtime type \u003e Hardware accelerator. Then, begin importing the necessary libraries."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "Lr1pwHcbralz"
},
"outputs": [],
"source": [
"#@title Import statements.\n",
"try:\n",
" # %tensorflow_version only exists in Colab.\n",
" %tensorflow_version 1.x\n",
"except Exception:\n",
" pass\n",
"\n",
"import numpy as np\n",
"from colabtools import adhoc_import\n",
"from typing import Tuple, Text\n",
"from scipy import special\n",
"\n",
"import tensorflow as tf\n",
"import tensorflow_datasets as tfds\n",
"\n",
"tf.compat.v1.logging.set_verbosity(tf.logging.ERROR)"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "ucw81ar6ru-6"
},
"source": [
"Install TensorFlow Privacy."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "both",
"colab": {},
"colab_type": "code",
"id": "zcqAmiGH90kl"
},
"outputs": [],
"source": [
"!pip install tensorflow_privacy\n",
"\n",
"from tensorflow_privacy.privacy.membership_inference_attack import membership_inference_attack as mia"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "vCyOWyyhXLib"
},
"outputs": [],
"source": [
"#@title Train a simple model on CIFAR10 with Keras.\n",
"\n",
"dataset = 'cifar10'\n",
"num_classes = 10\n",
"num_conv = 3\n",
"activation = 'relu'\n",
"optimizer = 'adam'\n",
"lr = 0.02\n",
"momentum = 0.9\n",
"batch_size = 250\n",
"epochs = 100 # Privacy risks are especially visible with lots of epochs.\n",
"\n",
"\n",
"def small_cnn(input_shape: Tuple[int],\n",
" num_classes: int,\n",
" num_conv: int,\n",
" activation: Text = 'relu') -\u003e tf.keras.models.Sequential:\n",
" \"\"\"Setup a small CNN for image classification.\n",
"\n",
" Args:\n",
" input_shape: Integer tuple for the shape of the images.\n",
" num_classes: Number of prediction classes.\n",
" num_conv: Number of convolutional layers.\n",
" activation: The activation function to use for conv and dense layers.\n",
"\n",
" Returns:\n",
" The Keras model.\n",
" \"\"\"\n",
" model = tf.keras.models.Sequential()\n",
" model.add(tf.keras.layers.Input(shape=input_shape))\n",
"\n",
" # Conv layers\n",
" for _ in range(num_conv):\n",
" model.add(tf.keras.layers.Conv2D(32, (3, 3), activation=activation))\n",
" model.add(tf.keras.layers.MaxPooling2D())\n",
"\n",
" model.add(tf.keras.layers.Flatten())\n",
" model.add(tf.keras.layers.Dense(64, activation=activation))\n",
" model.add(tf.keras.layers.Dense(num_classes))\n",
" return model\n",
"\n",
"\n",
"print('Loading the dataset.')\n",
"train_ds = tfds.as_numpy(\n",
" tfds.load(dataset, split=tfds.Split.TRAIN, batch_size=-1))\n",
"test_ds = tfds.as_numpy(\n",
" tfds.load(dataset, split=tfds.Split.TEST, batch_size=-1))\n",
"x_train = train_ds['image'].astype('float32') / 255.\n",
"y_train_indices = train_ds['label'][:, np.newaxis]\n",
"x_test = test_ds['image'].astype('float32') / 255.\n",
"y_test_indices = test_ds['label'][:, np.newaxis]\n",
"\n",
"# Convert class vectors to binary class matrices.\n",
"y_train = tf.keras.utils.to_categorical(y_train_indices, num_classes)\n",
"y_test = tf.keras.utils.to_categorical(y_test_indices, num_classes)\n",
"\n",
"input_shape = x_train.shape[1:]\n",
"\n",
"model = small_cnn(\n",
" input_shape, num_classes, num_conv=num_conv, activation=activation)\n",
"\n",
"print('Optimizer ', optimizer)\n",
"print('learning rate %f', lr)\n",
"\n",
"optimizer = tf.keras.optimizers.SGD(lr=lr, momentum=momentum)\n",
"\n",
"loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)\n",
"model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])\n",
"model.summary()\n",
"model.fit(\n",
" x_train,\n",
" y_train,\n",
" batch_size=batch_size,\n",
" epochs=epochs,\n",
" validation_data=(x_test, y_test),\n",
" shuffle=True)\n",
"print('Finished training.')"
]
},
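{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"The gap between training and test accuracy is a rough indicator of the overfitting that membership inference attacks exploit. The optional cell below is an illustrative addition (not part of the original codelab): it simply re-evaluates the trained model on both splits and prints the gap."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code"
},
"outputs": [],
"source": [
"#@title (Optional) Inspect the train/test accuracy gap.\n",
"#@markdown A large gap suggests the model memorizes its training data, which\n",
"#@markdown typically makes membership inference easier. This cell is only a\n",
"#@markdown sanity check and is not needed for the attack itself.\n",
"_, train_acc = model.evaluate(x_train, y_train, batch_size=batch_size, verbose=0)\n",
"_, test_acc = model.evaluate(x_test, y_test, batch_size=batch_size, verbose=0)\n",
"print('Train accuracy: %.4f' % train_acc)\n",
"print('Test accuracy:  %.4f' % test_acc)\n",
"print('Accuracy gap:   %.4f' % (train_acc - test_acc))"
]
},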
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "um9r0tSiPx4u"
},
"outputs": [],
"source": [
"#@title Calculate logits, probabilities and lossess for training and test sets.\n",
"#@markdown We will use these values later in the membership inference attack to\n",
"#@markdown separate training and test samples.\n",
"print('Predict on train...')\n",
"logits_train = model.predict(x_train, batch_size=batch_size)\n",
"print('Predict on test...')\n",
"logits_test = model.predict(x_test, batch_size=batch_size)\n",
"\n",
"print('Apply softmax to get probabilities from logits...')\n",
"prob_train = special.softmax(logits_train)\n",
"prob_test = special.softmax(logits_test)\n",
"\n",
"print('Compute losses...')\n",
"cce = tf.keras.backend.categorical_crossentropy\n",
"constant = tf.keras.backend.constant\n",
"\n",
"loss_train = cce(constant(y_train), prob_train, from_logits=False).numpy()\n",
"loss_test = cce(constant(y_test), prob_test, from_logits=False).numpy()"
]
},
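{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"To see why per-example losses help separate members from non-members, it can be useful to look at their distributions. The optional cell below is an illustrative sketch (not part of the original codelab) that assumes matplotlib is available, as it is in Colab; the more the two histograms separate, the easier membership inference becomes."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code"
},
"outputs": [],
"source": [
"#@title (Optional) Plot the loss distributions on train and test data.\n",
"import matplotlib.pyplot as plt\n",
"\n",
"plt.figure(figsize=(6, 4))\n",
"plt.hist(loss_train, bins=50, alpha=0.5, density=True, label='Train losses')\n",
"plt.hist(loss_test, bins=50, alpha=0.5, density=True, label='Test losses')\n",
"plt.xlabel('Per-example cross-entropy loss')\n",
"plt.ylabel('Density')\n",
"plt.legend()\n",
"plt.show()"
]
},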
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "B8NIwhVwQT7I"
},
"outputs": [],
"source": [
"#@title Run membership inference attacks.\n",
"#@markdown We will now execute membership inference attack against the\n",
"#@markdown previously trained CIFAR10 model. This will generate a number of\n",
"#@markdown scores (most notably, attacker advantage and AUC for the membership\n",
"#@markdown inference classifier). An AUC of close to 0.5 means that the attack\n",
"#@markdown isn't able to identify training samples, which means that the model\n",
"#@markdown doesn't have privacy issues according to this test. Higher values,\n",
"#@markdown on the contrary, indicate potential privacy issues.\n",
"\n",
"#@markdown Note: This will take a while, since it also trains ML models to\n",
"#@markdown separate train/test examples.\n",
"\n",
"labels_train = np.argmax(y_train, axis=1)\n",
"labels_test = np.argmax(y_test, axis=1)\n",
"\n",
"attack_result_summary = mia.run_all_attacks_and_create_summary(\n",
" loss_train,\n",
" loss_test,\n",
" logits_train,\n",
" logits_test,\n",
" labels_train,\n",
" labels_test,\n",
")[0]\n",
"\n",
"print(attack_result_summary)"
]
},
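{
"cell_type": "markdown",
"metadata": {
"colab_type": "text"
},
"source": [
"To make the AUC and attacker-advantage numbers concrete, the optional cell below recomputes them for the simplest possible attack: thresholding the per-example loss. This is an illustrative sketch that assumes scikit-learn is available (as it is in Colab); it is not the library's own implementation, and the TensorFlow Privacy attack above additionally trains classifiers, so its scores may differ."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {},
"colab_type": "code"
},
"outputs": [],
"source": [
"#@title (Optional) A simple loss-threshold attack for intuition.\n",
"from sklearn import metrics\n",
"\n",
"# Membership labels: 1 for training examples, 0 for test examples.\n",
"membership = np.concatenate(\n",
"    [np.ones_like(loss_train), np.zeros_like(loss_test)])\n",
"# Members tend to have lower loss, so use the negative loss as the score.\n",
"scores = -np.concatenate([loss_train, loss_test])\n",
"\n",
"fpr, tpr, _ = metrics.roc_curve(membership, scores)\n",
"print('Loss-threshold attack AUC: %.4f' % metrics.auc(fpr, tpr))\n",
"print('Loss-threshold attacker advantage: %.4f' % np.max(tpr - fpr))"
]
},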
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "E9zwsPGFujVq"
},
"source": [
"This is the end of the codelab! Feel free to change the parameters to see how the privacy risks change."
]
}
],
"metadata": {
"colab": {
"collapsed_sections": [],
"last_runtime": {
"build_target": "//learning/deepmind/public/tools/ml_python:ml_notebook",
"kind": "private"
},
"name": "Membership inference codelab",
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"pycharm": {
"stem_cell": {
"cell_type": "raw",
"metadata": {
"collapsed": false
},
"source": []
}
}
},
"nbformat": 4,
"nbformat_minor": 0
}

View file

@@ -397,7 +397,7 @@ def run_attack(loss_train: np.ndarray = None,
   # ---------- If labels are provided ----------
   if labels_train is not None and labels_test is not None:
     if labels_train.ndim != 1 or labels_test.ndim != 1:
-      raise ValueError('Losses must be 1D arrays.')
+      raise ValueError('Labels must be 1D arrays.')
     if 'loss' in features:
       assert (loss_train.shape[0] == labels_train.shape[0] and
               loss_test.shape[0] == labels_test.shape[0]), \