forked from 626_privacy/tensorflow_privacy

fix variable names

parent f8c2745c8d
commit 62c51db99c

5 changed files with 54 additions and 66 deletions
File 1 of 5 (poisoning attack library, used elsewhere in this commit as the `attacks` module):

@@ -1,4 +1,4 @@
-# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,39 +14,37 @@
 # =============================================================================
 """Poisoning attack library for auditing."""

 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

 import numpy as np
 from sklearn.decomposition import PCA
 from sklearn.linear_model import LogisticRegression


-def make_clip_aware(trn_x, trn_y, l2_norm=10):
+def make_clip_aware(train_x, train_y, l2_norm=10):
   """
-  trn_x: clean training features - must be shape (n_samples, n_features)
-  trn_y: clean training labels - must be shape (n_samples, )
+  train_x: clean training features - must be shape (n_samples, n_features)
+  train_y: clean training labels - must be shape (n_samples, )

   Returns x, y1, y2
   x: poisoning sample
   y1: first corresponding y value
   y2: second corresponding y value
   """
-  x_shape = list(trn_x.shape[1:])
-  to_image = lambda x: x.reshape([-1] + x_shape)
-  flatten = lambda x: x.reshape((x.shape[0], -1))
-  assert np.allclose(to_image(flatten(trn_x)), trn_x)
+  x_shape = list(train_x.shape[1:])
+  to_image = lambda x: x.reshape([-1] + x_shape)  # reshapes to standard image shape
+  flatten = lambda x: x.reshape((x.shape[0], -1))  # flattens all pixels - allows PCA

-  flat_x = flatten(trn_x)
+  # make sure to_image and flatten are inverse functions
+  assert np.allclose(to_image(flatten(train_x)), train_x)
+
+  flat_x = flatten(train_x)
   pca = PCA(flat_x.shape[1])
   pca.fit(flat_x)

   new_x = l2_norm*pca.components_[-1]

   lr = LogisticRegression(max_iter=1000)
-  lr.fit(flat_x, np.argmax(trn_y, axis=1))
+  lr.fit(flat_x, np.argmax(train_y, axis=1))

-  num_classes = trn_y.shape[1]
+  num_classes = train_y.shape[1]
   lr_probs = lr.predict_proba(new_x[None, :])
   min_y = np.argmin(lr_probs)
   second_y = np.argmin(lr_probs + np.eye(num_classes)[min_y])
@@ -56,10 +54,12 @@ def make_clip_aware(trn_x, trn_y, l2_norm=10):

   return to_image(new_x), oh_min_y, oh_second_y

-def make_backdoor(trn_x, trn_y):
+def make_backdoor(train_x, train_y):
   """
-  trn_x: clean training features - must be shape (n_samples, n_features)
-  trn_y: clean training labels - must be shape (n_samples, )
+  Makes a backdoored dataset, following Gu et al. https://arxiv.org/abs/1708.06733
+
+  train_x: clean training features - must be shape (n_samples, n_features)
+  train_y: clean training labels - must be shape (n_samples, )

   Returns x, y1, y2
   x: poisoning sample
@@ -67,24 +67,24 @@ def make_backdoor(trn_x, trn_y):
   y2: second corresponding y value
   """

-  sample_ind = np.random.choice(trn_x.shape[0], 1)
-  pois_x = np.copy(trn_x[sample_ind, :])
+  sample_ind = np.random.choice(train_x.shape[0], 1)
+  pois_x = np.copy(train_x[sample_ind, :])
   pois_x[0] = 1  # set corner feature to 1
-  second_y = trn_y[sample_ind]
+  second_y = train_y[sample_ind]

-  num_classes = trn_y.shape[1]
+  num_classes = train_y.shape[1]
   min_y = np.eye(num_classes)[second_y.argmax(1) + 1]

   return pois_x, min_y, second_y


-def make_many_pois(trn_x, trn_y, pois_sizes, attack="clip_aware", l2_norm=10):
+def make_many_poisoned_datasets(train_x, train_y, pois_sizes, attack="clip_aware", l2_norm=10):
   """
   Makes a dict containing many poisoned datasets. make_pois is fairly slow:
   this avoids making multiple calls

-  trn_x: clean training features - shape (n_samples, n_features)
-  trn_y: clean training labels - shape (n_samples, )
+  train_x: clean training features - shape (n_samples, n_features)
+  train_y: clean training labels - shape (n_samples, )
   pois_sizes: list of poisoning sizes
   l2_norm: l2 norm of the poisoned data

@@ -92,16 +92,16 @@ def make_many_pois(trn_x, trn_y, pois_sizes, attack="clip_aware", l2_norm=10):
   all_poisons[poison_size] is a pair of poisoned datasets
   """
   if attack == "clip_aware":
-    pois_sample_x, y, second_y = make_clip_aware(trn_x, trn_y, l2_norm)
+    pois_sample_x, y, second_y = make_clip_aware(train_x, train_y, l2_norm)
   elif attack == "backdoor":
-    pois_sample_x, y, second_y = make_backdoor(trn_x, trn_y)
+    pois_sample_x, y, second_y = make_backdoor(train_x, train_y)
   else:
     raise NotImplementedError
   all_poisons = {"pois": (pois_sample_x, y)}

   for pois_size in pois_sizes:  # make_pois is slow - don't want it in a loop
-    new_pois_x1, new_pois_y1 = trn_x.copy(), trn_y.copy()
-    new_pois_x2, new_pois_y2 = trn_x.copy(), trn_y.copy()
+    new_pois_x1, new_pois_y1 = train_x.copy(), train_y.copy()
+    new_pois_x2, new_pois_y2 = train_x.copy(), train_y.copy()

     new_pois_x1[-pois_size:] = pois_sample_x[None, :]
     new_pois_y1[-pois_size:] = y
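As a quick orientation for the rename, a minimal usage sketch of the clip-aware attack under the new argument names follows. The import path assumes this library is importable as the `attacks` module (the name used by `attacks.make_many_pois` in the next file); the tiny synthetic shapes and one-hot labels are illustrative only.

import numpy as np
from attacks import make_clip_aware  # assumed import path for the module above

np.random.seed(0)
train_x = np.random.normal(size=(64, 4, 4))  # 64 toy "images" of shape 4x4
train_y = np.eye(2)[np.arange(64) % 2]       # one-hot labels, as implied by train_y.shape[1] above

# Returns one poisoning sample (same image shape as train_x) and two candidate one-hot labels.
pois_x, y1, y2 = make_clip_aware(train_x, train_y, l2_norm=10)
print(pois_x.shape, y1, y2)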
File 2 of 5 (the auditing procedure, class AuditAttack):

@@ -1,4 +1,4 @@
-# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,10 +14,6 @@
 # =============================================================================
 """Class for running auditing procedure."""

-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
 import numpy as np
 from statsmodels.stats import proportion

@@ -77,24 +73,24 @@ def compute_epsilon_and_acc(poison_arr, unpois_arr, threshold, alpha, pois_ct):

 class AuditAttack(object):
   """Audit attack class. Generates poisoning, then runs auditing algorithm."""

-  def __init__(self, trn_x, trn_y, train_function):
+  def __init__(self, train_x, train_y, train_function):
     """
-    trn_x: training features
-    trn_y: training labels
+    train_x: training features
+    train_y: training labels
     name: identifier for the attack
     train_function: function returning membership score
     """
-    self.trn_x, self.trn_y = trn_x, trn_y
+    self.train_x, self.train_y = train_x, train_y
     self.train_function = train_function
     self.poisoning = None

   def make_poisoning(self, pois_ct, attack_type, l2_norm=10):
     """Get poisoning data."""
-    return attacks.make_many_pois(self.trn_x, self.trn_y, [pois_ct],
+    return attacks.make_many_poisoned_datasets(self.train_x, self.train_y, [pois_ct],
                                   attack=attack_type, l2_norm=l2_norm)

   def run_experiments(self, num_trials):
-    """Uses multiprocessing to run all training experiments."""
+    """Runs all training experiments."""
     (pois_x1, pois_y1), (pois_x2, pois_y2) = self.poisoning['data']
     sample_x, sample_y = self.poisoning['pois']

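The `statsmodels.stats.proportion` import above belongs to the part of the auditing math this diff does not show: the body of `compute_epsilon_and_acc`. For context, here is a generic sketch of how Clopper-Pearson bounds on attack success rates yield an empirical epsilon lower bound; the counts and the exact formula are illustrative assumptions, not this file's code.

import numpy as np
from statsmodels.stats import proportion

# Hypothetical outcome counts: how often the membership score crossed the
# decision threshold over n_trials runs with and without the poison present.
n_trials, alpha = 500, 0.05
pois_hits, unpois_hits = 480, 20

# Clopper-Pearson ("beta") confidence bounds on the two success probabilities.
p1_lower, _ = proportion.proportion_confint(pois_hits, n_trials, alpha / 2, method="beta")
_, p0_upper = proportion.proportion_confint(unpois_hits, n_trials, alpha / 2, method="beta")

# A DP guarantee implies Pr[detect | poisoned] <= exp(eps) * Pr[detect | clean],
# so an observed gap between the bounded rates certifies a lower bound on epsilon.
eps_lower_bound = np.log(p1_lower / p0_upper)
print(eps_lower_bound)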
File 3 of 5 (tests for the auditing procedure):

@@ -1,4 +1,4 @@
-# Copyright 2020, The TensorFlow Authors.
+# Copyright 2021, The TensorFlow Authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -81,7 +81,7 @@ class AuditAttackTest(absltest.TestCase):
   def test_run_experiments(self):
     auditor = get_auditor()
     pois, unpois = auditor.run_experiments(100)
-    expected = [0 for _ in range(100)]
+    expected = [0]*100
     self.assertListEqual(pois, expected)
     self.assertListEqual(unpois, expected)

File 4 of 5 (FashionMNIST auditing example):

@@ -1,4 +1,4 @@
-# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,10 +14,6 @@
 # =============================================================================
 """Run auditing on the FashionMNIST dataset."""

-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
 import numpy as np
 import tensorflow.compat.v1 as tf

@@ -146,21 +142,21 @@ def main(unused_argv):
   # Load training and test data.
   np.random.seed(0)

-  (trn_x, trn_y), _ = tf.keras.datasets.fashion_mnist.load_data()
-  trn_inds = np.where(trn_y < 2)[0]
+  (train_x, train_y), _ = tf.keras.datasets.fashion_mnist.load_data()
+  train_inds = np.where(train_y < 2)[0]

-  trn_x = -.5 + trn_x[trn_inds] / 255.
-  trn_y = np.eye(2)[trn_y[trn_inds]]
+  train_x = -.5 + train_x[train_inds] / 255.
+  train_y = np.eye(2)[train_y[train_inds]]

   # subsample dataset
-  ss_inds = np.random.choice(trn_x.shape[0], trn_x.shape[0]//2, replace=False)
-  trn_x = trn_x[ss_inds]
-  trn_y = trn_y[ss_inds]
+  ss_inds = np.random.choice(train_x.shape[0], train_x.shape[0]//2, replace=False)
+  train_x = train_x[ss_inds]
+  train_y = train_y[ss_inds]

-  init_model = build_model(trn_x, trn_y)
-  _ = train_model(init_model, trn_x, trn_y, save_weights=True)
+  init_model = build_model(train_x, train_y)
+  _ = train_model(init_model, train_x, train_y, save_weights=True)

-  auditor = audit.AuditAttack(trn_x, trn_y, train_and_score)
+  auditor = audit.AuditAttack(train_x, train_y, train_and_score)

   thresh, _, _ = auditor.run(FLAGS.pois_ct, FLAGS.attack_type, FLAGS.num_trials,
                              alpha=FLAGS.alpha, threshold=None,
@@ -170,9 +166,9 @@ def main(unused_argv):
                              alpha=FLAGS.alpha, threshold=thresh,
                              l2_norm=FLAGS.attack_l2_norm)

-  epsilon_ub = compute_epsilon(trn_x.shape[0])
+  epsilon_upper_bound = compute_epsilon(train_x.shape[0])

-  print("Analysis epsilon is {}.".format(epsilon_ub))
+  print("Analysis epsilon is {}.".format(epsilon_upper_bound))
   print("At threshold={}, epsilon={}.".format(thresh, eps))
   print("The best accuracy at distinguishing poisoning is {}.".format(acc))

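`compute_epsilon` itself sits outside this hunk. In TF Privacy examples the "analysis epsilon" is typically obtained from the library's DP-SGD accountant; the sketch below shows that pattern under the assumption that `compute_dp_sgd_privacy` from tensorflow_privacy's analysis package is available (check the import path against your installed version), with placeholder hyperparameters rather than this script's real flags.

from tensorflow_privacy.privacy.analysis import compute_dp_sgd_privacy_lib

# Placeholder values; the script's actual flags are not shown in this diff.
n_train, batch_size, noise_mult, epochs, delta = 12000, 250, 1.1, 24, 1e-5

eps, opt_order = compute_dp_sgd_privacy_lib.compute_dp_sgd_privacy(
    n=n_train, batch_size=batch_size, noise_multiplier=noise_mult,
    epochs=epochs, delta=delta)
print("Analysis epsilon is {}.".format(eps))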
File 5 of 5 (synthetic-mean auditing example):

@@ -1,4 +1,4 @@
-# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,10 +15,6 @@
 """Auditing a model which computes the mean of a synthetic dataset.
 This gives an example for instrumenting the auditor to audit a user-given sample."""

-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
 import numpy as np
 import tensorflow.compat.v1 as tf

@@ -146,9 +142,9 @@ def main(unused_argv):
   _, eps, acc = auditor.run(1, None, FLAGS.num_trials, alpha=FLAGS.alpha,
                             threshold=thresh)

-  epsilon_ub = compute_epsilon(FLAGS.batch_size)
+  epsilon_upper_bound = compute_epsilon(FLAGS.batch_size)

-  print("Analysis epsilon is {}.".format(epsilon_ub))
+  print("Analysis epsilon is {}.".format(epsilon_upper_bound))
   print("At threshold={}, epsilon={}.".format(thresh, eps))
   print("The best accuracy at distinguishing poisoning is {}.".format(acc))
