Compare commits
No commits in common. "main" and "main" have entirely different histories.
41 changed files with 114 additions and 3972 deletions
@@ -1,8 +1,6 @@
## Sources

- [cifar10-fast-simple](https://github.com/99991/cifar10-fast-simple)
- [lira-pytorch](https://github.com/orientino/lira-pytorch)
- [wresnet-pytorch](https://github.com/AlexandrosFerles/Wide-Residual-Networks-PyTorch)

## Setup
@@ -6,10 +6,52 @@ import torchvision

import model


def load_model(model_path, device, dtype, train_data):
def train(seed=0):
    # Configurable parameters
    epochs = 10
    batch_size = 512
    momentum = 0.9
    weight_decay = 0.256
    weight_decay_bias = 0.004
    ema_update_freq = 5
    ema_rho = 0.99 ** ema_update_freq
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    dtype = torch.float16 if device.type != "cpu" else torch.float32

    # First, the learning rate rises from 0 to 0.002 over the first 194 batches.
    # Next, the learning rate shrinks down to 0.0002 over the next 582 batches.
    lr_schedule = torch.cat([
        torch.linspace(0e+0, 2e-3, 194),
        torch.linspace(2e-3, 2e-4, 582),
    ])

    lr_schedule_bias = 64.0 * lr_schedule
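
    # Not part of the diff: a minimal sketch of how a per-batch schedule like
    # this is typically consumed, indexing by the running batch counter (the
    # actual lookup in this repository happens later, in the training loop):
    #
    #   for batch_count in range(len(lr_schedule)):
    #       lr = lr_schedule[batch_count]            # weight learning rate
    #       lr_bias = lr_schedule_bias[batch_count]  # bias learning rate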

    # Print information about hardware on first run
    if seed == 0:
        if device.type == "cuda":
            print("Device :", torch.cuda.get_device_name(device.index))

        print("Dtype :", dtype)
        print()

    # Start measuring time
    start_time = time.perf_counter()

    # Set the random seed to increase the chance of reproducibility
    torch.manual_seed(seed)

    # Setting cudnn.benchmark to True hampers reproducibility, but is faster
    torch.backends.cudnn.benchmark = True

    # Load dataset
    train_data, train_targets, valid_data, valid_targets = load_cifar10(device, dtype)

    # Compute special weights for the first layer
    weights = model.patch_whitening(train_data[:10000, :, 4:-4, 4:-4])

    # Construct the neural network
    train_model = model.Model(weights, c_in=3, c_out=10, scale_out=0.125)
    train_model.load_state_dict(torch.load(model_path, weights_only=True))

    # Convert model weights to half precision
    train_model.to(dtype)

@@ -22,75 +64,6 @@ def load_model(model_path, device, dtype, train_data):
    # Upload model to GPU
    train_model.to(device)

    return train_model


def eval_model(smodel, device, dtype, data, labels, batch_size):
    smodel.eval()
    eval_correct = []

    with torch.no_grad():
        for i in range(0, len(data), batch_size):
            regular_inputs = data[i : i + batch_size].to(device, dtype)
            flipped_inputs = torch.flip(regular_inputs, [-1])

            logits1 = smodel(regular_inputs)
            logits2 = smodel(flipped_inputs)

            # Final logits are the average of the augmented logits
            logits = torch.mean(torch.stack([logits1, logits2], dim=0), dim=0)

            # Compute correct predictions
            correct = logits.max(dim=1)[1] == labels[i : i + batch_size].to(device)
            eval_correct.append(correct.detach().type(torch.float64))

    # Accuracy is the average number of correct predictions
    eval_acc = torch.mean(torch.cat(eval_correct)).item()

    return eval_acc


def run_shadow_model(shadow_path, device, dtype, data, labels, batch_size):
    smodel = load_model(shadow_path, device, dtype, data)
    eval_acc = eval_model(smodel, device, dtype, data, labels, batch_size)

    print(f"Evaluation Accuracy: {eval_acc:.4f}")


def train_shadow(shadow_path, train_data, train_targets, batch_size):
    # Configurable parameters
    epochs = 10
    momentum = 0.9
    weight_decay = 0.256
    weight_decay_bias = 0.004
    ema_update_freq = 5
    ema_rho = 0.99 ** ema_update_freq
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    dtype = torch.float16 if device.type != "cpu" else torch.float32

    # First, the learning rate rises from 0 to 0.002 over the first 194 batches.
    # Next, the learning rate shrinks down to 0.0002 over the next 582 batches.
    lr_schedule = torch.cat(
        [
            torch.linspace(0e0, 2e-3, 194),
            torch.linspace(2e-3, 2e-4, 582),
        ]
    )

    lr_schedule_bias = 64.0 * lr_schedule

    torch.backends.cudnn.benchmark = True

    weights = model.patch_whitening(train_data[:10000, :, 4:-4, 4:-4])
    train_model = model.Model(weights, c_in=3, c_out=10, scale_out=0.125)
    train_model.to(dtype)

    # Keep BatchNorm layers in float32 for numerical stability
    for module in train_model.modules():
        if isinstance(module, nn.BatchNorm2d):
            module.float()

    train_model.to(device)

    # Collect weights and biases and create Nesterov velocity values
    weights = [
        (w, torch.zeros_like(w))
@@ -103,15 +76,15 @@ def train_shadow(shadow_path, train_data, train_targets, batch_size):
        if w.requires_grad and len(w.shape) <= 1
    ]

    # Copy the model for validation
    valid_model = copy.deepcopy(train_model)

    print(f"Preprocessing: {time.perf_counter() - start_time:.2f} seconds")

    # Train and validate
    print("\nepoch    batch    train time [sec]    validation accuracy")
    train_time = 0.0
    batch_count = 0

    # Randomly sample half the data per model
    nb_rows = train_data.shape[0]
    indices = torch.randperm(nb_rows)[: nb_rows // 2]
    indices_in = indices[: nb_rows // 2]
    train_data = train_data[indices_in]
    train_targets = train_targets[indices_in]

    for epoch in range(1, epochs + 1):
        # Flush CUDA pipeline for more accurate time measurement
        if torch.cuda.is_available():
@@ -162,8 +135,41 @@ def train_shadow(shadow_path, train_data, train_targets, batch_size):
            update_nesterov(weights, lr, weight_decay, momentum)
            update_nesterov(biases, lr_bias, weight_decay_bias, momentum)

    torch.save(train_model.state_dict(), shadow_path)
            # Update validation model with exponential moving averages
            if (i // batch_size % ema_update_freq) == 0:
                update_ema(train_model, valid_model, ema_rho)

        if torch.cuda.is_available():
            torch.cuda.synchronize()

        # Add training time
        train_time += time.perf_counter() - start_time

        valid_correct = []
        for i in range(0, len(valid_data), batch_size):
            valid_model.train(False)

            # Test-time augmentation: test the model on regular and flipped data
            regular_inputs = valid_data[i : i + batch_size]
            flipped_inputs = torch.flip(regular_inputs, [-1])

            logits1 = valid_model(regular_inputs).detach()
            logits2 = valid_model(flipped_inputs).detach()

            # Final logits are the average of the augmented logits
            logits = torch.mean(torch.stack([logits1, logits2], dim=0), dim=0)

            # Compute correct predictions
            correct = logits.max(dim=1)[1] == valid_targets[i : i + batch_size]

            valid_correct.append(correct.detach().type(torch.float64))

        # Accuracy is the average number of correct predictions
        valid_acc = torch.mean(torch.cat(valid_correct)).item()

        print(f"{epoch:5} {batch_count:8d} {train_time:19.2f} {valid_acc:22.4f}")

    return valid_acc


def preprocess_data(data, device, dtype):
    # Convert to a torch float16 tensor
@@ -196,6 +202,17 @@ def load_cifar10(device, dtype, data_dir="~/data"):
    return train_data, train_targets, valid_data, valid_targets


def update_ema(train_model, valid_model, rho):
    # The trained model is not used for validation directly. Instead, the
    # validation model weights are updated with exponential moving averages.
    train_weights = train_model.state_dict().values()
    valid_weights = valid_model.state_dict().values()
    for train_weight, valid_weight in zip(train_weights, valid_weights):
        if valid_weight.dtype in [torch.float16, torch.float32]:
            valid_weight *= rho
            valid_weight += (1 - rho) * train_weight
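
# A minimal sketch (not part of the diff) of the EMA recurrence above on toy
# tensors: after one update, valid = rho * valid + (1 - rho) * train.
#
#   import torch
#   t = torch.ones(3)    # "train" weight
#   v = torch.zeros(3)   # "valid" (EMA) weight
#   rho = 0.99 ** 5      # ema_rho as configured above
#   v *= rho
#   v += (1 - rho) * t   # v is now ~0.049 everywhere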


def update_nesterov(weights, lr, weight_decay, momentum):
    for weight, velocity in weights:
        if weight.requires_grad:
@@ -218,14 +235,12 @@ def random_crop(data, crop_size):

def sha256(path):
    import hashlib

    with open(path, "rb") as f:
        return hashlib.sha256(f.read()).hexdigest()


def getrelpath(abspath):
    import os

    return os.path.relpath(abspath, os.getcwd())

@@ -239,15 +254,24 @@ def print_info():

def main():
    print_info()

    batch_size = 512
    shadow_path = "shadow.pt"
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    dtype = torch.float16 if device.type != "cpu" else torch.float32
    train_data, train_targets, valid_data, valid_targets = load_cifar10(device, dtype)
    accuracies = []
    threshold = 0.94
    for run in range(100):
        valid_acc = train(seed=run)
        accuracies.append(valid_acc)

    train_shadow(shadow_path, train_data, train_targets, batch_size)
    run_shadow_model(shadow_path, device, dtype, train_data, train_targets, batch_size)
    run_shadow_model(shadow_path, device, dtype, valid_data, valid_targets, batch_size)
    # Print accumulated results
    within_threshold = sum(acc >= threshold for acc in accuracies)
    acc = threshold * 100.0
    print()
    print(f"{within_threshold} of {run + 1} runs >= {acc} % accuracy")
    mean = sum(accuracies) / len(accuracies)
    variance = sum((acc - mean) ** 2 for acc in accuracies) / len(accuracies)
    std = variance ** 0.5
    print(f"Min accuracy: {min(accuracies)}")
    print(f"Max accuracy: {max(accuracies)}")
    print(f"Mean accuracy: {mean} +- {std}")
    print()


if __name__ == "__main__":
310 env.yaml

@@ -1,310 +0,0 @@
name: 626_pytorch_lira
channels:
  - pytorch
  - nvidia
  - conda-forge
  - defaults
dependencies:
  - _libgcc_mutex=0.1=conda_forge
  - _openmp_mutex=4.5=2_gnu
  - alsa-lib=1.2.3.2=h166bdaf_0
  - anyio=4.5.0=pyhd8ed1ab_0
  - appdirs=1.4.4=pyh9f0ad1d_0
  - argon2-cffi=23.1.0=pyhd8ed1ab_0
  - argon2-cffi-bindings=21.2.0=py38h01eb140_4
  - arrow=1.3.0=pyhd8ed1ab_0
  - asttokens=2.4.1=pyhd8ed1ab_0
  - async-lru=2.0.4=pyhd8ed1ab_0
  - attrs=24.2.0=pyh71513ae_0
  - babel=2.16.0=pyhd8ed1ab_0
  - backcall=0.2.0=pyh9f0ad1d_0
  - beautifulsoup4=4.12.3=pyha770c72_0
  - blas=1.0=mkl
  - bleach=6.1.0=pyhd8ed1ab_0
  - brotli=1.1.0=hd590300_1
  - brotli-bin=1.1.0=hd590300_1
  - brotli-python=1.1.0=py38h17151c0_1
  - bzip2=1.0.8=h4bc722e_7
  - ca-certificates=2024.8.30=hbcca054_0
  - cached-property=1.5.2=hd8ed1ab_1
  - cached_property=1.5.2=pyha770c72_1
  - certifi=2024.8.30=pyhd8ed1ab_0
  - cffi=1.14.6=py38ha65f79e_0
  - charset-normalizer=3.4.0=pyhd8ed1ab_0
  - click=8.1.7=unix_pyh707e725_0
  - colorama=0.4.6=pyhd8ed1ab_0
  - comm=0.2.2=pyhd8ed1ab_0
  - contourpy=1.1.1=py38h7f3f72f_1
  - cuda-cudart=12.1.105=0
  - cuda-cupti=12.1.105=0
  - cuda-libraries=12.1.0=0
  - cuda-nvrtc=12.1.105=0
  - cuda-nvtx=12.1.105=0
  - cuda-opencl=12.6.77=0
  - cuda-runtime=12.1.0=0
  - cuda-version=12.6=3
  - cycler=0.12.1=pyhd8ed1ab_0
  - dataclasses=0.8=pyhc8e2a94_3
  - dbus=1.13.6=h48d8840_2
  - debugpy=1.8.5=py38h6d02427_0
  - decorator=5.1.1=pyhd8ed1ab_0
  - defusedxml=0.7.1=pyhd8ed1ab_0
  - docker-pycreds=0.4.0=py_0
  - entrypoints=0.4=pyhd8ed1ab_0
  - exceptiongroup=1.2.2=pyhd8ed1ab_0
  - executing=2.1.0=pyhd8ed1ab_0
  - expat=2.6.4=h5888daf_0
  - ffmpeg=4.3=hf484d3e_0
  - filelock=3.16.1=pyhd8ed1ab_0
  - fontconfig=2.14.2=h14ed4e7_0
  - fonttools=4.53.1=py38h2019614_0
  - fqdn=1.5.1=pyhd8ed1ab_0
  - freetype=2.12.1=h267a509_2
  - fsspec=2024.10.0=pyhff2d567_0
  - functorch=2.0.0=pyhd8ed1ab_0
  - gettext=0.22.5=he02047a_3
  - gettext-tools=0.22.5=he02047a_3
  - giflib=5.2.2=hd590300_0
  - gitdb=4.0.11=pyhd8ed1ab_0
  - gitpython=3.1.43=pyhd8ed1ab_0
  - glib=2.68.4=h9c3ff4c_0
  - glib-tools=2.68.4=h9c3ff4c_0
  - gmp=6.3.0=hac33072_2
  - gmpy2=2.1.5=py38h6a1700d_1
  - gnutls=3.6.13=h85f3911_1
  - gst-plugins-base=1.18.5=hf529b03_0
  - gstreamer=1.18.5=h76c114f_0
  - h11=0.14.0=pyhd8ed1ab_0
  - h2=4.1.0=pyhd8ed1ab_0
  - hpack=4.0.0=pyh9f0ad1d_0
  - httpcore=1.0.7=pyh29332c3_1
  - httpx=0.27.2=pyhd8ed1ab_0
  - hyperframe=6.0.1=pyhd8ed1ab_0
  - icu=68.2=h9c3ff4c_0
  - idna=3.10=pyhd8ed1ab_0
  - importlib-metadata=8.5.0=pyha770c72_0
  - importlib-resources=6.4.5=pyhd8ed1ab_0
  - importlib_resources=6.4.5=pyhd8ed1ab_0
  - iniconfig=2.0.0=pyhd8ed1ab_0
  - intel-openmp=2022.1.0=h9e868ea_3769
  - ipykernel=6.29.5=pyh3099207_0
  - ipython=8.12.2=pyh41d4057_0
  - ipywidgets=8.1.5=pyhd8ed1ab_0
  - isoduration=20.11.0=pyhd8ed1ab_0
  - jedi=0.19.1=pyhd8ed1ab_0
  - jinja2=3.1.4=pyhd8ed1ab_0
  - joblib=1.4.2=pyhd8ed1ab_0
  - jpeg=9e=h166bdaf_2
  - json5=0.9.25=pyhd8ed1ab_0
  - jsonpointer=3.0.0=py38h578d9bd_0
  - jsonschema=4.23.0=pyhd8ed1ab_0
  - jsonschema-specifications=2024.10.1=pyhd8ed1ab_0
  - jsonschema-with-format-nongpl=4.23.0=hd8ed1ab_0
  - jupyter=1.1.1=pyhd8ed1ab_0
  - jupyter-lsp=2.2.5=pyhd8ed1ab_0
  - jupyter_client=8.6.3=pyhd8ed1ab_0
  - jupyter_console=6.6.3=pyhd8ed1ab_0
  - jupyter_core=5.7.2=pyh31011fe_1
  - jupyter_events=0.10.0=pyhd8ed1ab_0
  - jupyter_server=2.14.2=pyhd8ed1ab_0
  - jupyter_server_terminals=0.5.3=pyhd8ed1ab_0
  - jupyterlab=4.3.0=pyhd8ed1ab_0
  - jupyterlab_pygments=0.3.0=pyhd8ed1ab_1
  - jupyterlab_server=2.27.3=pyhd8ed1ab_0
  - jupyterlab_widgets=3.0.13=pyhd8ed1ab_0
  - keyutils=1.6.1=h166bdaf_0
  - kiwisolver=1.4.5=py38h7f3f72f_1
  - krb5=1.19.3=h3790be6_0
  - lame=3.100=h166bdaf_1003
  - lcms2=2.15=hfd0df8a_0
  - ld_impl_linux-64=2.43=h712a8e2_2
  - lerc=4.0.0=h27087fc_0
  - libabseil=20240116.2=cxx17_he02047a_1
  - libasprintf=0.22.5=he8f35ee_3
  - libasprintf-devel=0.22.5=he8f35ee_3
  - libblas=3.9.0=16_linux64_mkl
  - libbrotlicommon=1.1.0=hd590300_1
  - libbrotlidec=1.1.0=hd590300_1
  - libbrotlienc=1.1.0=hd590300_1
  - libcblas=3.9.0=16_linux64_mkl
  - libclang=11.1.0=default_ha53f305_1
  - libcublas=12.1.0.26=0
  - libcufft=11.0.2.4=0
  - libcufile=1.11.1.6=0
  - libcurand=10.3.7.77=0
  - libcusolver=11.4.4.55=0
  - libcusparse=12.0.2.55=0
  - libdeflate=1.17=h0b41bf4_0
  - libedit=3.1.20191231=he28a2e2_2
  - libevent=2.1.10=h9b69904_4
  - libexpat=2.6.4=h5888daf_0
  - libffi=3.3=h58526e2_2
  - libgcc=14.2.0=h77fa898_1
  - libgcc-ng=14.2.0=h69a702a_1
  - libgettextpo=0.22.5=he02047a_3
  - libgettextpo-devel=0.22.5=he02047a_3
  - libgfortran=14.2.0=h69a702a_1
  - libgfortran-ng=14.2.0=h69a702a_1
  - libgfortran5=14.2.0=hd5240d6_1
  - libglib=2.68.4=h3e27bee_0
  - libgomp=14.2.0=h77fa898_1
  - libiconv=1.17=hd590300_2
  - libjpeg-turbo=2.0.0=h9bf148f_0
  - liblapack=3.9.0=16_linux64_mkl
  - libllvm11=11.1.0=he0ac6c6_5
  - libnpp=12.0.2.50=0
  - libnvjitlink=12.1.105=0
  - libnvjpeg=12.1.1.14=0
  - libogg=1.3.5=h4ab18f5_0
  - libopus=1.3.1=h7f98852_1
  - libpng=1.6.43=h2797004_0
  - libpq=13.8=hd77ab85_0
  - libprotobuf=4.25.3=h08a7969_0
  - libsodium=1.0.18=h36c2ea0_1
  - libsqlite=3.46.0=hde9e2c9_0
  - libstdcxx=14.2.0=hc0a3c3a_1
  - libstdcxx-ng=14.2.0=h4852527_1
  - libtiff=4.5.0=h6adf6a1_2
  - libuuid=2.38.1=h0b41bf4_0
  - libvorbis=1.3.7=h9c3ff4c_0
  - libwebp=1.3.2=h11a3e52_0
  - libwebp-base=1.3.2=hd590300_1
  - libxcb=1.13=h7f98852_1004
  - libxkbcommon=1.0.3=he3ba5ed_0
  - libxml2=2.9.12=h72842e0_0
  - libzlib=1.2.13=h4ab18f5_6
  - lightning-bolts=0.6.0.post1=pyhd8ed1ab_0
  - lightning-utilities=0.11.8=pyhd8ed1ab_0
  - llvm-openmp=15.0.7=h0cdce71_0
  - markupsafe=2.1.5=py38h01eb140_0
  - matplotlib=3.7.3=py38h578d9bd_0
  - matplotlib-base=3.7.3=py38h58ed7fa_0
  - matplotlib-inline=0.1.7=pyhd8ed1ab_0
  - mistune=3.0.2=pyhd8ed1ab_0
  - mkl=2022.1.0=hc2b9512_224
  - mpc=1.3.1=h24ddda3_1
  - mpfr=4.2.1=h90cbb55_3
  - mpmath=1.3.0=pyhd8ed1ab_0
  - munkres=1.1.4=pyh9f0ad1d_0
  - mysql-common=8.0.32=h14678bc_0
  - mysql-libs=8.0.32=h54cf53e_0
  - nbclient=0.10.1=pyhd8ed1ab_0
  - nbconvert-core=7.16.4=pyhd8ed1ab_1
  - nbformat=5.10.4=pyhd8ed1ab_0
  - ncurses=6.5=he02047a_1
  - nest-asyncio=1.6.0=pyhd8ed1ab_0
  - nettle=3.6=he412f7d_0
  - networkx=3.1=pyhd8ed1ab_0
  - notebook=7.0.6=py38h06a4308_0
  - notebook-shim=0.2.4=pyhd8ed1ab_0
  - nspr=4.36=h5888daf_0
  - nss=3.100=hca3bf56_0
  - numpy=1.24.4=py38h59b608b_0
  - opacus=1.5.2=pyhd8ed1ab_0
  - openh264=2.1.1=h780b84a_0
  - openjpeg=2.5.0=hfec8fc6_2
  - openssl=1.1.1w=hd590300_0
  - opt_einsum=3.4.0=pyhd8ed1ab_0
  - overrides=7.7.0=pyhd8ed1ab_0
  - packaging=24.2=pyhff2d567_1
  - pandas=2.0.3=py38h01efb38_1
  - pandocfilters=1.5.0=pyhd8ed1ab_0
  - parso=0.8.4=pyhd8ed1ab_0
  - pcre=8.45=h9c3ff4c_0
  - pexpect=4.9.0=pyhd8ed1ab_0
  - pickleshare=0.7.5=py_1003
  - pillow=9.4.0=py38hde6dc18_1
  - pip=24.3.1=pyh8b19718_0
  - pkgutil-resolve-name=1.3.10=pyhd8ed1ab_1
  - platformdirs=4.3.6=pyhd8ed1ab_0
  - pluggy=1.5.0=pyhd8ed1ab_0
  - pooch=1.8.2=pyhd8ed1ab_0
  - prometheus_client=0.21.0=pyhd8ed1ab_0
  - prompt-toolkit=3.0.48=pyha770c72_0
  - prompt_toolkit=3.0.48=hd8ed1ab_0
  - protobuf=4.25.3=py38hb5c7596_0
  - psutil=6.0.0=py38hfb59056_0
  - pthread-stubs=0.4=hb9d3cd8_1002
  - ptyprocess=0.7.0=pyhd3deb0d_0
  - pure_eval=0.2.3=pyhd8ed1ab_0
  - pycparser=2.22=pyhd8ed1ab_0
  - pygments=2.18.0=pyhd8ed1ab_0
  - pyparsing=3.1.4=pyhd8ed1ab_0
  - pyqt=5.12.3=py38h578d9bd_8
  - pyqt-impl=5.12.3=py38h0ffb2e6_8
  - pyqt5-sip=4.19.18=py38h709712a_8
  - pyqtchart=5.12=py38h7400c14_8
  - pyqtwebengine=5.12.1=py38h7400c14_8
  - pysocks=1.7.1=pyha2e5f31_6
  - pytest=8.3.3=pyhd8ed1ab_0
  - python=3.8.6=hffdb5ce_5_cpython
  - python-dateutil=2.9.0=pyhd8ed1ab_0
  - python-fastjsonschema=2.20.0=pyhd8ed1ab_0
  - python-json-logger=2.0.7=pyhd8ed1ab_0
  - python-tzdata=2024.2=pyhd8ed1ab_0
  - python_abi=3.8=5_cp38
  - pytorch=2.4.1=py3.8_cuda12.1_cudnn9.1.0_0
  - pytorch-cuda=12.1=ha16c6d3_6
  - pytorch-lightning=2.4.0=pyhd8ed1ab_0
  - pytorch-mutex=1.0=cuda
  - pytz=2024.2=pyhd8ed1ab_0
  - pyyaml=6.0.2=py38h2019614_0
  - pyzmq=26.2.0=py38h6c80b9a_0
  - qt=5.12.9=hda022c4_4
  - readline=8.2=h8228510_1
  - referencing=0.35.1=pyhd8ed1ab_0
  - requests=2.32.3=pyhd8ed1ab_0
  - rfc3339-validator=0.1.4=pyhd8ed1ab_0
  - rfc3986-validator=0.1.1=pyh9f0ad1d_0
  - rpds-py=0.20.0=py38h4005ec7_0
  - scikit-learn=1.3.2=py38ha25d942_2
  - scipy=1.10.1=py38h59b608b_3
  - send2trash=1.8.3=pyh0d859eb_0
  - sentry-sdk=2.19.0=pyhd8ed1ab_0
  - setproctitle=1.3.3=py38h01eb140_0
  - setuptools=75.3.0=pyhd8ed1ab_0
  - six=1.16.0=pyh6c4a22f_0
  - smmap=5.0.0=pyhd8ed1ab_0
  - sniffio=1.3.1=pyhd8ed1ab_0
  - soupsieve=2.5=pyhd8ed1ab_1
  - sqlite=3.46.0=h6d4b2fc_0
  - stack_data=0.6.2=pyhd8ed1ab_0
  - sympy=1.13.3=pypyh2585a3b_103
  - terminado=0.18.1=pyh0d859eb_0
  - threadpoolctl=3.5.0=pyhc1e730c_0
  - tinycss2=1.4.0=pyhd8ed1ab_0
  - tk=8.6.13=noxft_h4845f30_101
  - tomli=2.0.2=pyhd8ed1ab_0
  - torchaudio=2.4.1=py38_cu121
  - torchmetrics=1.5.2=pyhe5570ce_0
  - torchtriton=3.0.0=py38
  - torchvision=0.20.0=py38_cu121
  - tornado=6.4.1=py38hfb59056_0
  - tqdm=4.67.1=pyhd8ed1ab_0
  - traitlets=5.14.3=pyhd8ed1ab_0
  - types-python-dateutil=2.9.0.20241003=pyhff2d567_0
  - typing-extensions=4.12.2=hd8ed1ab_0
  - typing_extensions=4.12.2=pyha770c72_0
  - typing_utils=0.1.0=pyhd8ed1ab_0
  - unicodedata2=15.1.0=py38h01eb140_0
  - uri-template=1.3.0=pyhd8ed1ab_0
  - urllib3=2.2.3=pyhd8ed1ab_0
  - wandb=0.16.6=pyhd8ed1ab_1
  - wcwidth=0.2.13=pyhd8ed1ab_0
  - webcolors=24.8.0=pyhd8ed1ab_0
  - webencodings=0.5.1=pyhd8ed1ab_2
  - websocket-client=1.8.0=pyhd8ed1ab_0
  - wheel=0.45.1=pyhd8ed1ab_0
  - widgetsnbextension=4.0.13=pyhd8ed1ab_0
  - xorg-libxau=1.0.11=hb9d3cd8_1
  - xorg-libxdmcp=1.1.5=hb9d3cd8_0
  - xz=5.2.6=h166bdaf_0
  - yaml=0.2.5=h7f98852_2
  - zeromq=4.3.5=h59595ed_1
  - zipp=3.21.0=pyhd8ed1ab_0
  - zlib=1.2.13=h4ab18f5_6
  - zstandard=0.23.0=py38h62bed22_0
  - zstd=1.5.6=ha6fb4c9_0
  - pip:
    - pyvacy==0.0.32
    - torchsummary==1.5.1
8 lira-pytorch/.gitignore (vendored)

@@ -1,8 +0,0 @@
__pycache__
exp
logs
slurm
gpu.sh
*.out
.psyncup.json

@@ -1,202 +0,0 @@
[Deleted file: the full standard Apache License, Version 2.0 text (202 lines), presumably lira-pytorch/LICENSE.]

@@ -1,42 +0,0 @@
# Likelihood Ratio Attack (LiRA) in PyTorch

Implementation of the original [LiRA](https://github.com/tensorflow/privacy/tree/master/research/mi_lira_2021) using PyTorch. To run the code, first create an environment from the `env.yml` file. Then run the following command to train the models and run the LiRA attack:

```
./run.sh
```

The output will generate and store a log-scale FPR-TPR curve as `./fprtpr.png`, with the TPR@0.1%FPR reported in the output log.

## Results on CIFAR10

Using 16 shadow models trained with `ResNet18 and 2 augmented queries`:

![roc](figures/fprtpr_resnet18.png)
```
Attack Ours (online)
AUC 0.6548, Accuracy 0.6015, TPR@0.1%FPR of 0.0068
Attack Ours (online, fixed variance)
AUC 0.6700, Accuracy 0.6042, TPR@0.1%FPR of 0.0464
Attack Ours (offline)
AUC 0.5250, Accuracy 0.5353, TPR@0.1%FPR of 0.0041
Attack Ours (offline, fixed variance)
AUC 0.5270, Accuracy 0.5380, TPR@0.1%FPR of 0.0192
Attack Global threshold
AUC 0.5948, Accuracy 0.5869, TPR@0.1%FPR of 0.0006
```

Using 16 shadow models trained with `WideResNet28-10 and 2 augmented queries`:

![roc](figures/fprtpr_wideresnet.png)
```
Attack Ours (online)
AUC 0.6834, Accuracy 0.6152, TPR@0.1%FPR of 0.0240
Attack Ours (online, fixed variance)
AUC 0.7017, Accuracy 0.6240, TPR@0.1%FPR of 0.0704
Attack Ours (offline)
AUC 0.5621, Accuracy 0.5649, TPR@0.1%FPR of 0.0140
Attack Ours (offline, fixed variance)
AUC 0.5698, Accuracy 0.5628, TPR@0.1%FPR of 0.0370
Attack Global threshold
AUC 0.6016, Accuracy 0.5977, TPR@0.1%FPR of 0.0013
```
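
For reference, TPR@0.1%FPR is the attack's true-positive rate at a false-positive rate of 0.001 on the ROC curve. A minimal sketch of reading it off with scikit-learn, mirroring the lookup in `plot.py` below (the `scores` and `is_member` arrays here are hypothetical placeholders, not repository code):

```
import numpy as np
from sklearn.metrics import roc_curve

# Hypothetical attack scores (higher = more likely a training member)
# and ground-truth membership labels.
scores = np.random.randn(1000)
is_member = np.random.rand(1000) < 0.5

fpr, tpr, _ = roc_curve(is_member, scores)
# TPR at the last threshold whose FPR is still below 0.1%.
print("TPR@0.1%FPR =", tpr[np.where(fpr < 0.001)[0][-1]])
```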
@@ -1,35 +0,0 @@
# Minimal environment for starting a project using conda/mamba:
#     conda env create -n ENVNAME --file ENV.yml

name: template
channels:
  - pytorch
  - nvidia
  - conda-forge
  - defaults
dependencies:
  - python=3.8.6
  - pip
  - pytest
  - numpy
  - scipy
  - scikit-learn
  - matplotlib
  - pandas
  - tqdm
  - wandb
  - jupyterlab
  - jupyter
  - ipykernel
  - pytorch
  - torchvision
  - torchaudio
  - pytorch-cuda=12.1
  - tqdm
  - pytorch-lightning
  - lightning-bolts
  - torchmetrics

# Install packages with pip
# - pip:
#   - ray[tune]
[Two binary files removed and not shown: 38 KiB and 37 KiB images, presumably the figures/fprtpr_*.png plots referenced in the README above.]
@@ -1,58 +0,0 @@
# PyTorch implementation of
# https://github.com/tensorflow/privacy/blob/master/research/mi_lira_2021/inference.py
#
# author: Chenxiang Zhang (orientino)

import argparse
import os
from pathlib import Path

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import models, transforms
from torchvision.datasets import CIFAR10
from tqdm import tqdm

import student_model
from utils import json_file_to_pyobj, get_loaders

parser = argparse.ArgumentParser()
parser.add_argument("--n_queries", default=2, type=int)
parser.add_argument("--model", default="resnet18", type=str)
parser.add_argument("--savedir", default="exp/cifar10", type=str)
args = parser.parse_args()


@torch.no_grad()
def run():
    DEVICE = torch.device("cuda") if torch.cuda.is_available() else torch.device("mps")
    dataset = "cifar10"

    # Dataset
    train_dl, test_dl = get_loaders(dataset, 4096)

    # Infer the logits with multiple queries. Each pass over train_dl is one
    # query; the queries differ presumably via the loader's random
    # augmentations (the "augmented queries" mentioned in the README above).
    for path in os.listdir(args.savedir):
        m = student_model.Model(num_classes=10)
        m.load_state_dict(torch.load(os.path.join(args.savedir, path, "model.pt")))
        m.to(DEVICE)
        m.eval()

        logits_n = []
        for i in range(args.n_queries):
            logits = []
            for x, _ in tqdm(train_dl):
                x = x.to(DEVICE)
                outputs = m(x)
                logits.append(outputs.cpu().numpy())
            logits_n.append(np.concatenate(logits))
        logits_n = np.stack(logits_n, axis=1)
        print(logits_n.shape)

        np.save(os.path.join(args.savedir, path, "logits.npy"), logits_n)


if __name__ == "__main__":
    run()

@@ -1,205 +0,0 @@
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Modified copy by Chenxiang Zhang (orientino) of the original:
# https://github.com/tensorflow/privacy/tree/master/research/mi_lira_2021


import argparse
import functools
import os

import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
from sklearn.metrics import auc, roc_curve

matplotlib.rcParams["pdf.fonttype"] = 42
matplotlib.rcParams["ps.fonttype"] = 42

parser = argparse.ArgumentParser()
parser.add_argument("--savedir", default="exp/cifar10", type=str)
args = parser.parse_args()


def sweep(score, x):
    """
    Compute a ROC curve and then return the FPR, TPR, AUC, and ACC.
    """
    fpr, tpr, _ = roc_curve(x, -score)
    acc = np.max(1 - (fpr + (1 - tpr)) / 2)
    return fpr, tpr, auc(fpr, tpr), acc


def load_data():
    """
    Load our saved scores and then put them into a big matrix.
    """
    global scores, keep
    scores = []
    keep = []

    for path in os.listdir(args.savedir):
        scores.append(np.load(os.path.join(args.savedir, path, "scores.npy")))
        keep.append(np.load(os.path.join(args.savedir, path, "keep.npy")))
    scores = np.array(scores)
    keep = np.array(keep)

    return scores, keep


def generate_ours(keep, scores, check_keep, check_scores, in_size=100000, out_size=100000, fix_variance=False):
    """
    Fit two predictive models using keep and scores in order to predict
    if the examples in check_scores were training data or not, using the
    ground truth answer from check_keep.
    """
    dat_in = []
    dat_out = []

    for j in range(scores.shape[1]):
        dat_in.append(scores[keep[:, j], j, :])
        dat_out.append(scores[~keep[:, j], j, :])

    in_size = min(min(map(len, dat_in)), in_size)
    out_size = min(min(map(len, dat_out)), out_size)

    dat_in = np.array([x[:in_size] for x in dat_in])
    dat_out = np.array([x[:out_size] for x in dat_out])

    mean_in = np.median(dat_in, 1)
    mean_out = np.median(dat_out, 1)

    if fix_variance:
        # Pooled (global) standard deviation shared by both distributions
        std_in = np.std(dat_in)
        std_out = np.std(dat_in)
    else:
        std_in = np.std(dat_in, 1)
        std_out = np.std(dat_out, 1)

    prediction = []
    answers = []
    for ans, sc in zip(check_keep, check_scores):
        pr_in = -scipy.stats.norm.logpdf(sc, mean_in, std_in + 1e-30)
        pr_out = -scipy.stats.norm.logpdf(sc, mean_out, std_out + 1e-30)
        score = pr_in - pr_out

        prediction.extend(score.mean(1))
        answers.extend(ans)

    return prediction, answers


def generate_ours_offline(keep, scores, check_keep, check_scores, in_size=100000, out_size=100000, fix_variance=False):
    """
    Fit a single predictive model using keep and scores in order to predict
    if the examples in check_scores were training data or not, using the
    ground truth answer from check_keep.
    """
    dat_in = []
    dat_out = []

    for j in range(scores.shape[1]):
        dat_in.append(scores[keep[:, j], j, :])
        dat_out.append(scores[~keep[:, j], j, :])

    out_size = min(min(map(len, dat_out)), out_size)

    dat_out = np.array([x[:out_size] for x in dat_out])

    mean_out = np.median(dat_out, 1)

    if fix_variance:
        std_out = np.std(dat_out)
    else:
        std_out = np.std(dat_out, 1)

    prediction = []
    answers = []
    for ans, sc in zip(check_keep, check_scores):
        score = scipy.stats.norm.logpdf(sc, mean_out, std_out + 1e-30)

        prediction.extend(score.mean(1))
        answers.extend(ans)
    return prediction, answers


def generate_global(keep, scores, check_keep, check_scores):
    """
    Use a simple global threshold sweep to predict if the examples in
    check_scores were training data or not, using the ground truth answer from
    check_keep.
    """
    prediction = []
    answers = []
    for ans, sc in zip(check_keep, check_scores):
        prediction.extend(-sc.mean(1))
        answers.extend(ans)

    return prediction, answers


def do_plot(fn, keep, scores, ntest, legend="", metric="auc", sweep_fn=sweep, **plot_kwargs):
    """
    Generate the ROC curves by using ntest models as test models and the rest to train.
    """
    prediction, answers = fn(keep[:-ntest], scores[:-ntest], keep[-ntest:], scores[-ntest:])

    # Note: `auc` here shadows sklearn.metrics.auc within this function
    fpr, tpr, auc, acc = sweep_fn(np.array(prediction), np.array(answers, dtype=bool))

    low = tpr[np.where(fpr < 0.001)[0][-1]]

    print("Attack %s AUC %.4f, Accuracy %.4f, TPR@0.1%%FPR of %.4f" % (legend, auc, acc, low))

    metric_text = ""
    if metric == "auc":
        metric_text = "auc=%.3f" % auc
    elif metric == "acc":
        metric_text = "acc=%.3f" % acc

    plt.plot(fpr, tpr, label=legend + metric_text, **plot_kwargs)
    return (acc, auc)


def fig_fpr_tpr():
    plt.figure(figsize=(4, 3))

    do_plot(generate_ours, keep, scores, 1, "Ours (online)\n", metric="auc")
    do_plot(functools.partial(generate_ours, fix_variance=True), keep, scores, 1, "Ours (online, fixed variance)\n", metric="auc")
    do_plot(functools.partial(generate_ours_offline), keep, scores, 1, "Ours (offline)\n", metric="auc")
    do_plot(functools.partial(generate_ours_offline, fix_variance=True), keep, scores, 1, "Ours (offline, fixed variance)\n", metric="auc")
    do_plot(generate_global, keep, scores, 1, "Global threshold\n", metric="auc")

    plt.semilogx()
    plt.semilogy()
    plt.xlim(1e-5, 1)
    plt.ylim(1e-5, 1)
    plt.xlabel("False Positive Rate")
    plt.ylabel("True Positive Rate")
    plt.plot([0, 1], [0, 1], ls="--", color="gray")
    plt.subplots_adjust(bottom=0.18, left=0.18, top=0.96, right=0.96)
    plt.legend(fontsize=8)
    plt.savefig("fprtpr.png")
    plt.show()


if __name__ == "__main__":
    load_data()
    fig_fpr_tpr()

@@ -1,21 +0,0 @@
python3 train.py --epochs 100 --shadow_id 0 --debug
python3 train.py --epochs 100 --shadow_id 1 --debug
python3 train.py --epochs 100 --shadow_id 2 --debug
python3 train.py --epochs 100 --shadow_id 3 --debug
python3 train.py --epochs 100 --shadow_id 4 --debug
python3 train.py --epochs 100 --shadow_id 5 --debug
python3 train.py --epochs 100 --shadow_id 6 --debug
python3 train.py --epochs 100 --shadow_id 7 --debug
python3 train.py --epochs 100 --shadow_id 8 --debug
python3 train.py --epochs 100 --shadow_id 9 --debug
python3 train.py --epochs 100 --shadow_id 10 --debug
python3 train.py --epochs 100 --shadow_id 11 --debug
python3 train.py --epochs 100 --shadow_id 12 --debug
python3 train.py --epochs 100 --shadow_id 13 --debug
python3 train.py --epochs 100 --shadow_id 14 --debug
python3 train.py --epochs 100 --shadow_id 15 --debug

python3 inference.py --savedir exp/cifar10
python3 score.py --savedir exp/cifar10
python3 plot.py --savedir exp/cifar10

@@ -1,16 +0,0 @@
python3 student_shadow_train.py --epochs 100 --shadow_id 0 --debug
python3 student_shadow_train.py --epochs 100 --shadow_id 1 --debug
python3 student_shadow_train.py --epochs 100 --shadow_id 2 --debug
python3 student_shadow_train.py --epochs 100 --shadow_id 3 --debug
python3 student_shadow_train.py --epochs 100 --shadow_id 4 --debug
python3 student_shadow_train.py --epochs 100 --shadow_id 5 --debug
python3 student_shadow_train.py --epochs 100 --shadow_id 6 --debug
python3 student_shadow_train.py --epochs 100 --shadow_id 7 --debug
python3 student_shadow_train.py --epochs 100 --shadow_id 8 --debug
python3 student_shadow_train.py --epochs 100 --shadow_id 9 --debug
python3 student_shadow_train.py --epochs 100 --shadow_id 10 --debug
python3 student_shadow_train.py --epochs 100 --shadow_id 11 --debug
python3 student_shadow_train.py --epochs 100 --shadow_id 12 --debug
python3 student_shadow_train.py --epochs 100 --shadow_id 13 --debug
python3 student_shadow_train.py --epochs 100 --shadow_id 14 --debug
python3 student_shadow_train.py --epochs 100 --shadow_id 15 --debug

@@ -1,70 +0,0 @@
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Modified copy by Chenxiang Zhang (orientino) of the original:
# https://github.com/tensorflow/privacy/tree/master/research/mi_lira_2021


import argparse
import multiprocessing as mp
import os
from pathlib import Path

import numpy as np
from torchvision.datasets import CIFAR10

parser = argparse.ArgumentParser()
parser.add_argument("--savedir", default="exp/cifar10", type=str)
args = parser.parse_args()


def load_one(path):
    """
    Load the logits for one model and convert them to a scored prediction.
    """
    opredictions = np.load(os.path.join(path, "logits.npy"))  # [n_examples, n_augs, n_classes]

    # Be exceptionally careful.
    # Numerically stable everything, as described in the paper.
    predictions = opredictions - np.max(opredictions, axis=-1, keepdims=True)
    predictions = np.array(np.exp(predictions), dtype=np.float64)
    predictions = predictions / np.sum(predictions, axis=-1, keepdims=True)

    labels = get_labels()  # TODO generalize this

    COUNT = predictions.shape[0]
    y_true = predictions[np.arange(COUNT), :, labels[:COUNT]]

    print("mean acc", np.mean(predictions[:, 0, :].argmax(1) == labels[:COUNT]))

    predictions[np.arange(COUNT), :, labels[:COUNT]] = 0
    y_wrong = np.sum(predictions, axis=-1)

    logit = np.log(y_true + 1e-45) - np.log(y_wrong + 1e-45)
    np.save(os.path.join(path, "scores.npy"), logit)
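
# The score saved above is the numerically stable log-odds of the true class,
#     phi = log(p_y) - log(1 - p_y),
# computed from softmax probabilities after subtracting the max logit.
# A tiny sanity check (a sketch, not part of the original file): for a
# confident prediction p_y = 0.99, phi = log(0.99 / 0.01) ~= 4.6.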


def get_labels():
    datadir = Path().home() / "opt/data/cifar"
    train_ds = CIFAR10(root=datadir, train=True, download=True)
    return np.array(train_ds.targets)


def load_stats():
    with mp.Pool(8) as p:
        p.map(load_one, [os.path.join(args.savedir, x) for x in os.listdir(args.savedir)])


if __name__ == "__main__":
    load_stats()

@@ -1,30 +0,0 @@
import torch
import torch.nn as nn


# Create a similar student class. No pooling is applied after flattening,
# and this variant returns only the logits.
class ModifiedLightNNCosine(nn.Module):
    def __init__(self, num_classes=10):
        super(ModifiedLightNNCosine, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(16, 16, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.classifier = nn.Sequential(
            nn.Linear(1024, 256),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(256, num_classes),
        )

    def forward(self, x):
        x = self.features(x)
        flattened_conv_output = torch.flatten(x, 1)
        x = self.classifier(flattened_conv_output)
        return x


Model = ModifiedLightNNCosine

@@ -1,246 +0,0 @@
# PyTorch implementation of
# https://github.com/tensorflow/privacy/blob/master/research/mi_lira_2021/train.py
#
# author: Chenxiang Zhang (orientino)

# standard library
import os
import argparse
import time
from pathlib import Path

# torch and training libraries
import numpy as np
import pytorch_lightning as pl
import torch
import wandb
from torch import nn
from torch.utils.data import DataLoader
from torchvision import models, transforms
from torchvision.datasets import CIFAR10
from tqdm import tqdm
from torch.optim.lr_scheduler import MultiStepLR
import torch.optim as optim
import torch.nn.functional as F
import torchvision
from torchvision import transforms

# privacy libraries
import opacus
from opacus.validators import ModuleValidator

# custom modules
from utils import json_file_to_pyobj, get_loaders
from WideResNet import WideResNet
import student_model

# suppress warnings
import warnings

warnings.filterwarnings("ignore")


parser = argparse.ArgumentParser()
parser.add_argument("--lr", default=0.1, type=float)
parser.add_argument("--epochs", default=1, type=int)
parser.add_argument("--n_shadows", default=16, type=int)
parser.add_argument("--shadow_id", default=1, type=int)
parser.add_argument("--model", default="resnet18", type=str)
parser.add_argument("--pkeep", default=0.5, type=float)
parser.add_argument("--savedir", default="exp/cifar10", type=str)
parser.add_argument("--debug", action="store_true")
args = parser.parse_args()

DEVICE = torch.device("cuda") if torch.cuda.is_available() else torch.device("mps")


def get_trainset(train_batch_size=128, test_batch_size=10):
    print(f"Train batch size: {train_batch_size}")
    normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))

    train_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Lambda(lambda x: F.pad(x.unsqueeze(0), (4, 4, 4, 4), mode='reflect').squeeze()),
        transforms.ToPILImage(),
        transforms.RandomCrop(32),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])

    test_transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=False, transform=train_transform)
    testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=False, transform=test_transform)

    return trainset, testset


@torch.no_grad()
def test(model, test_dl, teacher=False):
    device = DEVICE
    model.to(device)
    model.eval()

    correct = 0
    total = 0

    for inputs, labels in test_dl:
        inputs, labels = inputs.to(device), labels.to(device)
        if teacher:
            outputs, _, _, _ = model(inputs)
        else:
            outputs = model(inputs)
        _, predicted = torch.max(outputs.data, 1)

        total += labels.size(0)
        correct += (predicted == labels).sum().item()

    accuracy = 100 * correct / total
    print(f"Test Accuracy: {accuracy:.2f}%")
    return accuracy


def run(teacher, student):
    device = DEVICE
    seed = np.random.randint(0, 1000000000)
    seed ^= int(time.time())
    pl.seed_everything(seed)

    # NOTE: debug is forced on here, so wandb stays disabled
    args.debug = True
    wandb.init(project="lira", mode="disabled" if args.debug else "online")
    wandb.config.update(args)

    # Dataset
    train_ds, test_ds = get_trainset()
    # Compute the IN / OUT subset:
    # If we run each experiment independently then even after a lot of trials
    # there will still probably be some examples that were always included
    # or always excluded. So instead, with experiment IDs, we guarantee that
    # after `args.n_shadows` are done, each example is seen exactly half
    # of the time in train, and half of the time not in train.

    size = len(train_ds)
    np.random.seed(seed)
    if args.n_shadows is not None:
        np.random.seed(0)
        keep = np.random.uniform(0, 1, size=(args.n_shadows, size))
        order = keep.argsort(0)
        keep = order < int(args.pkeep * args.n_shadows)
        keep = np.array(keep[args.shadow_id], dtype=bool)
        keep = keep.nonzero()[0]
    else:
        keep = np.random.choice(size, size=int(args.pkeep * size), replace=False)
        keep.sort()
    keep_bool = np.full((size), False)
    keep_bool[keep] = True
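
    # A minimal sketch (not from the original file) of why the argsort trick
    # above balances membership: every column j of `order` is a permutation of
    # 0..n_shadows-1, so example j is "IN" for exactly
    # int(pkeep * n_shadows) of the shadow models.
    #
    #   n_shadows, n_examples, pkeep = 4, 5, 0.5
    #   r = np.random.uniform(0, 1, size=(n_shadows, n_examples))
    #   in_mask = r.argsort(0) < int(pkeep * n_shadows)
    #   assert (in_mask.sum(0) == int(pkeep * n_shadows)).all()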

    train_ds = torch.utils.data.Subset(train_ds, keep)
    train_dl = DataLoader(train_ds, batch_size=128, shuffle=True, num_workers=4)
    test_dl = DataLoader(test_ds, batch_size=128, shuffle=False, num_workers=4)

    # Train
    learning_rate = 0.001
    T = 2
    soft_target_loss_weight = 0.25
    ce_loss_weight = 0.75

    ce_loss = nn.CrossEntropyLoss()
    optimizer = optim.Adam(student.parameters(), lr=learning_rate)

    teacher.eval()   # teacher in evaluation mode
    student.train()  # student in train mode

    for epoch in range(args.epochs):
        running_loss = 0.0
        for inputs, labels in train_dl:
            inputs, labels = inputs.to(device), labels.to(device)

            optimizer.zero_grad()

            # Forward pass with the teacher model; no gradients are needed
            # here since the teacher's weights are frozen
            with torch.no_grad():
                teacher_logits, _, _, _ = teacher(inputs)

            # Forward pass with the student model
            student_logits = student(inputs)

            # Soften the teacher probabilities and take the student's
            # log-probabilities at the same temperature
            soft_targets = nn.functional.softmax(teacher_logits / T, dim=-1)
            soft_prob = nn.functional.log_softmax(student_logits / T, dim=-1)

            # Soft-target (KL) loss, scaled by T**2 as suggested by the
            # authors of "Distilling the Knowledge in a Neural Network"
            soft_targets_loss = torch.sum(soft_targets * (soft_targets.log() - soft_prob)) / soft_prob.size()[0] * (T**2)

            # True-label loss
            label_loss = ce_loss(student_logits, labels)

            # Weighted sum of the two losses
            loss = soft_target_loss_weight * soft_targets_loss + ce_loss_weight * label_loss

            loss.backward()
            optimizer.step()

            running_loss += loss.item()

        print(f"Epoch {epoch+1}/{args.epochs}, Loss: {running_loss / len(train_dl)}")
    accuracy = test(student, test_dl)

    # Save the shadow model together with its IN/OUT mask
    print("saving model")
    savedir = os.path.join(args.savedir, str(args.shadow_id))
    os.makedirs(savedir, exist_ok=True)
    np.save(savedir + "/keep.npy", keep_bool)
    torch.save(student.state_dict(), savedir + "/model.pt")


def main():
    epochs = args.epochs
    json_options = json_file_to_pyobj("wresnet16-audit-cifar10.json")
    training_configurations = json_options.training

    wrn_depth = training_configurations.wrn_depth
    wrn_width = training_configurations.wrn_width
    dataset = training_configurations.dataset.lower()

    if torch.cuda.is_available():
        device = torch.device('cuda:0')
    else:
        device = torch.device('cpu')

    print("Load the teacher model")
    # Instantiate the teacher model
    strides = [1, 1, 2, 2]
    teacher = WideResNet(d=wrn_depth, k=wrn_width, n_classes=10, input_features=3, output_features=16, strides=strides)
    teacher = ModuleValidator.fix(teacher)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(teacher.parameters(), lr=0.1, momentum=0.9, nesterov=True, weight_decay=5e-4)
    scheduler = MultiStepLR(optimizer, milestones=[int(elem * epochs) for elem in [0.3, 0.6, 0.8]], gamma=0.2)
    train_loader, test_loader = get_loaders(dataset, training_configurations.batch_size)
    best_test_set_accuracy = 0
    dp_epsilon = 8
    dp_delta = 1e-5
    norm = 1.0

    # Wrap the teacher exactly as it was wrapped during DP training so that
    # the saved state dict matches the module structure
    privacy_engine = opacus.PrivacyEngine()
    teacher, optimizer, train_loader = privacy_engine.make_private_with_epsilon(
        module=teacher,
        optimizer=optimizer,
        data_loader=train_loader,
        epochs=epochs,
        target_epsilon=dp_epsilon,
        target_delta=dp_delta,
        max_grad_norm=norm,
    )

    teacher.load_state_dict(torch.load(os.path.join("wrn-1733078278-8e-1e-05d-12.0n-dict.pt"), weights_only=True))
    teacher.to(device)
    teacher.eval()

    # Instantiate the student "shadow model"
    student = student_model.Model(num_classes=10).to(device)

    # Check the first-layer norm of both networks -- the student's should be smaller
    print("Norm of 1st layer for teacher:", torch.norm(teacher.conv1.weight).item())
    print("Norm of 1st layer for student:", torch.norm(student.features[0].weight).item())

    # Train the student shadow model
    run(teacher=teacher, student=student)


if __name__ == "__main__":
    main()
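
# A minimal standalone sketch of the distillation objective used in run(),
# on dummy logits; the shapes and values below are illustrative assumptions,
# not part of the pipeline:
#
#     import torch
#     import torch.nn as nn
#
#     T = 2.0
#     teacher_logits = torch.randn(4, 10)
#     student_logits = torch.randn(4, 10)
#     p = nn.functional.softmax(teacher_logits / T, dim=-1)
#     log_q = nn.functional.log_softmax(student_logits / T, dim=-1)
#     # KL(p || q) summed over classes, averaged over the batch, scaled by T^2
#     kd_loss = torch.sum(p * (p.log() - log_q)) / p.size(0) * T**2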
@@ -1,257 +0,0 @@
# PyTorch implementation of
# https://github.com/tensorflow/privacy/blob/master/research/mi_lira_2021/train.py
#
# author: Chenxiang Zhang (orientino)

import argparse
import os
import time
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
import wandb
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torchvision import models, transforms
from torchvision.datasets import CIFAR10
from tqdm import tqdm
from opacus.validators import ModuleValidator
from opacus import PrivacyEngine
from opacus.utils.batch_memory_manager import BatchMemoryManager
import pyvacy
# from pyvacy import optim  # , analysis, sampling

from wide_resnet import WideResNet

parser = argparse.ArgumentParser()
parser.add_argument("--lr", default=0.1, type=float)
parser.add_argument("--epochs", default=1, type=int)
parser.add_argument("--n_shadows", default=16, type=int)
parser.add_argument("--shadow_id", default=1, type=int)
parser.add_argument("--model", default="resnet18", type=str)
parser.add_argument("--pkeep", default=0.5, type=float)
parser.add_argument("--savedir", default="exp/cifar10", type=str)
parser.add_argument("--debug", action="store_true")
args = parser.parse_args()

# Prefer CUDA, then Apple's MPS backend, then CPU
if torch.cuda.is_available():
    DEVICE = torch.device("cuda")
elif torch.backends.mps.is_available():
    DEVICE = torch.device("mps")
else:
    DEVICE = torch.device("cpu")
EPOCHS = args.epochs


class DewisNet(nn.Module):
    def __init__(self):
        super(DewisNet, self).__init__()
        # I started my model from the tutorial:
        # https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html,
        # then modified it.

        # 2 convolutional layers, with pooling after each
        self.conv1 = nn.Conv2d(3, 12, 5)
        self.conv2 = nn.Conv2d(12, 32, 5)
        self.pool = nn.MaxPool2d(2, 2)

        # 3 linear layers
        self.fc1 = nn.Linear(32 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = torch.flatten(x, 1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x


class JagielskiNet(nn.Module):
    def __init__(self, input_shape, num_classes, l2=0.01):
        super(JagielskiNet, self).__init__()
        self.flatten = nn.Flatten()

        input_dim = 1
        for dim in input_shape:
            input_dim *= dim

        self.dense1 = nn.Linear(input_dim, 32)
        self.relu1 = nn.ReLU()
        self.dense2 = nn.Linear(32, num_classes)

        # Initialize weights with Glorot Normal (Xavier Normal)
        torch.nn.init.xavier_normal_(self.dense1.weight)
        torch.nn.init.xavier_normal_(self.dense2.weight)

        # L2 regularization (weight decay)
        self.l2 = l2

    def forward(self, x):
        x = self.flatten(x)
        x = self.dense1(x)
        x = self.relu1(x)
        x = self.dense2(x)
        return x


def run():
    seed = np.random.randint(0, 1000000000)
    seed ^= int(time.time())
    pl.seed_everything(seed)

    args.debug = True
    wandb.init(project="lira", mode="disabled" if args.debug else "online")
    wandb.config.update(args)

    # Dataset
    train_transform = transforms.Compose(
        [
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop(32, padding=4),
            transforms.ToTensor(),
            transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2470, 0.2435, 0.2616]),
        ]
    )
    test_transform = transforms.Compose(
        [
            transforms.ToTensor(),
            transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2470, 0.2435, 0.2616]),
        ]
    )
    datadir = Path().home() / "opt/data/cifar"
    train_ds = CIFAR10(root=datadir, train=True, download=True, transform=train_transform)
    test_ds = CIFAR10(root=datadir, train=False, download=True, transform=test_transform)

    # Compute the IN / OUT subset:
    # If we ran each experiment independently, then even after many trials
    # some examples would probably still have been always included or always
    # excluded. Instead, with experiment IDs, we guarantee that after
    # `args.n_shadows` runs each example was in the training set for exactly
    # half of the runs and out of it for the other half.
    size = len(train_ds)
    np.random.seed(seed)
    if args.n_shadows is not None:
        np.random.seed(0)
        keep = np.random.uniform(0, 1, size=(args.n_shadows, size))
        order = keep.argsort(0)
        keep = order < int(args.pkeep * args.n_shadows)
        keep = np.array(keep[args.shadow_id], dtype=bool)
        keep = keep.nonzero()[0]
    else:
        keep = np.random.choice(size, size=int(args.pkeep * size), replace=False)
        keep.sort()
    keep_bool = np.full((size), False)
    keep_bool[keep] = True

    train_ds = torch.utils.data.Subset(train_ds, keep)
    train_dl = DataLoader(train_ds, batch_size=256, shuffle=True, num_workers=4)
    test_dl = DataLoader(test_ds, batch_size=128, shuffle=False, num_workers=4)

    # Model
    if args.model == "dewisnet":
        m = DewisNet()
    elif args.model == "jnet":
        m = JagielskiNet((3, 32, 32), 10)
    elif args.model == "wresnet28-2":
        m = WideResNet(28, 2, 0.0, 10)
    elif args.model == "wresnet28-10":
        m = WideResNet(28, 10, 0.3, 10)
    elif args.model == "resnet18":
        m = models.resnet18(weights=None, num_classes=10)
        m.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        m.maxpool = nn.Identity()
    else:
        raise NotImplementedError
    m = m.to(DEVICE)

    m = ModuleValidator.fix(m)
    ModuleValidator.validate(m, strict=True)

    print(f"Device: {DEVICE}")
    optim = torch.optim.SGD(m.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
    # optim = pyvacy.DPSGD(
    #     params=m.parameters(),
    #     lr=args.lr,
    #     momentum=0.9,
    #     weight_decay=5e-4,
    # )
    sched = torch.optim.lr_scheduler.CosineAnnealingLR(optim, T_max=args.epochs)

    # Train; flip this flag to train with Opacus DP-SGD instead of plain SGD
    use_dp = False
    if use_dp:
        privacy_engine = PrivacyEngine()
        m, optim, train_dl = privacy_engine.make_private_with_epsilon(
            module=m,
            optimizer=optim,
            data_loader=train_dl,
            epochs=args.epochs,
            target_epsilon=8,
            target_delta=1e-4,
            max_grad_norm=1.0,
            batch_first=True,
        )

        with BatchMemoryManager(
            data_loader=train_dl,
            max_physical_batch_size=1000,
            optimizer=optim,
        ) as memory_safe_data_loader:
            for i in tqdm(range(args.epochs)):
                m.train()
                loss_total = 0
                pbar = tqdm(memory_safe_data_loader, leave=False)
                for itr, (x, y) in enumerate(pbar):
                    x, y = x.to(DEVICE), y.to(DEVICE)
                    loss = F.cross_entropy(m(x), y)
                    loss_total += loss.item()

                    pbar.set_postfix_str(f"loss: {loss:.2f}")
                    optim.zero_grad()
                    loss.backward()
                    optim.step()
                sched.step()

                wandb.log({"loss": loss_total / len(train_dl)})
    else:
        for i in tqdm(range(args.epochs)):
            m.train()
            loss_total = 0
            pbar = tqdm(train_dl, leave=False)
            for itr, (x, y) in enumerate(pbar):
                x, y = x.to(DEVICE), y.to(DEVICE)
                loss = F.cross_entropy(m(x), y)
                loss_total += loss.item()

                pbar.set_postfix_str(f"loss: {loss:.2f}")
                optim.zero_grad()
                loss.backward()
                optim.step()
            sched.step()

            wandb.log({"loss": loss_total / len(train_dl)})

    acc_test = get_acc(m, test_dl)
    print(f"[test] acc_test: {acc_test:.4f}")
    wandb.log({"acc_test": acc_test})

    savedir = os.path.join(args.savedir, str(args.shadow_id))
    os.makedirs(savedir, exist_ok=True)
    np.save(savedir + "/keep.npy", keep_bool)
    torch.save(m.state_dict(), savedir + "/model.pt")


@torch.no_grad()
def get_acc(model, dl):
    acc = []
    for x, y in dl:
        x, y = x.to(DEVICE), y.to(DEVICE)
        acc.append(torch.argmax(model(x), dim=1) == y)
    acc = torch.cat(acc)
    acc = torch.sum(acc) / len(acc)

    return acc.item()


if __name__ == "__main__":
    run()
@@ -1,75 +0,0 @@
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init


class wide_basic(nn.Module):
    def __init__(self, in_planes, planes, dropout_rate, stride=1):
        super(wide_basic, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1)
        self.dropout = nn.Dropout(p=dropout_rate)

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride),
            )

    def forward(self, x):
        out = self.conv1(F.relu(self.bn1(x)))
        out = self.dropout(out)
        out = self.conv2(F.relu(self.bn2(out)))
        out += self.shortcut(x)

        return out


class WideResNet(nn.Module):
    def __init__(self, depth, widen_factor, dropout_rate, n_classes):
        super(WideResNet, self).__init__()
        self.in_planes = 16

        assert (depth - 4) % 6 == 0, "Wide-ResNet depth should be 6n+4"
        n = (depth - 4) // 6
        k = widen_factor
        stages = [16, 16 * k, 32 * k, 64 * k]

        self.conv1 = nn.Conv2d(3, stages[0], kernel_size=3, stride=1, padding=1)
        self.layer1 = self._wide_layer(wide_basic, stages[1], n, dropout_rate, stride=1)
        self.layer2 = self._wide_layer(wide_basic, stages[2], n, dropout_rate, stride=2)
        self.layer3 = self._wide_layer(wide_basic, stages[3], n, dropout_rate, stride=2)
        self.bn1 = nn.BatchNorm2d(stages[3], momentum=0.9)
        self.linear = nn.Linear(stages[3], n_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
            elif isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
                nn.init.constant_(m.bias, 0)

    def _wide_layer(self, block, planes, n_blocks, dropout_rate, stride):
        strides = [stride] + [1] * (int(n_blocks) - 1)
        layers = []

        for stride in strides:
            layers.append(block(self.in_planes, planes, dropout_rate, stride))
            self.in_planes = planes

        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv1(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = F.relu(self.bn1(out))
        out = F.avg_pool2d(out, 8)
        out = out.view(out.size(0), -1)
        out = self.linear(out)

        return out
@@ -1,143 +0,0 @@
import torch
import torch.nn as nn
from torchsummary import summary
import math


class IndividualBlock1(nn.Module):
    def __init__(self, input_features, output_features, stride, subsample_input=True, increase_filters=True):
        super(IndividualBlock1, self).__init__()

        self.activation = nn.ReLU(inplace=True)

        self.batch_norm1 = nn.BatchNorm2d(input_features)
        self.batch_norm2 = nn.BatchNorm2d(output_features)

        self.conv1 = nn.Conv2d(input_features, output_features, kernel_size=3, stride=stride, padding=1, bias=False)
        self.conv2 = nn.Conv2d(output_features, output_features, kernel_size=3, stride=1, padding=1, bias=False)

        self.subsample_input = subsample_input
        self.increase_filters = increase_filters
        if subsample_input:
            self.conv_inp = nn.Conv2d(input_features, output_features, kernel_size=1, stride=2, padding=0, bias=False)
        elif increase_filters:
            self.conv_inp = nn.Conv2d(input_features, output_features, kernel_size=1, stride=1, padding=0, bias=False)

    def forward(self, x):
        if self.subsample_input or self.increase_filters:
            x = self.batch_norm1(x)
            x = self.activation(x)
            x1 = self.conv1(x)
        else:
            x1 = self.batch_norm1(x)
            x1 = self.activation(x1)
            x1 = self.conv1(x1)
        x1 = self.batch_norm2(x1)
        x1 = self.activation(x1)
        x1 = self.conv2(x1)

        if self.subsample_input or self.increase_filters:
            return self.conv_inp(x) + x1
        else:
            return x + x1


class IndividualBlockN(nn.Module):
    def __init__(self, input_features, output_features, stride):
        super(IndividualBlockN, self).__init__()

        self.activation = nn.ReLU(inplace=True)

        self.batch_norm1 = nn.BatchNorm2d(input_features)
        self.batch_norm2 = nn.BatchNorm2d(output_features)

        self.conv1 = nn.Conv2d(input_features, output_features, kernel_size=3, stride=stride, padding=1, bias=False)
        self.conv2 = nn.Conv2d(output_features, output_features, kernel_size=3, stride=stride, padding=1, bias=False)

    def forward(self, x):
        x1 = self.batch_norm1(x)
        x1 = self.activation(x1)
        x1 = self.conv1(x1)
        x1 = self.batch_norm2(x1)
        x1 = self.activation(x1)
        x1 = self.conv2(x1)

        return x1 + x


class Nblock(nn.Module):

    def __init__(self, N, input_features, output_features, stride, subsample_input=True, increase_filters=True):
        super(Nblock, self).__init__()

        layers = []
        for i in range(N):
            if i == 0:
                layers.append(IndividualBlock1(input_features, output_features, stride, subsample_input, increase_filters))
            else:
                layers.append(IndividualBlockN(output_features, output_features, stride=1))

        self.nblockLayer = nn.Sequential(*layers)

    def forward(self, x):
        return self.nblockLayer(x)


class WideResNet(nn.Module):

    def __init__(self, d, k, n_classes, input_features, output_features, strides):
        super(WideResNet, self).__init__()

        self.conv1 = nn.Conv2d(input_features, output_features, kernel_size=3, stride=strides[0], padding=1, bias=False)

        filters = [16 * k, 32 * k, 64 * k]
        self.out_filters = filters[-1]
        N = (d - 4) // 6
        increase_filters = k > 1
        self.block1 = Nblock(N, input_features=output_features, output_features=filters[0], stride=strides[1], subsample_input=False, increase_filters=increase_filters)
        self.block2 = Nblock(N, input_features=filters[0], output_features=filters[1], stride=strides[2])
        self.block3 = Nblock(N, input_features=filters[1], output_features=filters[2], stride=strides[3])

        self.batch_norm = nn.BatchNorm2d(filters[-1])
        self.activation = nn.ReLU(inplace=True)
        self.avg_pool = nn.AvgPool2d(kernel_size=8)
        self.fc = nn.Linear(filters[-1], n_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x):
        x = self.conv1(x)
        attention1 = self.block1(x)
        attention2 = self.block2(attention1)
        attention3 = self.block3(attention2)
        out = self.batch_norm(attention3)
        out = self.activation(out)
        out = self.avg_pool(out)
        out = out.view(-1, self.out_filters)

        return self.fc(out), attention1, attention2, attention3


if __name__ == '__main__':
    # change d and k if you want to check a model other than WRN-40-2
    d = 40
    k = 2
    strides = [1, 1, 2, 2]
    net = WideResNet(d=d, k=k, n_classes=10, input_features=3, output_features=16, strides=strides)

    # verify that an output is produced
    sample_input = torch.ones(size=(1, 3, 32, 32), requires_grad=False)
    net(sample_input)

    # Summarize model
    summary(net, input_size=(3, 32, 32))
@@ -1,838 +0,0 @@
import argparse
import copy
import time
import warnings
from pathlib import Path
from concurrent.futures import ProcessPoolExecutor, as_completed

import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.optim.lr_scheduler import MultiStepLR
from torch.utils.data import DataLoader, TensorDataset, ConcatDataset
from torchvision import transforms
from torchvision.datasets import CIFAR10

import opacus
from opacus.validators import ModuleValidator
from opacus.utils.batch_memory_manager import BatchMemoryManager

from WideResNet import WideResNet
from equations import get_eps_audit
import student_model
import fast_model
import convnet_classifier
import wrn

warnings.filterwarnings("ignore")


DEVICE = None
DTYPE = None
DATADIR = Path("./data")


def get_dataloaders3(m=1000, train_batch_size=128, test_batch_size=10):
    seed = np.random.randint(0, 1e9)
    seed ^= int(time.time())
    pl.seed_everything(seed)

    train_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Lambda(lambda x: F.pad(x.unsqueeze(0),
                                          (4, 4, 4, 4), mode='reflect').squeeze()),
        transforms.ToPILImage(),
        transforms.RandomCrop(32),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    test_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    train_ds = CIFAR10(root=DATADIR, train=True, download=True, transform=train_transform)
    test_ds = CIFAR10(root=DATADIR, train=False, download=True, transform=test_transform)

    # Original dataset (np.stack needs a sequence, not a generator)
    x_train = np.stack([train_ds[i][0].numpy() for i in range(len(train_ds))])
    y_train = np.array(train_ds.targets).astype(np.int64)

    x = np.stack([test_ds[i][0].numpy() for i in range(len(test_ds))])  # applies transforms
    y = np.array(test_ds.targets).astype(np.int64)

    # Pull points from the training set when m is larger than the test set
    if m > len(x):
        k = m - len(x)
        mask = np.full(len(x_train), False)
        mask[:k] = True

        x = np.concatenate([x_train[mask], x])
        y = np.concatenate([y_train[mask], y])
        x_train = x_train[~mask]
        y_train = y_train[~mask]

    # Store the m points which could have been included/excluded
    mask = np.full(len(x), False)
    mask[:m] = True
    mask = mask[np.random.permutation(len(x))]

    adv_points = x[mask]
    adv_labels = y[mask]

    # Mislabel the inclusion/exclusion examples intentionally: this turns
    # them into worst-case canaries, which strengthens the audit
    for i in range(len(adv_labels)):
        while True:
            c = np.random.choice(range(10))
            if adv_labels[i] != c:
                adv_labels[i] = c
                break

    # Decide at chance which of the m points to include in training
    S = np.random.choice([True, False], size=m)  # S[i] == True iff point i is IN

    assert len(adv_points) == m
    inc_points = adv_points[S]
    inc_labels = adv_labels[S]

    td = TensorDataset(torch.from_numpy(inc_points).float(), torch.from_numpy(inc_labels).long())
    td2 = TensorDataset(torch.from_numpy(x_train).float(), torch.from_numpy(y_train).long())
    td = ConcatDataset([td, td2])
    train_dl = DataLoader(td, batch_size=train_batch_size, shuffle=True, num_workers=4)
    pure_train_dl = DataLoader(train_ds, batch_size=train_batch_size, shuffle=True, num_workers=4)
    test_dl = DataLoader(test_ds, batch_size=test_batch_size, shuffle=True, num_workers=4)

    return train_dl, test_dl, pure_train_dl, adv_points, adv_labels, S
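

# A small sketch, separate from the pipeline above, of the canary construction
# in get_dataloaders3: m candidate points receive fresh *wrong* labels, and a
# fair coin flip per point (the vector S) decides which are actually inserted
# into the training set. All names and values here are illustrative
# assumptions.
def _demo_canary_split(m=8, n_classes=10, seed=0):
    rng = np.random.default_rng(seed)
    labels = rng.integers(0, n_classes, size=m)
    # Adding a nonzero offset modulo n_classes always yields a different class
    mislabeled = (labels + rng.integers(1, n_classes, size=m)) % n_classes
    S = rng.choice([True, False], size=m)  # True = the canary is included
    return mislabeled[S], S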

def get_dataloaders_raw(m=1000, train_batch_size=512, test_batch_size=10):
    def preprocess_data(data):
        data = torch.tensor(data)  # .to(DTYPE)
        data = data / 255.0
        data = data.permute(0, 3, 1, 2)
        data = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))(data)
        data = nn.ReflectionPad2d(4)(data)
        data = transforms.RandomCrop(size=(32, 32))(data)
        data = transforms.RandomHorizontalFlip()(data)
        return data

    train_ds = CIFAR10(root=DATADIR, train=True, download=True)
    test_ds = CIFAR10(root=DATADIR, train=False, download=True)

    train_x = train_ds.data
    test_x = test_ds.data
    train_y = np.array(train_ds.targets)
    test_y = np.array(test_ds.targets)

    if m > len(test_x):
        k = m - len(test_x)
        mask = np.full(len(train_x), False)
        mask[:k] = True
        mask = mask[np.random.permutation(len(train_x))]

        test_x = np.concatenate([train_x[mask], test_x])
        test_y = np.concatenate([train_y[mask], test_y])
        train_y = train_y[~mask]
        train_x = train_x[~mask]

    mask = np.full(len(test_x), False)
    mask[:m] = True
    mask = mask[np.random.permutation(len(test_x))]
    S = np.random.choice([True, False], size=m)

    attack_x = test_x[mask][S]
    attack_y = test_y[mask][S]

    # Intentionally mislabel the attack points (canaries)
    for i in range(len(attack_y)):
        while True:
            c = np.random.choice(range(10))
            if attack_y[i] != c:
                attack_y[i] = c
                break

    train_x = np.concatenate([train_x, attack_x])
    train_y = np.concatenate([train_y, attack_y])

    train_x = preprocess_data(train_x)
    test_x = preprocess_data(test_x)
    attack_x = preprocess_data(attack_x)
    train_y = torch.tensor(train_y)
    test_y = torch.tensor(test_y)
    attack_y = torch.tensor(attack_y)

    train_dl = DataLoader(
        TensorDataset(train_x, train_y.long()),
        batch_size=train_batch_size,
        shuffle=True,
        drop_last=True,
        num_workers=4,
    )
    test_dl = DataLoader(
        TensorDataset(test_x, test_y.long()),
        batch_size=train_batch_size,
        shuffle=True,
        num_workers=4,
    )
    return train_dl, test_dl, train_x, attack_x.numpy(), attack_y.numpy(), S


def evaluate_on(model, dataloader):
    correct = 0
    total = 0

    with torch.no_grad():
        model.eval()

        for data in dataloader:
            images, labels = data
            images = images.to(DEVICE)
            labels = labels.to(DEVICE)

            wrn_outputs = model(images)
            if len(wrn_outputs) == 4:
                outputs = wrn_outputs[0]  # the WideResNet also returns attention maps
            else:
                outputs = wrn_outputs

            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    return correct, total


def train_knowledge_distillation(teacher, train_dl, epochs, device, learning_rate=0.001, T=2, soft_target_loss_weight=0.25, ce_loss_weight=0.75):
    # Instantiate the student
    student = student_model.Model(num_classes=10).to(device)

    ce_loss = nn.CrossEntropyLoss()
    optimizer = optim.Adam(student.parameters(), lr=learning_rate)
    student_init = copy.deepcopy(student)
    student.to(device)
    teacher.to(device)
    teacher.eval()   # teacher in evaluation mode
    student.train()  # student in train mode
    for epoch in range(epochs):
        running_loss = 0.0
        for inputs, labels in train_dl:
            inputs, labels = inputs.to(device), labels.to(device)

            optimizer.zero_grad()

            # Forward pass with the teacher model; no gradients are needed
            # since the teacher's weights are frozen
            with torch.no_grad():
                teacher_logits, _, _, _ = teacher(inputs)

            # Forward pass with the student model
            student_logits = student(inputs)

            # Soften the teacher probabilities and take the student's
            # log-probabilities at the same temperature
            soft_targets = nn.functional.softmax(teacher_logits / T, dim=-1)
            soft_prob = nn.functional.log_softmax(student_logits / T, dim=-1)

            # Soft-target (KL) loss, scaled by T**2 as suggested by the
            # authors of "Distilling the Knowledge in a Neural Network"
            soft_targets_loss = torch.sum(soft_targets * (soft_targets.log() - soft_prob)) / soft_prob.size()[0] * (T**2)

            # True-label loss
            label_loss = ce_loss(student_logits, labels)

            # Weighted sum of the two losses
            loss = soft_target_loss_weight * soft_targets_loss + ce_loss_weight * label_loss

            loss.backward()
            optimizer.step()

            running_loss += loss.item()
        if epoch % 10 == 0:
            print(f"Epoch {epoch+1}/{epochs}, Loss: {running_loss / len(train_dl)}")

    return student_init, student


def train_no_cap(model, model_init, hp, train_dl, test_dl, optimizer, criterion, scheduler, adv_points, adv_labels, S):
    best_test_set_accuracy = 0

    for epoch in range(hp['epochs']):
        model.train()
        for i, data in enumerate(train_dl, 0):
            inputs, labels = data
            inputs = inputs.to(DEVICE)
            labels = labels.to(DEVICE)

            optimizer.zero_grad()

            wrn_outputs = model(inputs)
            if len(wrn_outputs) == 4:
                outputs = wrn_outputs[0]
            else:
                outputs = wrn_outputs
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

        scheduler.step()

        if epoch % 10 == 0 or epoch == hp['epochs'] - 1:
            correct, total = evaluate_on(model, test_dl)
            epoch_accuracy = round(100 * correct / total, 2)
            scores = score_model(model_init, model, adv_points, adv_labels, S)
            audits = audit_model(hp, scores)
            print(f"Epoch {epoch+1}/{hp['epochs']}: {epoch_accuracy}% | Audit : {audits[2]}/{2*audits[1]}/{audits[3]} | p[ε < {audits[0]}] < {hp['p_value']} @ ε={hp['epsilon']}")

    return best_test_set_accuracy


def load(hp, model_path, train_dl):
    init_model = model_path / "init_model.pt"
    trained_model = model_path / "trained_model.pt"

    model = WideResNet(
        d=hp["wrn_depth"],
        k=hp["wrn_width"],
        n_classes=10,
        input_features=3,
        output_features=16,
        strides=[1, 1, 2, 2],
    )
    model = ModuleValidator.fix(model)
    ModuleValidator.validate(model, strict=True)
    model_init = copy.deepcopy(model)

    # Re-wrap the model exactly as during training so the state dict matches
    privacy_engine = opacus.PrivacyEngine()
    optimizer = optim.SGD(
        model.parameters(),
        lr=0.1,
        momentum=0.9,
        nesterov=True,
        weight_decay=5e-4,
    )
    model, optimizer, train_loader = privacy_engine.make_private_with_epsilon(
        module=model,
        optimizer=optimizer,
        data_loader=train_dl,
        epochs=hp['epochs'],
        target_epsilon=hp['epsilon'],
        target_delta=hp['delta'],
        max_grad_norm=hp['norm'],
    )

    model_init.load_state_dict(torch.load(init_model, weights_only=True))
    model.load_state_dict(torch.load(trained_model, weights_only=True))

    model_init = model_init.to(DEVICE)
    model = model.to(DEVICE)

    adv_points = np.load("data/adv_points.npy")
    adv_labels = np.load("data/adv_labels.npy")
    S = np.load("data/S.npy")

    return model_init, model, adv_points, adv_labels, S


def fit_with_optional_dp(model, model_init, hp, train_dl, test_dl, optimizer,
                         criterion, scheduler, adv_points, adv_labels, S,
                         max_physical_batch_size=2000, accountant=None):
    """Shared harness: train with Opacus DP-SGD when hp['epsilon'] is set,
    otherwise train normally. Used by all the train_* variants below."""
    if hp['epsilon'] is not None:
        privacy_engine = opacus.PrivacyEngine(accountant=accountant) if accountant else opacus.PrivacyEngine()
        model, optimizer, train_loader = privacy_engine.make_private_with_epsilon(
            module=model,
            optimizer=optimizer,
            data_loader=train_dl,
            epochs=hp['epochs'],
            target_epsilon=hp['epsilon'],
            target_delta=hp['delta'],
            max_grad_norm=hp['norm'],
        )

        print(f"DP epsilon = {hp['epsilon']}, delta = {hp['delta']}")
        print(f"Using sigma={optimizer.noise_multiplier} and C = norm = {hp['norm']}")

        with BatchMemoryManager(
            data_loader=train_loader,
            max_physical_batch_size=max_physical_batch_size,  # 1000 ~= 9.4GB vram
            optimizer=optimizer,
        ) as memory_safe_data_loader:
            train_no_cap(model, model_init, hp, memory_safe_data_loader, test_dl,
                         optimizer, criterion, scheduler, adv_points, adv_labels, S)
    else:
        print("Training without differential privacy")
        train_no_cap(model, model_init, hp, train_dl, test_dl,
                     optimizer, criterion, scheduler, adv_points, adv_labels, S)

    return model_init, model


def train_wrn2(hp, train_dl, test_dl, adv_points, adv_labels, S):
    model = wrn.WideResNet(16, 10, 4)
    model = model.to(DEVICE)
    ModuleValidator.validate(model, strict=True)
    model_init = copy.deepcopy(model)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.12, momentum=0.9, weight_decay=1e-4)
    scheduler = MultiStepLR(
        optimizer,
        milestones=[int(i * hp['epochs']) for i in [0.3, 0.6, 0.8]],
        gamma=0.1,
    )

    print(f"Training with {hp['epochs']} epochs")
    return fit_with_optional_dp(model, model_init, hp, train_dl, test_dl, optimizer,
                                criterion, scheduler, adv_points, adv_labels, S,
                                max_physical_batch_size=10)


def train_small(hp, train_dl, test_dl, adv_points, adv_labels, S):
    model = student_model.Model(num_classes=10).to(DEVICE)
    model = ModuleValidator.fix(model)
    ModuleValidator.validate(model, strict=True)
    model_init = copy.deepcopy(model)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    scheduler = MultiStepLR(
        optimizer,
        milestones=[int(i * hp['epochs']) for i in [0.3, 0.6, 0.8]],
        gamma=0.2,
    )

    print(f"Training raw (no distill) STUDENT with {hp['epochs']} epochs")
    return fit_with_optional_dp(model, model_init, hp, train_dl, test_dl, optimizer,
                                criterion, scheduler, adv_points, adv_labels, S)


def train_fast(hp, train_dl, test_dl, train_x, adv_points, adv_labels, S):
    print("=========================")
    print("Training a fast model")
    print("=========================")
    weights = fast_model.patch_whitening(train_x[:10000, :, 4:-4, 4:-4])
    model = fast_model.Model(weights, c_in=3, c_out=10, scale_out=0.125)

    model.to(DEVICE)
    model_init = copy.deepcopy(model)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(
        model.parameters(),
        lr=0.1,
        momentum=0.9,
        nesterov=True,
        weight_decay=5e-4,
    )
    scheduler = MultiStepLR(
        optimizer,
        milestones=[int(i * hp['epochs']) for i in [0.3, 0.6, 0.8]],
        gamma=0.2,
    )

    train_no_cap(model, model_init, hp, train_dl, test_dl, optimizer, criterion, scheduler, adv_points, adv_labels, S)
    return model_init, model


def train_convnet(hp, train_dl, test_dl, adv_points, adv_labels, S):
    model = convnet_classifier.ConvNet()
    model = model.to(DEVICE)
    ModuleValidator.validate(model, strict=True)
    model_init = copy.deepcopy(model)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    scheduler = MultiStepLR(optimizer, milestones=[10, 25], gamma=0.1)

    print(f"Training with {hp['epochs']} epochs")
    return fit_with_optional_dp(model, model_init, hp, train_dl, test_dl, optimizer,
                                criterion, scheduler, adv_points, adv_labels, S,
                                accountant='rdp')


def train(hp, train_dl, test_dl, adv_points, adv_labels, S):
    model = WideResNet(
        d=hp["wrn_depth"],
        k=hp["wrn_width"],
        n_classes=10,
        input_features=3,
        output_features=16,
        strides=[1, 1, 2, 2],
    )
    model = model.to(DEVICE)
    model = ModuleValidator.fix(model)
    ModuleValidator.validate(model, strict=True)
    model_init = copy.deepcopy(model)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(
        model.parameters(),
        lr=0.1,
        momentum=0.9,
        nesterov=True,
        weight_decay=5e-4,
    )
    scheduler = MultiStepLR(
        optimizer,
        milestones=[int(i * hp['epochs']) for i in [0.3, 0.6, 0.8]],
        gamma=0.2,
    )

    print(f"Training with {hp['epochs']} epochs")
    return fit_with_optional_dp(model, model_init, hp, train_dl, test_dl, optimizer,
                                criterion, scheduler, adv_points, adv_labels, S)


def get_k_audit(k, scores, hp):
    # Count correct guesses at the two tails: the k most confident "OUT"
    # guesses and the k most confident "IN" guesses
    correct = np.sum(~scores[:k]) + np.sum(scores[-k:])

    eps_lb = get_eps_audit(
        hp['target_points'],
        2 * k,
        correct,
        hp['delta'],
        hp['p_value'],
    )
    return eps_lb, k, correct, len(scores)


def score_model(model_init, model_trained, adv_points, adv_labels, S):
    scores = list()
    criterion = nn.CrossEntropyLoss()
    with torch.no_grad():
        model_init.eval()
        x_m = torch.from_numpy(adv_points).to(DEVICE)
        y_m = torch.from_numpy(adv_labels).long().to(DEVICE)

        for i in range(len(x_m)):
            x_point = x_m[i].unsqueeze(0).to(DEVICE)
            y_point = y_m[i].unsqueeze(0).to(DEVICE)
            is_in = S[i]

            wrn_outputs = model_init(x_point)
            outputs = wrn_outputs[0] if len(wrn_outputs) == 4 else wrn_outputs
            init_loss = criterion(outputs, y_point)

            wrn_outputs = model_trained(x_point)
            outputs = wrn_outputs[0] if len(wrn_outputs) == 4 else wrn_outputs
            trained_loss = criterion(outputs, y_point)

            scores.append(((init_loss - trained_loss).item(), is_in))

    scores = sorted(scores, key=lambda x: x[0])
    scores = np.array([x[1] for x in scores])
    return scores
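

# Reading the scores (a brief note): each entry of `scores` is the membership
# bit S of one canary, ordered by how much training reduced that canary's
# loss (init_loss - trained_loss, ascending). If the attack works, the start
# of the sorted array is mostly OUT points (S=False) and the end mostly IN
# points (S=True); get_k_audit above counts exactly those two tails as
# correct guesses.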


def audit_model(hp, scores):
    audits = (0, 0, 0, 0)
    k_schedule = np.linspace(1, hp['target_points'] // 2, 40)
    k_schedule = np.floor(k_schedule).astype(int)

    with ProcessPoolExecutor() as executor:
        futures = {
            executor.submit(get_k_audit, k, scores, hp): k for k in k_schedule
        }

        for future in as_completed(futures):
            try:
                eps_lb, k, correct, total = future.result()
                if eps_lb > audits[0]:
                    audits = (eps_lb, k, correct, total)
            except Exception as exc:
                k = futures[future]
                print(f"'k={k}' generated an exception: {exc}")

    return audits
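

# For intuition (assumed numbers, not output of the code): with
# target_points=1000 the schedule tries k in roughly {1, 13, 26, ..., 500};
# each k audits only the 2k most confident guesses (k per tail), and
# audit_model keeps whichever k gives the largest epsilon lower bound.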


def main():
    global DEVICE
    global DTYPE

    parser = argparse.ArgumentParser(description='WideResNet O1 audit')
    parser.add_argument('--norm', type=float, help='dpsgd norm clip factor', required=True)
    parser.add_argument('--cuda', type=int, help='gpu index', required=False)
    parser.add_argument('--epsilon', type=float, help='dp epsilon', required=False, default=None)
    parser.add_argument('--m', type=int, help='number of target points', required=True)
    parser.add_argument('--epochs', type=int, help='number of epochs', required=True)
    parser.add_argument('--load', type=Path, help='path to saved models to audit', required=False)
    parser.add_argument('--studentraw', action='store_true', help='train a raw student', required=False)
    parser.add_argument('--distill', action='store_true', help='train a distilled student', required=False)
    parser.add_argument('--fast', action='store_true', help='train the fast model', required=False)
    parser.add_argument('--wrn2', action='store_true', help='train a groupnormed wrn', required=False)
    parser.add_argument('--convnet', action='store_true', help='train a convnet', required=False)
    args = parser.parse_args()

    if torch.cuda.is_available() and args.cuda:
        DEVICE = torch.device(f'cuda:{args.cuda}')
        DTYPE = torch.float16
    elif torch.cuda.is_available():
        DEVICE = torch.device('cuda:0')
        DTYPE = torch.float16
    else:
        DEVICE = torch.device('cpu')
        DTYPE = torch.float32

    hp = {
        "target_points": args.m,
        "wrn_depth": 16,
        "wrn_width": 1,
        "epsilon": args.epsilon,
        "delta": 1e-6,
        "norm": args.norm,
        "batch_size": 50 if args.convnet else 4096,
        "epochs": args.epochs,
        "p_value": 0.05,
    }

    hp['logfile'] = Path('WideResNet_{}_{}_{}_{}s_x{}_{}e_{}d_{}C.txt'.format(
        int(time.time()),
        hp['wrn_depth'],
        hp['wrn_width'],
        hp['batch_size'],
        hp['epochs'],
        hp['epsilon'],
        hp['delta'],
        hp['norm'],
    ))

    if args.load:
        train_dl, test_dl, _, _, _, _ = get_dataloaders3(hp['target_points'], hp['batch_size'])
        model_init, model_trained, adv_points, adv_labels, S = load(hp, args.load, train_dl)
        test_dl = None
    elif args.fast:
        train_dl, test_dl, train_x, adv_points, adv_labels, S = get_dataloaders_raw(hp['target_points'])
        model_init, model_trained = train_fast(hp, train_dl, test_dl, train_x, adv_points, adv_labels, S)
    else:
        train_dl, test_dl, pure_train_dl, adv_points, adv_labels, S = get_dataloaders3(hp['target_points'], hp['batch_size'])
        if args.wrn2:
            print("=========================")
            print("Training wrn2 model from meta")
            print("=========================")
            model_init, model_trained = train_wrn2(hp, train_dl, test_dl, adv_points, adv_labels, S)
        elif args.convnet:
            print("=========================")
            print("Training a simple convnet")
            print("=========================")
            model_init, model_trained = train_convnet(hp, train_dl, test_dl, adv_points, adv_labels, S)
        elif args.studentraw:
            print("=========================")
            print("Training a raw student model")
            print("=========================")
            model_init, model_trained = train_small(hp, train_dl, test_dl, adv_points, adv_labels, S)
        elif args.distill:
            print("=========================")
            print("Training a distilled student model")
            print("=========================")
            teacher_init, teacher_trained = train(hp, train_dl, test_dl, adv_points, adv_labels, S)
            model_init, model_trained = train_knowledge_distillation(
                teacher=teacher_trained,
                train_dl=train_dl,
                epochs=hp['epochs'],
                device=DEVICE,
                learning_rate=0.001,
                T=2,
                soft_target_loss_weight=0.25,
                ce_loss_weight=0.75,
            )
        else:
            print("=========================")
            print("Training teacher model")
            print("=========================")
            model_init, model_trained = train(hp, train_dl, test_dl, adv_points, adv_labels, S)

    np.save("data/adv_points", adv_points)
    np.save("data/adv_labels", adv_labels)
    np.save("data/S", S)
    torch.save(model_init.state_dict(), "data/init_model.pt")
    torch.save(model_trained.state_dict(), "data/trained_model.pt")

    # scores = score_model(model_init, model_trained, adv_points, adv_labels, S)
    # audits = audit_model(hp, scores)

    # print(f"Audit total: {audits[2]}/{2*audits[1]}/{audits[3]}")
    # print(f"p[ε < {audits[0]}] < {hp['p_value']} for true epsilon {hp['epsilon']}")

    if test_dl is not None:
        correct, total = evaluate_on(model_init, test_dl)
        print(f"Init model accuracy: {correct}/{total} = {round(correct/total*100, 2)}")
        correct, total = evaluate_on(model_trained, test_dl)
        print(f"Done model accuracy: {correct}/{total} = {round(correct/total*100, 2)}")


if __name__ == '__main__':
    main()
@@ -1,51 +0,0 @@
# Name: Peng Cheng
# UIN: 674792652
#
# Code adapted from:
# https://github.com/jameschengpeng/PyTorch-CNN-on-CIFAR10
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F

transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])


class ConvNet(nn.Module):
    def __init__(self):
        super(ConvNet, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=48, kernel_size=(3, 3), padding=(1, 1))
        self.conv2 = nn.Conv2d(in_channels=48, out_channels=96, kernel_size=(3, 3), padding=(1, 1))
        self.conv3 = nn.Conv2d(in_channels=96, out_channels=192, kernel_size=(3, 3), padding=(1, 1))
        self.conv4 = nn.Conv2d(in_channels=192, out_channels=256, kernel_size=(3, 3), padding=(1, 1))
        self.pool = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(in_features=8 * 8 * 256, out_features=512)
        self.fc2 = nn.Linear(in_features=512, out_features=64)
        self.Dropout = nn.Dropout(0.25)
        self.fc3 = nn.Linear(in_features=64, out_features=10)

    def forward(self, x):
        x = F.relu(self.conv1(x))  # 32*32*48
        x = F.relu(self.conv2(x))  # 32*32*96
        x = self.pool(x)           # 16*16*96
        x = self.Dropout(x)
        x = F.relu(self.conv3(x))  # 16*16*192
        x = F.relu(self.conv4(x))  # 16*16*256
        x = self.pool(x)           # 8*8*256
        x = self.Dropout(x)
        x = x.view(-1, 8 * 8 * 256)  # flatten
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.Dropout(x)
        x = self.fc3(x)
        return x
@@ -1,52 +0,0 @@
# These equations come from:
# [1] T. Steinke, M. Nasr, and M. Jagielski, "Privacy Auditing with One (1)
# Training Run," May 15, 2023, arXiv: arXiv:2305.08846. Accessed: Sep. 15, 2024.
# [Online]. Available: http://arxiv.org/abs/2305.08846

import math
import scipy.stats


# m = number of examples, each included independently with probability 0.5
# r = number of guesses (i.e. excluding abstentions)
# v = number of correct guesses by the auditor
# eps, delta = DP guarantee of the null hypothesis
# output: p-value = probability of >= v correct guesses under the null hypothesis
def p_value_DP_audit(m, r, v, eps, delta):
    assert 0 <= v <= r <= m
    assert eps >= 0
    assert 0 <= delta <= 1
    q = 1 / (1 + math.exp(-eps))  # accuracy of eps-DP randomized response
    beta = scipy.stats.binom.sf(v - 1, r, q)  # = P[Binomial(r, q) >= v]
    alpha = 0
    total = 0  # = P[v > Binomial(r, q) >= v - i]; renamed from `sum` to avoid shadowing the builtin
    for i in range(1, v + 1):
        total = total + scipy.stats.binom.pmf(v - i, r, q)
        if total > i * alpha:
            alpha = total / i
    p = beta + alpha * delta * 2 * m
    return min(p, 1)
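

# A hedged worked example (numbers are illustrative, not from the paper):
# at eps = 1 the randomized-response accuracy is q = 1/(1+e^-1) ~= 0.731, so
# 600 correct guesses out of 600 on m = 1000 candidates is essentially
# impossible under the null hypothesis and the returned p-value is ~0:
#
#     p_value_DP_audit(m=1000, r=600, v=600, eps=1.0, delta=1e-5)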


# m = number of examples, each included independently with probability 0.5
# r = number of guesses (i.e. excluding abstentions)
# v = number of correct guesses by the auditor
# p = 1 - confidence, e.g. p = 0.05 corresponds to 95%
# output: lower bound on eps, i.e. the algorithm is not (eps, delta)-DP
def get_eps_audit(m, r, v, delta, p):
    assert 0 <= v <= r <= m
    assert 0 <= delta <= 1
    assert 0 < p < 1
    eps_min = 0  # maintain p_value_DP(eps_min) < p
    eps_max = 1  # maintain p_value_DP(eps_max) >= p
    while p_value_DP_audit(m, r, v, eps_max, delta) < p:
        eps_max = eps_max + 1
    for _ in range(30):  # binary search
        eps = (eps_min + eps_max) / 2
        if p_value_DP_audit(m, r, v, eps, delta) < p:
            eps_min = eps
        else:
            eps_max = eps
    return eps_min


if __name__ == '__main__':
    print(get_eps_audit(1000, 600, 600, 1e-5, 0.05))
@@ -1,141 +0,0 @@
import torch
import torch.nn as nn
import torch.nn.functional as F


def label_smoothing_loss(inputs, targets, alpha):
    log_probs = torch.nn.functional.log_softmax(inputs, dim=1, _stacklevel=5)
    kl = -log_probs.mean(dim=1)
    xent = torch.nn.functional.nll_loss(log_probs, targets, reduction="none")
    loss = (1 - alpha) * xent + alpha * kl
    return loss


class GhostBatchNorm(nn.BatchNorm2d):
    def __init__(self, num_features, num_splits, **kw):
        super().__init__(num_features, **kw)

        running_mean = torch.zeros(num_features * num_splits)
        running_var = torch.ones(num_features * num_splits)

        self.weight.requires_grad = False
        self.num_splits = num_splits
        self.register_buffer("running_mean", running_mean)
        self.register_buffer("running_var", running_var)

    def train(self, mode=True):
        if (self.training is True) and (mode is False):
            # lazily collate stats when we are going to use them
            self.running_mean = torch.mean(
                self.running_mean.view(self.num_splits, self.num_features), dim=0
            ).repeat(self.num_splits)
            self.running_var = torch.mean(
                self.running_var.view(self.num_splits, self.num_features), dim=0
            ).repeat(self.num_splits)
        return super().train(mode)

    def forward(self, input):
        n, c, h, w = input.shape
        if self.training or not self.track_running_stats:
            assert n % self.num_splits == 0, f"Batch size ({n}) must be divisible by num_splits ({self.num_splits}) of GhostBatchNorm"
            return F.batch_norm(
                input.view(-1, c * self.num_splits, h, w),
                self.running_mean,
                self.running_var,
                self.weight.repeat(self.num_splits),
                self.bias.repeat(self.num_splits),
                True,
                self.momentum,
                self.eps,
            ).view(n, c, h, w)
        else:
            return F.batch_norm(
                input,
                self.running_mean[: self.num_features],
                self.running_var[: self.num_features],
                self.weight,
                self.bias,
                False,
                self.momentum,
                self.eps,
            )
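

# Why ghost batches (a brief note, not from the original source): statistics
# computed over small "ghost" sub-batches tend to regularize better than
# statistics over one large batch. During training the layer above reshapes
# the input so that each of `num_splits` virtual sub-batches is normalized
# independently; e.g. a batch of 512 with num_splits=16 behaves like 16
# independent batches of 32.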


def conv_bn_relu(c_in, c_out, kernel_size=(3, 3), padding=(1, 1)):
    return nn.Sequential(
        nn.Conv2d(c_in, c_out, kernel_size=kernel_size, padding=padding, bias=False),
        GhostBatchNorm(c_out, num_splits=16),
        nn.CELU(alpha=0.3),
    )


def conv_pool_norm_act(c_in, c_out):
    return nn.Sequential(
        nn.Conv2d(c_in, c_out, kernel_size=(3, 3), padding=(1, 1), bias=False),
        nn.MaxPool2d(kernel_size=2, stride=2),
        GhostBatchNorm(c_out, num_splits=16),
        nn.CELU(alpha=0.3),
    )


def patch_whitening(data, patch_size=(3, 3)):
    # Compute weights from data such that
    # torch.std(F.conv2d(data, weights), dim=(2, 3))
    # is close to 1.
    h, w = patch_size
    c = data.size(1)
    patches = data.unfold(2, h, 1).unfold(3, w, 1)
    patches = patches.transpose(1, 3).reshape(-1, c, h, w).to(torch.float32)

    n, c, h, w = patches.shape
    X = patches.reshape(n, c * h * w)
    X = X / (X.size(0) - 1) ** 0.5
    covariance = X.t() @ X

    eigenvalues, eigenvectors = torch.linalg.eigh(covariance)

    eigenvalues = eigenvalues.flip(0)

    eigenvectors = eigenvectors.t().reshape(c * h * w, c, h, w).flip(0)

    return eigenvectors / torch.sqrt(eigenvalues + 1e-2).view(-1, 1, 1, 1)
|
||||
|
||||
|
||||
class ResNetBagOfTricks(nn.Module):
|
||||
def __init__(self, first_layer_weights, c_in, c_out, scale_out):
|
||||
super().__init__()
|
||||
|
||||
c = first_layer_weights.size(0)
|
||||
|
||||
conv1 = nn.Conv2d(c_in, c, kernel_size=(3, 3), padding=(1, 1), bias=False)
|
||||
conv1.weight.data = first_layer_weights
|
||||
conv1.weight.requires_grad = False
|
||||
|
||||
self.conv1 = conv1
|
||||
self.conv2 = conv_bn_relu(c, 64, kernel_size=(1, 1), padding=0)
|
||||
self.conv3 = conv_pool_norm_act(64, 128)
|
||||
self.conv4 = conv_bn_relu(128, 128)
|
||||
self.conv5 = conv_bn_relu(128, 128)
|
||||
self.conv6 = conv_pool_norm_act(128, 256)
|
||||
self.conv7 = conv_pool_norm_act(256, 512)
|
||||
self.conv8 = conv_bn_relu(512, 512)
|
||||
self.conv9 = conv_bn_relu(512, 512)
|
||||
self.pool10 = nn.MaxPool2d(kernel_size=4, stride=4)
|
||||
self.linear11 = nn.Linear(512, c_out, bias=False)
|
||||
self.scale_out = scale_out
|
||||
|
||||
def forward(self, x):
|
||||
x = self.conv1(x)
|
||||
x = self.conv2(x)
|
||||
x = self.conv3(x)
|
||||
x = x + self.conv5(self.conv4(x))
|
||||
x = self.conv6(x)
|
||||
x = self.conv7(x)
|
||||
x = x + self.conv9(self.conv8(x))
|
||||
x = self.pool10(x)
|
||||
x = x.reshape(x.size(0), x.size(1))
|
||||
x = self.linear11(x)
|
||||
x = self.scale_out * x
|
||||
return x
|
||||
|
||||
Model = ResNetBagOfTricks
|
|
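A minimal usage sketch for this file (not part of the diff), assuming it is importable as `model`; the tensors are dummy stand-ins for the CIFAR-10 data used in the real pipeline:

```
import torch
import model

# Dummy stand-in for (cropped) training images; in the real pipeline the
# whitening patches come from the actual CIFAR-10 training tensor.
dummy_data = torch.randn(1024, 3, 24, 24)

# First-layer weights are computed from image patches and then frozen.
weights = model.patch_whitening(dummy_data)
net = model.Model(weights, c_in=3, c_out=10, scale_out=0.125)

# One forward pass; the batch size must be divisible by GhostBatchNorm's
# num_splits (16) while the model is in training mode.
logits = net(torch.randn(32, 3, 32, 32))
print(logits.shape)  # torch.Size([32, 10])
```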
@@ -1,94 +0,0 @@
import time
import math
import concurrent.futures
import numpy as np
import matplotlib.pyplot as plt
from equations import get_eps_audit


def compute_y(x_values, p, delta, proportion_correct, key):
    return key, [get_eps_audit(x, x, math.floor(x * proportion_correct), delta, p) for x in x_values]


def get_plots():
    final_values = dict()
    mul = 1.5      # 1.275 for a finer, smaller sweep
    max_x = 60000  # 2000 for the smaller sweep

    # Geometrically spaced sample counts, capped at max_x
    x_values = np.floor(mul ** np.arange(30)).astype(int)
    x_values = np.concatenate([x_values[x_values < max_x], [max_x]])

    with concurrent.futures.ProcessPoolExecutor(max_workers=16) as executor:
        start_time = time.time()
        futures = [
            executor.submit(compute_y, x_values, 0.05, 0.0, 1.0, "y11"),
            executor.submit(compute_y, x_values, 0.05, 1e-6, 1.0, "y12"),
            executor.submit(compute_y, x_values, 0.05, 1e-4, 1.0, "y13"),
            executor.submit(compute_y, x_values, 0.05, 1e-2, 1.0, "y14"),
            executor.submit(compute_y, x_values, 0.01, 0.0, 1.0, "y21"),
            executor.submit(compute_y, x_values, 0.01, 1e-6, 1.0, "y22"),
            executor.submit(compute_y, x_values, 0.01, 1e-4, 1.0, "y23"),
            executor.submit(compute_y, x_values, 0.01, 1e-2, 1.0, "y24"),
            executor.submit(compute_y, x_values, 0.05, 0.0, 0.9, "y31"),
            executor.submit(compute_y, x_values, 0.05, 1e-6, 0.9, "y32"),
            executor.submit(compute_y, x_values, 0.05, 1e-4, 0.9, "y33"),
            executor.submit(compute_y, x_values, 0.05, 1e-2, 0.9, "y34"),
            executor.submit(compute_y, x_values, 0.01, 0.0, 0.9, "y41"),
            executor.submit(compute_y, x_values, 0.01, 1e-6, 0.9, "y42"),
            executor.submit(compute_y, x_values, 0.01, 1e-4, 0.9, "y43"),
            executor.submit(compute_y, x_values, 0.01, 1e-2, 0.9, "y44"),
        ]

        for future in concurrent.futures.as_completed(futures):
            k, v = future.result()
            final_values[k] = v
        print(f"Took: {time.time()-start_time}s")

    return final_values, x_values


def plot_to(value_set, x_values, title, fig_name):
    plt.clf()  # start a fresh figure so successive plots do not overlay
    plt.xscale('log')
    plt.plot(x_values, value_set[0], marker='o', label='δ=0')
    plt.plot(x_values, value_set[1], marker='o', label='δ=1e-6')
    plt.plot(x_values, value_set[2], marker='o', label='δ=1e-4')
    plt.plot(x_values, value_set[3], marker='o', label='δ=1e-2')

    plt.xlabel("Number of samples attacked")
    plt.ylabel("Maximum ε lower-bound from audit")
    plt.title(title)
    plt.legend()
    plt.savefig(fig_name, dpi=300, bbox_inches='tight')


def main():
    final_values, x_values = get_plots()

    plot_to(
        [final_values[f"y1{i}"] for i in range(1, 5)],
        x_values,
        "Maximum ε audit with p-value=0.05 and 100% MIA accuracy",
        "/dev/shm/plot_05_100.png"
    )
    plot_to(
        [final_values[f"y2{i}"] for i in range(1, 5)],
        x_values,
        "Maximum ε audit with p-value=0.01 and 100% MIA accuracy",
        "/dev/shm/plot_01_100.png"
    )
    plot_to(
        [final_values[f"y3{i}"] for i in range(1, 5)],
        x_values,
        "Maximum ε audit with p-value=0.05 and 90% MIA accuracy",
        "/dev/shm/plot_05_90.png"
    )
    plot_to(
        [final_values[f"y4{i}"] for i in range(1, 5)],
        x_values,
        "Maximum ε audit with p-value=0.01 and 90% MIA accuracy",
        "/dev/shm/plot_01_90.png"
    )


if __name__ == '__main__':
    main()
@@ -1,29 +0,0 @@
import torch
import torch.nn as nn


# Lightweight student CNN. No pooling is applied after flattening, and
# forward() returns only the logits.
class ModifiedLightNNCosine(nn.Module):
    def __init__(self, num_classes=10):
        super(ModifiedLightNNCosine, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(16, 16, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.classifier = nn.Sequential(
            nn.Linear(1024, 256),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(256, num_classes)
        )

    def forward(self, x):
        x = self.features(x)
        flattened_conv_output = torch.flatten(x, 1)
        x = self.classifier(flattened_conv_output)
        return x


Model = ModifiedLightNNCosine
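A quick shape check (illustrative, not part of the repo): on 32×32 CIFAR-10 inputs, the two stride-2 max-pools leave a 16×8×8 feature map, which is where the classifier's 1024-dimensional input comes from.

```
import torch

net = ModifiedLightNNCosine(num_classes=10)
x = torch.randn(4, 3, 32, 32)  # dummy CIFAR-10 batch
feats = net.features(x)
print(feats.shape)   # torch.Size([4, 16, 8, 8]) -> 16 * 8 * 8 = 1024
print(net(x).shape)  # torch.Size([4, 10])
```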
@@ -1,232 +0,0 @@
"""
Adapted from:
https://github.com/facebookresearch/tan/blob/main/src/models/wideresnet.py
"""
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.


"""
Adapted from:
https://github.com/xternalz/WideResNet-pytorch/blob/master/wideresnet.py
"""

import math
import torch
import torch.nn as nn
import torch.nn.functional as F


class L2Norm(nn.Module):
    def forward(self, x):
        return x / x.norm(p=2, dim=1, keepdim=True)


class BasicBlock(nn.Module):
    def __init__(self, in_planes, out_planes, stride, nb_groups, order):
        super(BasicBlock, self).__init__()
        self.order = order
        self.bn1 = nn.GroupNorm(nb_groups, in_planes) if nb_groups else nn.Identity()
        self.relu1 = nn.ReLU()
        self.conv1 = nn.Conv2d(
            in_planes, out_planes, kernel_size=3, stride=stride, padding=1
        )
        self.bn2 = nn.GroupNorm(nb_groups, out_planes) if nb_groups else nn.Identity()
        self.relu2 = nn.ReLU()
        self.conv2 = nn.Conv2d(
            out_planes, out_planes, kernel_size=3, stride=1, padding=1
        )

        self.equalInOut = in_planes == out_planes
        self.bnShortcut = (
            (not self.equalInOut)
            and nb_groups
            and nn.GroupNorm(nb_groups, in_planes)
            or (not self.equalInOut)
            and nn.Identity()
            or None
        )
        self.convShortcut = (
            (not self.equalInOut)
            and nn.Conv2d(
                in_planes, out_planes, kernel_size=1, stride=stride, padding=0
            )
        ) or None

    def forward(self, x):
        skip = x
        assert self.order in [0, 1, 2, 3]
        if self.order == 0:  # DM, accuracy good
            if not self.equalInOut:
                skip = self.convShortcut(self.bnShortcut(self.relu1(x)))
            out = self.conv1(self.bn1(self.relu1(x)))
            out = self.conv2(self.bn2(self.relu2(out)))
        elif self.order == 1:  # classic, accuracy bad
            if not self.equalInOut:
                skip = self.convShortcut(self.relu1(self.bnShortcut(x)))
            out = self.conv1(self.relu1(self.bn1(x)))
            out = self.conv2(self.relu2(self.bn2(out)))
        elif self.order == 2:  # DM in residual, normal elsewhere
            if not self.equalInOut:
                skip = self.convShortcut(self.bnShortcut(self.relu1(x)))
            out = self.conv1(self.relu1(self.bn1(x)))
            out = self.conv2(self.relu2(self.bn2(out)))
        elif self.order == 3:  # normal in residual, DM elsewhere
            if not self.equalInOut:
                skip = self.convShortcut(self.relu1(self.bnShortcut(x)))
            out = self.conv1(self.bn1(self.relu1(x)))
            out = self.conv2(self.bn2(self.relu2(out)))
        return torch.add(skip, out)


class NetworkBlock(nn.Module):
    def __init__(
        self, nb_layers, in_planes, out_planes, block, stride, nb_groups, order
    ):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(
            block, in_planes, out_planes, nb_layers, stride, nb_groups, order
        )

    def _make_layer(
        self, block, in_planes, out_planes, nb_layers, stride, nb_groups, order
    ):
        layers = []
        for i in range(int(nb_layers)):
            layers.append(
                block(
                    i == 0 and in_planes or out_planes,
                    out_planes,
                    i == 0 and stride or 1,
                    nb_groups,
                    order,
                )
            )
        return nn.Sequential(*layers)

    def forward(self, x):
        return self.layer(x)


class WideResNet(nn.Module):
    def __init__(
        self,
        depth,
        feat_dim,
        #num_classes,
        widen_factor=1,
        nb_groups=16,
        init=0,
        order1=0,
        order2=0,
    ):
        if order1 == 0:
            print("order1=0: In the blocks: like in DM, BN on top of relu")
        if order1 == 1:
            print("order1=1: In the blocks: not like in DM, relu on top of BN")
        if order1 == 2:
            print(
                "order1=2: In the blocks: BN on top of relu in residual (DM), relu on top of BN elsewhere (classic)"
            )
        if order1 == 3:
            print(
                "order1=3: In the blocks: relu on top of BN in residual (classic), BN on top of relu elsewhere (DM)"
            )
        if order2 == 0:
            print("order2=0: outside the blocks: like in DM, BN on top of relu")
        if order2 == 1:
            print("order2=1: outside the blocks: not like in DM, relu on top of BN")
        super(WideResNet, self).__init__()
        nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
        assert (depth - 4) % 6 == 0
        n = (depth - 4) // 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1, padding=1)
        # 1st block
        self.block1 = NetworkBlock(
            n, nChannels[0], nChannels[1], block, 1, nb_groups, order1
        )
        # 2nd block
        self.block2 = NetworkBlock(
            n, nChannels[1], nChannels[2], block, 2, nb_groups, order1
        )
        # 3rd block
        self.block3 = NetworkBlock(
            n, nChannels[2], nChannels[3], block, 2, nb_groups, order1
        )
        # global average pooling and classifier
        """
        self.bn1 = nn.GroupNorm(nb_groups, nChannels[3]) if nb_groups else nn.Identity()
        self.relu = nn.ReLU()
        self.fc = nn.Linear(nChannels[3], num_classes)
        """
        self.nChannels = nChannels[3]

        self.block4 = nn.Sequential(
            nn.Flatten(),
            nn.Linear(256 * 8 * 8, 4096, bias=False),  # 256 * 6 * 6 if 224 * 224
            nn.GroupNorm(16, 4096),
            nn.ReLU(inplace=True),
        )

        # fc7
        self.block5 = nn.Sequential(
            nn.Linear(4096, 4096, bias=False),
            nn.GroupNorm(16, 4096),
            nn.ReLU(inplace=True),
        )
        # fc8
        self.block6 = nn.Sequential(
            nn.Linear(4096, feat_dim),
            L2Norm(),
        )

        if init == 0:  # as in Deep Mind's paper
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    fan_in, fan_out = nn.init._calculate_fan_in_and_fan_out(m.weight)
                    s = 1 / (max(fan_in, 1)) ** 0.5
                    nn.init.trunc_normal_(m.weight, std=s)
                    m.bias.data.zero_()
                elif isinstance(m, nn.GroupNorm):
                    m.weight.data.fill_(1)
                    m.bias.data.zero_()
                elif isinstance(m, nn.Linear):
                    fan_in, fan_out = nn.init._calculate_fan_in_and_fan_out(m.weight)
                    s = 1 / (max(fan_in, 1)) ** 0.5
                    nn.init.trunc_normal_(m.weight, std=s)
                    #m.bias.data.zero_()
        if init == 1:  # old version
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    nn.init.kaiming_normal_(
                        m.weight, mode="fan_out", nonlinearity="relu"
                    )
                elif isinstance(m, nn.GroupNorm):
                    m.weight.data.fill_(1)
                    m.bias.data.zero_()
                elif isinstance(m, nn.Linear):
                    m.bias.data.zero_()
        self.order2 = order2

    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.block4(out)
        out = self.block5(out)
        out = self.block6(out)
        if out.ndim == 4:
            out = out.mean(dim=-1)
        if out.ndim == 3:
            out = out.mean(dim=-1)

        #out = self.bn1(self.relu(out)) if self.order2 == 0 else self.relu(self.bn1(out))
        #out = F.avg_pool2d(out, 8)
        #out = out.view(-1, self.nChannels)
        return out  # self.fc(out)
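A quick instantiation sketch (illustrative; the depth, widen_factor and feat_dim values are assumptions, not repo defaults). The forward pass ends in `block6`, so the network returns an L2-normalized feature embedding rather than class logits. Note that `block4` hard-codes a 256·8·8 flatten, which on 32×32 inputs constrains widen_factor to 4 (64 · 4 = 256 channels out of `block3`):

```
import torch

# WRN-16-4 backbone with a 128-d normalized embedding head.
net = WideResNet(depth=16, feat_dim=128, widen_factor=4, nb_groups=16)
emb = net(torch.randn(2, 3, 32, 32))
print(emb.shape)        # torch.Size([2, 128])
print(emb.norm(dim=1))  # ~1.0 per row, thanks to L2Norm
```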
@@ -1,34 +0,0 @@
# Wide Residual Networks in PyTorch

Implementation of Wide Residual Networks (WRNs) in PyTorch.

## How to train WRNs

At the moment the CIFAR10 and SVHN datasets are fully supported, with augmentations for CIFAR10 drawn from the related literature, mean/std normalization for SVHN, and multistep learning rate scheduling for both. Training is driven by JSON configuration files, which you can modify or extend to support other WRN configurations and datasets; the `configs/WRN-*.json` files included in this repository show the expected fields.

### Example Runs

Train a WideResNet-16-1 on CIFAR10:
```
python train.py --config configs/WRN-16-1-scratch-CIFAR10.json
```

Train a WideResNet-40-2 on SVHN:
```
python train.py --config configs/WRN-40-2-scratch-SVHN.json
```

## Results

This work has been tested with 4 variants of WRNs. With the seed generator set to 0, you should expect test-set accuracy close to the following values:

| Model    | CIFAR10 | SVHN   |
|:---------|:--------|:-------|
| WRN-16-1 | 90.97%  | 95.52% |
| WRN-16-2 | 94.21%  | 96.17% |
| WRN-40-1 | 93.52%  | 96.07% |
| WRN-40-2 | 95.14%  | 96.14% |

## Notes

The motivation for originally implementing WRNs in PyTorch was [this](https://github.com/AlexandrosFerles/NIPS_2019_Reproducibilty_Challenge_Zero-shot_Knowledge_Transfer_via_Adversarial_Belief_Matching) NeurIPS reproducibility project, where WRNs were used as the main framework for few-shot and zero-shot knowledge transfer.
@@ -1,143 +0,0 @@
import torch
import torch.nn as nn
from torchsummary import summary
import math


class IndividualBlock1(nn.Module):
    def __init__(self, input_features, output_features, stride, subsample_input=True, increase_filters=True):
        super(IndividualBlock1, self).__init__()

        self.activation = nn.ReLU(inplace=True)

        self.batch_norm1 = nn.BatchNorm2d(input_features)
        self.batch_norm2 = nn.BatchNorm2d(output_features)

        self.conv1 = nn.Conv2d(input_features, output_features, kernel_size=3, stride=stride, padding=1, bias=False)
        self.conv2 = nn.Conv2d(output_features, output_features, kernel_size=3, stride=1, padding=1, bias=False)

        self.subsample_input = subsample_input
        self.increase_filters = increase_filters
        if subsample_input:
            self.conv_inp = nn.Conv2d(input_features, output_features, kernel_size=1, stride=2, padding=0, bias=False)
        elif increase_filters:
            self.conv_inp = nn.Conv2d(input_features, output_features, kernel_size=1, stride=1, padding=0, bias=False)

    def forward(self, x):
        if self.subsample_input or self.increase_filters:
            x = self.batch_norm1(x)
            x = self.activation(x)
            x1 = self.conv1(x)
        else:
            x1 = self.batch_norm1(x)
            x1 = self.activation(x1)
            x1 = self.conv1(x1)
        x1 = self.batch_norm2(x1)
        x1 = self.activation(x1)
        x1 = self.conv2(x1)

        if self.subsample_input or self.increase_filters:
            return self.conv_inp(x) + x1
        else:
            return x + x1


class IndividualBlockN(nn.Module):
    def __init__(self, input_features, output_features, stride):
        super(IndividualBlockN, self).__init__()

        self.activation = nn.ReLU(inplace=True)

        self.batch_norm1 = nn.BatchNorm2d(input_features)
        self.batch_norm2 = nn.BatchNorm2d(output_features)

        self.conv1 = nn.Conv2d(input_features, output_features, kernel_size=3, stride=stride, padding=1, bias=False)
        self.conv2 = nn.Conv2d(output_features, output_features, kernel_size=3, stride=stride, padding=1, bias=False)

    def forward(self, x):
        x1 = self.batch_norm1(x)
        x1 = self.activation(x1)
        x1 = self.conv1(x1)
        x1 = self.batch_norm2(x1)
        x1 = self.activation(x1)
        x1 = self.conv2(x1)

        return x1 + x


class Nblock(nn.Module):

    def __init__(self, N, input_features, output_features, stride, subsample_input=True, increase_filters=True):
        super(Nblock, self).__init__()

        layers = []
        for i in range(N):
            if i == 0:
                layers.append(IndividualBlock1(input_features, output_features, stride, subsample_input, increase_filters))
            else:
                layers.append(IndividualBlockN(output_features, output_features, stride=1))

        self.nblockLayer = nn.Sequential(*layers)

    def forward(self, x):
        return self.nblockLayer(x)


class WideResNet(nn.Module):

    def __init__(self, d, k, n_classes, input_features, output_features, strides):
        super(WideResNet, self).__init__()

        self.conv1 = nn.Conv2d(input_features, output_features, kernel_size=3, stride=strides[0], padding=1, bias=False)

        filters = [16 * k, 32 * k, 64 * k]
        self.out_filters = filters[-1]
        N = (d - 4) // 6
        increase_filters = k > 1
        self.block1 = Nblock(N, input_features=output_features, output_features=filters[0], stride=strides[1], subsample_input=False, increase_filters=increase_filters)
        self.block2 = Nblock(N, input_features=filters[0], output_features=filters[1], stride=strides[2])
        self.block3 = Nblock(N, input_features=filters[1], output_features=filters[2], stride=strides[3])

        self.batch_norm = nn.BatchNorm2d(filters[-1])
        self.activation = nn.ReLU(inplace=True)
        self.avg_pool = nn.AvgPool2d(kernel_size=8)
        self.fc = nn.Linear(filters[-1], n_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x):
        x = self.conv1(x)
        attention1 = self.block1(x)
        attention2 = self.block2(attention1)
        attention3 = self.block3(attention2)
        out = self.batch_norm(attention3)
        out = self.activation(out)
        out = self.avg_pool(out)
        out = out.view(-1, self.out_filters)

        return self.fc(out), attention1, attention2, attention3


if __name__ == '__main__':

    # change d and k if you want to check a model other than WRN-40-2
    d = 40
    k = 2
    strides = [1, 1, 2, 2]
    net = WideResNet(d=d, k=k, n_classes=10, input_features=3, output_features=16, strides=strides)

    # verify that an output is produced
    sample_input = torch.ones(size=(1, 3, 32, 32), requires_grad=False)
    net(sample_input)

    # Summarize model
    summary(net, input_size=(3, 32, 32))
@@ -1,10 +0,0 @@
{
    "training":{
        "dataset": "CIFAR10",
        "wrn_depth": 16,
        "wrn_width": 1,
        "seeds": "0",
        "checkpoint": "True",
        "log": "True"
    }
}
@@ -1,10 +0,0 @@
{
    "training":{
        "dataset": "SVHN",
        "wrn_depth": 16,
        "wrn_width": 1,
        "seeds": "0",
        "checkpoint": "True",
        "log": "True"
    }
}
@@ -1,10 +0,0 @@
{
    "training":{
        "dataset": "CIFAR10",
        "wrn_depth": 16,
        "wrn_width": 2,
        "seeds": "0",
        "checkpoint": "True",
        "log": "True"
    }
}
@@ -1,10 +0,0 @@
{
    "training":{
        "dataset": "SVHN",
        "wrn_depth": 16,
        "wrn_width": 2,
        "seeds": "0",
        "checkpoint": "True",
        "log": "True"
    }
}
@@ -1,10 +0,0 @@
{
    "training":{
        "dataset": "CIFAR10",
        "wrn_depth": 40,
        "wrn_width": 1,
        "seeds": "0",
        "checkpoint": "True",
        "log": "True"
    }
}
@@ -1,10 +0,0 @@
{
    "training":{
        "dataset": "SVHN",
        "wrn_depth": 40,
        "wrn_width": 1,
        "seeds": "0",
        "checkpoint": "True",
        "log": "True"
    }
}
@@ -1,10 +0,0 @@
{
    "training":{
        "dataset": "CIFAR10",
        "wrn_depth": 40,
        "wrn_width": 2,
        "seeds": "012",
        "checkpoint": "True",
        "log": "True"
    }
}
@@ -1,10 +0,0 @@
{
    "training":{
        "dataset": "SVHN",
        "wrn_depth": 40,
        "wrn_width": 2,
        "seeds": "012",
        "checkpoint": "True",
        "log": "True"
    }
}
@@ -1,168 +0,0 @@
from datetime import datetime
import time
import argparse
from utils import json_file_to_pyobj, get_loaders
from WideResNet import WideResNet
from opacus.validators import ModuleValidator
import os
from pathlib import Path
from torch.optim.lr_scheduler import MultiStepLR
from torchvision.datasets import CIFAR10
from torch.utils.data import DataLoader
import torch
import torch.nn as nn
from torchvision import models, transforms
import student_model
import torch.optim as optim
import torch.nn.functional as F
import opacus
import warnings
warnings.filterwarnings("ignore")


def train_knowledge_distillation(teacher, student, train_dl, epochs, learning_rate, T, soft_target_loss_weight, ce_loss_weight, device):
    ce_loss = nn.CrossEntropyLoss()
    optimizer = optim.Adam(student.parameters(), lr=learning_rate)

    teacher.eval()   # Teacher set to evaluation mode
    student.train()  # Student to train mode

    for epoch in range(epochs):
        running_loss = 0.0
        for inputs, labels in train_dl:
            inputs, labels = inputs.to(device), labels.to(device)

            optimizer.zero_grad()

            # Forward pass with the teacher model - do not save gradients here as we do not change the teacher's weights
            with torch.no_grad():
                teacher_logits, _, _, _ = teacher(inputs)

            # Forward pass with the student model
            student_logits = student(inputs)
            # Soften the student logits by applying softmax first and log() second
            soft_targets = nn.functional.softmax(teacher_logits / T, dim=-1)
            soft_prob = nn.functional.log_softmax(student_logits / T, dim=-1)

            # Calculate the soft targets loss. Scaled by T**2 as suggested by the authors of the paper "Distilling the knowledge in a neural network"
            soft_targets_loss = torch.sum(soft_targets * (soft_targets.log() - soft_prob)) / soft_prob.size()[0] * (T**2)

            # Calculate the true label loss
            label_loss = ce_loss(student_logits, labels)

            # Weighted sum of the two losses
            loss = soft_target_loss_weight * soft_targets_loss + ce_loss_weight * label_loss

            loss.backward()
            optimizer.step()

            running_loss += loss.item()

        print(f"Epoch {epoch+1}/{epochs}, Loss: {running_loss / len(train_dl)}")


@torch.no_grad()
def test(model, device, test_dl, is_teacher=False):
    model.to(device)
    model.eval()

    correct = 0
    total = 0

    for inputs, labels in test_dl:
        inputs, labels = inputs.to(device), labels.to(device)
        if is_teacher:
            outputs, _, _, _ = model(inputs)
        else:
            outputs = model(inputs)
        _, predicted = torch.max(outputs.data, 1)

        total += labels.size(0)
        correct += (predicted == labels).sum().item()

    accuracy = 100 * correct / total
    return accuracy


def main():
    parser = argparse.ArgumentParser(description='Student trainer')
    parser.add_argument('--teacher', type=Path, help='path to saved teacher .pt', required=True)
    parser.add_argument('--norm', type=float, help='dpsgd norm clip factor', required=True)
    parser.add_argument('--cuda', type=int, help='gpu index', required=False)
    parser.add_argument('--epsilon', type=float, help='dp epsilon', required=False, default=None)
    parser.add_argument('--epochs', type=int, help='student epochs', required=True)
    args = parser.parse_args()

    json_options = json_file_to_pyobj("wresnet16-audit-cifar10.json")
    training_configurations = json_options.training

    wrn_depth = training_configurations.wrn_depth
    wrn_width = training_configurations.wrn_width
    dataset = training_configurations.dataset.lower()

    if args.cuda is not None:
        device = torch.device(f'cuda:{args.cuda}')
    elif torch.cuda.is_available():
        device = torch.device('cuda:0')
    else:
        device = torch.device('cpu')
    epochs = 10

    print("Load the teacher model")
    # Instantiate the teacher model
    strides = [1, 1, 2, 2]
    teacher = WideResNet(d=wrn_depth, k=wrn_width, n_classes=10, input_features=3, output_features=16, strides=strides)
    teacher = ModuleValidator.fix(teacher)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(teacher.parameters(), lr=0.1, momentum=0.9, nesterov=True, weight_decay=5e-4)
    scheduler = MultiStepLR(optimizer, milestones=[int(elem*epochs) for elem in [0.3, 0.6, 0.8]], gamma=0.2)
    train_loader, test_loader = get_loaders(dataset, training_configurations.batch_size)
    best_test_set_accuracy = 0

    if args.epsilon is not None:
        dp_epsilon = args.epsilon
        dp_delta = 1e-5
        norm = args.norm
        # Wrap the teacher the same way it was wrapped during DP training, so
        # the saved state-dict keys line up when loading below.
        privacy_engine = opacus.PrivacyEngine()
        teacher, optimizer, train_loader = privacy_engine.make_private_with_epsilon(
            module=teacher,
            optimizer=optimizer,
            data_loader=train_loader,
            epochs=epochs,
            target_epsilon=dp_epsilon,
            target_delta=dp_delta,
            max_grad_norm=norm,
        )

    teacher.load_state_dict(torch.load(args.teacher, weights_only=True))
    teacher.to(device)
    teacher.eval()
    # Instantiate the student model
    student = student_model.Model(num_classes=10).to(device)

    print("Training student")
    train_knowledge_distillation(
        teacher=teacher,
        student=student,
        train_dl=train_loader,
        epochs=args.epochs,
        learning_rate=0.001,
        T=2,
        soft_target_loss_weight=0.25,
        ce_loss_weight=0.75,
        device=device
    )
    print(f"Saving student model for time {int(time.time())}")
    Path('students').mkdir(exist_ok=True)
    torch.save(student.state_dict(), f"students/studentmodel-{int(time.time())}.pt")

    print("Testing student and teacher")
    test_student = test(student, device, test_loader)
    test_teacher = test(teacher, device, test_loader, True)
    print(f"Teacher accuracy: {test_teacher:.2f}%")
    print(f"Student accuracy: {test_student:.2f}%")


if __name__ == "__main__":
    main()
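To make the distillation loss above concrete, a small self-contained sketch on made-up logits; it computes the same temperature-scaled KL term, including the T² factor:

```
import torch
import torch.nn as nn

T = 2.0
teacher_logits = torch.tensor([[4.0, 1.0, 0.0]])  # arbitrary teacher outputs
student_logits = torch.tensor([[2.0, 1.5, 0.0]])  # arbitrary student outputs

soft_targets = nn.functional.softmax(teacher_logits / T, dim=-1)
soft_prob = nn.functional.log_softmax(student_logits / T, dim=-1)

# KL(teacher || student) on the softened distributions, scaled by T**2
kd_loss = torch.sum(soft_targets * (soft_targets.log() - soft_prob)) / soft_prob.size(0) * (T ** 2)
print(kd_loss)
```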
@@ -1,29 +0,0 @@
import torch
import torch.nn as nn


# Lightweight student CNN. No pooling is applied after flattening, and
# forward() returns only the logits.
class ModifiedLightNNCosine(nn.Module):
    def __init__(self, num_classes=10):
        super(ModifiedLightNNCosine, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(16, 16, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.classifier = nn.Sequential(
            nn.Linear(1024, 256),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(256, num_classes)
        )

    def forward(self, x):
        x = self.features(x)
        flattened_conv_output = torch.flatten(x, 1)
        x = self.classifier(flattened_conv_output)
        return x


Model = ModifiedLightNNCosine
@@ -1,199 +0,0 @@
import os
import time
import torch
from torch import optim
from torch.optim.lr_scheduler import MultiStepLR
import torch.nn as nn
import numpy as np
import random
from utils import json_file_to_pyobj, get_loaders
from WideResNet import WideResNet
from tqdm import tqdm
import opacus
from opacus.validators import ModuleValidator
from opacus.utils.batch_memory_manager import BatchMemoryManager
import warnings
warnings.filterwarnings("ignore")


def set_seed(seed=42):
    torch.backends.cudnn.deterministic = True
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)


def train_no_cap(net, epochs, data_loader, device, optimizer, criterion, scheduler, test_loader, log, logfile, checkpointFile):
    best_test_set_accuracy = 0

    for epoch in range(epochs):
        net.train()
        #for i, data in tqdm(enumerate(train_loader, 0), leave=False):
        for i, data in enumerate(data_loader, 0):
            inputs, labels = data
            inputs = inputs.to(device)
            labels = labels.to(device)

            optimizer.zero_grad()

            wrn_outputs = net(inputs)
            outputs = wrn_outputs[0]
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

        scheduler.step()

        if epoch % 10 == 0 or epoch == epochs - 1:
            with torch.no_grad():

                correct = 0
                total = 0

                net.eval()
                for data in test_loader:
                    images, labels = data
                    images = images.to(device)
                    labels = labels.to(device)

                    wrn_outputs = net(images)
                    outputs = wrn_outputs[0]
                    _, predicted = torch.max(outputs.data, 1)
                    total += labels.size(0)
                    correct += (predicted == labels).sum().item()

                epoch_accuracy = correct / total
                epoch_accuracy = round(100 * epoch_accuracy, 2)

                if log:
                    print('Accuracy at epoch {} is {}%'.format(epoch + 1, epoch_accuracy))
                    with open(logfile, 'a') as temp:
                        temp.write('Accuracy at epoch {} is {}%\n'.format(epoch + 1, epoch_accuracy))

                if epoch_accuracy > best_test_set_accuracy:
                    best_test_set_accuracy = epoch_accuracy
                    torch.save(net.state_dict(), checkpointFile)

    return best_test_set_accuracy


def _train_seed(net, loaders, device, dataset, log=False, logfile='', epochs=200, norm=1.0, dp_epsilon=None):
    train_loader, test_loader = loaders

    dp_delta = 1e-5
    checkpointFile = 'wrn-{}-{}e-{}d-{}n-dict.pt'.format(int(time.time()), dp_epsilon, dp_delta, norm)

    #net = ModuleValidator.fix(net, replace_bn_with_in=True)
    net = ModuleValidator.fix(net)
    ModuleValidator.validate(net, strict=True)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.1, momentum=0.9, nesterov=True, weight_decay=5e-4)
    scheduler = MultiStepLR(optimizer, milestones=[int(elem*epochs) for elem in [0.3, 0.6, 0.8]], gamma=0.2)

    if dp_epsilon is not None:
        privacy_engine = opacus.PrivacyEngine()
        net, optimizer, train_loader = privacy_engine.make_private_with_epsilon(
            module=net,
            optimizer=optimizer,
            data_loader=train_loader,
            epochs=epochs,
            target_epsilon=dp_epsilon,
            target_delta=dp_delta,
            max_grad_norm=norm,
        )

        print(f"DP epsilon = {dp_epsilon}, delta = {dp_delta}")
        print(f"Using sigma={optimizer.noise_multiplier} and C = norm = {norm}")
    else:
        print("Training without differential privacy")

    print(f"Training with {epochs} epochs")

    if dp_epsilon is not None:
        with BatchMemoryManager(
            data_loader=train_loader,
            max_physical_batch_size=1000,  # Roughly 12GB VRAM budget; uses about 9.4
            optimizer=optimizer
        ) as memory_safe_data_loader:
            best_test_set_accuracy = train_no_cap(net, epochs, memory_safe_data_loader, device, optimizer, criterion, scheduler, test_loader, log, logfile, checkpointFile)
    else:
        best_test_set_accuracy = train_no_cap(net, epochs, train_loader, device, optimizer, criterion, scheduler, test_loader, log, logfile, checkpointFile)

    return best_test_set_accuracy


def train(args):
    json_options = json_file_to_pyobj(args.config)
    training_configurations = json_options.training

    wrn_depth = training_configurations.wrn_depth
    wrn_width = training_configurations.wrn_width
    dataset = training_configurations.dataset.lower()
    #seeds = [int(seed) for seed in training_configurations.seeds]
    seeds = [int.from_bytes(os.urandom(4), byteorder='big')]
    log = training_configurations.log.lower() == 'true'

    if log:
        logfile = 'WideResNet-{}-{}-{}-{}-{}.txt'.format(wrn_depth, wrn_width, training_configurations.dataset, training_configurations.batch_size, training_configurations.epochs)
        with open(logfile, 'w') as temp:
            temp.write('WideResNet-{}-{} on {} {}batch for {} epochs\n'.format(wrn_depth, wrn_width, training_configurations.dataset, training_configurations.batch_size, training_configurations.epochs))
    else:
        logfile = ''

    checkpoint = training_configurations.checkpoint.lower() == 'true'
    loaders = get_loaders(dataset, training_configurations.batch_size)

    if torch.cuda.is_available() and args.cuda is not None:
        device = torch.device(f'cuda:{args.cuda}')
    elif torch.cuda.is_available():
        device = torch.device('cuda:0')
    else:
        device = torch.device('cpu')

    test_set_accuracies = []

    for seed in seeds:
        set_seed(seed)

        if log:
            with open(logfile, 'a') as temp:
                temp.write('------------------- SEED {} -------------------\n'.format(seed))

        strides = [1, 1, 2, 2]
        net = WideResNet(d=wrn_depth, k=wrn_width, n_classes=10, input_features=3, output_features=16, strides=strides)
        net = net.to(device)

        epochs = training_configurations.epochs
        best_test_set_accuracy = _train_seed(net, loaders, device, dataset, log, logfile, epochs, args.norm, args.epsilon)

        if log:
            with open(logfile, 'a') as temp:
                temp.write('Best test set accuracy of seed {} is {}\n'.format(seed, best_test_set_accuracy))

        test_set_accuracies.append(best_test_set_accuracy)

    mean_test_set_accuracy, std_test_set_accuracy = np.mean(test_set_accuracies), np.std(test_set_accuracies)

    if log:
        with open(logfile, 'a') as temp:
            temp.write('Mean test set accuracy is {} with standard deviation equal to {}\n'.format(mean_test_set_accuracy, std_test_set_accuracy))


if __name__ == '__main__':
    import argparse

    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"

    parser = argparse.ArgumentParser(description='WideResNet')

    parser.add_argument('-config', '--config', help='Training Configurations', required=True)
    parser.add_argument('--norm', type=float, help='dpsgd norm clip factor', required=True)
    parser.add_argument('--cuda', type=int, help='gpu index', required=False)
    parser.add_argument('--epsilon', type=float, help='dp epsilon', required=False, default=None)

    args = parser.parse_args()

    train(args)
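A possible invocation of this trainer (flag values are illustrative; the config file name is borrowed from the student trainer above and must define dataset, wrn_depth, wrn_width, batch_size, epochs, checkpoint and log, as in the last JSON file of this diff):

```
python train.py --config wresnet16-audit-cifar10.json --norm 1.0 --cuda 0 --epsilon 8.0
```

Omitting `--epsilon` trains the same network without differential privacy, since `dp_epsilon` then defaults to None.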
@@ -1,61 +0,0 @@
import json
import collections
import torchvision
from torchvision import transforms
from torch.utils.data import DataLoader
import torch.nn.functional as F


# Borrowed from https://github.com/ozan-oktay/Attention-Gated-Networks
def json_file_to_pyobj(filename):
    def _json_object_hook(d): return collections.namedtuple('X', d.keys())(*d.values())

    def json2obj(data): return json.loads(data, object_hook=_json_object_hook)

    return json2obj(open(filename).read())


def get_loaders(dataset, train_batch_size=128, test_batch_size=10):
    print(f"Train batch size: {train_batch_size}")

    if dataset == 'cifar10':
        normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))

        train_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Lambda(lambda x: F.pad(x.unsqueeze(0),
                                              (4, 4, 4, 4), mode='reflect').squeeze()),
            transforms.ToPILImage(),
            transforms.RandomCrop(32),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])

        test_transform = transforms.Compose([
            transforms.ToTensor(),
            normalize
        ])

        trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=train_transform)
        trainloader = DataLoader(trainset, batch_size=train_batch_size, shuffle=True, num_workers=4)

        testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=test_transform)
        testloader = DataLoader(testset, batch_size=test_batch_size, shuffle=True, num_workers=4)

    elif dataset == 'svhn':
        normalize = transforms.Normalize((0.4377, 0.4438, 0.4728), (0.1980, 0.2010, 0.1970))

        transform = transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])

        trainset = torchvision.datasets.SVHN(root='./data', split='train', download=True, transform=transform)
        trainloader = DataLoader(trainset, batch_size=train_batch_size, shuffle=True, num_workers=4)

        testset = torchvision.datasets.SVHN(root='./data', split='test', download=True, transform=transform)
        testloader = DataLoader(testset, batch_size=test_batch_size, shuffle=True, num_workers=4)

    return trainloader, testloader
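A small usage sketch for these helpers (the config file name is the one the student trainer above hardcodes): `json_file_to_pyobj` turns JSON objects into namedtuples, so fields are read as attributes.

```
# Attribute-style access to the JSON config
cfg = json_file_to_pyobj("wresnet16-audit-cifar10.json").training
print(cfg.dataset, cfg.wrn_depth, cfg.wrn_width)

# The dataset name is lowercased before dispatching to CIFAR10 or SVHN
train_loader, test_loader = get_loaders(cfg.dataset.lower(), cfg.batch_size)
```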
@@ -1,11 +0,0 @@
{
    "training":{
        "dataset": "CIFAR10",
        "wrn_depth": 16,
        "wrn_width": 1,
        "checkpoint": "True",
        "log": "True",
        "batch_size": 4096,
        "epochs": 200
    }
}