# PyTorch implementation of
# https://github.com/tensorflow/privacy/blob/master/research/mi_lira_2021/train.py
#
# author: Chenxiang Zhang (orientino)

import argparse
import os
import time
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
import wandb
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torchvision import models, transforms
from torchvision.datasets import CIFAR10
from tqdm import tqdm

from opacus import PrivacyEngine
from opacus.utils.batch_memory_manager import BatchMemoryManager
from opacus.validators import ModuleValidator

import pyvacy
# from pyvacy import optim  # , analysis, sampling

from wide_resnet import WideResNet

parser = argparse.ArgumentParser()
parser.add_argument("--lr", default=0.1, type=float)
parser.add_argument("--epochs", default=1, type=int)
parser.add_argument("--n_shadows", default=16, type=int)
parser.add_argument("--shadow_id", default=1, type=int)
parser.add_argument("--model", default="resnet18", type=str)
parser.add_argument("--pkeep", default=0.5, type=float)
parser.add_argument("--savedir", default="exp/cifar10", type=str)
parser.add_argument("--debug", action="store_true")
args = parser.parse_args()

# Prefer CUDA, then Apple's MPS backend, and fall back to CPU if neither is available.
if torch.cuda.is_available():
    DEVICE = torch.device("cuda")
elif torch.backends.mps.is_available():
    DEVICE = torch.device("mps")
else:
    DEVICE = torch.device("cpu")

EPOCHS = args.epochs


class DewisNet(nn.Module):
    def __init__(self):
        super(DewisNet, self).__init__()
        # I started my model from the tutorial: https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html,
        # then modified it.

        # 2 convolutional layers, with pooling after each
        self.conv1 = nn.Conv2d(3, 12, 5)
        self.conv2 = nn.Conv2d(12, 32, 5)
        self.pool = nn.MaxPool2d(2, 2)

        # 3 linear layers
        self.fc1 = nn.Linear(32 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = torch.flatten(x, 1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x


class JagielskiNet(nn.Module):
    def __init__(self, input_shape, num_classes, l2=0.01):
        super(JagielskiNet, self).__init__()
        self.flatten = nn.Flatten()

        input_dim = 1
        for dim in input_shape:
            input_dim *= dim

        self.dense1 = nn.Linear(input_dim, 32)
        self.relu1 = nn.ReLU()
        self.dense2 = nn.Linear(32, num_classes)

        # Initialize weights with Glorot Normal (Xavier Normal)
        torch.nn.init.xavier_normal_(self.dense1.weight)
        torch.nn.init.xavier_normal_(self.dense2.weight)

        # L2 regularization (weight decay) strength; see the usage note after this class.
        self.l2 = l2

    def forward(self, x):
        x = self.flatten(x)
        x = self.dense1(x)
        x = self.relu1(x)
        x = self.dense2(x)
        return x


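# Usage note (sketch): the `l2` value stored by JagielskiNet is not applied
# automatically; it only takes effect if passed to the optimizer, e.g.
#   torch.optim.SGD(model.parameters(), lr=0.1, weight_decay=model.l2)
# The training loop below uses its own fixed weight_decay=5e-4 instead.

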
def run():
    seed = np.random.randint(0, 1000000000)
    seed ^= int(time.time())
    pl.seed_everything(seed)

    # args.debug = True  # uncomment to force-disable wandb logging
    wandb.init(project="lira", mode="disabled" if args.debug else "online")
    wandb.config.update(args)

    # Dataset
    train_transform = transforms.Compose(
        [
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop(32, padding=4),
            transforms.ToTensor(),
            transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2470, 0.2435, 0.2616]),
        ]
    )
    test_transform = transforms.Compose(
        [
            transforms.ToTensor(),
            transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2470, 0.2435, 0.2616]),
        ]
    )
    datadir = Path().home() / "opt/data/cifar"
    train_ds = CIFAR10(root=datadir, train=True, download=True, transform=train_transform)
    test_ds = CIFAR10(root=datadir, train=False, download=True, transform=test_transform)

    # Compute the IN / OUT subset:
    # If we run each experiment independently then even after a lot of trials
    # there will still probably be some examples that were always included
    # or always excluded. So instead, with experiment IDs, we guarantee that
    # after `args.n_shadows` runs are done, each example is seen exactly half
    # of the time in train, and half of the time not in train.

    size = len(train_ds)
    np.random.seed(seed)
    if args.n_shadows is not None:
        # Fixed seed so every shadow run derives the same keep matrix; each run
        # then selects its own row with --shadow_id.
        np.random.seed(0)
        keep = np.random.uniform(0, 1, size=(args.n_shadows, size))
        order = keep.argsort(0)
        keep = order < int(args.pkeep * args.n_shadows)
        keep = np.array(keep[args.shadow_id], dtype=bool)
        keep = keep.nonzero()[0]
    else:
        keep = np.random.choice(size, size=int(args.pkeep * size), replace=False)
        keep.sort()
    keep_bool = np.full((size), False)
    keep_bool[keep] = True

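    # Illustrative sketch of the ranking trick above: `keep.argsort(0)` gives, per
    # example (column), a random permutation of shadow indices, and comparing it to
    # int(pkeep * n_shadows) marks exactly that many shadows as IN for every example.
    # For instance (hypothetical values):
    #   rng = np.random.uniform(0, 1, size=(4, 3))   # n_shadows=4, 3 examples
    #   (rng.argsort(0) < 2).sum(0)                  # pkeep=0.5 -> array([2, 2, 2])
    # i.e. every example ends up IN for exactly half of the shadow models.
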
    train_ds = torch.utils.data.Subset(train_ds, keep)
    train_dl = DataLoader(train_ds, batch_size=256, shuffle=True, num_workers=4)
    test_dl = DataLoader(test_ds, batch_size=128, shuffle=False, num_workers=4)

    # Model
    if args.model == "dewisnet":
        m = DewisNet()
    elif args.model == "jnet":
        m = JagielskiNet((3, 32, 32), 10)
    elif args.model == "wresnet28-2":
        m = WideResNet(28, 2, 0.0, 10)
    elif args.model == "wresnet28-10":
        m = WideResNet(28, 10, 0.3, 10)
    elif args.model == "resnet18":
        # Adapt torchvision's ImageNet ResNet-18 to 32x32 CIFAR-10 inputs.
        m = models.resnet18(weights=None, num_classes=10)
        m.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        m.maxpool = nn.Identity()
    else:
        raise NotImplementedError

    # Replace modules that Opacus cannot handle (e.g. BatchNorm -> GroupNorm) before
    # moving the model to the device, so the replacement modules end up there too.
    m = ModuleValidator.fix(m)
    ModuleValidator.validate(m, strict=True)
    m = m.to(DEVICE)

print(f"Device: {DEVICE}")
|
|
optim = torch.optim.SGD(m.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
|
|
#optim = pyvacy.DPSGD(
|
|
# params=m.parameters(),
|
|
# lr=args.lr,
|
|
# momentum=0.9,
|
|
# weight_decay=5e-4,
|
|
#)
|
|
sched = torch.optim.lr_scheduler.CosineAnnealingLR(optim, T_max=args.epochs)
|
|
|
|
    # Train
    # Set to True to train with differential privacy (DP-SGD) via Opacus.
    if False:
        privacy_engine = PrivacyEngine()
        m, optim, train_dl = privacy_engine.make_private_with_epsilon(
            module=m,
            optimizer=optim,
            data_loader=train_dl,
            epochs=args.epochs,
            target_epsilon=8,
            target_delta=1e-4,
            max_grad_norm=1.0,
            batch_first=True,
        )

        # BatchMemoryManager splits the logical batches into physical batches of at
        # most `max_physical_batch_size` samples without changing the DP accounting.
        with BatchMemoryManager(
            data_loader=train_dl,
            max_physical_batch_size=1000,
            optimizer=optim,
        ) as memory_safe_data_loader:
            for i in tqdm(range(args.epochs)):
                m.train()
                loss_total = 0
                pbar = tqdm(memory_safe_data_loader, leave=False)
                for itr, (x, y) in enumerate(pbar):
                    x, y = x.to(DEVICE), y.to(DEVICE)
                    loss = F.cross_entropy(m(x), y)
                    loss_total += loss.item()

                    pbar.set_postfix_str(f"loss: {loss:.2f}")
                    optim.zero_grad()
                    loss.backward()
                    optim.step()
                sched.step()

                wandb.log({"loss": loss_total / (itr + 1)})  # mean loss per physical batch
    else:
        for i in tqdm(range(args.epochs)):
            m.train()
            loss_total = 0
            pbar = tqdm(train_dl, leave=False)
            for itr, (x, y) in enumerate(pbar):
                x, y = x.to(DEVICE), y.to(DEVICE)
                loss = F.cross_entropy(m(x), y)
                loss_total += loss.item()

                pbar.set_postfix_str(f"loss: {loss:.2f}")
                optim.zero_grad()
                loss.backward()
                optim.step()
            sched.step()

            wandb.log({"loss": loss_total / len(train_dl)})

    acc_test = get_acc(m, test_dl)
    print(f"[test] acc_test: {acc_test:.4f}")
    wandb.log({"acc_test": acc_test})

    # Save the IN/OUT membership mask and the trained weights for this shadow model.
    savedir = os.path.join(args.savedir, str(args.shadow_id))
    os.makedirs(savedir, exist_ok=True)
    np.save(savedir + "/keep.npy", keep_bool)
    torch.save(m.state_dict(), savedir + "/model.pt")


@torch.no_grad()
def get_acc(model, dl):
    # Evaluation mode so Dropout / normalization layers behave deterministically.
    model.eval()
    acc = []
    for x, y in dl:
        x, y = x.to(DEVICE), y.to(DEVICE)
        acc.append(torch.argmax(model(x), dim=1) == y)
    acc = torch.cat(acc)
    acc = torch.sum(acc) / len(acc)
    return acc.item()


if __name__ == "__main__":
    run()
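
# Example usage (sketch; epoch count and model choice here are illustrative): train one
# model per shadow index so that together they cover the IN/OUT splits, e.g.
#   for id in $(seq 0 15); do
#       python train.py --epochs 100 --n_shadows 16 --shadow_id $id --model resnet18
#   done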