Compare commits 2586c351d9...f407827ac1
3 commits: f407827ac1, 5da8c44743, 7b77748dcd

3 changed files with 448 additions and 16 deletions
@@ -15,12 +15,15 @@ from torchvision.datasets import CIFAR10
 import pytorch_lightning as pl
 import opacus
 import random
+from tqdm import tqdm
 from opacus.validators import ModuleValidator
 from opacus.utils.batch_memory_manager import BatchMemoryManager
 from WideResNet import WideResNet
 from equations import get_eps_audit
 import student_model
 import fast_model
+import convnet_classifier
+import wrn
 import warnings
 warnings.filterwarnings("ignore")

@@ -230,8 +233,10 @@ def get_dataloaders_raw(m=1000, train_batch_size=512, test_batch_size=10):

     train_x = preprocess_data(train_x)
     test_x = preprocess_data(test_x)
+    attack_x = preprocess_data(attack_x)
     train_y = torch.tensor(train_y)
     test_y = torch.tensor(test_y)
+    attack_y = torch.tensor(attack_y)

     train_dl = DataLoader(
         TensorDataset(train_x, train_y.long()),
@@ -246,7 +251,7 @@ def get_dataloaders_raw(m=1000, train_batch_size=512, test_batch_size=10):
         shuffle=True,
         num_workers=4
     )
-    return train_dl, test_dl, train_x
+    return train_dl, test_dl, train_x, attack_x.numpy(), attack_y.numpy(), S

 def evaluate_on(model, dataloader):
     correct = 0
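Note that get_dataloaders_raw now also returns the audit set: attack_x/attack_y are the candidate target points as numpy arrays, and S is, by all appearances, the per-point membership indicator scored later in main(). The caller-side counterpart appears in a later hunk of this same diff:

    train_dl, test_dl, train_x, adv_points, adv_labels, S = get_dataloaders_raw(hp['target_points'])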
@@ -398,6 +403,70 @@ def load(hp, model_path, train_dl):
     return model_init, model, adv_points, adv_labels, S


+def train_wrn2(hp, train_dl, test_dl):
+    model = wrn.WideResNet(16, 10, 4)
+    model = model.to(DEVICE)
+    #model = ModuleValidator.fix(model)
+    ModuleValidator.validate(model, strict=True)
+    model_init = copy.deepcopy(model)
+
+    criterion = nn.CrossEntropyLoss()
+    optimizer = optim.SGD(
+        model.parameters(),
+        lr=0.12,
+        momentum=0.9,
+        weight_decay=1e-4
+    )
+    scheduler = MultiStepLR(
+        optimizer,
+        milestones=[int(i * hp['epochs']) for i in [0.3, 0.6, 0.8]],
+        gamma=0.1
+    )
+
+    print(f"Training with {hp['epochs']} epochs")
+
+    if hp['epsilon'] is not None:
+        privacy_engine = opacus.PrivacyEngine()
+        model, optimizer, train_loader = privacy_engine.make_private_with_epsilon(
+            module=model,
+            optimizer=optimizer,
+            data_loader=train_dl,
+            epochs=hp['epochs'],
+            target_epsilon=hp['epsilon'],
+            target_delta=hp['delta'],
+            max_grad_norm=hp['norm'],
+        )
+
+        print(f"DP epsilon = {hp['epsilon']}, delta = {hp['delta']}")
+        print(f"Using sigma={optimizer.noise_multiplier} and C = norm = {hp['norm']}")
+
+        with BatchMemoryManager(
+            data_loader=train_loader,
+            max_physical_batch_size=10,  # 1000 ~= 9.4GB vram
+            optimizer=optimizer
+        ) as memory_safe_data_loader:
+            best_test_set_accuracy = train_no_cap(
+                model,
+                hp,
+                memory_safe_data_loader,
+                test_dl,
+                optimizer,
+                criterion,
+                scheduler,
+            )
+    else:
+        print("Training without differential privacy")
+        best_test_set_accuracy = train_no_cap(
+            model,
+            hp,
+            train_dl,
+            test_dl,
+            optimizer,
+            criterion,
+            scheduler,
+        )
+
+    return model_init, model
+
+
 def train_small(hp, train_dl, test_dl):
     model = student_model.Model(num_classes=10).to(DEVICE)
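train_wrn2 reads only epochs, epsilon, delta, and norm from hp; a hypothetical call (values illustrative, not taken from this diff) would be:

    # hypothetical hp values; the real ones come from argparse in main()
    hp = {'epochs': 20, 'epsilon': 8.0, 'delta': 1e-5, 'norm': 1.0}
    model_init, model_trained = train_wrn2(hp, train_dl, test_dl)

Also worth noting: BatchMemoryManager keeps the logical (privacy-accounted) batch size of train_dl and only splits each batch into physical chunks of at most max_physical_batch_size=10, so that setting trades throughput for VRAM without changing the DP guarantee.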
@@ -460,7 +529,7 @@ def train_small(hp, train_dl, test_dl):

     return model_init, model

-def train_fast(hp):
+def train_fast(hp, train_dl, test_dl, train_x):
     epochs = hp['epochs']
     momentum = 0.9
     weight_decay = 0.256
@@ -472,8 +541,6 @@ def train_fast(hp):
     print("=========================")
     print("Training a fast model")
     print("=========================")
-    train_dl, test_dl, train_x = get_dataloaders_raw(hp['target_points'])
-
     weights = fast_model.patch_whitening(train_x[:10000, :, 4:-4, 4:-4])
     model = fast_model.Model(weights, c_in=3, c_out=10, scale_out=0.125)

@@ -520,6 +587,75 @@ def train_fast(hp):
     train_no_cap(model, hp, train_dl, test_dl, optimizer, criterion, scheduler)
     return init_model, model


+def train_convnet(hp, train_dl, test_dl):
+    model = convnet_classifier.ConvNet()
+    model = model.to(DEVICE)
+    #model = ModuleValidator.fix(model)
+    ModuleValidator.validate(model, strict=True)
+    model_init = copy.deepcopy(model)
+
+    criterion = nn.CrossEntropyLoss()
+    optimizer = optim.Adam(model.parameters(), lr=1e-3)
+
+    #if hp['epochs'] <= 10:
+    #    optimizer = optim.Adam(model.parameters(), lr=lr)
+    #elif hp['epochs'] > 10 and hp['epochs'] <= 25:
+    #    optimizer = optim.Adam(model.parameters(), lr=(lr/10))
+    #else:
+    #    optimizer = optim.Adam(model.parameters(), lr=(lr/50))
+    scheduler = MultiStepLR(optimizer, milestones=[10, 25], gamma=0.1)
+
+    # scheduler = MultiStepLR(
+    #     optimizer,
+    #     milestones=[int(i * hp['epochs']) for i in [0.3, 0.6, 0.8]],
+    #     gamma=0.2
+    # )
+
+    print(f"Training with {hp['epochs']} epochs")
+
+    if hp['epsilon'] is not None:
+        privacy_engine = opacus.PrivacyEngine()
+        model, optimizer, train_loader = privacy_engine.make_private_with_epsilon(
+            module=model,
+            optimizer=optimizer,
+            data_loader=train_dl,
+            epochs=hp['epochs'],
+            target_epsilon=hp['epsilon'],
+            target_delta=hp['delta'],
+            max_grad_norm=hp['norm'],
+        )
+
+        print(f"DP epsilon = {hp['epsilon']}, delta = {hp['delta']}")
+        print(f"Using sigma={optimizer.noise_multiplier} and C = norm = {hp['norm']}")
+
+        with BatchMemoryManager(
+            data_loader=train_loader,
+            max_physical_batch_size=2000,  # 1000 ~= 9.4GB vram
+            optimizer=optimizer
+        ) as memory_safe_data_loader:
+            best_test_set_accuracy = train_no_cap(
+                model,
+                hp,
+                memory_safe_data_loader,
+                test_dl,
+                optimizer,
+                criterion,
+                scheduler,
+            )
+    else:
+        print("Training without differential privacy")
+        best_test_set_accuracy = train_no_cap(
+            model,
+            hp,
+            train_dl,
+            test_dl,
+            optimizer,
+            criterion,
+            scheduler,
+        )
+
+    return model_init, model
+
+
 def train(hp, train_dl, test_dl):
     model = WideResNet(
         d=hp["wrn_depth"],
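Both train_wrn2 and train_convnet follow the same Opacus recipe; a minimal self-contained sketch of that pattern with a toy model and loader (everything below is illustrative, not the repo's code) is:

    import torch
    import torch.nn as nn
    import opacus
    from torch.utils.data import DataLoader, TensorDataset

    model = nn.Linear(10, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    loader = DataLoader(TensorDataset(torch.randn(64, 10), torch.randint(0, 2, (64,))), batch_size=8)

    engine = opacus.PrivacyEngine()
    model, optimizer, loader = engine.make_private_with_epsilon(
        module=model, optimizer=optimizer, data_loader=loader,
        epochs=1, target_epsilon=8.0, target_delta=1e-5, max_grad_norm=1.0,
    )
    # ... training steps ...
    print(engine.get_epsilon(delta=1e-5))  # budget spent so far, per the accountant

The two functions differ mainly in max_physical_batch_size (10 for the GroupNorm WideResNet vs 2000 for the small convnet), which only controls memory use per forward pass, not the privacy accounting.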
@@ -604,12 +740,13 @@ def main():
     parser.add_argument('--cuda', type=int, help='gpu index', required=False)
     parser.add_argument('--epsilon', type=float, help='dp epsilon', required=False, default=None)
     parser.add_argument('--m', type=int, help='number of target points', required=True)
-    parser.add_argument('--k', type=int, help='number of symmetric guesses', required=True)
     parser.add_argument('--epochs', type=int, help='number of epochs', required=True)
     parser.add_argument('--load', type=Path, help='number of epochs', required=False)
     parser.add_argument('--studentraw', action='store_true', help='train a raw student', required=False)
     parser.add_argument('--distill', action='store_true', help='train a raw student', required=False)
-    parser.add_argument('--fast', action='store_true', help='train a the fast model', required=False)
+    parser.add_argument('--fast', action='store_true', help='train the fast model', required=False)
+    parser.add_argument('--wrn2', action='store_true', help='Train a groupnormed wrn', required=False)
+    parser.add_argument('--convnet', action='store_true', help='Train a convnet', required=False)
     args = parser.parse_args()

     if torch.cuda.is_available() and args.cuda:
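With --k removed and the two model flags added, a hypothetical invocation (the script name is an assumption, and --norm is defined outside this hunk) would be:

    python main.py --m 1000 --epochs 20 --norm 1.0 --epsilon 8.0 --wrn2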
@@ -629,10 +766,8 @@ def main():
         "epsilon": args.epsilon,
         "delta": 1e-5,
         "norm": args.norm,
-        "batch_size": 4096,
+        "batch_size": 50 if args.convnet else 4096,
         "epochs": args.epochs,
-        "k+": args.k,
-        "k-": args.k,
         "p_value": 0.05,
     }

@@ -652,12 +787,21 @@ def main():
         model_init, model_trained, adv_points, adv_labels, S = load(hp, args.load, train_dl)
         test_dl = None
     elif args.fast:
-        train_dl, test_dl, _ = get_dataloaders_raw(hp['target_points'])
-        model_init, model_trained = train_fast(hp)
-        exit(1)
+        train_dl, test_dl, train_x, adv_points, adv_labels, S = get_dataloaders_raw(hp['target_points'])
+        model_init, model_trained = train_fast(hp, train_dl, test_dl, train_x)
     else:
         train_dl, test_dl, pure_train_dl, adv_points, adv_labels, S = get_dataloaders3(hp['target_points'], hp['batch_size'])
-        if args.studentraw:
+        if args.wrn2:
+            print("=========================")
+            print("Training wrn2 model from meta")
+            print("=========================")
+            model_init, model_trained = train_wrn2(hp, train_dl, test_dl)
+        elif args.convnet:
+            print("=========================")
+            print("Training a simple convnet")
+            print("=========================")
+            model_init, model_trained = train_convnet(hp, train_dl, test_dl)
+        elif args.studentraw:
             print("=========================")
             print("Training a raw student model")
             print("=========================")
@@ -711,13 +855,18 @@ def main():

         scores.append(((init_loss - trained_loss).item(), is_in))

+    print(f"Top 10 unsorted scores: {scores[:10]}")
+    print(f"Btm 10 unsorted scores: {scores[-10:]}")
     scores = sorted(scores, key=lambda x: x[0])
+    print(f"Top 10 sorted scores: {scores[:10]}")
+    print(f"Btm 10 sorted scores: {scores[-10:]}")
     scores = np.array([x[1] for x in scores])

-    print(scores[:10])

     audits = (0, 0, 0, 0)
-    for k in [10, 20, 50, 100, 200, 300, 500, 800, 1000, 1200, 1400, 1600, 1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500]:
+    k_schedule = np.linspace(1, hp['target_points']//2, 40)
+    k_schedule = np.floor(k_schedule).astype(int)
+
+    for k in tqdm(k_schedule):
         correct = np.sum(~scores[:k]) + np.sum(scores[-k:])
         total = len(scores)

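The guessing rule in correct = np.sum(~scores[:k]) + np.sum(scores[-k:]): with scores sorted ascending by (init_loss - trained_loss), the k lowest-scoring points are guessed non-member and the k highest member, so correct counts right guesses out of 2*k total guesses. A toy check (membership values made up):

    import numpy as np
    is_in = np.array([0, 0, 1, 0, 1, 1], dtype=bool)  # membership bits, already sorted by score
    k = 2
    correct = np.sum(~is_in[:k]) + np.sum(is_in[-k:])
    print(correct)  # 4, i.e. all 2*k guesses right in this toy case

The new k_schedule sweeps k from 1 up to half of the m target points, presumably so get_eps_audit can trade the number of guesses against per-guess confidence when converting the tally into an audited epsilon.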
one_run_audit/convnet_classifier.py (new file, 51 lines)
@@ -0,0 +1,51 @@
+# Name: Peng Cheng
+# UIN: 674792652
+#
+# Code adapted from:
+# https://github.com/jameschengpeng/PyTorch-CNN-on-CIFAR10
+import torch
+import torchvision
+import torchvision.transforms as transforms
+import torch.nn as nn
+import torch.nn.functional as F
+
+transform_train = transforms.Compose([
+    transforms.RandomCrop(32, padding=4),
+    transforms.RandomHorizontalFlip(),
+    transforms.ToTensor(),
+    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+])
+
+transform_test = transforms.Compose([
+    transforms.ToTensor(),
+    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+])
+
+class ConvNet(nn.Module):
+    def __init__(self):
+        super(ConvNet, self).__init__()
+        self.conv1 = nn.Conv2d(in_channels=3, out_channels=48, kernel_size=(3,3), padding=(1,1))
+        self.conv2 = nn.Conv2d(in_channels=48, out_channels=96, kernel_size=(3,3), padding=(1,1))
+        self.conv3 = nn.Conv2d(in_channels=96, out_channels=192, kernel_size=(3,3), padding=(1,1))
+        self.conv4 = nn.Conv2d(in_channels=192, out_channels=256, kernel_size=(3,3), padding=(1,1))
+        self.pool = nn.MaxPool2d(2,2)
+        self.fc1 = nn.Linear(in_features=8*8*256, out_features=512)
+        self.fc2 = nn.Linear(in_features=512, out_features=64)
+        self.Dropout = nn.Dropout(0.25)
+        self.fc3 = nn.Linear(in_features=64, out_features=10)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))  # 32*32*48
+        x = F.relu(self.conv2(x))  # 32*32*96
+        x = self.pool(x)           # 16*16*96
+        x = self.Dropout(x)
+        x = F.relu(self.conv3(x))  # 16*16*192
+        x = F.relu(self.conv4(x))  # 16*16*256
+        x = self.pool(x)           # 8*8*256
+        x = self.Dropout(x)
+        x = x.view(-1, 8*8*256)    # reshape x
+        x = F.relu(self.fc1(x))
+        x = F.relu(self.fc2(x))
+        x = self.Dropout(x)
+        x = self.fc3(x)
+        return x
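A quick shape sanity check for the classifier above (illustrative):

    import torch
    net = ConvNet()
    x = torch.randn(4, 3, 32, 32)  # a CIFAR-10-sized batch
    print(net(x).shape)            # torch.Size([4, 10])

The two max-pool layers halve 32x32 twice, so the 8*8*256 flatten size matches the shape comments in forward.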
one_run_audit/wrn.py (new file, 232 lines)
@@ -0,0 +1,232 @@
+"""
+Adapted from:
+https://github.com/facebookresearch/tan/blob/main/src/models/wideresnet.py
+"""
+#!/usr/bin/env python3
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""
+Adapted from timm:
+https://github.com/xternalz/WideResNet-pytorch/blob/master/wideresnet.py
+"""
+
+import math
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+class L2Norm(nn.Module):
+    def forward(self, x):
+        return x / x.norm(p=2, dim=1, keepdim=True)
+
+class BasicBlock(nn.Module):
+    def __init__(self, in_planes, out_planes, stride, nb_groups, order):
+        super(BasicBlock, self).__init__()
+        self.order = order
+        self.bn1 = nn.GroupNorm(nb_groups, in_planes) if nb_groups else nn.Identity()
+        self.relu1 = nn.ReLU()
+        self.conv1 = nn.Conv2d(
+            in_planes, out_planes, kernel_size=3, stride=stride, padding=1
+        )
+        self.bn2 = nn.GroupNorm(nb_groups, out_planes) if nb_groups else nn.Identity()
+        self.relu2 = nn.ReLU()
+        self.conv2 = nn.Conv2d(
+            out_planes, out_planes, kernel_size=3, stride=1, padding=1
+        )
+
+        self.equalInOut = in_planes == out_planes
+        self.bnShortcut = (
+            (not self.equalInOut)
+            and nb_groups
+            and nn.GroupNorm(nb_groups, in_planes)
+            or (not self.equalInOut)
+            and nn.Identity()
+            or None
+        )
+        self.convShortcut = (
+            (not self.equalInOut)
+            and nn.Conv2d(
+                in_planes, out_planes, kernel_size=1, stride=stride, padding=0
+            )
+        ) or None
+
+    def forward(self, x):
+        skip = x
+        assert self.order in [0, 1, 2, 3]
+        if self.order == 0:  # DM, accuracy good
+            if not self.equalInOut:
+                skip = self.convShortcut(self.bnShortcut(self.relu1(x)))
+            out = self.conv1(self.bn1(self.relu1(x)))
+            out = self.conv2(self.bn2(self.relu2(out)))
+        elif self.order == 1:  # classic, accuracy bad
+            if not self.equalInOut:
+                skip = self.convShortcut(self.relu1(self.bnShortcut(x)))
+            out = self.conv1(self.relu1(self.bn1(x)))
+            out = self.conv2(self.relu2(self.bn2(out)))
+        elif self.order == 2:  # DM in residual, normal elsewhere
+            if not self.equalInOut:
+                skip = self.convShortcut(self.bnShortcut(self.relu1(x)))
+            out = self.conv1(self.relu1(self.bn1(x)))
+            out = self.conv2(self.relu2(self.bn2(out)))
+        elif self.order == 3:  # normal in residual, DM in others
+            if not self.equalInOut:
+                skip = self.convShortcut(self.relu1(self.bnShortcut(x)))
+            out = self.conv1(self.bn1(self.relu1(x)))
+            out = self.conv2(self.bn2(self.relu2(out)))
+        return torch.add(skip, out)
+
+
+class NetworkBlock(nn.Module):
+    def __init__(
+        self, nb_layers, in_planes, out_planes, block, stride, nb_groups, order
+    ):
+        super(NetworkBlock, self).__init__()
+        self.layer = self._make_layer(
+            block, in_planes, out_planes, nb_layers, stride, nb_groups, order
+        )
+
+    def _make_layer(
+        self, block, in_planes, out_planes, nb_layers, stride, nb_groups, order
+    ):
+        layers = []
+        for i in range(int(nb_layers)):
+            layers.append(
+                block(
+                    i == 0 and in_planes or out_planes,
+                    out_planes,
+                    i == 0 and stride or 1,
+                    nb_groups,
+                    order,
+                )
+            )
+        return nn.Sequential(*layers)
+
+    def forward(self, x):
+        return self.layer(x)
+
+
+class WideResNet(nn.Module):
+    def __init__(
+        self,
+        depth,
+        feat_dim,
+        #num_classes,
+        widen_factor=1,
+        nb_groups=16,
+        init=0,
+        order1=0,
+        order2=0,
+    ):
+        if order1 == 0:
+            print("order1=0: In the blocks: like in DM, BN on top of relu")
+        if order1 == 1:
+            print("order1=1: In the blocks: not like in DM, relu on top of BN")
+        if order1 == 2:
+            print(
+                "order1=2: In the blocks: BN on top of relu in residual (DM), relu on top of BN elsewhere (classic)"
+            )
+        if order1 == 3:
+            print(
+                "order1=3: In the blocks: relu on top of BN in residual (classic), BN on top of relu elsewhere (DM)"
+            )
+        if order2 == 0:
+            print("order2=0: outside the blocks: like in DM, BN on top of relu")
+        if order2 == 1:
+            print("order2=1: outside the blocks: not like in DM, relu on top of BN")
+        super(WideResNet, self).__init__()
+        nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
+        assert (depth - 4) % 6 == 0
+        n = (depth - 4) / 6
+        block = BasicBlock
+        # 1st conv before any network block
+        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1, padding=1)
+        # 1st block
+        self.block1 = NetworkBlock(
+            n, nChannels[0], nChannels[1], block, 1, nb_groups, order1
+        )
+        # 2nd block
+        self.block2 = NetworkBlock(
+            n, nChannels[1], nChannels[2], block, 2, nb_groups, order1
+        )
+        # 3rd block
+        self.block3 = NetworkBlock(
+            n, nChannels[2], nChannels[3], block, 2, nb_groups, order1
+        )
+        # global average pooling and classifier
+        """
+        self.bn1 = nn.GroupNorm(nb_groups, nChannels[3]) if nb_groups else nn.Identity()
+        self.relu = nn.ReLU()
+        self.fc = nn.Linear(nChannels[3], num_classes)
+        """
+        self.nChannels = nChannels[3]
+
+        self.block4 = nn.Sequential(
+            nn.Flatten(),
+            nn.Linear(256 * 8 * 8, 4096, bias=False),  # 256 * 6 * 6 if 224 * 224
+            nn.GroupNorm(16, 4096),
+            nn.ReLU(inplace=True),
+        )
+        # fc7
+        self.block5 = nn.Sequential(
+            nn.Linear(4096, 4096, bias=False),
+            nn.GroupNorm(16, 4096),
+            nn.ReLU(inplace=True),
+        )
+        # fc8
+        self.block6 = nn.Sequential(
+            nn.Linear(4096, feat_dim),
+            L2Norm(),
+        )
+
+        if init == 0:  # as in DeepMind's paper
+            for m in self.modules():
+                if isinstance(m, nn.Conv2d):
+                    fan_in, fan_out = nn.init._calculate_fan_in_and_fan_out(m.weight)
+                    s = 1 / (max(fan_in, 1)) ** 0.5
+                    nn.init.trunc_normal_(m.weight, std=s)
+                    m.bias.data.zero_()
+                elif isinstance(m, nn.GroupNorm):
+                    m.weight.data.fill_(1)
+                    m.bias.data.zero_()
+                elif isinstance(m, nn.Linear):
+                    fan_in, fan_out = nn.init._calculate_fan_in_and_fan_out(m.weight)
+                    s = 1 / (max(fan_in, 1)) ** 0.5
+                    nn.init.trunc_normal_(m.weight, std=s)
+                    #m.bias.data.zero_()
+        if init == 1:  # old version
+            for m in self.modules():
+                if isinstance(m, nn.Conv2d):
+                    nn.init.kaiming_normal_(
+                        m.weight, mode="fan_out", nonlinearity="relu"
+                    )
+                elif isinstance(m, nn.GroupNorm):
+                    m.weight.data.fill_(1)
+                    m.bias.data.zero_()
+                elif isinstance(m, nn.Linear):
+                    m.bias.data.zero_()
+        self.order2 = order2
+
+    def forward(self, x):
+        out = self.conv1(x)
+        out = self.block1(out)
+        out = self.block2(out)
+        out = self.block3(out)
+        out = self.block4(out)
+        out = self.block5(out)
+        out = self.block6(out)
+        if out.ndim == 4:
+            out = out.mean(dim=-1)
+        if out.ndim == 3:
+            out = out.mean(dim=-1)
+
+        #out = self.bn1(self.relu(out)) if self.order2 == 0 else self.relu(self.bn1(out))
+        #out = F.avg_pool2d(out, 8)
+        #out = out.view(-1, self.nChannels)
+        return out  #self.fc(out)
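As used in train_wrn2 above, wrn.WideResNet(16, 10, 4) reads as depth=16, feat_dim=10, widen_factor=4, normalized with GroupNorm throughout (which is why ModuleValidator.validate passes without a fix). A hypothetical smoke test:

    import torch
    model = WideResNet(16, 10, 4)  # depth=16, feat_dim=10, widen_factor=4
    out = model(torch.randn(2, 3, 32, 32))
    print(out.shape)               # torch.Size([2, 10])
    print(out.norm(dim=1))         # ~1.0 per row, since block6 ends in L2Norm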