O1: wrn2 fixes

Akemi Izuko 2024-12-07 13:59:39 -07:00
parent 2586c351d9
commit 7b77748dcd
Signed by: akemi
GPG key ID: 8DE0764E1809E9FC

@@ -15,12 +15,15 @@ from torchvision.datasets import CIFAR10
 import pytorch_lightning as pl
 import opacus
 import random
+from tqdm import tqdm
 from opacus.validators import ModuleValidator
 from opacus.utils.batch_memory_manager import BatchMemoryManager
 from WideResNet import WideResNet
 from equations import get_eps_audit
 import student_model
 import fast_model
+import convnet_classifier
+import wrn
 import warnings
 warnings.filterwarnings("ignore")
@@ -230,8 +233,10 @@ def get_dataloaders_raw(m=1000, train_batch_size=512, test_batch_size=10):
     train_x = preprocess_data(train_x)
     test_x = preprocess_data(test_x)
+    attack_x = preprocess_data(attack_x)
     train_y = torch.tensor(train_y)
     test_y = torch.tensor(test_y)
+    attack_y = torch.tensor(attack_y)

     train_dl = DataLoader(
         TensorDataset(train_x, train_y.long()),
@@ -246,7 +251,7 @@ def get_dataloaders_raw(m=1000, train_batch_size=512, test_batch_size=10):
         shuffle=True,
         num_workers=4
     )
-    return train_dl, test_dl, train_x
+    return train_dl, test_dl, train_x, attack_x.numpy(), attack_y.numpy(), S

 def evaluate_on(model, dataloader):
     correct = 0
@@ -398,6 +403,70 @@ def load(hp, model_path, train_dl):
     return model_init, model, adv_points, adv_labels, S

+def train_wrn2(hp, train_dl, test_dl):
+    model = wrn.WideResNet(16, 10, 4)
+    model = model.to(DEVICE)
+    #model = ModuleValidator.fix(model)
+    ModuleValidator.validate(model, strict=True)
+
+    model_init = copy.deepcopy(model)
+
+    criterion = nn.CrossEntropyLoss()
+    optimizer = optim.SGD(
+        model.parameters(),
+        lr=0.12,
+        momentum=0.9,
+        weight_decay=1e-4
+    )
+    scheduler = MultiStepLR(
+        optimizer,
+        milestones=[int(i * hp['epochs']) for i in [0.3, 0.6, 0.8]],
+        gamma=0.1
+    )
+
+    print(f"Training with {hp['epochs']} epochs")
+
+    if hp['epsilon'] is not None:
+        privacy_engine = opacus.PrivacyEngine()
+        model, optimizer, train_loader = privacy_engine.make_private_with_epsilon(
+            module=model,
+            optimizer=optimizer,
+            data_loader=train_dl,
+            epochs=hp['epochs'],
+            target_epsilon=hp['epsilon'],
+            target_delta=hp['delta'],
+            max_grad_norm=hp['norm'],
+        )
+
+        print(f"DP epsilon = {hp['epsilon']}, delta = {hp['delta']}")
+        print(f"Using sigma={optimizer.noise_multiplier} and C = norm = {hp['norm']}")
+
+        with BatchMemoryManager(
+            data_loader=train_loader,
+            max_physical_batch_size=10,  # 1000 ~= 9.4GB vram
+            optimizer=optimizer
+        ) as memory_safe_data_loader:
+            best_test_set_accuracy = train_no_cap(
+                model,
+                hp,
+                memory_safe_data_loader,
+                test_dl,
+                optimizer,
+                criterion,
+                scheduler,
+            )
+    else:
+        print("Training without differential privacy")
+        best_test_set_accuracy = train_no_cap(
+            model,
+            hp,
+            train_dl,
+            test_dl,
+            optimizer,
+            criterion,
+            scheduler,
+        )
+
+    return model_init, model
+
 def train_small(hp, train_dl, test_dl):
     model = student_model.Model(num_classes=10).to(DEVICE)
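Note on the new wrn import: the --wrn2 help text calls this a groupnormed WRN, which is why ModuleValidator.validate(model, strict=True) can pass here without a ModuleValidator.fix call. The real wrn.WideResNet(16, 10, 4) is not part of this diff; the following is only a minimal sketch of the kind of GroupNorm residual block Opacus accepts, with the block layout and group count as assumptions:

import torch
import torch.nn as nn
from opacus.validators import ModuleValidator

class GNBlock(nn.Module):
    # Pre-activation residual block with GroupNorm in place of BatchNorm.
    # BatchNorm mixes statistics across samples, which breaks DP-SGD's
    # per-sample gradients; GroupNorm normalizes within each sample.
    def __init__(self, in_planes, planes, stride=1, groups=8):
        super().__init__()
        self.gn1 = nn.GroupNorm(groups, in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, 3, stride=stride, padding=1, bias=False)
        self.gn2 = nn.GroupNorm(groups, planes)
        self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
        self.shortcut = (
            nn.Conv2d(in_planes, planes, 1, stride=stride, bias=False)
            if stride != 1 or in_planes != planes
            else nn.Identity()
        )

    def forward(self, x):
        out = torch.relu(self.gn1(x))
        out = self.conv2(torch.relu(self.gn2(self.conv1(out))))
        return out + self.shortcut(x)

block = GNBlock(16, 64)  # groups must divide both channel counts
ModuleValidator.validate(block, strict=True)  # raises on BatchNorm; GroupNorm is fine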
@@ -460,7 +529,7 @@ def train_small(hp, train_dl, test_dl):
     return model_init, model

-def train_fast(hp):
+def train_fast(hp, train_dl, test_dl, train_x):
     epochs = hp['epochs']
     momentum = 0.9
     weight_decay = 0.256
@@ -472,8 +541,6 @@ def train_fast(hp, train_dl, test_dl, train_x):
     print("=========================")
     print("Training a fast model")
     print("=========================")
-    train_dl, test_dl, train_x = get_dataloaders_raw(hp['target_points'])
-
     weights = fast_model.patch_whitening(train_x[:10000, :, 4:-4, 4:-4])
     model = fast_model.Model(weights, c_in=3, c_out=10, scale_out=0.125)
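With train_x now passed in by the caller, patch_whitening still initializes the first conv layer from the training images. In the cifar10-fast style that fast_model follows, the filters come from an eigendecomposition of the patch covariance; a rough sketch under that assumption (this is not the repo's actual fast_model.patch_whitening):

import torch

def patch_whitening_sketch(data, patch_size=3, eps=1e-2):
    # data: (N, C, H, W) training images
    # Collect every patch_size x patch_size patch as a flat row vector
    patches = data.unfold(2, patch_size, 1).unfold(3, patch_size, 1)
    patches = patches.permute(0, 2, 3, 1, 4, 5).reshape(-1, data.size(1) * patch_size ** 2)
    patches = patches - patches.mean(dim=0)
    # Eigendecomposition of the patch covariance
    cov = patches.t() @ patches / (patches.size(0) - 1)
    eigvals, eigvecs = torch.linalg.eigh(cov)
    # Scaled eigenvectors become fixed whitening filters for the first conv
    weights = (eigvecs * (eigvals + eps).rsqrt()).t()
    return weights.reshape(-1, data.size(1), patch_size, patch_size)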
@@ -604,12 +671,12 @@ def main():
     parser.add_argument('--cuda', type=int, help='gpu index', required=False)
     parser.add_argument('--epsilon', type=float, help='dp epsilon', required=False, default=None)
     parser.add_argument('--m', type=int, help='number of target points', required=True)
-    parser.add_argument('--k', type=int, help='number of symmetric guesses', required=True)
     parser.add_argument('--epochs', type=int, help='number of epochs', required=True)
     parser.add_argument('--load', type=Path, help='number of epochs', required=False)
     parser.add_argument('--studentraw', action='store_true', help='train a raw student', required=False)
     parser.add_argument('--distill', action='store_true', help='train a raw student', required=False)
-    parser.add_argument('--fast', action='store_true', help='train a the fast model', required=False)
+    parser.add_argument('--fast', action='store_true', help='train the fast model', required=False)
+    parser.add_argument('--wrn2', action='store_true', help='Train a groupnormed wrn', required=False)
     args = parser.parse_args()

     if torch.cuda.is_available() and args.cuda:
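Since --k is gone (the audit now sweeps an internal k_schedule, shown in the last hunk below), a run of the new branch looks roughly like this; the script name and flag values are illustrative, not from the diff:

python train.py --cuda 1 --m 1000 --epochs 100 --norm 1.0 --epsilon 8.0 --wrn2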
@@ -631,8 +698,6 @@ def main():
         "norm": args.norm,
         "batch_size": 4096,
         "epochs": args.epochs,
-        "k+": args.k,
-        "k-": args.k,
         "p_value": 0.05,
     }
@@ -652,12 +717,16 @@ def main():
         model_init, model_trained, adv_points, adv_labels, S = load(hp, args.load, train_dl)
         test_dl = None
     elif args.fast:
-        train_dl, test_dl, _ = get_dataloaders_raw(hp['target_points'])
-        model_init, model_trained = train_fast(hp)
+        train_dl, test_dl, train_x, adv_points, adv_labels, S = get_dataloaders_raw(hp['target_points'])
+        model_init, model_trained = train_fast(hp, train_dl, test_dl, train_x)
+        exit(1)
     else:
         train_dl, test_dl, pure_train_dl, adv_points, adv_labels, S = get_dataloaders3(hp['target_points'], hp['batch_size'])
-        if args.studentraw:
+        if args.wrn2:
+            print("=========================")
+            print("Training wrn2 model from meta")
+            print("=========================")
+            model_init, model_trained = train_wrn2(hp, train_dl, test_dl)
+        elif args.studentraw:
             print("=========================")
             print("Training a raw student model")
             print("=========================")
@@ -711,13 +780,18 @@ def main():
             scores.append(((init_loss - trained_loss).item(), is_in))

+    print(f"Top 10 unsorted scores: {scores[:10]}")
+    print(f"Btm 10 unsorted scores: {scores[-10:]}")
     scores = sorted(scores, key=lambda x: x[0])
+    print(f"Top 10 sorted scores: {scores[:10]}")
+    print(f"Btm 10 sorted scores: {scores[-10:]}")
     scores = np.array([x[1] for x in scores])
-    print(scores[:10])

     audits = (0, 0, 0, 0)
-    for k in [10, 20, 50, 100, 200, 300, 500, 800, 1000, 1200, 1400, 1600, 1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500]:
+    k_schedule = np.linspace(1, hp['target_points']//2, 40)
+    k_schedule = np.floor(k_schedule).astype(int)
+    for k in tqdm(k_schedule):
         correct = np.sum(~scores[:k]) + np.sum(scores[-k:])
         total = len(scores)
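The schedule replaces the hand-picked k list: scores are sorted ascending by (init_loss - trained_loss), so the bottom k points are guessed as non-members and the top k as members; np.sum(~scores[:k]) counts correct non-member guesses and np.sum(scores[-k:]) correct member guesses. A toy walk-through of that counting (the get_eps_audit argument order is an assumption based on this file's imports, not shown in this diff):

import numpy as np

# Membership flags ordered by ascending score, mirroring `scores` after
# the sort and the [x[1] for x in scores] projection. True = trained on.
scores = np.array([False, False, True, False, True, False, True, True])

for k in [2, 3]:
    # Symmetric guess: bottom k are non-members, top k are members
    correct = np.sum(~scores[:k]) + np.sum(scores[-k:])
    total = len(scores)
    print(f"k={k}: {correct}/{2 * k} guesses correct out of {total} points")
    # Each (2k, correct) pair would then feed the one-run auditing bound,
    # e.g. eps_lb = get_eps_audit(m, 2 * k, correct, hp['delta'], hp['p_value'])
    # (hypothetical call; see equations.get_eps_audit in this repo)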