Wres: toggle non-dp training

Akemi Izuko 2024-12-01 13:58:50 -07:00
parent 0eb26f8979
commit aa190cd4f1
Signed by: akemi
GPG key ID: 8DE0764E1809E9FC
2 changed files with 69 additions and 61 deletions
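
In the second file below, dp_epsilon is flipped from a hard-coded 8 to None, and the Opacus machinery (PrivacyEngine, BatchMemoryManager) only runs when dp_epsilon is set. A minimal, self-contained sketch of that toggle (an illustrative stand-in using the same Opacus API, not the repository's exact code; build_training and the toy model are assumptions for illustration):

import torch
from torch import nn, optim
from torch.utils.data import DataLoader, TensorDataset

def build_training(net, train_loader, dp_epsilon=None, dp_delta=1e-5, norm=1.0, epochs=10):
    # Plain SGD setup is shared by both paths.
    optimizer = optim.SGD(net.parameters(), lr=0.1, momentum=0.9, nesterov=True, weight_decay=5e-4)
    if dp_epsilon is not None:
        # DP path: Opacus wraps the model, optimizer and loader for a target (epsilon, delta).
        from opacus import PrivacyEngine
        privacy_engine = PrivacyEngine()
        net, optimizer, train_loader = privacy_engine.make_private_with_epsilon(
            module=net, optimizer=optimizer, data_loader=train_loader,
            epochs=epochs, target_epsilon=dp_epsilon, target_delta=dp_delta,
            max_grad_norm=norm,
        )
    else:
        print("Training without differential privacy")
    return net, optimizer, train_loader

# Toy usage: flipping dp_epsilon between None and e.g. 8 is the whole toggle.
toy_net = nn.Linear(8, 2)
toy_loader = DataLoader(TensorDataset(torch.randn(64, 8), torch.randint(0, 2, (64,))), batch_size=16)
toy_net, toy_optimizer, toy_loader = build_training(toy_net, toy_loader, dp_epsilon=None)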

View file

@@ -148,8 +148,6 @@ def main():
        max_grad_norm=norm,
    )

    teacher.load_state_dict(torch.load(os.path.join("wrn-1733078278-8e-1e-05d-12.0n-dict.pt"), weights_only=True))
    teacher.to(device)
    teacher.eval()
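
The hard-coded teacher checkpoint name above appears to follow the 'wrn-{}-{}e-{}d-{}n-dict.pt' pattern that the new _train_seed in the second file uses, i.e. a Unix timestamp plus epsilon 8, delta 1e-05 and clipping norm 12.0. A tiny sketch of that naming, assuming those values are what the filename encodes:

import time

# Same format string as the new _train_seed below; epsilon/delta/norm read off the filename.
dp_epsilon, dp_delta, norm = 8, 1e-5, 12.0
checkpointFile = 'wrn-{}-{}e-{}d-{}n-dict.pt'.format(int(time.time()), dp_epsilon, dp_delta, norm)
print(checkpointFile)  # e.g. wrn-1733078278-8e-1e-05d-12.0n-dict.pt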

View file

@@ -1,4 +1,5 @@
import os
import time
import torch
from torch import optim
from torch.optim.lr_scheduler import MultiStepLR
@@ -21,50 +22,13 @@ def set_seed(seed=42):
    torch.cuda.manual_seed(seed)

def _train_seed(net, loaders, device, dataset, log=False, checkpoint=False, logfile='', checkpointFile='', epochs=200, norm=1.0):
    train_loader, test_loader = loaders

    dp_epsilon = 8
    dp_delta = 1e-5
    if dp_epsilon is not None:
        print(f"DP epsilon = {dp_epsilon}, delta = {dp_delta}")
        #net = ModuleValidator.fix(net, replace_bn_with_in=True)
        net = ModuleValidator.fix(net)
        ModuleValidator.validate(net, strict=True)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.1, momentum=0.9, nesterov=True, weight_decay=5e-4)
    scheduler = MultiStepLR(optimizer, milestones=[int(elem*epochs) for elem in [0.3, 0.6, 0.8]], gamma=0.2)

def train_no_cap(net, epochs, data_loader, device, optimizer, criterion, scheduler, test_loader, log, logfile, checkpointFile):
    best_test_set_accuracy = 0

    if dp_epsilon is not None:
        privacy_engine = opacus.PrivacyEngine()
        net, optimizer, train_loader = privacy_engine.make_private_with_epsilon(
            module=net,
            optimizer=optimizer,
            data_loader=train_loader,
            epochs=epochs,
            target_epsilon=dp_epsilon,
            target_delta=dp_delta,
            max_grad_norm=norm,
        )
        print(f"Using sigma={optimizer.noise_multiplier} and C={1.0}, norm = {norm}")
    else:
        print("Training without differential privacy")

    print(f"Training with {epochs} epochs")

    #for epoch in tqdm(range(epochs)):
    with BatchMemoryManager(
        data_loader=train_loader,
        max_physical_batch_size=1000, # Roughly 12gb vram, uses 9.4
        optimizer=optimizer
    ) as memory_safe_data_loader:
        for epoch in range(epochs):
            net.train()

            #for i, data in tqdm(enumerate(train_loader, 0), leave=False):
            for i, data in enumerate(memory_safe_data_loader, 0):
            for i, data in enumerate(data_loader, 0):
                inputs, labels = data
                inputs = inputs.to(device)
                labels = labels.to(device)
@@ -107,12 +71,59 @@ def _train_seed(net, loaders, device, dataset, log=False, checkpoint=False, logf
            if epoch_accuracy > best_test_set_accuracy:
                best_test_set_accuracy = epoch_accuracy

                if checkpoint:
                    torch.save(net.state_dict(), checkpointFile)

    return best_test_set_accuracy

def _train_seed(net, loaders, device, dataset, log=False, logfile='', epochs=200, norm=1.0):
    train_loader, test_loader = loaders

    dp_epsilon = None
    dp_delta = 1e-5
    checkpointFile = 'wrn-{}-{}e-{}d-{}n-dict.pt'.format(int(time.time()), dp_epsilon, dp_delta, norm)

    if dp_epsilon is not None:
        print(f"DP epsilon = {dp_epsilon}, delta = {dp_delta}")
        #net = ModuleValidator.fix(net, replace_bn_with_in=True)
        net = ModuleValidator.fix(net)
        ModuleValidator.validate(net, strict=True)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.1, momentum=0.9, nesterov=True, weight_decay=5e-4)
    scheduler = MultiStepLR(optimizer, milestones=[int(elem*epochs) for elem in [0.3, 0.6, 0.8]], gamma=0.2)

    if dp_epsilon is not None:
        privacy_engine = opacus.PrivacyEngine()
        net, optimizer, train_loader = privacy_engine.make_private_with_epsilon(
            module=net,
            optimizer=optimizer,
            data_loader=train_loader,
            epochs=epochs,
            target_epsilon=dp_epsilon,
            target_delta=dp_delta,
            max_grad_norm=norm,
        )
        print(f"Using sigma={optimizer.noise_multiplier} and C={1.0}, norm = {norm}")
    else:
        print("Training without differential privacy")

    print(f"Training with {epochs} epochs")

    if dp_epsilon is not None:
        with BatchMemoryManager(
            data_loader=train_loader,
            max_physical_batch_size=1000, # Roughly 12gb vram, uses 9.4
            optimizer=optimizer
        ) as memory_safe_data_loader:
            best_test_set_accuracy = train_no_cap(net, epochs, memory_safe_data_loader, device, optimizer, criterion, scheduler, test_loader, log, logfile, checkpointFile)
    else:
        best_test_set_accuracy = train_no_cap(net, epochs, train_loader, device, optimizer, criterion, scheduler, test_loader, log, logfile, checkpointFile)

    return best_test_set_accuracy

def train(args):
    json_options = json_file_to_pyobj(args.config)
    training_configurations = json_options.training
@@ -154,9 +165,8 @@ def train(args):
        net = WideResNet(d=wrn_depth, k=wrn_width, n_classes=10, input_features=3, output_features=16, strides=strides)
        net = net.to(device)

        checkpointFile = 'wrn-{}-{}-seed-{}-{}-dict.pth'.format(wrn_depth, wrn_width, dataset, seed) if checkpoint else ''
        epochs = training_configurations.epochs
        best_test_set_accuracy = _train_seed(net, loaders, device, dataset, log, checkpoint, logfile, checkpointFile, epochs, args.norm)
        best_test_set_accuracy = _train_seed(net, loaders, device, dataset, log, logfile, epochs, args.norm)

        if log:
            with open(logfile, 'a') as temp: