diff --git a/wresnet-pytorch/src/WideResNet.py b/wresnet-pytorch/src/WideResNet.py
index 887547d..024818e 100644
--- a/wresnet-pytorch/src/WideResNet.py
+++ b/wresnet-pytorch/src/WideResNet.py
@@ -5,7 +5,6 @@ import math
 
 
 class IndividualBlock1(nn.Module):
-
     def __init__(self, input_features, output_features, stride, subsample_input=True, increase_filters=True):
         super(IndividualBlock1, self).__init__()
 
diff --git a/wresnet-pytorch/src/train.py b/wresnet-pytorch/src/train.py
index 2d591ca..64acb55 100644
--- a/wresnet-pytorch/src/train.py
+++ b/wresnet-pytorch/src/train.py
@@ -10,6 +10,7 @@ from WideResNet import WideResNet
 from tqdm import tqdm
 import opacus
 from opacus.validators import ModuleValidator
+from opacus.utils.batch_memory_manager import BatchMemoryManager
 
 
 def set_seed(seed=42):
@@ -20,15 +21,15 @@ def set_seed(seed=42):
     torch.cuda.manual_seed(seed)
 
 
-def _train_seed(net, loaders, device, dataset, log=False, checkpoint=False, logfile='', checkpointFile='', epochs=200):
+def _train_seed(net, loaders, device, dataset, log=False, checkpoint=False, logfile='', checkpointFile='', epochs=200, norm=1.0):
     train_loader, test_loader = loaders
 
     dp_epsilon = 8
+    dp_delta = 1e-5
     if dp_epsilon is not None:
-        print(f"DP epsilon: {dp_epsilon}")
+        print(f"DP epsilon = {dp_epsilon}, delta = {dp_delta}")
         #net = ModuleValidator.fix(net, replace_bn_with_in=True)
         net = ModuleValidator.fix(net)
-        print(net)
         ModuleValidator.validate(net, strict=True)
 
     criterion = nn.CrossEntropyLoss()
@@ -37,68 +38,77 @@ def _train_seed(net, loaders, device, dataset, log=False, checkpoint=False, logf
 
     best_test_set_accuracy = 0
 
-    privacy_engine = opacus.PrivacyEngine()
-    net, optimizer, train_loader = privacy_engine.make_private_with_epsilon(
-        module=net,
-        optimizer=optimizer,
-        data_loader=train_loader,
-        epochs=epochs,
-        target_epsilon=8,
-        target_delta=1e-5,
-        max_grad_norm=3.0,
-    )
+    if dp_epsilon is not None:
+        privacy_engine = opacus.PrivacyEngine()
+        net, optimizer, train_loader = privacy_engine.make_private_with_epsilon(
+            module=net,
+            optimizer=optimizer,
+            data_loader=train_loader,
+            epochs=epochs,
+            target_epsilon=dp_epsilon,
+            target_delta=dp_delta,
+            max_grad_norm=norm,
+        )
 
-    print(f"Using sigma={optimizer.noise_multiplier} and C={1.0}")
+        print(f"Using sigma={optimizer.noise_multiplier} and C={norm}")
+    else:
+        print("Training without differential privacy")
 
     print(f"Training with {epochs} epochs")
 
     #for epoch in tqdm(range(epochs)):
-    for epoch in range(epochs):
-        net.train()
-        #for i, data in tqdm(enumerate(train_loader, 0), leave=False):
-        for i, data in enumerate(train_loader, 0):
-            inputs, labels = data
-            inputs = inputs.to(device)
-            labels = labels.to(device)
-
-            optimizer.zero_grad()
-
-            wrn_outputs = net(inputs)
-            outputs = wrn_outputs[0]
-            loss = criterion(outputs, labels)
-            loss.backward()
-            optimizer.step()
-
-        scheduler.step()
-
-        with torch.no_grad():
-
-            correct = 0
-            total = 0
-
-            net.eval()
-            for data in test_loader:
-                images, labels = data
-                images = images.to(device)
+    with BatchMemoryManager(
+        data_loader=train_loader,
+        max_physical_batch_size=1000,  # roughly a 12 GB VRAM budget; observed usage ~9.4 GB
+        optimizer=optimizer
+    ) as memory_safe_data_loader:
+        for epoch in range(epochs):
+            net.train()
+            #for i, data in tqdm(enumerate(train_loader, 0), leave=False):
+            for i, data in enumerate(memory_safe_data_loader, 0):
+                inputs, labels = data
+                inputs = inputs.to(device)
                 labels = labels.to(device)
 
-                wrn_outputs = net(images)
+                optimizer.zero_grad()
+
+                wrn_outputs = net(inputs)
                 outputs = wrn_outputs[0]
-                _, predicted = torch.max(outputs.data, 1)
-                total += labels.size(0)
-                correct += (predicted == labels).sum().item()
+                loss = criterion(outputs, labels)
+                loss.backward()
+                optimizer.step()
 
-            epoch_accuracy = correct / total
-            epoch_accuracy = round(100 * epoch_accuracy, 2)
+            scheduler.step()
 
-            if log:
-                print('Accuracy at epoch {} is {}%\n'.format(epoch + 1, epoch_accuracy))
-                with open(logfile, 'a') as temp:
-                    temp.write('Accuracy at epoch {} is {}%\n'.format(epoch + 1, epoch_accuracy))
+            if epoch % 10 == 0 or epoch == epochs - 1:
+                with torch.no_grad():
 
-            if epoch_accuracy > best_test_set_accuracy:
-                best_test_set_accuracy = epoch_accuracy
-                if checkpoint:
-                    torch.save(net.state_dict(), checkpointFile)
+                    correct = 0
+                    total = 0
+
+                    net.eval()
+                    for data in test_loader:
+                        images, labels = data
+                        images = images.to(device)
+                        labels = labels.to(device)
+
+                        wrn_outputs = net(images)
+                        outputs = wrn_outputs[0]
+                        _, predicted = torch.max(outputs.data, 1)
+                        total += labels.size(0)
+                        correct += (predicted == labels).sum().item()
+
+                    epoch_accuracy = correct / total
+                    epoch_accuracy = round(100 * epoch_accuracy, 2)
+
+                    if log:
+                        print('Accuracy at epoch {} is {}%'.format(epoch + 1, epoch_accuracy))
+                        with open(logfile, 'a') as temp:
+                            temp.write('Accuracy at epoch {} is {}%\n'.format(epoch + 1, epoch_accuracy))
+
+                    if epoch_accuracy > best_test_set_accuracy:
+                        best_test_set_accuracy = epoch_accuracy
+                        if checkpoint:
+                            torch.save(net.state_dict(), checkpointFile)
 
     return best_test_set_accuracy
@@ -124,7 +134,9 @@ def train(args):
     checkpoint = True if training_configurations.checkpoint.lower() == 'true' else False
     loaders = get_loaders(dataset, training_configurations.batch_size)
 
-    if torch.cuda.is_available():
+    if torch.cuda.is_available() and args.cuda is not None:
+        device = torch.device(f'cuda:{args.cuda}')
+    elif torch.cuda.is_available():
         device = torch.device('cuda:0')
     else:
         device = torch.device('cpu')
@@ -144,7 +156,7 @@ def train(args):
         checkpointFile = 'wrn-{}-{}-seed-{}-{}-dict.pth'.format(wrn_depth, wrn_width, dataset, seed) if checkpoint else ''
         epochs = training_configurations.epochs
 
-        best_test_set_accuracy = _train_seed(net, loaders, device, dataset, log, checkpoint, logfile, checkpointFile, epochs)
+        best_test_set_accuracy = _train_seed(net, loaders, device, dataset, log, checkpoint, logfile, checkpointFile, epochs, args.norm)
 
         if log:
             with open(logfile, 'a') as temp:
@@ -168,6 +180,8 @@ if __name__ == '__main__':
     parser = argparse.ArgumentParser(description='WideResNet')
 
     parser.add_argument('-config', '--config', help='Training Configurations', required=True)
+    parser.add_argument('--norm', type=float, help='DP-SGD per-sample gradient clipping norm (max_grad_norm)', required=True)
+    parser.add_argument('--cuda', type=int, help='GPU index', required=False)
 
     args = parser.parse_args()
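
Usage note (illustrative, not part of the patch): with the two new arguments, an invocation
would look like

    python wresnet-pytorch/src/train.py --config <config-file> --norm 1.0 --cuda 1

--norm is required and is forwarded to Opacus as max_grad_norm, the per-sample gradient
clipping bound C of DP-SGD; --cuda optionally selects a GPU index, with the existing
cuda:0/CPU fallback when it is omitted. BatchMemoryManager splits each logical batch from the
DP data loader into physical batches of at most max_physical_batch_size samples and defers the
optimizer step until the logical batch completes, so the privacy accounting and noise
calibration still correspond to the logical batch size while peak GPU memory stays bounded.
Note that it expects the Opacus-wrapped optimizer, so the dp_epsilon-is-None branch would still
need the plain training loop.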