O1: add simple convnet

parent 7b77748dcd
commit 5da8c44743

2 changed files with 127 additions and 1 deletion
@@ -587,6 +587,75 @@ def train_fast(hp, train_dl, test_dl, train_x):
     train_no_cap(model, hp, train_dl, test_dl, optimizer, criterion, scheduler)
     return init_model, model

+def train_convnet(hp, train_dl, test_dl):
+    model = convnet_classifier.ConvNet()
+    model = model.to(DEVICE)
+    #model = ModuleValidator.fix(model)
+    ModuleValidator.validate(model, strict=True)
+    model_init = copy.deepcopy(model)
+
+    criterion = nn.CrossEntropyLoss()
+    optimizer = optim.Adam(model.parameters(), lr=1e-3)
+
+    #if hp['epochs'] <= 10:
+    #    optimizer = optim.Adam(model.parameters(), lr=lr)
+    #elif hp['epochs'] > 10 and hp['epochs'] <= 25:
+    #    optimizer = optim.Adam(model.parameters(), lr=(lr/10))
+    #else:
+    #    optimizer = optim.Adam(model.parameters(), lr=(lr/50))
+    scheduler = MultiStepLR(optimizer, milestones=[10, 25], gamma=0.1)
+
+    # scheduler = MultiStepLR(
+    #     optimizer,
+    #     milestones=[int(i * hp['epochs']) for i in [0.3, 0.6, 0.8]],
+    #     gamma=0.2
+    # )
+
+    print(f"Training with {hp['epochs']} epochs")
+
+    if hp['epsilon'] is not None:
+        privacy_engine = opacus.PrivacyEngine()
+        model, optimizer, train_loader = privacy_engine.make_private_with_epsilon(
+            module=model,
+            optimizer=optimizer,
+            data_loader=train_dl,
+            epochs=hp['epochs'],
+            target_epsilon=hp['epsilon'],
+            target_delta=hp['delta'],
+            max_grad_norm=hp['norm'],
+        )
+
+        print(f"DP epsilon = {hp['epsilon']}, delta = {hp['delta']}")
+        print(f"Using sigma={optimizer.noise_multiplier} and C = norm = {hp['norm']}")
+
+        with BatchMemoryManager(
+            data_loader=train_loader,
+            max_physical_batch_size=2000,  # 1000 ~= 9.4GB vram
+            optimizer=optimizer
+        ) as memory_safe_data_loader:
+            best_test_set_accuracy = train_no_cap(
+                model,
+                hp,
+                memory_safe_data_loader,
+                test_dl,
+                optimizer,
+                criterion,
+                scheduler,
+            )
+    else:
+        print("Training without differential privacy")
+        best_test_set_accuracy = train_no_cap(
+            model,
+            hp,
+            train_dl,
+            test_dl,
+            optimizer,
+            criterion,
+            scheduler,
+        )
+
+    return model_init, model
+
 def train(hp, train_dl, test_dl):
     model = WideResNet(
         d=hp["wrn_depth"],
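Note on the DP branch above: Opacus' make_private_with_epsilon solves for the noise multiplier sigma that meets the (epsilon, delta) budget, given the number of epochs and the sampling rate implied by the data loader's batch size; the "Using sigma=..." print then reports the value it found. A minimal sketch of that calculation using Opacus' public accountant helper — the epsilon and epoch values below are illustrative assumptions, not taken from this commit:

    from opacus.accountants.utils import get_noise_multiplier

    # Sampling rate implied by the convnet settings in this diff:
    # batch_size 50 over the 50,000-image CIFAR-10 training set.
    sample_rate = 50 / 50000

    sigma = get_noise_multiplier(
        target_epsilon=8.0,   # assumed hp['epsilon']
        target_delta=1e-5,    # hp['delta'] from this diff
        sample_rate=sample_rate,
        epochs=30,            # assumed hp['epochs']
        accountant="rdp",     # Opacus' default RDP accountant
    )
    print(f"noise multiplier sigma = {sigma:.3f}")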
@@ -677,6 +746,7 @@ def main():
     parser.add_argument('--distill', action='store_true', help='train a raw student', required=False)
     parser.add_argument('--fast', action='store_true', help='train the fast model', required=False)
     parser.add_argument('--wrn2', action='store_true', help='Train a groupnormed wrn', required=False)
+    parser.add_argument('--convnet', action='store_true', help='Train a convnet', required=False)
     args = parser.parse_args()

     if torch.cuda.is_available() and args.cuda:
@@ -696,7 +766,7 @@ def main():
         "epsilon": args.epsilon,
         "delta": 1e-5,
         "norm": args.norm,
-        "batch_size": 4096,
+        "batch_size": 50 if args.convnet else 4096,
         "epochs": args.epochs,
         "p_value": 0.05,
     }
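Design note (not stated in the commit message): the convnet path drops the batch size from 4096 to 50, presumably to follow the reference convnet's small-batch recipe. Under DP training this also changes the sampling rate the privacy accountant sees, and hence the noise multiplier chosen for the same (epsilon, delta) target.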
@@ -726,6 +796,11 @@ def main():
         print("Training wrn2 model from meta")
         print("=========================")
         model_init, model_trained = train_wrn2(hp, train_dl, test_dl)
+    elif args.convnet:
+        print("=========================")
+        print("Training a simple convnet")
+        print("=========================")
+        model_init, model_trained = train_convnet(hp, train_dl, test_dl)
     elif args.studentraw:
         print("=========================")
         print("Training a raw student model")
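With this dispatch in place, the convnet path is selected via the new flag. A hypothetical invocation — the modified training script's file name is not shown in this diff, so the path and flag values here are assumptions:

    python one_run_audit/train.py --convnet --epochs 30 --epsilon 8 --norm 1 --cuda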
one_run_audit/convnet_classifier.py (new file, 51 lines)

@@ -0,0 +1,51 @@
+# Name: Peng Cheng
+# UIN: 674792652
+#
+# Code adapted from:
+# https://github.com/jameschengpeng/PyTorch-CNN-on-CIFAR10
+import torch
+import torchvision
+import torchvision.transforms as transforms
+import torch.nn as nn
+import torch.nn.functional as F
+
+transform_train = transforms.Compose([
+    transforms.RandomCrop(32, padding=4),
+    transforms.RandomHorizontalFlip(),
+    transforms.ToTensor(),
+    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+])
+
+transform_test = transforms.Compose([
+    transforms.ToTensor(),
+    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+])
+
+class ConvNet(nn.Module):
+    def __init__(self):
+        super(ConvNet, self).__init__()
+        self.conv1 = nn.Conv2d(in_channels=3, out_channels=48, kernel_size=(3, 3), padding=(1, 1))
+        self.conv2 = nn.Conv2d(in_channels=48, out_channels=96, kernel_size=(3, 3), padding=(1, 1))
+        self.conv3 = nn.Conv2d(in_channels=96, out_channels=192, kernel_size=(3, 3), padding=(1, 1))
+        self.conv4 = nn.Conv2d(in_channels=192, out_channels=256, kernel_size=(3, 3), padding=(1, 1))
+        self.pool = nn.MaxPool2d(2, 2)
+        self.fc1 = nn.Linear(in_features=8*8*256, out_features=512)
+        self.fc2 = nn.Linear(in_features=512, out_features=64)
+        self.Dropout = nn.Dropout(0.25)
+        self.fc3 = nn.Linear(in_features=64, out_features=10)
+
+    def forward(self, x):
+        x = F.relu(self.conv1(x))  # 32*32*48
+        x = F.relu(self.conv2(x))  # 32*32*96
+        x = self.pool(x)           # 16*16*96
+        x = self.Dropout(x)
+        x = F.relu(self.conv3(x))  # 16*16*192
+        x = F.relu(self.conv4(x))  # 16*16*256
+        x = self.pool(x)           # 8*8*256
+        x = self.Dropout(x)
+        x = x.view(-1, 8*8*256)    # reshape x
+        x = F.relu(self.fc1(x))
+        x = F.relu(self.fc2(x))
+        x = self.Dropout(x)
+        x = self.fc3(x)
+        return x
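The flatten size in forward() can be sanity-checked against a CIFAR-sized input: the two 2x2 max-pools halve the 32x32 spatial dimensions to 16x16 and then 8x8, while conv4 leaves 256 channels, giving 8*8*256 features. A quick check, not part of the commit:

    import torch
    from convnet_classifier import ConvNet  # module path as added in this diff

    model = ConvNet()
    dummy = torch.randn(4, 3, 32, 32)  # batch of four 32x32 RGB images
    logits = model(dummy)
    print(logits.shape)  # torch.Size([4, 10]) -> one logit per CIFAR-10 class

Since the network uses only convolutions, max-pooling, dropout, and linear layers (no BatchNorm), it passes the strict ModuleValidator check in train_convnet as-is, which is why the ModuleValidator.fix call there is commented out.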