import argparse
import os
import time
import warnings
from datetime import datetime
from pathlib import Path

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import MultiStepLR
from torch.utils.data import DataLoader
from torchvision import models, transforms
from torchvision.datasets import CIFAR10

import opacus
from opacus.validators import ModuleValidator

import student_model
from utils import json_file_to_pyobj, get_loaders
from WideResNet import WideResNet

warnings.filterwarnings("ignore")

def train_knowledge_distillation(teacher, student, train_dl, epochs, learning_rate, T, soft_target_loss_weight, ce_loss_weight, device):
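    """Distill `teacher` into `student` on `train_dl`.

    The training loss is a weighted sum of a soft-target term and a hard-label term:

        loss = soft_target_loss_weight * T**2 * KL(softmax(teacher_logits / T) || softmax(student_logits / T))
               + ce_loss_weight * CrossEntropy(student_logits, labels)

    where T is the distillation temperature.
    """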
    # Loss and optimizer for the student
    ce_loss = nn.CrossEntropyLoss()
    optimizer = optim.Adam(student.parameters(), lr=learning_rate)

    teacher.eval()   # Teacher set to evaluation mode
    student.train()  # Student to train mode

    for epoch in range(epochs):
        running_loss = 0.0
        for inputs, labels in train_dl:
            inputs, labels = inputs.to(device), labels.to(device)

            optimizer.zero_grad()

            # Forward pass with the teacher model - do not save gradients here as we do not change the teacher's weights
            with torch.no_grad():
                teacher_logits, _, _, _ = teacher(inputs)

            # Forward pass with the student model
            student_logits = student(inputs)

            # Soften the teacher logits with softmax and the student logits with log-softmax, both at temperature T
            soft_targets = nn.functional.softmax(teacher_logits / T, dim=-1)
            soft_prob = nn.functional.log_softmax(student_logits / T, dim=-1)

            # Calculate the soft-target loss, scaled by T**2 as suggested by the authors of
            # "Distilling the Knowledge in a Neural Network"
            soft_targets_loss = torch.sum(soft_targets * (soft_targets.log() - soft_prob)) / soft_prob.size()[0] * (T**2)

            # Calculate the true label loss
            label_loss = ce_loss(student_logits, labels)

            # Weighted sum of the two losses
            loss = soft_target_loss_weight * soft_targets_loss + ce_loss_weight * label_loss

            loss.backward()
            optimizer.step()

            running_loss += loss.item()

        print(f"Epoch {epoch+1}/{epochs}, Loss: {running_loss / len(train_dl)}")

@torch.no_grad()
def test(model, device, test_dl, is_teacher=False):
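    """Return the top-1 accuracy (in percent) of `model` on `test_dl`.

    Set `is_teacher=True` for the WideResNet teacher, whose forward pass
    returns a tuple with the logits as the first element.
    """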
    model.to(device)
    model.eval()

    correct = 0
    total = 0

    for inputs, labels in test_dl:
        inputs, labels = inputs.to(device), labels.to(device)
        if is_teacher:
            outputs, _, _, _ = model(inputs)
        else:
            outputs = model(inputs)
        _, predicted = torch.max(outputs.data, 1)

        total += labels.size(0)
        correct += (predicted == labels).sum().item()

    accuracy = 100 * correct / total
    return accuracy

def main():
    parser = argparse.ArgumentParser(description='Student trainer')
    parser.add_argument('--teacher', type=Path, help='path to saved teacher .pt', required=True)
    parser.add_argument('--norm', type=float, help='dpsgd norm clip factor', required=True)
    parser.add_argument('--cuda', type=int, help='gpu index', required=False)
    parser.add_argument('--epsilon', type=float, help='dp epsilon', required=False, default=None)
    parser.add_argument('--epochs', type=int, help='student epochs', required=True)
    args = parser.parse_args()
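
    # Example invocation (illustrative only; the script name, paths, and values below
    # are placeholders, not taken from this repository):
    #   python distill_student.py --teacher teachers/teacher.pt --norm 1.0 --epsilon 8.0 --epochs 50 --cuda 0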

    json_options = json_file_to_pyobj("wresnet16-audit-cifar10.json")
    training_configurations = json_options.training

    wrn_depth = training_configurations.wrn_depth
    wrn_width = training_configurations.wrn_width
    dataset = training_configurations.dataset.lower()

    if args.cuda is not None:
        device = torch.device(f'cuda:{args.cuda}')
    elif torch.cuda.is_available():
        device = torch.device('cuda:0')
    else:
        device = torch.device('cpu')

    # Epoch count used below to rebuild the teacher's optimizer schedule and privacy engine
    epochs = 10

    print("Load the teacher model")
    # Instantiate the teacher model
    strides = [1, 1, 2, 2]
    teacher = WideResNet(d=wrn_depth, k=wrn_width, n_classes=10, input_features=3, output_features=16, strides=strides)
    teacher = ModuleValidator.fix(teacher)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(teacher.parameters(), lr=0.1, momentum=0.9, nesterov=True, weight_decay=5e-4)
    scheduler = MultiStepLR(optimizer, milestones=[int(elem*epochs) for elem in [0.3, 0.6, 0.8]], gamma=0.2)
    train_loader, test_loader = get_loaders(dataset, training_configurations.batch_size)
    best_test_set_accuracy = 0

    # If an epsilon is given, wrap the teacher with Opacus (DP-SGD) before loading its weights
    if args.epsilon is not None:
        dp_epsilon = args.epsilon
        dp_delta = 1e-5
        norm = args.norm
        privacy_engine = opacus.PrivacyEngine()
        teacher, optimizer, train_loader = privacy_engine.make_private_with_epsilon(
            module=teacher,
            optimizer=optimizer,
            data_loader=train_loader,
            epochs=epochs,
            target_epsilon=dp_epsilon,
            target_delta=dp_delta,
            max_grad_norm=norm,
        )
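        # NOTE: make_private_with_epsilon returns the model wrapped in Opacus's
        # GradSampleModule; the teacher checkpoint is presumably saved from such a
        # wrapped model, which is why its state_dict is loaded only after this wrapping.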

    teacher.load_state_dict(torch.load(args.teacher, weights_only=True))
    teacher.to(device)
    teacher.eval()

    # Instantiate the student
    student = student_model.Model(num_classes=10).to(device)

    print("Training student")
    train_knowledge_distillation(
        teacher=teacher,
        student=student,
        train_dl=train_loader,
        epochs=args.epochs,
        learning_rate=0.001,
        T=2,
        soft_target_loss_weight=0.25,
        ce_loss_weight=0.75,
        device=device,
    )

    print(f"Saving student model for time {int(time.time())}")
    Path('students').mkdir(exist_ok=True)
    torch.save(student.state_dict(), f"students/studentmodel-{int(time.time())}.pt")
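
    # To reload this checkpoint later (a minimal sketch; the path below is a placeholder):
    #   student = student_model.Model(num_classes=10)
    #   student.load_state_dict(torch.load("students/studentmodel-<timestamp>.pt", weights_only=True))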

    print("Testing student and teacher")
    test_student = test(student, device, test_loader)
    test_teacher = test(teacher, device, test_loader, True)
    print(f"Teacher accuracy: {test_teacher:.2f}%")
    print(f"Student accuracy: {test_student:.2f}%")

if __name__ == "__main__":
    main()