# mia_on_model_distillation/one_run_audit/audit.py

import argparse
import copy
import random
import time
import warnings

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.optim.lr_scheduler import MultiStepLR
from torch.utils.data import DataLoader, Subset, TensorDataset, ConcatDataset
from pathlib import Path
from torchvision import transforms
from torchvision.datasets import CIFAR10
import pytorch_lightning as pl
import opacus
from opacus.validators import ModuleValidator
from opacus.utils.batch_memory_manager import BatchMemoryManager

from WideResNet import WideResNet
from equations import get_eps_audit
import student_model

warnings.filterwarnings("ignore")

DEVICE = None  # Set in main() from --cuda

def get_dataloaders(m=1000, train_batch_size=128, test_batch_size=10):
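    """Build CIFAR-10 loaders for a one-run membership-inference audit.

    Of the 50k training points, m randomly chosen candidates are each
    included in training with probability 1/2; all other points are always
    included. Returns the train/test loaders, the included inputs (x_in),
    the candidate points (x_m), their intentionally mislabeled targets
    (y_m), and the ground-truth membership vector (S_m).
    """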
seed = np.random.randint(0, 1e9)
seed ^= int(time.time())
pl.seed_everything(seed)
train_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Lambda(lambda x: F.pad(x.unsqueeze(0),
(4, 4, 4, 4), mode='reflect').squeeze()),
transforms.ToPILImage(),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
test_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
datadir = Path("./data")
train_ds = CIFAR10(root=datadir, train=True, download=True, transform=train_transform)
test_ds = CIFAR10(root=datadir, train=False, download=True, transform=test_transform)
    # Original dataset (transforms are applied per-sample)
    x = np.stack([train_ds[i][0].numpy() for i in range(len(train_ds))])
    y = np.array(train_ds.targets).astype(np.int64)

    p = np.random.permutation(len(train_ds))

    # Choose m candidate points; each is included or excluded with probability 1/2
    S = np.full(len(train_ds), True)
    S[:m] = np.random.choice([True, False], size=m)  # Membership vector: is each candidate in or out?
    S = S[p]

    # Store the m points which could have been included/excluded
    mask = np.full(len(train_ds), False)
    mask[:m] = True
    mask = mask[p]

    x_m = x[mask]  # These are the points being guessed at
    S_m = S[mask]  # Ground truth of inclusion/exclusion for x_m
    y_m = np.array(train_ds.targets)[mask].astype(np.int64)

    # Intentionally mislabel the inclusion/exclusion candidates
    # (this must run after y_m is defined)
    for i in range(len(y_m)):
        possible_values = np.array([v for v in range(10) if v != y_m[i]])
        y_m[i] = np.random.choice(possible_values)

    # Remove excluded points from the dataset
    x_in = x[S]
    y_in = np.array(train_ds.targets).astype(np.int64)
    y_in = y_in[S]

    td = TensorDataset(torch.from_numpy(x_in), torch.from_numpy(y_in).long())
    train_dl = DataLoader(td, batch_size=train_batch_size, shuffle=True, num_workers=4)
    test_dl = DataLoader(test_ds, batch_size=test_batch_size, shuffle=True, num_workers=4)
    return train_dl, test_dl, x_in, x_m, y_m, S_m


def get_dataloaders2(m=1000, train_batch_size=128, test_batch_size=10):
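    """Variant audit setup: the m candidate points are drawn from the
    CIFAR-10 test split, and a random half of them (selected by S) is
    concatenated onto the full training set. Returns the train and test
    loaders, a per-candidate loader (x_adv_dl), the membership vector S,
    and an unshuffled loader over the training set for checking.
    """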
seed = np.random.randint(0, 1e9)
seed ^= int(time.time())
pl.seed_everything(seed)
train_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Lambda(lambda x: F.pad(x.unsqueeze(0),
(4, 4, 4, 4), mode='reflect').squeeze()),
transforms.ToPILImage(),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
test_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
datadir = Path("./data")
train_ds = CIFAR10(root=datadir, train=True, download=True, transform=train_transform)
trainp_ds = CIFAR10(root=datadir, train=False, download=True, transform=test_transform)
test_ds = CIFAR10(root=datadir, train=False, download=True, transform=test_transform)
    # Sample m candidate points from the held-out split; include each in
    # training with probability 1/2
    mask = random.sample(range(len(trainp_ds)), m)
    S = np.random.choice([True, False], size=m)
    S_mask = [idx for i, idx in enumerate(mask) if S[i]]

    x_adv = Subset(trainp_ds, mask)        # All candidate points
    x_in_adv = Subset(trainp_ds, S_mask)   # The candidates actually included
    train_ds = ConcatDataset([train_ds, x_in_adv])

    check_train_dl = DataLoader(train_ds, batch_size=1, shuffle=False, num_workers=1)
    train_dl = DataLoader(train_ds, batch_size=train_batch_size, shuffle=True, num_workers=4)
    x_adv_dl = DataLoader(x_adv, batch_size=1, shuffle=False, num_workers=1)
    test_dl = DataLoader(test_ds, batch_size=test_batch_size, shuffle=True, num_workers=4)
    return train_dl, test_dl, x_adv_dl, S, check_train_dl


def get_dataloaders3(m=1000, train_batch_size=128, test_batch_size=10):
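    """Audit setup used by main(): m candidates are drawn from the test
    split, intentionally mislabeled, and a random half (per S) is added to
    the training set. Returns the train/test loaders, a loader over the
    unmodified training set, the candidate points and labels, and S.
    """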
seed = np.random.randint(0, 1e9)
seed ^= int(time.time())
pl.seed_everything(seed)
train_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Lambda(lambda x: F.pad(x.unsqueeze(0),
(4, 4, 4, 4), mode='reflect').squeeze()),
transforms.ToPILImage(),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
test_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
datadir = Path("./data")
train_ds = CIFAR10(root=datadir, train=True, download=True, transform=train_transform)
test_ds = CIFAR10(root=datadir, train=False, download=True, transform=test_transform)
    # Original train/test tensors (transforms are applied per-sample)
    x_train = np.stack([train_ds[i][0].numpy() for i in range(len(train_ds))])
    y_train = np.array(train_ds.targets).astype(np.int64)
    x = np.stack([test_ds[i][0].numpy() for i in range(len(test_ds))])
    y = np.array(test_ds.targets).astype(np.int64)
# Store the m points which could have been included/excluded
mask = np.full(len(test_ds), False)
mask[:m] = True
mask = mask[np.random.permutation(len(test_ds))]
adv_points = x[mask]
adv_labels = y[mask]
    # Intentionally mislabel the inclusion/exclusion candidates
    for i in range(len(adv_labels)):
        adv_labels[i] = np.random.choice([c for c in range(10) if c != adv_labels[i]])

    # Include each of the m candidate points with probability 1/2
    S = np.random.choice([True, False], size=m)  # Membership vector: is each candidate in or out?
assert len(adv_points) == m
inc_points = adv_points[S]
inc_labels = adv_labels[S]
td = TensorDataset(torch.from_numpy(inc_points).float(), torch.from_numpy(inc_labels).long())
td2 = TensorDataset(torch.from_numpy(x_train).float(), torch.from_numpy(y_train).long())
td = ConcatDataset([td, td2])
train_dl = DataLoader(td, batch_size=train_batch_size, shuffle=True, num_workers=4)
    pure_train_dl = DataLoader(train_ds, batch_size=train_batch_size, shuffle=True, num_workers=4)
    test_dl = DataLoader(test_ds, batch_size=test_batch_size, shuffle=True, num_workers=4)
    return train_dl, test_dl, pure_train_dl, adv_points, adv_labels, S


def evaluate_on(model, dataloader):
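    """Return (correct, total) classification counts for `model` over `dataloader`."""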
correct = 0
total = 0
with torch.no_grad():
model.eval()
for data in dataloader:
images, labels = data
images = images.to(DEVICE)
labels = labels.to(DEVICE)
wrn_outputs = model(images)
            # WideResNet returns a 4-tuple whose first element is the logits;
            # the student model returns the logits tensor directly
            if isinstance(wrn_outputs, (tuple, list)):
                outputs = wrn_outputs[0]
            else:
                outputs = wrn_outputs
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
return correct, total
def train_knowledge_distillation(teacher, train_dl, epochs, device, learning_rate=0.001, T=2, soft_target_loss_weight=0.25, ce_loss_weight=0.75):
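    """Distill `teacher` into a fresh student model (Hinton-style KD):
    the loss is a weighted sum of the soft-target loss at temperature T
    and the hard-label cross-entropy. Returns (initial student, trained
    student).
    """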
    # Instantiate the student model
student = student_model.Model(num_classes=10).to(device)
ce_loss = nn.CrossEntropyLoss()
optimizer = optim.Adam(student.parameters(), lr=learning_rate)
student_init = copy.deepcopy(student)
student.to(device)
teacher.to(device)
teacher.eval() # Teacher set to evaluation mode
student.train() # Student to train mode
for epoch in range(epochs):
running_loss = 0.0
for inputs, labels in train_dl:
inputs, labels = inputs.to(device), labels.to(device)
optimizer.zero_grad()
# Forward pass with the teacher model - do not save gradients here as we do not change the teacher's weights
with torch.no_grad():
teacher_logits, _, _, _ = teacher(inputs)
# Forward pass with the student model
student_logits = student(inputs)
            # Soften the teacher logits with softmax and the student logits with log-softmax
            soft_targets = nn.functional.softmax(teacher_logits / T, dim=-1)
            soft_prob = nn.functional.log_softmax(student_logits / T, dim=-1)
            # Soft-target (KL) loss, scaled by T**2 as suggested in "Distilling the Knowledge in a Neural Network"
soft_targets_loss = torch.sum(soft_targets * (soft_targets.log() - soft_prob)) / soft_prob.size()[0] * (T**2)
# Calculate the true label loss
label_loss = ce_loss(student_logits, labels)
# Weighted sum of the two losses
loss = soft_target_loss_weight * soft_targets_loss + ce_loss_weight * label_loss
loss.backward()
optimizer.step()
running_loss += loss.item()
if epoch % 10 == 0:
print(f"Epoch {epoch+1}/{epochs}, Loss: {running_loss / len(train_dl)}")
return student_init, student
def train_no_cap(model, hp, train_dl, test_dl, optimizer, criterion, scheduler):
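    """Plain training loop shared by the DP and non-DP paths; evaluates on
    the test set every 10 epochs and returns the best accuracy seen.
    """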
    best_test_set_accuracy = 0

    for epoch in range(hp['epochs']):
        model.train()
        for i, data in enumerate(train_dl, 0):
            inputs, labels = data
            inputs = inputs.to(DEVICE)
            labels = labels.to(DEVICE)

            optimizer.zero_grad()
            wrn_outputs = model(inputs)
            if isinstance(wrn_outputs, (tuple, list)):
                outputs = wrn_outputs[0]  # WideResNet: logits are the first element
            else:
                outputs = wrn_outputs
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

        # Step the LR schedule once per epoch (milestones are given in epochs)
        scheduler.step()

        if epoch % 10 == 0 or epoch == hp['epochs'] - 1:
            correct, total = evaluate_on(model, test_dl)
            epoch_accuracy = round(100 * correct / total, 2)
            print(f"Epoch {epoch+1}/{hp['epochs']}: {epoch_accuracy}%")
            # Track the best accuracy so the return value is meaningful
            best_test_set_accuracy = max(best_test_set_accuracy, epoch_accuracy)

    return best_test_set_accuracy
def load(hp, model_path, train_dl):
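    """Restore a saved audit run: rebuild the WideResNet, wrap it with
    Opacus (apparently so the checkpoint's parameter names line up with the
    Opacus-wrapped module), load the init/trained weights, and reload the
    saved audit points, labels, and membership vector S from data/.
    """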
init_model = model_path / "init_model.pt"
trained_model = model_path / "trained_model.pt"
model = WideResNet(
d=hp["wrn_depth"],
k=hp["wrn_width"],
n_classes=10,
input_features=3,
output_features=16,
strides=[1, 1, 2, 2],
)
model = ModuleValidator.fix(model)
ModuleValidator.validate(model, strict=True)
model_init = copy.deepcopy(model)
privacy_engine = opacus.PrivacyEngine()
optimizer = optim.SGD(
model.parameters(),
lr=0.1,
momentum=0.9,
nesterov=True,
weight_decay=5e-4
)
model, optimizer, train_loader = privacy_engine.make_private_with_epsilon(
module=model,
optimizer=optimizer,
data_loader=train_dl,
epochs=hp['epochs'],
target_epsilon=hp['epsilon'],
target_delta=hp['delta'],
max_grad_norm=hp['norm'],
)
model_init.load_state_dict(torch.load(init_model, weights_only=True))
model.load_state_dict(torch.load(trained_model, weights_only=True))
model_init = model_init.to(DEVICE)
model = model.to(DEVICE)
adv_points = np.load("data/adv_points.npy")
adv_labels = np.load("data/adv_labels.npy")
S = np.load("data/S.npy")
return model_init, model, adv_points, adv_labels, S
def train_small(hp, train_dl, test_dl):
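    """Train the small student architecture directly (no distillation),
    with DP-SGD via Opacus when hp['epsilon'] is set. Returns (initial
    model, trained model).
    """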
    model = student_model.Model(num_classes=10).to(DEVICE)
model = ModuleValidator.fix(model)
ModuleValidator.validate(model, strict=True)
model_init = copy.deepcopy(model)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
scheduler = MultiStepLR(
optimizer,
milestones=[int(i * hp['epochs']) for i in [0.3, 0.6, 0.8]],
gamma=0.2
)
print(f"Training raw (no distill) STUDENT with {hp['epochs']} epochs")
if hp['epsilon'] is not None:
privacy_engine = opacus.PrivacyEngine()
model, optimizer, train_loader = privacy_engine.make_private_with_epsilon(
module=model,
optimizer=optimizer,
data_loader=train_dl,
epochs=hp['epochs'],
target_epsilon=hp['epsilon'],
target_delta=hp['delta'],
max_grad_norm=hp['norm'],
)
print(f"DP epsilon = {hp['epsilon']}, delta = {hp['delta']}")
print(f"Using sigma={optimizer.noise_multiplier} and C = norm = {hp['norm']}")
with BatchMemoryManager(
data_loader=train_loader,
max_physical_batch_size=2000, # 1000 ~= 9.4GB vram
optimizer=optimizer
) as memory_safe_data_loader:
best_test_set_accuracy = train_no_cap(
model,
hp,
memory_safe_data_loader,
test_dl,
optimizer,
criterion,
scheduler,
)
else:
print("Training without differential privacy")
best_test_set_accuracy = train_no_cap(
model,
hp,
train_dl,
test_dl,
optimizer,
criterion,
scheduler,
)
return model_init, model
def train(hp, train_dl, test_dl):
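    """Train the WideResNet teacher, with DP-SGD via Opacus when
    hp['epsilon'] is set. Returns (initial model, trained model).
    """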
model = WideResNet(
d=hp["wrn_depth"],
k=hp["wrn_width"],
n_classes=10,
input_features=3,
output_features=16,
strides=[1, 1, 2, 2],
)
    model = model.to(DEVICE)
    model = ModuleValidator.fix(model)
    ModuleValidator.validate(model, strict=True)

    model_init = copy.deepcopy(model)

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(
model.parameters(),
lr=0.1,
momentum=0.9,
nesterov=True,
weight_decay=5e-4
)
scheduler = MultiStepLR(
optimizer,
milestones=[int(i * hp['epochs']) for i in [0.3, 0.6, 0.8]],
gamma=0.2
)
print(f"Training with {hp['epochs']} epochs")
if hp['epsilon'] is not None:
privacy_engine = opacus.PrivacyEngine()
model, optimizer, train_loader = privacy_engine.make_private_with_epsilon(
module=model,
optimizer=optimizer,
data_loader=train_dl,
epochs=hp['epochs'],
target_epsilon=hp['epsilon'],
target_delta=hp['delta'],
max_grad_norm=hp['norm'],
)
print(f"DP epsilon = {hp['epsilon']}, delta = {hp['delta']}")
print(f"Using sigma={optimizer.noise_multiplier} and C = norm = {hp['norm']}")
with BatchMemoryManager(
data_loader=train_loader,
            max_physical_batch_size=2000,  # 1000 ~= 9.4GB vram
            optimizer=optimizer
        ) as memory_safe_data_loader:
            best_test_set_accuracy = train_no_cap(
                model,
                hp,
                memory_safe_data_loader,
test_dl,
optimizer,
criterion,
scheduler,
)
else:
print("Training without differential privacy")
best_test_set_accuracy = train_no_cap(
model,
hp,
train_dl,
test_dl,
optimizer,
criterion,
scheduler,
)
    return model_init, model


def main():
global DEVICE
parser = argparse.ArgumentParser(description='WideResNet O1 audit')
parser.add_argument('--norm', type=float, help='dpsgd norm clip factor', required=True)
parser.add_argument('--cuda', type=int, help='gpu index', required=False)
parser.add_argument('--epsilon', type=float, help='dp epsilon', required=False, default=None)
    parser.add_argument('--m', type=int, help='number of target points', required=True)
    parser.add_argument('--k', type=int, help='number of symmetric guesses', required=True)
    parser.add_argument('--epochs', type=int, help='number of epochs', required=True)
    parser.add_argument('--load', type=Path, help='path to a saved model directory to audit', required=False)
    parser.add_argument('--studentraw', action='store_true', help='train a raw (non-distilled) student', required=False)
    parser.add_argument('--distill', action='store_true', help='train a distilled student', required=False)
    args = parser.parse_args()

    if torch.cuda.is_available() and args.cuda is not None:
        DEVICE = torch.device(f'cuda:{args.cuda}')
    elif torch.cuda.is_available():
        DEVICE = torch.device('cuda:0')
    else:
        DEVICE = torch.device('cpu')

    hp = {
        "target_points": args.m,
        "wrn_depth": 16,
        "wrn_width": 1,
        "epsilon": args.epsilon,
        "delta": 1e-5,
        "norm": args.norm,
        "batch_size": 4096,
        "epochs": args.epochs,
        "k+": args.k,
        "k-": args.k,
        "p_value": 0.05,
    }

    hp['logfile'] = Path('WideResNet_{}_{}_{}_{}s_x{}_{}e_{}d_{}C.txt'.format(
        int(time.time()),
        hp['wrn_depth'],
        hp['wrn_width'],
        hp['batch_size'],
        hp['epochs'],
        hp['epsilon'],
        hp['delta'],
        hp['norm'],
    ))

    if args.load:
        train_dl, test_dl, _, _, _, _ = get_dataloaders3(hp['target_points'], hp['batch_size'])
        model_init, model_trained, adv_points, adv_labels, S = load(hp, args.load, train_dl)
        test_dl = None
    else:
        train_dl, test_dl, pure_train_dl, adv_points, adv_labels, S = get_dataloaders3(hp['target_points'], hp['batch_size'])
if args.studentraw:
print("=========================")
print("Training a raw student model")
print("=========================")
model_init, model_trained = train_small(hp, train_dl, test_dl)
elif args.distill:
print("=========================")
print("Training a distilled student model")
print("=========================")
teacher_init, teacher_trained = train(hp, train_dl, test_dl)
model_init, model_trained = train_knowledge_distillation(
teacher=teacher_trained,
train_dl=train_dl,
epochs=100,
device=DEVICE,
learning_rate=0.001,
T=2,
soft_target_loss_weight=0.25,
ce_loss_weight=0.75,
)
else:
print("=========================")
print("Training teacher model")
print("=========================")
model_init, model_trained = train(hp, train_dl, test_dl)

        # Persist the experiment state so a finished run can be re-audited later
        np.save("data/adv_points", adv_points)
        np.save("data/adv_labels", adv_labels)
        np.save("data/S", S)
        torch.save(model_init.state_dict(), "data/init_model.pt")
        torch.save(model_trained.state_dict(), "data/trained_model.pt")

scores = list()
criterion = nn.CrossEntropyLoss()
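    # Membership score per candidate: the drop in loss from the initial model
    # to the trained model. Points actually trained on should see a larger
    # drop, so a higher score suggests membership.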
    with torch.no_grad():
        model_init.eval()
        model_trained.eval()

        x_m = torch.from_numpy(adv_points).to(DEVICE)
        y_m = torch.from_numpy(adv_labels).long().to(DEVICE)

        for i in range(len(x_m)):
            x_point = x_m[i].unsqueeze(0).to(DEVICE)
            y_point = y_m[i].unsqueeze(0).to(DEVICE)
            is_in = S[i]

            wrn_outputs = model_init(x_point)
            outputs = wrn_outputs[0] if isinstance(wrn_outputs, (tuple, list)) else wrn_outputs
            init_loss = criterion(outputs, y_point)

            wrn_outputs = model_trained(x_point)
            outputs = wrn_outputs[0] if isinstance(wrn_outputs, (tuple, list)) else wrn_outputs
            trained_loss = criterion(outputs, y_point)

            scores.append(((init_loss - trained_loss).item(), is_in))

    scores = sorted(scores, key=lambda x: x[0])
    scores = np.array([x[1] for x in scores])

print(scores[:10])
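
    # One-run audit: with scores sorted ascending, guess "excluded" for the
    # k lowest and "included" for the k highest, then turn the number of
    # correct guesses into a high-confidence lower bound on epsilon via
    # get_eps_audit (cf. the one-training-run auditing analysis of
    # Steinke, Nasr, and Jagielski).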
    audits = (0, 0, 0, 0)
    for k in [10, 20, 50, 100, 200, 300, 500, 800, 1000, 1200, 1400, 1600, 1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500]:
        correct = np.sum(~scores[:k]) + np.sum(scores[-k:])
        total = len(scores)

        eps_lb = get_eps_audit(
            hp['target_points'],
            2*k,
            correct,
            hp['delta'],
            hp['p_value']
        )

        if eps_lb > audits[0]:
            audits = (eps_lb, k, correct, total)

    print(f"Audit total: {audits[2]}/{2*audits[1]}/{audits[3]}")
    print(f"p[ε < {audits[0]}] < {hp['p_value']} for true epsilon {hp['epsilon']}")

if test_dl is not None:
correct, total = evaluate_on(model_init, test_dl)
print(f"Init model accuracy: {correct}/{total} = {round(correct/total*100, 2)}")
correct, total = evaluate_on(model_trained, test_dl)
print(f"Done model accuracy: {correct}/{total} = {round(correct/total*100, 2)}")


if __name__ == '__main__':
main()