Compare commits

...

26 commits

Author SHA1 Message Date
1c16496e61
O1: allow audit to pull from training set 2024-12-08 23:35:53 -07:00
99ba0b3f6d
O1: multithread k-search 2024-12-07 17:46:07 -07:00
70d4e4dfdc
O1: slight cleanup 2024-12-07 17:37:19 -07:00
f407827ac1
O1: add wrn2 architecture 2024-12-07 14:00:39 -07:00
5da8c44743
O1: add simple convnet 2024-12-07 14:00:06 -07:00
7b77748dcd
O1: wrn2 fixes 2024-12-07 13:59:39 -07:00
2586c351d9
O1: insert attack points 2024-12-06 19:12:00 -07:00
e239602148
O1: update theoretical plots 2024-12-06 18:57:02 -07:00
ce3a848eb7
O1: add fast training code 2024-12-06 18:56:47 -07:00
86d16e53d7
O1: fix epochs 2024-12-06 17:12:38 -07:00
ebfbd88332
O1: with student model 2024-12-05 01:04:35 -07:00
a697d4687c
O1: new data splitting 2024-12-05 00:13:50 -07:00
369249ce69
O1: add epsilon audit 2024-12-03 23:26:50 -07:00
5d6f7e2916
O1: get audit vectors 2024-12-03 23:02:55 -07:00
4692502763
O1: return target point labels 2024-12-03 22:35:41 -07:00
d606245ad1
O1: save starting model 2024-12-03 19:43:05 -07:00
36deb4613b
O1: fix inout dataloader 2024-12-03 16:53:33 -07:00
e9af7cacf1
O1: fix dataloader batch size 2024-12-03 13:01:38 -07:00
0d67830f7e
O1: add training code 2024-12-02 23:48:50 -07:00
2eef211415
Wres: add conda env file 2024-12-02 19:01:26 -07:00
ad666283c5
Wres: clean up student a bit 2024-12-02 18:45:13 -07:00
ARVP
524576db31 fixed to use student model 2024-12-02 17:58:01 -07:00
e3ccfbcf76
Wres: args for students 2024-12-02 17:31:11 -07:00
Ruby
5be312bf18 changed to same data loaders as train.py and added saving student model 2024-12-01 15:33:03 -07:00
7208c16efc
Wres: epsilon in args 2024-12-01 14:49:13 -07:00
aa190cd4f1
Wres: toggle non-dp training 2024-12-01 13:58:50 -07:00
15 changed files with 2320 additions and 146 deletions

310
env.yaml Normal file

@@ -0,0 +1,310 @@
name: 626_pytorch_lira
channels:
- pytorch
- nvidia
- conda-forge
- defaults
dependencies:
- _libgcc_mutex=0.1=conda_forge
- _openmp_mutex=4.5=2_gnu
- alsa-lib=1.2.3.2=h166bdaf_0
- anyio=4.5.0=pyhd8ed1ab_0
- appdirs=1.4.4=pyh9f0ad1d_0
- argon2-cffi=23.1.0=pyhd8ed1ab_0
- argon2-cffi-bindings=21.2.0=py38h01eb140_4
- arrow=1.3.0=pyhd8ed1ab_0
- asttokens=2.4.1=pyhd8ed1ab_0
- async-lru=2.0.4=pyhd8ed1ab_0
- attrs=24.2.0=pyh71513ae_0
- babel=2.16.0=pyhd8ed1ab_0
- backcall=0.2.0=pyh9f0ad1d_0
- beautifulsoup4=4.12.3=pyha770c72_0
- blas=1.0=mkl
- bleach=6.1.0=pyhd8ed1ab_0
- brotli=1.1.0=hd590300_1
- brotli-bin=1.1.0=hd590300_1
- brotli-python=1.1.0=py38h17151c0_1
- bzip2=1.0.8=h4bc722e_7
- ca-certificates=2024.8.30=hbcca054_0
- cached-property=1.5.2=hd8ed1ab_1
- cached_property=1.5.2=pyha770c72_1
- certifi=2024.8.30=pyhd8ed1ab_0
- cffi=1.14.6=py38ha65f79e_0
- charset-normalizer=3.4.0=pyhd8ed1ab_0
- click=8.1.7=unix_pyh707e725_0
- colorama=0.4.6=pyhd8ed1ab_0
- comm=0.2.2=pyhd8ed1ab_0
- contourpy=1.1.1=py38h7f3f72f_1
- cuda-cudart=12.1.105=0
- cuda-cupti=12.1.105=0
- cuda-libraries=12.1.0=0
- cuda-nvrtc=12.1.105=0
- cuda-nvtx=12.1.105=0
- cuda-opencl=12.6.77=0
- cuda-runtime=12.1.0=0
- cuda-version=12.6=3
- cycler=0.12.1=pyhd8ed1ab_0
- dataclasses=0.8=pyhc8e2a94_3
- dbus=1.13.6=h48d8840_2
- debugpy=1.8.5=py38h6d02427_0
- decorator=5.1.1=pyhd8ed1ab_0
- defusedxml=0.7.1=pyhd8ed1ab_0
- docker-pycreds=0.4.0=py_0
- entrypoints=0.4=pyhd8ed1ab_0
- exceptiongroup=1.2.2=pyhd8ed1ab_0
- executing=2.1.0=pyhd8ed1ab_0
- expat=2.6.4=h5888daf_0
- ffmpeg=4.3=hf484d3e_0
- filelock=3.16.1=pyhd8ed1ab_0
- fontconfig=2.14.2=h14ed4e7_0
- fonttools=4.53.1=py38h2019614_0
- fqdn=1.5.1=pyhd8ed1ab_0
- freetype=2.12.1=h267a509_2
- fsspec=2024.10.0=pyhff2d567_0
- functorch=2.0.0=pyhd8ed1ab_0
- gettext=0.22.5=he02047a_3
- gettext-tools=0.22.5=he02047a_3
- giflib=5.2.2=hd590300_0
- gitdb=4.0.11=pyhd8ed1ab_0
- gitpython=3.1.43=pyhd8ed1ab_0
- glib=2.68.4=h9c3ff4c_0
- glib-tools=2.68.4=h9c3ff4c_0
- gmp=6.3.0=hac33072_2
- gmpy2=2.1.5=py38h6a1700d_1
- gnutls=3.6.13=h85f3911_1
- gst-plugins-base=1.18.5=hf529b03_0
- gstreamer=1.18.5=h76c114f_0
- h11=0.14.0=pyhd8ed1ab_0
- h2=4.1.0=pyhd8ed1ab_0
- hpack=4.0.0=pyh9f0ad1d_0
- httpcore=1.0.7=pyh29332c3_1
- httpx=0.27.2=pyhd8ed1ab_0
- hyperframe=6.0.1=pyhd8ed1ab_0
- icu=68.2=h9c3ff4c_0
- idna=3.10=pyhd8ed1ab_0
- importlib-metadata=8.5.0=pyha770c72_0
- importlib-resources=6.4.5=pyhd8ed1ab_0
- importlib_resources=6.4.5=pyhd8ed1ab_0
- iniconfig=2.0.0=pyhd8ed1ab_0
- intel-openmp=2022.1.0=h9e868ea_3769
- ipykernel=6.29.5=pyh3099207_0
- ipython=8.12.2=pyh41d4057_0
- ipywidgets=8.1.5=pyhd8ed1ab_0
- isoduration=20.11.0=pyhd8ed1ab_0
- jedi=0.19.1=pyhd8ed1ab_0
- jinja2=3.1.4=pyhd8ed1ab_0
- joblib=1.4.2=pyhd8ed1ab_0
- jpeg=9e=h166bdaf_2
- json5=0.9.25=pyhd8ed1ab_0
- jsonpointer=3.0.0=py38h578d9bd_0
- jsonschema=4.23.0=pyhd8ed1ab_0
- jsonschema-specifications=2024.10.1=pyhd8ed1ab_0
- jsonschema-with-format-nongpl=4.23.0=hd8ed1ab_0
- jupyter=1.1.1=pyhd8ed1ab_0
- jupyter-lsp=2.2.5=pyhd8ed1ab_0
- jupyter_client=8.6.3=pyhd8ed1ab_0
- jupyter_console=6.6.3=pyhd8ed1ab_0
- jupyter_core=5.7.2=pyh31011fe_1
- jupyter_events=0.10.0=pyhd8ed1ab_0
- jupyter_server=2.14.2=pyhd8ed1ab_0
- jupyter_server_terminals=0.5.3=pyhd8ed1ab_0
- jupyterlab=4.3.0=pyhd8ed1ab_0
- jupyterlab_pygments=0.3.0=pyhd8ed1ab_1
- jupyterlab_server=2.27.3=pyhd8ed1ab_0
- jupyterlab_widgets=3.0.13=pyhd8ed1ab_0
- keyutils=1.6.1=h166bdaf_0
- kiwisolver=1.4.5=py38h7f3f72f_1
- krb5=1.19.3=h3790be6_0
- lame=3.100=h166bdaf_1003
- lcms2=2.15=hfd0df8a_0
- ld_impl_linux-64=2.43=h712a8e2_2
- lerc=4.0.0=h27087fc_0
- libabseil=20240116.2=cxx17_he02047a_1
- libasprintf=0.22.5=he8f35ee_3
- libasprintf-devel=0.22.5=he8f35ee_3
- libblas=3.9.0=16_linux64_mkl
- libbrotlicommon=1.1.0=hd590300_1
- libbrotlidec=1.1.0=hd590300_1
- libbrotlienc=1.1.0=hd590300_1
- libcblas=3.9.0=16_linux64_mkl
- libclang=11.1.0=default_ha53f305_1
- libcublas=12.1.0.26=0
- libcufft=11.0.2.4=0
- libcufile=1.11.1.6=0
- libcurand=10.3.7.77=0
- libcusolver=11.4.4.55=0
- libcusparse=12.0.2.55=0
- libdeflate=1.17=h0b41bf4_0
- libedit=3.1.20191231=he28a2e2_2
- libevent=2.1.10=h9b69904_4
- libexpat=2.6.4=h5888daf_0
- libffi=3.3=h58526e2_2
- libgcc=14.2.0=h77fa898_1
- libgcc-ng=14.2.0=h69a702a_1
- libgettextpo=0.22.5=he02047a_3
- libgettextpo-devel=0.22.5=he02047a_3
- libgfortran=14.2.0=h69a702a_1
- libgfortran-ng=14.2.0=h69a702a_1
- libgfortran5=14.2.0=hd5240d6_1
- libglib=2.68.4=h3e27bee_0
- libgomp=14.2.0=h77fa898_1
- libiconv=1.17=hd590300_2
- libjpeg-turbo=2.0.0=h9bf148f_0
- liblapack=3.9.0=16_linux64_mkl
- libllvm11=11.1.0=he0ac6c6_5
- libnpp=12.0.2.50=0
- libnvjitlink=12.1.105=0
- libnvjpeg=12.1.1.14=0
- libogg=1.3.5=h4ab18f5_0
- libopus=1.3.1=h7f98852_1
- libpng=1.6.43=h2797004_0
- libpq=13.8=hd77ab85_0
- libprotobuf=4.25.3=h08a7969_0
- libsodium=1.0.18=h36c2ea0_1
- libsqlite=3.46.0=hde9e2c9_0
- libstdcxx=14.2.0=hc0a3c3a_1
- libstdcxx-ng=14.2.0=h4852527_1
- libtiff=4.5.0=h6adf6a1_2
- libuuid=2.38.1=h0b41bf4_0
- libvorbis=1.3.7=h9c3ff4c_0
- libwebp=1.3.2=h11a3e52_0
- libwebp-base=1.3.2=hd590300_1
- libxcb=1.13=h7f98852_1004
- libxkbcommon=1.0.3=he3ba5ed_0
- libxml2=2.9.12=h72842e0_0
- libzlib=1.2.13=h4ab18f5_6
- lightning-bolts=0.6.0.post1=pyhd8ed1ab_0
- lightning-utilities=0.11.8=pyhd8ed1ab_0
- llvm-openmp=15.0.7=h0cdce71_0
- markupsafe=2.1.5=py38h01eb140_0
- matplotlib=3.7.3=py38h578d9bd_0
- matplotlib-base=3.7.3=py38h58ed7fa_0
- matplotlib-inline=0.1.7=pyhd8ed1ab_0
- mistune=3.0.2=pyhd8ed1ab_0
- mkl=2022.1.0=hc2b9512_224
- mpc=1.3.1=h24ddda3_1
- mpfr=4.2.1=h90cbb55_3
- mpmath=1.3.0=pyhd8ed1ab_0
- munkres=1.1.4=pyh9f0ad1d_0
- mysql-common=8.0.32=h14678bc_0
- mysql-libs=8.0.32=h54cf53e_0
- nbclient=0.10.1=pyhd8ed1ab_0
- nbconvert-core=7.16.4=pyhd8ed1ab_1
- nbformat=5.10.4=pyhd8ed1ab_0
- ncurses=6.5=he02047a_1
- nest-asyncio=1.6.0=pyhd8ed1ab_0
- nettle=3.6=he412f7d_0
- networkx=3.1=pyhd8ed1ab_0
- notebook=7.0.6=py38h06a4308_0
- notebook-shim=0.2.4=pyhd8ed1ab_0
- nspr=4.36=h5888daf_0
- nss=3.100=hca3bf56_0
- numpy=1.24.4=py38h59b608b_0
- opacus=1.5.2=pyhd8ed1ab_0
- openh264=2.1.1=h780b84a_0
- openjpeg=2.5.0=hfec8fc6_2
- openssl=1.1.1w=hd590300_0
- opt_einsum=3.4.0=pyhd8ed1ab_0
- overrides=7.7.0=pyhd8ed1ab_0
- packaging=24.2=pyhff2d567_1
- pandas=2.0.3=py38h01efb38_1
- pandocfilters=1.5.0=pyhd8ed1ab_0
- parso=0.8.4=pyhd8ed1ab_0
- pcre=8.45=h9c3ff4c_0
- pexpect=4.9.0=pyhd8ed1ab_0
- pickleshare=0.7.5=py_1003
- pillow=9.4.0=py38hde6dc18_1
- pip=24.3.1=pyh8b19718_0
- pkgutil-resolve-name=1.3.10=pyhd8ed1ab_1
- platformdirs=4.3.6=pyhd8ed1ab_0
- pluggy=1.5.0=pyhd8ed1ab_0
- pooch=1.8.2=pyhd8ed1ab_0
- prometheus_client=0.21.0=pyhd8ed1ab_0
- prompt-toolkit=3.0.48=pyha770c72_0
- prompt_toolkit=3.0.48=hd8ed1ab_0
- protobuf=4.25.3=py38hb5c7596_0
- psutil=6.0.0=py38hfb59056_0
- pthread-stubs=0.4=hb9d3cd8_1002
- ptyprocess=0.7.0=pyhd3deb0d_0
- pure_eval=0.2.3=pyhd8ed1ab_0
- pycparser=2.22=pyhd8ed1ab_0
- pygments=2.18.0=pyhd8ed1ab_0
- pyparsing=3.1.4=pyhd8ed1ab_0
- pyqt=5.12.3=py38h578d9bd_8
- pyqt-impl=5.12.3=py38h0ffb2e6_8
- pyqt5-sip=4.19.18=py38h709712a_8
- pyqtchart=5.12=py38h7400c14_8
- pyqtwebengine=5.12.1=py38h7400c14_8
- pysocks=1.7.1=pyha2e5f31_6
- pytest=8.3.3=pyhd8ed1ab_0
- python=3.8.6=hffdb5ce_5_cpython
- python-dateutil=2.9.0=pyhd8ed1ab_0
- python-fastjsonschema=2.20.0=pyhd8ed1ab_0
- python-json-logger=2.0.7=pyhd8ed1ab_0
- python-tzdata=2024.2=pyhd8ed1ab_0
- python_abi=3.8=5_cp38
- pytorch=2.4.1=py3.8_cuda12.1_cudnn9.1.0_0
- pytorch-cuda=12.1=ha16c6d3_6
- pytorch-lightning=2.4.0=pyhd8ed1ab_0
- pytorch-mutex=1.0=cuda
- pytz=2024.2=pyhd8ed1ab_0
- pyyaml=6.0.2=py38h2019614_0
- pyzmq=26.2.0=py38h6c80b9a_0
- qt=5.12.9=hda022c4_4
- readline=8.2=h8228510_1
- referencing=0.35.1=pyhd8ed1ab_0
- requests=2.32.3=pyhd8ed1ab_0
- rfc3339-validator=0.1.4=pyhd8ed1ab_0
- rfc3986-validator=0.1.1=pyh9f0ad1d_0
- rpds-py=0.20.0=py38h4005ec7_0
- scikit-learn=1.3.2=py38ha25d942_2
- scipy=1.10.1=py38h59b608b_3
- send2trash=1.8.3=pyh0d859eb_0
- sentry-sdk=2.19.0=pyhd8ed1ab_0
- setproctitle=1.3.3=py38h01eb140_0
- setuptools=75.3.0=pyhd8ed1ab_0
- six=1.16.0=pyh6c4a22f_0
- smmap=5.0.0=pyhd8ed1ab_0
- sniffio=1.3.1=pyhd8ed1ab_0
- soupsieve=2.5=pyhd8ed1ab_1
- sqlite=3.46.0=h6d4b2fc_0
- stack_data=0.6.2=pyhd8ed1ab_0
- sympy=1.13.3=pypyh2585a3b_103
- terminado=0.18.1=pyh0d859eb_0
- threadpoolctl=3.5.0=pyhc1e730c_0
- tinycss2=1.4.0=pyhd8ed1ab_0
- tk=8.6.13=noxft_h4845f30_101
- tomli=2.0.2=pyhd8ed1ab_0
- torchaudio=2.4.1=py38_cu121
- torchmetrics=1.5.2=pyhe5570ce_0
- torchtriton=3.0.0=py38
- torchvision=0.20.0=py38_cu121
- tornado=6.4.1=py38hfb59056_0
- tqdm=4.67.1=pyhd8ed1ab_0
- traitlets=5.14.3=pyhd8ed1ab_0
- types-python-dateutil=2.9.0.20241003=pyhff2d567_0
- typing-extensions=4.12.2=hd8ed1ab_0
- typing_extensions=4.12.2=pyha770c72_0
- typing_utils=0.1.0=pyhd8ed1ab_0
- unicodedata2=15.1.0=py38h01eb140_0
- uri-template=1.3.0=pyhd8ed1ab_0
- urllib3=2.2.3=pyhd8ed1ab_0
- wandb=0.16.6=pyhd8ed1ab_1
- wcwidth=0.2.13=pyhd8ed1ab_0
- webcolors=24.8.0=pyhd8ed1ab_0
- webencodings=0.5.1=pyhd8ed1ab_2
- websocket-client=1.8.0=pyhd8ed1ab_0
- wheel=0.45.1=pyhd8ed1ab_0
- widgetsnbextension=4.0.13=pyhd8ed1ab_0
- xorg-libxau=1.0.11=hb9d3cd8_1
- xorg-libxdmcp=1.1.5=hb9d3cd8_0
- xz=5.2.6=h166bdaf_0
- yaml=0.2.5=h7f98852_2
- zeromq=4.3.5=h59595ed_1
- zipp=3.21.0=pyhd8ed1ab_0
- zlib=1.2.13=h4ab18f5_6
- zstandard=0.23.0=py38h62bed22_0
- zstd=1.5.6=ha6fb4c9_0
- pip:
- pyvacy==0.0.32
- torchsummary==1.5.1
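Presumably the environment above is recreated with conda env create -f env.yaml and activated as conda activate 626_pytorch_lira (the name declared at the top); the entries under pip: (pyvacy, torchsummary) are installed automatically during creation.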

View file

@@ -15,7 +15,8 @@ from torchvision import models, transforms
from torchvision.datasets import CIFAR10
from tqdm import tqdm
from wide_resnet import WideResNet
import student_model
from utils import json_file_to_pyobj, get_loaders
parser = argparse.ArgumentParser()
parser.add_argument("--n_queries", default=2, type=int)
@@ -27,32 +28,14 @@ args = parser.parse_args()
@torch.no_grad()
def run():
DEVICE = torch.device("cuda") if torch.cuda.is_available() else torch.device("mps")
dataset = "cifar10"
# Dataset
transform = transforms.Compose(
[
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, padding=4),
transforms.ToTensor(),
transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2470, 0.2435, 0.2616]),
]
)
datadir = Path().home() / "opt/data/cifar"
train_ds = CIFAR10(root=datadir, train=True, download=True, transform=transform)
train_dl = DataLoader(train_ds, batch_size=128, shuffle=False, num_workers=4)
train_dl, test_dl = get_loaders(dataset, 4096)
# Infer the logits with multiple queries
for path in os.listdir(args.savedir):
if args.model == "wresnet28-2":
m = WideResNet(28, 2, 0.0, 10)
elif args.model == "wresnet28-10":
m = WideResNet(28, 10, 0.3, 10)
elif args.model == "resnet18":
m = models.resnet18(weights=None, num_classes=10)
m.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
m.maxpool = nn.Identity()
else:
raise NotImplementedError
m = student_model.Model(num_classes=10)
m.load_state_dict(torch.load(os.path.join(args.savedir, path, "model.pt")))
m.to(DEVICE)
m.eval()

16
lira-pytorch/run_distilled.sh Executable file

@@ -0,0 +1,16 @@
python3 student_shadow_train.py --epochs 100 --shadow_id 0 --debug
python3 student_shadow_train.py --epochs 100 --shadow_id 1 --debug
python3 student_shadow_train.py --epochs 100 --shadow_id 2 --debug
python3 student_shadow_train.py --epochs 100 --shadow_id 3 --debug
python3 student_shadow_train.py --epochs 100 --shadow_id 4 --debug
python3 student_shadow_train.py --epochs 100 --shadow_id 5 --debug
python3 student_shadow_train.py --epochs 100 --shadow_id 6 --debug
python3 student_shadow_train.py --epochs 100 --shadow_id 7 --debug
python3 student_shadow_train.py --epochs 100 --shadow_id 8 --debug
python3 student_shadow_train.py --epochs 100 --shadow_id 9 --debug
python3 student_shadow_train.py --epochs 100 --shadow_id 10 --debug
python3 student_shadow_train.py --epochs 100 --shadow_id 11 --debug
python3 student_shadow_train.py --epochs 100 --shadow_id 12 --debug
python3 student_shadow_train.py --epochs 100 --shadow_id 13 --debug
python3 student_shadow_train.py --epochs 100 --shadow_id 14 --debug
python3 student_shadow_train.py --epochs 100 --shadow_id 15 --debug

View file

@@ -0,0 +1,30 @@
import torch
import torch.nn as nn
# Small convolutional student model; forward() returns plain logits (not a tuple), and the flattened conv output feeds the classifier directly (no pooling after flattening).
class ModifiedLightNNCosine(nn.Module):
def __init__(self, num_classes=10):
super(ModifiedLightNNCosine, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(3, 16, kernel_size=3, padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(16, 16, kernel_size=3, padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.classifier = nn.Sequential(
nn.Linear(1024, 256),
nn.ReLU(),
nn.Dropout(0.1),
nn.Linear(256, num_classes)
)
def forward(self, x):
x = self.features(x)
flattened_conv_output = torch.flatten(x, 1)
x = self.classifier(flattened_conv_output)
return x
Model = ModifiedLightNNCosine
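A quick shape check (a minimal sketch, assuming this file is importable as student_model and CIFAR-10-sized inputs) shows why the first classifier layer is Linear(1024, 256): the two stride-2 max-pools reduce 32x32 to 8x8, and 16 channels * 8 * 8 = 1024.

import torch
import student_model  # assumes this file is on the import path

model = student_model.Model(num_classes=10)
x = torch.randn(4, 3, 32, 32)            # stand-in CIFAR-10 batch
feats = model.features(x)                # -> (4, 16, 8, 8) after two 2x2 max-pools
print(feats.shape, torch.flatten(feats, 1).shape)  # flattened size 1024 matches nn.Linear(1024, 256)
print(model(x).shape)                    # -> (4, 10) logits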

View file

@@ -0,0 +1,246 @@
# PyTorch implementation of
# https://github.com/tensorflow/privacy/blob/master/research/mi_lira_2021/train.py
#
# author: Chenxiang Zhang (orientino)
#random stuff
import os
import argparse
import time
from pathlib import Path
#torch stuff
import numpy as np
import pytorch_lightning as pl
import torch
import wandb
from torch import nn
from torch.utils.data import DataLoader
from torchvision import models, transforms
from torchvision.datasets import CIFAR10
from tqdm import tqdm
from torch.optim.lr_scheduler import MultiStepLR
import torch.optim as optim
import torch.nn.functional as F
import torchvision
from torchvision import transforms
#privacy libraries
import opacus
from opacus.validators import ModuleValidator
# custom modules
from utils import json_file_to_pyobj, get_loaders
from WideResNet import WideResNet
import student_model
#suppress warning
import warnings
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser()
parser.add_argument("--lr", default=0.1, type=float)
parser.add_argument("--epochs", default=1, type=int)
parser.add_argument("--n_shadows", default=16, type=int)
parser.add_argument("--shadow_id", default=1, type=int)
parser.add_argument("--model", default="resnet18", type=str)
parser.add_argument("--pkeep", default=0.5, type=float)
parser.add_argument("--savedir", default="exp/cifar10", type=str)
parser.add_argument("--debug", action="store_true")
args = parser.parse_args()
DEVICE = torch.device("cuda") if torch.cuda.is_available() else torch.device("mps")
def get_trainset(train_batch_size=128, test_batch_size=10):
print(f"Train batch size: {train_batch_size}")
normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
train_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Lambda(lambda x: F.pad(x.unsqueeze(0),
(4, 4, 4, 4), mode='reflect').squeeze()),
transforms.ToPILImage(),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
test_transform = transforms.Compose([
transforms.ToTensor(),
normalize
])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=False, transform=train_transform)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=False, transform=test_transform)
return trainset, testset
@torch.no_grad()
def test(model, test_dl, teacher=False):
device = DEVICE
model.to(device)
model.eval()
correct = 0
total = 0
for inputs, labels in test_dl:
inputs, labels = inputs.to(device), labels.to(device)
if teacher:
outputs, _, _, _ = model(inputs)
else:
outputs = model(inputs)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
accuracy = 100 * correct / total
print(f"Test Accuracy: {accuracy:.2f}%")
return accuracy
def run(teacher, student):
device = DEVICE
seed = np.random.randint(0, 1000000000)
seed ^= int(time.time())
pl.seed_everything(seed)
args.debug = True
wandb.init(project="lira", mode="disabled" if args.debug else "online")
wandb.config.update(args)
# Dataset
train_ds, test_ds = get_trainset()
# Compute the IN / OUT subset:
# If we run each experiment independently then even after a lot of trials
# there will still probably be some examples that were always included
# or always excluded. So instead, with experiment IDs, we guarantee that
# after `args.n_shadows` are done, each example is seen exactly half
# of the time in train, and half of the time not in train.
size = len(train_ds)
np.random.seed(seed)
if args.n_shadows is not None:
np.random.seed(0)
keep = np.random.uniform(0, 1, size=(args.n_shadows, size))
order = keep.argsort(0)
keep = order < int(args.pkeep * args.n_shadows)
keep = np.array(keep[args.shadow_id], dtype=bool)
keep = keep.nonzero()[0]
else:
keep = np.random.choice(size, size=int(args.pkeep * size), replace=False)
keep.sort()
keep_bool = np.full((size), False)
keep_bool[keep] = True
train_ds = torch.utils.data.Subset(train_ds, keep)
train_dl = DataLoader(train_ds, batch_size=128, shuffle=True, num_workers=4)
test_dl = DataLoader(test_ds, batch_size=128, shuffle=False, num_workers=4)
# Train
learning_rate=0.001
T=2
soft_target_loss_weight=0.25
ce_loss_weight=0.75
ce_loss = nn.CrossEntropyLoss()
optimizer = optim.Adam(student.parameters(), lr=learning_rate)
teacher.eval() # Teacher set to evaluation mode
student.train() # Student to train mode
for epoch in range(args.epochs):
running_loss = 0.0
for inputs, labels in train_dl:
inputs, labels = inputs.to(device), labels.to(device)
optimizer.zero_grad()
# Forward pass with the teacher model - do not save gradients here as we do not change the teacher's weights
with torch.no_grad():
teacher_logits, _, _, _ = teacher(inputs)
# Forward pass with the student model
student_logits = student(inputs)
#Soften the student logits by applying softmax first and log() second
soft_targets = nn.functional.softmax(teacher_logits / T, dim=-1)
soft_prob = nn.functional.log_softmax(student_logits / T, dim=-1)
# Calculate the soft targets loss. Scaled by T**2 as suggested by the authors of the paper "Distilling the knowledge in a neural network"
soft_targets_loss = torch.sum(soft_targets * (soft_targets.log() - soft_prob)) / soft_prob.size()[0] * (T**2)
# Calculate the true label loss
label_loss = ce_loss(student_logits, labels)
# Weighted sum of the two losses
loss = soft_target_loss_weight * soft_targets_loss + ce_loss_weight * label_loss
loss.backward()
optimizer.step()
running_loss += loss.item()
print(f"Epoch {epoch+1}/{args.epochs}, Loss: {running_loss / len(train_dl)}")
accuracy = test(student, test_dl)
#saving models
print("saving model")
savedir = os.path.join(args.savedir, str(args.shadow_id))
os.makedirs(savedir, exist_ok=True)
np.save(savedir + "/keep.npy", keep_bool)
torch.save(student.state_dict(), savedir + "/model.pt")
def main():
epochs = args.epochs
json_options = json_file_to_pyobj("wresnet16-audit-cifar10.json")
training_configurations = json_options.training
wrn_depth = training_configurations.wrn_depth
wrn_width = training_configurations.wrn_width
dataset = training_configurations.dataset.lower()
if torch.cuda.is_available():
device = torch.device('cuda:0')
else:
device = torch.device('cpu')
print("Load the teacher model")
# instantiate teacher model
strides = [1, 1, 2, 2]
teacher = WideResNet(d=wrn_depth, k=wrn_width, n_classes=10, input_features=3, output_features=16, strides=strides)
teacher = ModuleValidator.fix(teacher)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(teacher.parameters(), lr=0.1, momentum=0.9, nesterov=True, weight_decay=5e-4)
scheduler = MultiStepLR(optimizer, milestones=[int(elem*epochs) for elem in [0.3, 0.6, 0.8]], gamma=0.2)
train_loader, test_loader = get_loaders(dataset, training_configurations.batch_size)
best_test_set_accuracy = 0
dp_epsilon = 8
dp_delta = 1e-5
norm = 1.0
privacy_engine = opacus.PrivacyEngine()
teacher, optimizer, train_loader = privacy_engine.make_private_with_epsilon(
module=teacher,
optimizer=optimizer,
data_loader=train_loader,
epochs=epochs,
target_epsilon=dp_epsilon,
target_delta=dp_delta,
max_grad_norm=norm,
)
teacher.load_state_dict(torch.load(os.path.join("wrn-1733078278-8e-1e-05d-12.0n-dict.pt"), weights_only=True))
teacher.to(device)
teacher.eval()
#instantiate student "shadow model"
student = student_model.Model(num_classes=10).to(device)
# Check norm of layer for both networks -- student should be smaller?
print("Norm of 1st layer for teacher:", torch.norm(teacher.conv1.weight).item())
print("Norm of 1st layer for student:", torch.norm(student.features[0].weight).item())
#train student shadow model
run(teacher=teacher, student=student)
if __name__ == "__main__":
main()
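The IN/OUT bookkeeping in run() (the keep matrix built from the fixed seed 0) guarantees that, once all n_shadows runs are finished, every training example has been IN for exactly int(pkeep * n_shadows) of the shadow models, matching the comment above. A small self-contained sketch of that argsort trick, with hypothetical sizes:

import numpy as np

n_shadows, size, pkeep = 16, 1000, 0.5     # hypothetical sizes
np.random.seed(0)                          # same fixed seed as in run()
keep = np.random.uniform(0, 1, size=(n_shadows, size))
order = keep.argsort(0)
keep = order < int(pkeep * n_shadows)      # boolean (n_shadows, size) membership matrix
# Each column of order is a permutation of 0..n_shadows-1, so every example is IN for exactly 8 of the 16 shadows.
print(np.unique(keep.sum(axis=0)))         # -> [8]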

143
one_run_audit/WideResNet.py Normal file

@@ -0,0 +1,143 @@
import torch
import torch.nn as nn
from torchsummary import summary
import math
class IndividualBlock1(nn.Module):
def __init__(self, input_features, output_features, stride, subsample_input=True, increase_filters=True):
super(IndividualBlock1, self).__init__()
self.activation = nn.ReLU(inplace=True)
self.batch_norm1 = nn.BatchNorm2d(input_features)
self.batch_norm2 = nn.BatchNorm2d(output_features)
self.conv1 = nn.Conv2d(input_features, output_features, kernel_size=3, stride=stride, padding=1, bias=False)
self.conv2 = nn.Conv2d(output_features, output_features, kernel_size=3, stride=1, padding=1, bias=False)
self.subsample_input = subsample_input
self.increase_filters = increase_filters
if subsample_input:
self.conv_inp = nn.Conv2d(input_features, output_features, kernel_size=1, stride=2, padding=0, bias=False)
elif increase_filters:
self.conv_inp = nn.Conv2d(input_features, output_features, kernel_size=1, stride=1, padding=0, bias=False)
def forward(self, x):
if self.subsample_input or self.increase_filters:
x = self.batch_norm1(x)
x = self.activation(x)
x1 = self.conv1(x)
else:
x1 = self.batch_norm1(x)
x1 = self.activation(x1)
x1 = self.conv1(x1)
x1 = self.batch_norm2(x1)
x1 = self.activation(x1)
x1 = self.conv2(x1)
if self.subsample_input or self.increase_filters:
return self.conv_inp(x) + x1
else:
return x + x1
class IndividualBlockN(nn.Module):
def __init__(self, input_features, output_features, stride):
super(IndividualBlockN, self).__init__()
self.activation = nn.ReLU(inplace=True)
self.batch_norm1 = nn.BatchNorm2d(input_features)
self.batch_norm2 = nn.BatchNorm2d(output_features)
self.conv1 = nn.Conv2d(input_features, output_features, kernel_size=3, stride=stride, padding=1, bias=False)
self.conv2 = nn.Conv2d(output_features, output_features, kernel_size=3, stride=stride, padding=1, bias=False)
def forward(self, x):
x1 = self.batch_norm1(x)
x1 = self.activation(x1)
x1 = self.conv1(x1)
x1 = self.batch_norm2(x1)
x1 = self.activation(x1)
x1 = self.conv2(x1)
return x1 + x
class Nblock(nn.Module):
def __init__(self, N, input_features, output_features, stride, subsample_input=True, increase_filters=True):
super(Nblock, self).__init__()
layers = []
for i in range(N):
if i == 0:
layers.append(IndividualBlock1(input_features, output_features, stride, subsample_input, increase_filters))
else:
layers.append(IndividualBlockN(output_features, output_features, stride=1))
self.nblockLayer = nn.Sequential(*layers)
def forward(self, x):
return self.nblockLayer(x)
class WideResNet(nn.Module):
def __init__(self, d, k, n_classes, input_features, output_features, strides):
super(WideResNet, self).__init__()
self.conv1 = nn.Conv2d(input_features, output_features, kernel_size=3, stride=strides[0], padding=1, bias=False)
filters = [16 * k, 32 * k, 64 * k]
self.out_filters = filters[-1]
N = (d - 4) // 6
increase_filters = k > 1
self.block1 = Nblock(N, input_features=output_features, output_features=filters[0], stride=strides[1], subsample_input=False, increase_filters=increase_filters)
self.block2 = Nblock(N, input_features=filters[0], output_features=filters[1], stride=strides[2])
self.block3 = Nblock(N, input_features=filters[1], output_features=filters[2], stride=strides[3])
self.batch_norm = nn.BatchNorm2d(filters[-1])
self.activation = nn.ReLU(inplace=True)
self.avg_pool = nn.AvgPool2d(kernel_size=8)
self.fc = nn.Linear(filters[-1], n_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
x = self.conv1(x)
attention1 = self.block1(x)
attention2 = self.block2(attention1)
attention3 = self.block3(attention2)
out = self.batch_norm(attention3)
out = self.activation(out)
out = self.avg_pool(out)
out = out.view(-1, self.out_filters)
return self.fc(out), attention1, attention2, attention3
if __name__ == '__main__':
# change d and k if you want to check a model other than WRN-40-2
d = 40
k = 2
strides = [1, 1, 2, 2]
net = WideResNet(d=d, k=k, n_classes=10, input_features=3, output_features=16, strides=strides)
# verify that an output is produced
sample_input = torch.ones(size=(1, 3, 32, 32), requires_grad=False)
net(sample_input)
# Summarize model
summary(net, input_size=(3, 32, 32))
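Note that forward() returns a 4-tuple (logits plus the three intermediate block activations), which is why evaluate_on() and score_model() in audit.py below check len(outputs) == 4 before taking the logits. A minimal sketch (assuming this file is importable as WideResNet):

import torch
from WideResNet import WideResNet  # assumes one_run_audit/ is on the import path

net = WideResNet(d=16, k=1, n_classes=10, input_features=3, output_features=16, strides=[1, 1, 2, 2])
logits, att1, att2, att3 = net(torch.ones(2, 3, 32, 32))
print(logits.shape)  # -> (2, 10); att1..att3 are the per-block feature maps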

838
one_run_audit/audit.py Normal file

@@ -0,0 +1,838 @@
import argparse
import equations
import numpy as np
import time
import copy
import torch
import torch.nn as nn
from torch import optim
from torch.optim.lr_scheduler import MultiStepLR
from torch.utils.data import DataLoader, Subset, TensorDataset, ConcatDataset
import torch.nn.functional as F
from pathlib import Path
from torchvision import transforms
from torchvision.datasets import CIFAR10
import pytorch_lightning as pl
import opacus
import random
from tqdm import tqdm
from opacus.validators import ModuleValidator
from opacus.utils.batch_memory_manager import BatchMemoryManager
from concurrent.futures import ProcessPoolExecutor, as_completed
from WideResNet import WideResNet
from equations import get_eps_audit
import student_model
import fast_model
import convnet_classifier
import wrn
import warnings
warnings.filterwarnings("ignore")
DEVICE = None
DTYPE = None
DATADIR = Path("./data")
def get_dataloaders3(m=1000, train_batch_size=128, test_batch_size=10):
seed = np.random.randint(0, 1e9)
seed ^= int(time.time())
pl.seed_everything(seed)
train_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Lambda(lambda x: F.pad(x.unsqueeze(0),
(4, 4, 4, 4), mode='reflect').squeeze()),
transforms.ToPILImage(),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
test_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
train_ds = CIFAR10(root=DATADIR, train=True, download=True, transform=train_transform)
test_ds = CIFAR10(root=DATADIR, train=False, download=True, transform=test_transform)
# Original dataset
x_train = np.stack([train_ds[i][0].numpy() for i in range(len(train_ds))])
y_train = np.array(train_ds.targets).astype(np.int64)
x = np.stack([test_ds[i][0].numpy() for i in range(len(test_ds))]) # Applies transforms
y = np.array(test_ds.targets).astype(np.int64)
# Pull points from training set when m > test set
if m > len(x):
k = m - len(x)
mask = np.full(len(x_train), False)
mask[:k] = True
x = np.concatenate([x_train[mask], x])
y = np.concatenate([y_train[mask], y])
x_train = x_train[~mask]
y_train = y_train[~mask]
# Store the m points which could have been included/excluded
mask = np.full(len(x), False)
mask[:m] = True
mask = mask[np.random.permutation(len(x))]
adv_points = x[mask]
adv_labels = y[mask]
# Mislabel inclusion/exclusion examples intentionally!
for i in range(len(adv_labels)):
while True:
c = np.random.choice(range(10))
if adv_labels[i] != c:
adv_labels[i] = c
break
# Choose m points to randomly exclude at chance
S = np.random.choice([True, False], size=m) # Vector of determining if each point is in or out
assert len(adv_points) == m
inc_points = adv_points[S]
inc_labels = adv_labels[S]
td = TensorDataset(torch.from_numpy(inc_points).float(), torch.from_numpy(inc_labels).long())
td2 = TensorDataset(torch.from_numpy(x_train).float(), torch.from_numpy(y_train).long())
td = ConcatDataset([td, td2])
train_dl = DataLoader(td, batch_size=train_batch_size, shuffle=True, num_workers=4)
pure_train_dl = DataLoader(train_ds, batch_size=train_batch_size, shuffle=True, num_workers=4)
test_dl = DataLoader(test_ds, batch_size=test_batch_size, shuffle=True, num_workers=4)
return train_dl, test_dl, pure_train_dl, adv_points, adv_labels, S
def get_dataloaders_raw(m=1000, train_batch_size=512, test_batch_size=10):
def preprocess_data(data):
data = torch.tensor(data)#.to(DTYPE)
data = data / 255.0
data = data.permute(0, 3, 1, 2)
data = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))(data)
data = nn.ReflectionPad2d(4)(data)
data = transforms.RandomCrop(size=(32, 32))(data)
data = transforms.RandomHorizontalFlip()(data)
return data
train_ds = CIFAR10(root=DATADIR, train=True, download=True)
test_ds = CIFAR10(root=DATADIR, train=False, download=True)
train_x = train_ds.data
test_x = test_ds.data
train_y = np.array(train_ds.targets)
test_y = np.array(test_ds.targets)
if m > len(test_x):
k = m - len(test_x)
mask = np.full(len(train_x), False)
mask[:k] = True
mask = mask[np.random.permutation(len(train_x))]
test_x = np.concatenate([train_x[mask], test_x])
test_y = np.concatenate([train_y[mask], test_y])
train_y = train_y[~mask]
train_x = train_x[~mask]
mask = np.full(len(test_x), False)
mask[:m] = True
mask = mask[np.random.permutation(len(test_x))]
S = np.random.choice([True, False], size=m)
attack_x = test_x[mask][S]
attack_y = test_y[mask][S]
for i in range(len(attack_y)):
while True:
c = np.random.choice(range(10))
if attack_y[i] != c:
attack_y[i] = c
break
train_x = np.concatenate([train_x, attack_x])
train_y = np.concatenate([train_y, attack_y])
train_x = preprocess_data(train_x)
test_x = preprocess_data(test_x)
attack_x = preprocess_data(attack_x)
train_y = torch.tensor(train_y)
test_y = torch.tensor(test_y)
attack_y = torch.tensor(attack_y)
train_dl = DataLoader(
TensorDataset(train_x, train_y.long()),
batch_size=train_batch_size,
shuffle=True,
drop_last=True,
num_workers=4
)
test_dl = DataLoader(
TensorDataset(test_x, test_y.long()),
batch_size=train_batch_size,
shuffle=True,
num_workers=4
)
return train_dl, test_dl, train_x, attack_x.numpy(), attack_y.numpy(), S
def evaluate_on(model, dataloader):
correct = 0
total = 0
with torch.no_grad():
model.eval()
for data in dataloader:
images, labels = data
images = images.to(DEVICE)
labels = labels.to(DEVICE)
wrn_outputs = model(images)
if len(wrn_outputs) == 4:
outputs = wrn_outputs[0]
else:
outputs = wrn_outputs
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
return correct, total
def train_knowledge_distillation(teacher, train_dl, epochs, device, learning_rate=0.001, T=2, soft_target_loss_weight=0.25, ce_loss_weight=0.75):
# instantiate student
student = student_model.Model(num_classes=10).to(device)
ce_loss = nn.CrossEntropyLoss()
optimizer = optim.Adam(student.parameters(), lr=learning_rate)
student_init = copy.deepcopy(student)
student.to(device)
teacher.to(device)
teacher.eval() # Teacher set to evaluation mode
student.train() # Student to train mode
for epoch in range(epochs):
running_loss = 0.0
for inputs, labels in train_dl:
inputs, labels = inputs.to(device), labels.to(device)
optimizer.zero_grad()
# Forward pass with the teacher model - do not save gradients here as we do not change the teacher's weights
with torch.no_grad():
teacher_logits, _, _, _ = teacher(inputs)
# Forward pass with the student model
student_logits = student(inputs)
#Soften the student logits by applying softmax first and log() second
soft_targets = nn.functional.softmax(teacher_logits / T, dim=-1)
soft_prob = nn.functional.log_softmax(student_logits / T, dim=-1)
# Calculate the soft targets loss. Scaled by T**2 as suggested by the authors of the paper "Distilling the knowledge in a neural network"
soft_targets_loss = torch.sum(soft_targets * (soft_targets.log() - soft_prob)) / soft_prob.size()[0] * (T**2)
# Calculate the true label loss
label_loss = ce_loss(student_logits, labels)
# Weighted sum of the two losses
loss = soft_target_loss_weight * soft_targets_loss + ce_loss_weight * label_loss
loss.backward()
optimizer.step()
running_loss += loss.item()
if epoch % 10 == 0:
print(f"Epoch {epoch+1}/{epochs}, Loss: {running_loss / len(train_dl)}")
return student_init, student
def train_no_cap(model, model_init, hp, train_dl, test_dl, optimizer, criterion, scheduler, adv_points, adv_labels, S):
best_test_set_accuracy = 0
for epoch in range(hp['epochs']):
model.train()
for i, data in enumerate(train_dl, 0):
inputs, labels = data
inputs = inputs.to(DEVICE)
labels = labels.to(DEVICE)
optimizer.zero_grad()
wrn_outputs = model(inputs)
if len(wrn_outputs) == 4:
outputs = wrn_outputs[0]
else:
outputs = wrn_outputs
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
scheduler.step()
if epoch % 10 == 0 or epoch == hp['epochs'] - 1:
correct, total = evaluate_on(model, test_dl)
epoch_accuracy = round(100 * correct / total, 2)
scores = score_model(model_init, model, adv_points, adv_labels, S)
audits = audit_model(hp, scores)
print(f"Epoch {epoch+1}/{hp['epochs']}: {epoch_accuracy}% | Audit : {audits[2]}/{2*audits[1]}/{audits[3]} | p[ε < {audits[0]}] < {hp['p_value']} @ ε={hp['epsilon']}")
return best_test_set_accuracy
def load(hp, model_path, train_dl):
init_model = model_path / "init_model.pt"
trained_model = model_path / "trained_model.pt"
model = WideResNet(
d=hp["wrn_depth"],
k=hp["wrn_width"],
n_classes=10,
input_features=3,
output_features=16,
strides=[1, 1, 2, 2],
)
model = ModuleValidator.fix(model)
ModuleValidator.validate(model, strict=True)
model_init = copy.deepcopy(model)
privacy_engine = opacus.PrivacyEngine()
optimizer = optim.SGD(
model.parameters(),
lr=0.1,
momentum=0.9,
nesterov=True,
weight_decay=5e-4
)
model, optimizer, train_loader = privacy_engine.make_private_with_epsilon(
module=model,
optimizer=optimizer,
data_loader=train_dl,
epochs=hp['epochs'],
target_epsilon=hp['epsilon'],
target_delta=hp['delta'],
max_grad_norm=hp['norm'],
)
model_init.load_state_dict(torch.load(init_model, weights_only=True))
model.load_state_dict(torch.load(trained_model, weights_only=True))
model_init = model_init.to(DEVICE)
model = model.to(DEVICE)
adv_points = np.load("data/adv_points.npy")
adv_labels = np.load("data/adv_labels.npy")
S = np.load("data/S.npy")
return model_init, model, adv_points, adv_labels, S
def train_wrn2(hp, train_dl, test_dl, adv_points, adv_labels, S):
model = wrn.WideResNet(16, 10, 4)
model = model.to(DEVICE)
ModuleValidator.validate(model, strict=True)
model_init = copy.deepcopy(model)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(
model.parameters(),
lr=0.12,
momentum=0.9,
weight_decay=1e-4
)
scheduler = MultiStepLR(
optimizer,
milestones=[int(i * hp['epochs']) for i in [0.3, 0.6, 0.8]],
gamma=0.1
)
print(f"Training with {hp['epochs']} epochs")
if hp['epsilon'] is not None:
privacy_engine = opacus.PrivacyEngine()
model, optimizer, train_loader = privacy_engine.make_private_with_epsilon(
module=model,
optimizer=optimizer,
data_loader=train_dl,
epochs=hp['epochs'],
target_epsilon=hp['epsilon'],
target_delta=hp['delta'],
max_grad_norm=hp['norm'],
)
print(f"DP epsilon = {hp['epsilon']}, delta = {hp['delta']}")
print(f"Using sigma={optimizer.noise_multiplier} and C = norm = {hp['norm']}")
with BatchMemoryManager(
data_loader=train_loader,
max_physical_batch_size=10, # 1000 ~= 9.4GB vram
optimizer=optimizer
) as memory_safe_data_loader:
best_test_set_accuracy = train_no_cap(
model,
model_init,
hp,
memory_safe_data_loader,
test_dl,
optimizer,
criterion,
scheduler,
adv_points,
adv_labels,
S,
)
else:
print("Training without differential privacy")
best_test_set_accuracy = train_no_cap(
model,
model_init,
hp,
train_dl,
test_dl,
optimizer,
criterion,
scheduler,
adv_points,
adv_labels,
S,
)
return model_init, model
def train_small(hp, train_dl, test_dl, adv_points, adv_labels, S):
model = student_model.Model(num_classes=10).to(DEVICE)
model = model.to(DEVICE)
model = ModuleValidator.fix(model)
ModuleValidator.validate(model, strict=True)
model_init = copy.deepcopy(model)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
scheduler = MultiStepLR(
optimizer,
milestones=[int(i * hp['epochs']) for i in [0.3, 0.6, 0.8]],
gamma=0.2
)
print(f"Training raw (no distill) STUDENT with {hp['epochs']} epochs")
if hp['epsilon'] is not None:
privacy_engine = opacus.PrivacyEngine()
model, optimizer, train_loader = privacy_engine.make_private_with_epsilon(
module=model,
optimizer=optimizer,
data_loader=train_dl,
epochs=hp['epochs'],
target_epsilon=hp['epsilon'],
target_delta=hp['delta'],
max_grad_norm=hp['norm'],
)
print(f"DP epsilon = {hp['epsilon']}, delta = {hp['delta']}")
print(f"Using sigma={optimizer.noise_multiplier} and C = norm = {hp['norm']}")
with BatchMemoryManager(
data_loader=train_loader,
max_physical_batch_size=2000, # 1000 ~= 9.4GB vram
optimizer=optimizer
) as memory_safe_data_loader:
best_test_set_accuracy = train_no_cap(
model,
model_init,
hp,
memory_safe_data_loader,
test_dl,
optimizer,
criterion,
scheduler,
adv_points,
adv_labels,
S,
)
else:
print("Training without differential privacy")
best_test_set_accuracy = train_no_cap(
model,
model_init,
hp,
train_dl,
test_dl,
optimizer,
criterion,
scheduler,
adv_points,
adv_labels,
S,
)
return model_init, model
def train_fast(hp, train_dl, test_dl, train_x, adv_points, adv_labels, S):
epochs = hp['epochs']
momentum = 0.9
weight_decay = 0.256
weight_decay_bias = 0.004
ema_update_freq = 5
ema_rho = 0.99**ema_update_freq
dtype = torch.float16 if DEVICE.type != "cpu" else torch.float32
print("=========================")
print("Training a fast model")
print("=========================")
weights = fast_model.patch_whitening(train_x[:10000, :, 4:-4, 4:-4])
model = fast_model.Model(weights, c_in=3, c_out=10, scale_out=0.125)
model.to(DEVICE)
init_model = copy.deepcopy(model)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(
model.parameters(),
lr=0.1,
momentum=0.9,
nesterov=True,
weight_decay=5e-4
)
scheduler = MultiStepLR(
optimizer,
milestones=[int(i * hp['epochs']) for i in [0.3, 0.6, 0.8]],
gamma=0.2
)
train_no_cap(model, init_model, hp, train_dl, test_dl, optimizer, criterion, scheduler, adv_points, adv_labels, S)
return init_model, model
def train_convnet(hp, train_dl, test_dl, adv_points, adv_labels, S):
model = convnet_classifier.ConvNet()
model = model.to(DEVICE)
ModuleValidator.validate(model, strict=True)
model_init = copy.deepcopy(model)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=1e-3)
scheduler = MultiStepLR(optimizer, milestones=[10, 25], gamma=0.1)
print(f"Training with {hp['epochs']} epochs")
if hp['epsilon'] is not None:
privacy_engine = opacus.PrivacyEngine(accountant='rdp')
model, optimizer, train_loader = privacy_engine.make_private_with_epsilon(
module=model,
optimizer=optimizer,
data_loader=train_dl,
epochs=hp['epochs'],
target_epsilon=hp['epsilon'],
target_delta=hp['delta'],
max_grad_norm=hp['norm'],
)
print(f"DP epsilon = {hp['epsilon']}, delta = {hp['delta']}")
print(f"Using sigma={optimizer.noise_multiplier} and C = norm = {hp['norm']}")
with BatchMemoryManager(
data_loader=train_loader,
max_physical_batch_size=2000, # 1000 ~= 9.4GB vram
optimizer=optimizer
) as memory_safe_data_loader:
best_test_set_accuracy = train_no_cap(
model,
model_init,
hp,
memory_safe_data_loader,
test_dl,
optimizer,
criterion,
scheduler,
adv_points,
adv_labels,
S,
)
else:
print("Training without differential privacy")
best_test_set_accuracy = train_no_cap(
model,
model_init,
hp,
train_dl,
test_dl,
optimizer,
criterion,
scheduler,
adv_points,
adv_labels,
S,
)
return model_init, model
def train(hp, train_dl, test_dl, adv_points, adv_labels, S):
model = WideResNet(
d=hp["wrn_depth"],
k=hp["wrn_width"],
n_classes=10,
input_features=3,
output_features=16,
strides=[1, 1, 2, 2],
)
model = model.to(DEVICE)
model = ModuleValidator.fix(model)
ModuleValidator.validate(model, strict=True)
model_init = copy.deepcopy(model)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(
model.parameters(),
lr=0.1,
momentum=0.9,
nesterov=True,
weight_decay=5e-4
)
scheduler = MultiStepLR(
optimizer,
milestones=[int(i * hp['epochs']) for i in [0.3, 0.6, 0.8]],
gamma=0.2
)
print(f"Training with {hp['epochs']} epochs")
if hp['epsilon'] is not None:
privacy_engine = opacus.PrivacyEngine()
model, optimizer, train_loader = privacy_engine.make_private_with_epsilon(
module=model,
optimizer=optimizer,
data_loader=train_dl,
epochs=hp['epochs'],
target_epsilon=hp['epsilon'],
target_delta=hp['delta'],
max_grad_norm=hp['norm'],
)
print(f"DP epsilon = {hp['epsilon']}, delta = {hp['delta']}")
print(f"Using sigma={optimizer.noise_multiplier} and C = norm = {hp['norm']}")
with BatchMemoryManager(
data_loader=train_loader,
max_physical_batch_size=2000, # 1000 ~= 9.4GB vram
optimizer=optimizer
) as memory_safe_data_loader:
best_test_set_accuracy = train_no_cap(
model,
model_init,
hp,
memory_safe_data_loader,
test_dl,
optimizer,
criterion,
scheduler,
adv_points,
adv_labels,
S,
)
else:
print("Training without differential privacy")
best_test_set_accuracy = train_no_cap(
model,
model_init,
hp,
train_dl,
test_dl,
optimizer,
criterion,
scheduler,
adv_points,
adv_labels,
S,
)
return model_init, model
def get_k_audit(k, scores, hp):
correct = np.sum(~scores[:k]) + np.sum(scores[-k:])
eps_lb = get_eps_audit(
hp['target_points'],
2*k,
correct,
hp['delta'],
hp['p_value']
)
return eps_lb, k, correct, len(scores)
def score_model(model_init, model_trained, adv_points, adv_labels, S):
scores = list()
criterion = nn.CrossEntropyLoss()
with torch.no_grad():
model_init.eval()
x_m = torch.from_numpy(adv_points).to(DEVICE)
y_m = torch.from_numpy(adv_labels).long().to(DEVICE)
for i in range(len(x_m)):
x_point = x_m[i].unsqueeze(0).to(DEVICE)
y_point = y_m[i].unsqueeze(0).to(DEVICE)
is_in = S[i]
wrn_outputs = model_init(x_point)
outputs = wrn_outputs[0] if len(wrn_outputs) == 4 else wrn_outputs
init_loss = criterion(outputs, y_point)
wrn_outputs = model_trained(x_point)
outputs = wrn_outputs[0] if len(wrn_outputs) == 4 else wrn_outputs
trained_loss = criterion(outputs, y_point)
scores.append(((init_loss - trained_loss).item(), is_in))
scores = sorted(scores, key=lambda x: x[0])
scores = np.array([x[1] for x in scores])
return scores
def audit_model(hp, scores):
audits = (0, 0, 0, 0)
k_schedule = np.linspace(1, hp['target_points']//2, 40)
k_schedule = np.floor(k_schedule).astype(int)
with ProcessPoolExecutor() as executor:
futures = {
executor.submit(get_k_audit, k, scores, hp): k for k in k_schedule
}
for future in as_completed(futures):
try:
eps_lb, k, correct, total = future.result()
if eps_lb > audits[0]:
audits = (eps_lb, k, correct, total)
except Exception as exc:
k = futures[future]
print(f"'k={k}' generated an exception: {exc}")
return audits
def main():
global DEVICE
global DTYPE
parser = argparse.ArgumentParser(description='WideResNet O1 audit')
parser.add_argument('--norm', type=float, help='dpsgd norm clip factor', required=True)
parser.add_argument('--cuda', type=int, help='gpu index', required=False)
parser.add_argument('--epsilon', type=float, help='dp epsilon', required=False, default=None)
parser.add_argument('--m', type=int, help='number of target points', required=True)
parser.add_argument('--epochs', type=int, help='number of epochs', required=True)
parser.add_argument('--load', type=Path, help='path to a directory with saved init_model.pt / trained_model.pt to load', required=False)
parser.add_argument('--studentraw', action='store_true', help='train a raw student', required=False)
parser.add_argument('--distill', action='store_true', help='train a distilled student', required=False)
parser.add_argument('--fast', action='store_true', help='train the fast model', required=False)
parser.add_argument('--wrn2', action='store_true', help='Train a groupnormed wrn', required=False)
parser.add_argument('--convnet', action='store_true', help='Train a convnet', required=False)
args = parser.parse_args()
if torch.cuda.is_available() and args.cuda:
DEVICE = torch.device(f'cuda:{args.cuda}')
DTYPE = torch.float16
elif torch.cuda.is_available():
DEVICE = torch.device('cuda:0')
DTYPE = torch.float16
else:
DEVICE = torch.device('cpu')
DTYPE = torch.float32
hp = {
"target_points": args.m,
"wrn_depth": 16,
"wrn_width": 1,
"epsilon": args.epsilon,
"delta": 1e-6,
"norm": args.norm,
"batch_size": 50 if args.convnet else 4096,
"epochs": args.epochs,
"p_value": 0.05,
}
hp['logfile'] = Path('WideResNet_{}_{}_{}_{}s_x{}_{}e_{}d_{}C.txt'.format(
int(time.time()),
hp['wrn_depth'],
hp['wrn_width'],
hp['batch_size'],
hp['epochs'],
hp['epsilon'],
hp['delta'],
hp['norm'],
))
if args.load:
train_dl, test_dl, ____, _, __, ___ = get_dataloaders3(hp['target_points'], hp['batch_size'])
model_init, model_trained, adv_points, adv_labels, S = load(hp, args.load, train_dl)
test_dl = None
elif args.fast:
train_dl, test_dl, train_x, adv_points, adv_labels, S = get_dataloaders_raw(hp['target_points'])
model_init, model_trained = train_fast(hp, train_dl, test_dl, train_x, adv_points, adv_labels, S)
else:
train_dl, test_dl, pure_train_dl, adv_points, adv_labels, S = get_dataloaders3(hp['target_points'], hp['batch_size'])
if args.wrn2:
print("=========================")
print("Training wrn2 model from meta")
print("=========================")
model_init, model_trained = train_wrn2(hp, train_dl, test_dl, adv_points, adv_labels, S)
elif args.convnet:
print("=========================")
print("Training a simple convnet")
print("=========================")
model_init, model_trained = train_convnet(hp, train_dl, test_dl, adv_points, adv_labels, S)
elif args.studentraw:
print("=========================")
print("Training a raw student model")
print("=========================")
model_init, model_trained = train_small(hp, train_dl, test_dl, adv_points, adv_labels, S)
elif args.distill:
print("=========================")
print("Training a distilled student model")
print("=========================")
teacher_init, teacher_trained = train(hp, train_dl, test_dl, adv_points, adv_labels, S)
model_init, model_trained = train_knowledge_distillation(
teacher=teacher_trained,
train_dl=train_dl,
epochs=hp['epochs'],
device=DEVICE,
learning_rate=0.001,
T=2,
soft_target_loss_weight=0.25,
ce_loss_weight=0.75,
)
else:
print("=========================")
print("Training teacher model")
print("=========================")
model_init, model_trained = train(hp, train_dl, test_dl, adv_points, adv_labels, S)
np.save("data/adv_points", adv_points)
np.save("data/adv_labels", adv_labels)
np.save("data/S", S)
torch.save(model_init.state_dict(), "data/init_model.pt")
torch.save(model_trained.state_dict(), "data/trained_model.pt")
# scores = score_model(model_init, model_trained, adv_points, adv_labels, S)
# audits = audit_model(hp, scores)
# print(f"Audit total: {audits[2]}/{2*audits[1]}/{audits[3]}")
# print(f"p[ε < {audits[0]}] < {hp['p_value']} for true epsilon {hp['epsilon']}")
if test_dl is not None:
correct, total = evaluate_on(model_init, test_dl)
print(f"Init model accuracy: {correct}/{total} = {round(correct/total*100, 2)}")
correct, total = evaluate_on(model_trained, test_dl)
print(f"Done model accuracy: {correct}/{total} = {round(correct/total*100, 2)}")
if __name__ == '__main__':
main()
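To make the audit step concrete: score_model() sorts the m target points by (init loss - trained loss) and keeps only their membership bits, and get_k_audit() then guesses OUT for the k lowest-scoring points and IN for the k highest-scoring ones. A small synthetic sketch of that counting, with hypothetical numbers, reusing get_eps_audit from equations.py:

import numpy as np
from equations import get_eps_audit

m = 1000                                      # hypothetical number of target points
S = np.random.choice([True, False], size=m)   # ground-truth membership bits, as in get_dataloaders3()
scores = np.sort(S)                           # pretend a perfect attack: every OUT (False) scores below every IN (True)
k = 100
# Same counting rule as get_k_audit(): k lowest scores guessed OUT, k highest guessed IN.
correct = np.sum(~scores[:k]) + np.sum(scores[-k:])
print(f"{correct} correct out of {2 * k} guesses")
print("epsilon lower bound:", get_eps_audit(m, 2 * k, correct, 1e-5, 0.05))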

View file

@@ -0,0 +1,51 @@
# Name: Peng Cheng
# UIN: 674792652
#
# Code adapted from:
# https://github.com/jameschengpeng/PyTorch-CNN-on-CIFAR10
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
class ConvNet(nn.Module):
def __init__(self):
super(ConvNet, self).__init__()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=48, kernel_size=(3,3), padding=(1,1))
self.conv2 = nn.Conv2d(in_channels=48, out_channels=96, kernel_size=(3,3), padding=(1,1))
self.conv3 = nn.Conv2d(in_channels=96, out_channels=192, kernel_size=(3,3), padding=(1,1))
self.conv4 = nn.Conv2d(in_channels=192, out_channels=256, kernel_size=(3,3), padding=(1,1))
self.pool = nn.MaxPool2d(2,2)
self.fc1 = nn.Linear(in_features=8*8*256, out_features=512)
self.fc2 = nn.Linear(in_features=512, out_features=64)
self.Dropout = nn.Dropout(0.25)
self.fc3 = nn.Linear(in_features=64, out_features=10)
def forward(self, x):
x = F.relu(self.conv1(x)) #32*32*48
x = F.relu(self.conv2(x)) #32*32*96
x = self.pool(x) #16*16*96
x = self.Dropout(x)
x = F.relu(self.conv3(x)) #16*16*192
x = F.relu(self.conv4(x)) #16*16*256
x = self.pool(x) # 8*8*256
x = self.Dropout(x)
x = x.view(-1, 8*8*256) # reshape x
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.Dropout(x)
x = self.fc3(x)
return x
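Since this classifier uses only Conv2d, MaxPool2d, Dropout, and Linear layers (no BatchNorm), it should pass Opacus' ModuleValidator checks without the ModuleValidator.fix() step that the WideResNet needs, which is why train_convnet() in audit.py validates it without calling fix(). A minimal check (a sketch, assuming the file is importable as convnet_classifier):

from opacus.validators import ModuleValidator
import convnet_classifier  # assumes one_run_audit/ is on the import path

model = convnet_classifier.ConvNet()
errors = ModuleValidator.validate(model, strict=False)  # returns a list of incompatible modules
print(errors)  # expected to be empty: nothing to replace for DP-SGD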

View file

@@ -0,0 +1,52 @@
# These equations come from:
# [1] T. Steinke, M. Nasr, and M. Jagielski, “Privacy Auditing with One (1)
# Training Run,” May 15, 2023, arXiv: arXiv:2305.08846. Accessed: Sep. 15, 2024.
# [Online]. Available: http://arxiv.org/abs/2305.08846
import math
import scipy.stats
# m = number of examples, each included independently with probability 0.5
# r = number of guesses (i.e. excluding abstentions)
# v = number of correct guesses by auditor
# eps,delta = DP guarantee of null hypothesis
# output: p-value = probability of >=v correct guesses under null hypothesis
def p_value_DP_audit(m, r, v, eps, delta):
assert 0 <= v <= r <= m
assert eps >= 0
assert 0 <= delta <= 1
q = 1 / (1 + math.exp(-eps)) # accuracy of eps-DP randomized response
beta = scipy.stats.binom.sf(v - 1, r, q) # = P[Binomial(r, q) >= v]
alpha = 0
sum = 0 # = P[v > Binomial(r, q) >= v - i]
for i in range(1, v + 1):
sum = sum + scipy.stats.binom.pmf(v - i, r, q)
if sum > i * alpha:
alpha = sum / i
p = beta + alpha * delta * 2 * m
return min(p, 1)
# m = number of examples, each included independently with probability 0.5
# r = number of guesses (i.e. excluding abstentions)
# v = number of correct guesses by auditor
# p = 1-confidence e.g. p=0.05 corresponds to 95%
# output: lower bound on eps i.e. algorithm is not (eps,delta)-DP
def get_eps_audit(m, r, v, delta, p):
assert 0 <= v <= r <= m
assert 0 <= delta <= 1
assert 0 < p < 1
eps_min = 0 # maintain p_value_DP(eps_min) < p
eps_max = 1 # maintain p_value_DP(eps_max) >= p
while p_value_DP_audit(m, r, v, eps_max, delta) < p:
eps_max = eps_max + 1
for _ in range(30): # binary search
eps = (eps_min + eps_max) / 2
if p_value_DP_audit(m, r, v, eps, delta) < p:
eps_min = eps
else:
eps_max = eps
return eps_min
if __name__ == '__main__':
print(get_eps_audit(1000, 600, 600, 1e-5, 0.05))
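In the notation of [1]: p_value_DP_audit() computes the binomial tail beta = P[Binomial(r, q) >= v] with q = e^eps / (1 + e^eps) (the accuracy of eps-DP randomized response), adds the alpha * 2 * m * delta correction for the delta part of the guarantee, and get_eps_audit() binary-searches for the largest eps whose p-value still falls below p; that eps is what audit.py reports as the audited lower bound.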

141
one_run_audit/fast_model.py Normal file

@@ -0,0 +1,141 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
def label_smoothing_loss(inputs, targets, alpha):
log_probs = torch.nn.functional.log_softmax(inputs, dim=1, _stacklevel=5)
kl = -log_probs.mean(dim=1)
xent = torch.nn.functional.nll_loss(log_probs, targets, reduction="none")
loss = (1 - alpha) * xent + alpha * kl
return loss
class GhostBatchNorm(nn.BatchNorm2d):
def __init__(self, num_features, num_splits, **kw):
super().__init__(num_features, **kw)
running_mean = torch.zeros(num_features * num_splits)
running_var = torch.ones(num_features * num_splits)
self.weight.requires_grad = False
self.num_splits = num_splits
self.register_buffer("running_mean", running_mean)
self.register_buffer("running_var", running_var)
def train(self, mode=True):
if (self.training is True) and (mode is False):
# lazily collate stats when we are going to use them
self.running_mean = torch.mean(
self.running_mean.view(self.num_splits, self.num_features), dim=0
).repeat(self.num_splits)
self.running_var = torch.mean(
self.running_var.view(self.num_splits, self.num_features), dim=0
).repeat(self.num_splits)
return super().train(mode)
def forward(self, input):
n, c, h, w = input.shape
if self.training or not self.track_running_stats:
assert n % self.num_splits == 0, f"Batch size ({n}) must be divisible by num_splits ({self.num_splits}) of GhostBatchNorm"
return F.batch_norm(
input.view(-1, c * self.num_splits, h, w),
self.running_mean,
self.running_var,
self.weight.repeat(self.num_splits),
self.bias.repeat(self.num_splits),
True,
self.momentum,
self.eps,
).view(n, c, h, w)
else:
return F.batch_norm(
input,
self.running_mean[: self.num_features],
self.running_var[: self.num_features],
self.weight,
self.bias,
False,
self.momentum,
self.eps,
)
def conv_bn_relu(c_in, c_out, kernel_size=(3, 3), padding=(1, 1)):
return nn.Sequential(
nn.Conv2d(c_in, c_out, kernel_size=kernel_size, padding=padding, bias=False),
GhostBatchNorm(c_out, num_splits=16),
nn.CELU(alpha=0.3),
)
def conv_pool_norm_act(c_in, c_out):
return nn.Sequential(
nn.Conv2d(c_in, c_out, kernel_size=(3, 3), padding=(1, 1), bias=False),
nn.MaxPool2d(kernel_size=2, stride=2),
GhostBatchNorm(c_out, num_splits=16),
nn.CELU(alpha=0.3),
)
def patch_whitening(data, patch_size=(3, 3)):
# Compute weights from data such that
# torch.std(F.conv2d(data, weights), dim=(2, 3))
# is close to 1.
h, w = patch_size
c = data.size(1)
patches = data.unfold(2, h, 1).unfold(3, w, 1)
patches = patches.transpose(1, 3).reshape(-1, c, h, w).to(torch.float32)
n, c, h, w = patches.shape
X = patches.reshape(n, c * h * w)
X = X / (X.size(0) - 1) ** 0.5
covariance = X.t() @ X
eigenvalues, eigenvectors = torch.linalg.eigh(covariance)
eigenvalues = eigenvalues.flip(0)
eigenvectors = eigenvectors.t().reshape(c * h * w, c, h, w).flip(0)
return eigenvectors / torch.sqrt(eigenvalues + 1e-2).view(-1, 1, 1, 1)
class ResNetBagOfTricks(nn.Module):
def __init__(self, first_layer_weights, c_in, c_out, scale_out):
super().__init__()
c = first_layer_weights.size(0)
conv1 = nn.Conv2d(c_in, c, kernel_size=(3, 3), padding=(1, 1), bias=False)
conv1.weight.data = first_layer_weights
conv1.weight.requires_grad = False
self.conv1 = conv1
self.conv2 = conv_bn_relu(c, 64, kernel_size=(1, 1), padding=0)
self.conv3 = conv_pool_norm_act(64, 128)
self.conv4 = conv_bn_relu(128, 128)
self.conv5 = conv_bn_relu(128, 128)
self.conv6 = conv_pool_norm_act(128, 256)
self.conv7 = conv_pool_norm_act(256, 512)
self.conv8 = conv_bn_relu(512, 512)
self.conv9 = conv_bn_relu(512, 512)
self.pool10 = nn.MaxPool2d(kernel_size=4, stride=4)
self.linear11 = nn.Linear(512, c_out, bias=False)
self.scale_out = scale_out
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = x + self.conv5(self.conv4(x))
x = self.conv6(x)
x = self.conv7(x)
x = x + self.conv9(self.conv8(x))
x = self.pool10(x)
x = x.reshape(x.size(0), x.size(1))
x = self.linear11(x)
x = self.scale_out * x
return x
Model = ResNetBagOfTricks
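# Usage sketch (illustrative; the 512-image whitening batch and scale_out=0.125 are
# assumed values, and torch is assumed to be imported at the top of this file). It
# shows how patch_whitening and the model are typically wired together.
if __name__ == "__main__":
    whitening_batch = torch.randn(512, 3, 32, 32)            # stand-in for real CIFAR-10 images
    first_layer_weights = patch_whitening(whitening_batch)   # shape (27, 3, 3, 3) for 3x3 RGB patches
    model = Model(first_layer_weights, c_in=3, c_out=10, scale_out=0.125)
    logits = model(torch.randn(32, 3, 32, 32))               # batch size must be divisible by num_splits=16
    print(logits.shape)                                       # torch.Size([32, 10])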

94
one_run_audit/plot.py Normal file

@ -0,0 +1,94 @@
import time
import math
import concurrent.futures
import numpy as np
import matplotlib.pyplot as plt
from equations import get_eps_audit
def compute_y(x_values, p, delta, proportion_correct, key):
    return key, [get_eps_audit(x, x, math.floor(x * proportion_correct), delta, p) for x in x_values]
def get_plots():
final_values = dict()
    mul = 1.5  # alternative value: 1.275
    x_max = 60000  # alternative value: 2000 (named to avoid shadowing the builtin max)
    x_values = np.floor(mul**np.arange(30)).astype(int)
    x_values = np.concatenate([x_values[x_values < x_max], [x_max]])
with concurrent.futures.ProcessPoolExecutor(max_workers=16) as executor:
start_time = time.time()
futures = [
executor.submit(compute_y, x_values, 0.05, 0.0, 1.0, "y11"),
executor.submit(compute_y, x_values, 0.05, 1e-6, 1.0, "y12"),
executor.submit(compute_y, x_values, 0.05, 1e-4, 1.0, "y13"),
executor.submit(compute_y, x_values, 0.05, 1e-2, 1.0, "y14"),
executor.submit(compute_y, x_values, 0.01, 0.0, 1.0, "y21"),
executor.submit(compute_y, x_values, 0.01, 1e-6, 1.0, "y22"),
executor.submit(compute_y, x_values, 0.01, 1e-4, 1.0, "y23"),
executor.submit(compute_y, x_values, 0.01, 1e-2, 1.0, "y24"),
executor.submit(compute_y, x_values, 0.05, 0.0, 0.9, "y31"),
executor.submit(compute_y, x_values, 0.05, 1e-6, 0.9, "y32"),
executor.submit(compute_y, x_values, 0.05, 1e-4, 0.9, "y33"),
executor.submit(compute_y, x_values, 0.05, 1e-2, 0.9, "y34"),
executor.submit(compute_y, x_values, 0.01, 0.0, 0.9, "y41"),
executor.submit(compute_y, x_values, 0.01, 1e-6, 0.9, "y42"),
executor.submit(compute_y, x_values, 0.01, 1e-4, 0.9, "y43"),
executor.submit(compute_y, x_values, 0.01, 1e-2, 0.9, "y44"),
]
for future in concurrent.futures.as_completed(futures):
k, v = future.result()
final_values[k] = v
print(f"Took: {time.time()-start_time}s")
return final_values, x_values
def plot_to(value_set, x_values, title, fig_name):
    plt.figure()  # start a fresh figure so successive calls do not draw over each other
    plt.xscale('log')
    plt.plot(x_values, value_set[0], marker='o', label='δ=0')
    plt.plot(x_values, value_set[1], marker='o', label='δ=1e-6')
    plt.plot(x_values, value_set[2], marker='o', label='δ=1e-4')
    plt.plot(x_values, value_set[3], marker='o', label='δ=1e-2')
    plt.xlabel("Number of samples attacked")
    plt.ylabel("Maximum ε lower-bound from audit")
    plt.title(title)
    plt.legend()
    plt.savefig(fig_name, dpi=300, bbox_inches='tight')
def main():
final_values, x_values = get_plots()
plot_to(
[final_values[f"y1{i}"] for i in range(1,5)],
x_values,
"Maximum ε audit with p-value=0.05 and 100% MIA accuracy",
"/dev/shm/plot_05_100.png"
)
    plot_to(
        [final_values[f"y2{i}"] for i in range(1,5)],
        x_values,
        "Maximum ε audit with p-value=0.01 and 100% MIA accuracy",
        "/dev/shm/plot_01_100.png"
    )
    plot_to(
        [final_values[f"y3{i}"] for i in range(1,5)],
        x_values,
        "Maximum ε audit with p-value=0.05 and 90% MIA accuracy",
        "/dev/shm/plot_05_90.png"
    )
    plot_to(
        [final_values[f"y4{i}"] for i in range(1,5)],
        x_values,
        "Maximum ε audit with p-value=0.01 and 90% MIA accuracy",
        "/dev/shm/plot_01_90.png"
    )
if __name__ == '__main__':
main()
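# Usage sketch (illustrative): the compute_y call above suggests the signature
# get_eps_audit(m, r, v, delta, p), i.e. m audited examples, r guesses, v correct
# guesses, the DP delta and the p-value, returning the largest ε the audit can
# certify. The numbers below are made up.
#
#   from equations import get_eps_audit
#   eps = get_eps_audit(1000, 1000, 900, 1e-5, 0.05)  # 900 of 1,000 guesses correct
#   print(f"ε lower bound certified by the audit: {eps}")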


@ -0,0 +1,29 @@
import torch
import torch.nn as nn
# Lightweight CIFAR-10 student network. Unlike the teacher, forward() returns a single
# logits tensor (not a tuple), and no pooling is applied after flattening.
class ModifiedLightNNCosine(nn.Module):
def __init__(self, num_classes=10):
super(ModifiedLightNNCosine, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(3, 16, kernel_size=3, padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(16, 16, kernel_size=3, padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.classifier = nn.Sequential(
nn.Linear(1024, 256),
nn.ReLU(),
nn.Dropout(0.1),
nn.Linear(256, num_classes)
)
def forward(self, x):
x = self.features(x)
flattened_conv_output = torch.flatten(x, 1)
x = self.classifier(flattened_conv_output)
return x
Model = ModifiedLightNNCosine
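# Shape sanity check (illustrative sketch): for 32x32 CIFAR-10 inputs the two
# stride-2 max-pools leave a 16x8x8 feature map, i.e. 1024 features, which is
# why the first classifier layer is Linear(1024, 256).
if __name__ == "__main__":
    model = Model()
    logits = model(torch.randn(4, 3, 32, 32))  # dummy batch of four RGB 32x32 images
    assert logits.shape == (4, 10)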

232
one_run_audit/wrn.py Normal file

@ -0,0 +1,232 @@
"""
Adapted from:
https://github.com/facebookresearch/tan/blob/main/src/models/wideresnet.py
"""
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
Adapted from timm:
https://github.com/xternalz/WideResNet-pytorch/blob/master/wideresnet.py
"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class L2Norm(nn.Module):
def forward(self, x):
return x / x.norm(p=2, dim=1, keepdim=True)
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride, nb_groups, order):
super(BasicBlock, self).__init__()
self.order = order
self.bn1 = nn.GroupNorm(nb_groups, in_planes) if nb_groups else nn.Identity()
self.relu1 = nn.ReLU()
self.conv1 = nn.Conv2d(
in_planes, out_planes, kernel_size=3, stride=stride, padding=1
)
self.bn2 = nn.GroupNorm(nb_groups, out_planes) if nb_groups else nn.Identity()
self.relu2 = nn.ReLU()
self.conv2 = nn.Conv2d(
out_planes, out_planes, kernel_size=3, stride=1, padding=1
)
self.equalInOut = in_planes == out_planes
self.bnShortcut = (
(not self.equalInOut)
and nb_groups
and nn.GroupNorm(nb_groups, in_planes)
or (not self.equalInOut)
and nn.Identity()
or None
)
self.convShortcut = (
(not self.equalInOut)
and nn.Conv2d(
in_planes, out_planes, kernel_size=1, stride=stride, padding=0
)
) or None
def forward(self, x):
skip = x
assert self.order in [0, 1, 2, 3]
if self.order == 0: # DM accuracy good
if not self.equalInOut:
skip = self.convShortcut(self.bnShortcut(self.relu1(x)))
out = self.conv1(self.bn1(self.relu1(x)))
out = self.conv2(self.bn2(self.relu2(out)))
elif self.order == 1: # classic accuracy bad
if not self.equalInOut:
skip = self.convShortcut(self.relu1(self.bnShortcut(x)))
out = self.conv1(self.relu1(self.bn1(x)))
out = self.conv2(self.relu2(self.bn2(out)))
elif self.order == 2: # DM IN RESIDUAL, normal other
if not self.equalInOut:
skip = self.convShortcut(self.bnShortcut(self.relu1(x)))
out = self.conv1(self.relu1(self.bn1(x)))
out = self.conv2(self.relu2(self.bn2(out)))
        elif self.order == 3:  # normal in residual, DM in others
if not self.equalInOut:
skip = self.convShortcut(self.relu1(self.bnShortcut(x)))
out = self.conv1(self.bn1(self.relu1(x)))
out = self.conv2(self.bn2(self.relu2(out)))
return torch.add(skip, out)
class NetworkBlock(nn.Module):
def __init__(
self, nb_layers, in_planes, out_planes, block, stride, nb_groups, order
):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(
block, in_planes, out_planes, nb_layers, stride, nb_groups, order
)
def _make_layer(
self, block, in_planes, out_planes, nb_layers, stride, nb_groups, order
):
layers = []
for i in range(int(nb_layers)):
layers.append(
block(
i == 0 and in_planes or out_planes,
out_planes,
i == 0 and stride or 1,
nb_groups,
order,
)
)
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class WideResNet(nn.Module):
def __init__(
self,
depth,
feat_dim,
#num_classes,
widen_factor=1,
nb_groups=16,
init=0,
order1=0,
order2=0,
):
if order1 == 0:
print("order1=0: In the blocks: like in DM, BN on top of relu")
if order1 == 1:
print("order1=1: In the blocks: not like in DM, relu on top of BN")
if order1 == 2:
print(
"order1=2: In the blocks: BN on top of relu in residual (DM), relu on top of BN ortherplace (clqssique)"
)
if order1 == 3:
print(
"order1=3: In the blocks: relu on top of BN in residual (classic), BN on top of relu otherplace (DM)"
)
if order2 == 0:
print("order2=0: outside the blocks: like in DM, BN on top of relu")
if order2 == 1:
print("order2=1: outside the blocks: not like in DM, relu on top of BN")
super(WideResNet, self).__init__()
nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
assert (depth - 4) % 6 == 0
n = (depth - 4) / 6
block = BasicBlock
# 1st conv before any network block
self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1, padding=1)
# 1st block
self.block1 = NetworkBlock(
n, nChannels[0], nChannels[1], block, 1, nb_groups, order1
)
# 2nd block
self.block2 = NetworkBlock(
n, nChannels[1], nChannels[2], block, 2, nb_groups, order1
)
# 3rd block
self.block3 = NetworkBlock(
n, nChannels[2], nChannels[3], block, 2, nb_groups, order1
)
# global average pooling and classifier
"""
self.bn1 = nn.GroupNorm(nb_groups, nChannels[3]) if nb_groups else nn.Identity()
self.relu = nn.ReLU()
self.fc = nn.Linear(nChannels[3], num_classes)
"""
self.nChannels = nChannels[3]
self.block4 = nn.Sequential(
nn.Flatten(),
nn.Linear(256 * 8 * 8, 4096, bias=False), # 256 * 6 * 6 if 224 * 224
nn.GroupNorm(16, 4096),
nn.ReLU(inplace=True),
)
# fc7
self.block5 = nn.Sequential(
nn.Linear(4096, 4096, bias=False),
nn.GroupNorm(16, 4096),
nn.ReLU(inplace=True),
)
# fc8
        self.block6 = nn.Sequential(
nn.Linear(4096, feat_dim),
L2Norm(),
)
if init == 0: # as in Deep Mind's paper
for m in self.modules():
if isinstance(m, nn.Conv2d):
fan_in, fan_out = nn.init._calculate_fan_in_and_fan_out(m.weight)
s = 1 / (max(fan_in, 1)) ** 0.5
nn.init.trunc_normal_(m.weight, std=s)
m.bias.data.zero_()
elif isinstance(m, nn.GroupNorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
fan_in, fan_out = nn.init._calculate_fan_in_and_fan_out(m.weight)
s = 1 / (max(fan_in, 1)) ** 0.5
nn.init.trunc_normal_(m.weight, std=s)
#m.bias.data.zero_()
if init == 1: # old version
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode="fan_out", nonlinearity="relu"
)
elif isinstance(m, nn.GroupNorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
self.order2 = order2
def forward(self, x):
out = self.conv1(x)
out = self.block1(out)
out = self.block2(out)
out = self.block3(out)
out = self.block4(out)
out = self.block5(out)
out = self.block6(out)
if out.ndim == 4:
out = out.mean(dim=-1)
if out.ndim == 3:
out = out.mean(dim=-1)
#out = self.bn1(self.relu(out)) if self.order2 == 0 else self.relu(self.bn1(out))
#out = F.avg_pool2d(out, 8)
#out = out.view(-1, self.nChannels)
        return out  # self.fc(out)
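# Instantiation sketch (illustrative): block4 flattens to 256 * 8 * 8 features,
# which matches 32x32 inputs only when nChannels[3] == 256, i.e. widen_factor=4;
# depth must satisfy (depth - 4) % 6 == 0, e.g. 16. feat_dim=10 is an arbitrary
# example value.
if __name__ == "__main__":
    net = WideResNet(depth=16, feat_dim=10, widen_factor=4, nb_groups=16)
    feats = net(torch.randn(2, 3, 32, 32))
    print(feats.shape)  # torch.Size([2, 10]); rows are L2-normalized by block6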


@ -1,3 +1,6 @@
from datetime import datetime
import time
import argparse
from utils import json_file_to_pyobj, get_loaders
from WideResNet import WideResNet
from opacus.validators import ModuleValidator
@ -10,7 +13,7 @@ import os
import torch
import torch.nn as nn
from torchvision import models, transforms
import student_model
import torch.optim as optim
import torch.nn.functional as F
import opacus
@ -18,21 +21,8 @@ import warnings
warnings.filterwarnings("ignore")
def train_knowledge_distillation(teacher, student, epochs, learning_rate, T, soft_target_loss_weight, ce_loss_weight, device):
def train_knowledge_distillation(teacher, student, train_dl, epochs, learning_rate, T, soft_target_loss_weight, ce_loss_weight, device):
# Dataset
transform = transforms.Compose(
[
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, padding=4),
transforms.ToTensor(),
transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2470, 0.2435, 0.2616]),
]
)
datadir = Path().home() / "opt/data/cifar"
train_ds = CIFAR10(root=datadir, train=True, download=True, transform=transform)
train_dl = DataLoader(train_ds, batch_size=128, shuffle=False, num_workers=4)
ce_loss = nn.CrossEntropyLoss()
optimizer = optim.Adam(student.parameters(), lr=learning_rate)
@ -72,45 +62,37 @@ def train_knowledge_distillation(teacher, student, epochs, learning_rate, T, sof
print(f"Epoch {epoch+1}/{epochs}, Loss: {running_loss / len(train_dl)}")
def test(model, device, teacher=False):
transform = transforms.Compose(
[
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, padding=4),
transforms.ToTensor(),
transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2470, 0.2435, 0.2616]),
]
)
datadir = Path().home() / "opt/data/cifar"
test_ds = CIFAR10(root=datadir, train=True, download=False, transform=transform)
    test_dl = DataLoader(test_ds, batch_size=128, shuffle=False, num_workers=4)
@torch.no_grad()
def test(model, device, test_dl, is_teacher=False):
model.to(device)
model.eval()
correct = 0
total = 0
with torch.no_grad():
for inputs, labels in test_dl:
inputs, labels = inputs.to(device), labels.to(device)
if teacher:
outputs, _, _, _ = model(inputs)
else:
outputs = model(inputs)
_, predicted = torch.max(outputs.data, 1)
for inputs, labels in test_dl:
inputs, labels = inputs.to(device), labels.to(device)
if is_teacher:
outputs, _, _, _ = model(inputs)
else:
outputs = model(inputs)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
total += labels.size(0)
correct += (predicted == labels).sum().item()
accuracy = 100 * correct / total
print(f"Test Accuracy: {accuracy:.2f}%")
return accuracy
def main():
parser = argparse.ArgumentParser(description='Student trainer')
parser.add_argument('--teacher', type=Path, help='path to saved teacher .pt', required=True)
parser.add_argument('--norm', type=float, help='dpsgd norm clip factor', required=True)
parser.add_argument('--cuda', type=int, help='gpu index', required=False)
parser.add_argument('--epsilon', type=float, help='dp epsilon', required=False, default=None)
parser.add_argument('--epochs', type=int, help='student epochs', required=True)
args = parser.parse_args()
json_options = json_file_to_pyobj("wresnet16-audit-cifar10.json")
training_configurations = json_options.training
@ -118,7 +100,9 @@ def main():
wrn_width = training_configurations.wrn_width
dataset = training_configurations.dataset.lower()
if torch.cuda.is_available():
if args.cuda is not None:
device = torch.device(f'cuda:{args.cuda}')
elif torch.cuda.is_available():
device = torch.device('cuda:0')
else:
device = torch.device('cpu')
@ -126,19 +110,21 @@ def main():
print("Load the teacher model")
# instantiate teacher model
strides = [1, 1, 2, 2]
teacher = WideResNet(d=wrn_depth, k=wrn_width, n_classes=10, input_features=3, output_features=16, strides=strides)
teacher = ModuleValidator.fix(teacher)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(teacher.parameters(), lr=0.1, momentum=0.9, nesterov=True, weight_decay=5e-4)
scheduler = MultiStepLR(optimizer, milestones=[int(elem*epochs) for elem in [0.3, 0.6, 0.8]], gamma=0.2)
train_loader, test_loader = get_loaders(dataset, training_configurations.batch_size)
best_test_set_accuracy = 0
dp_epsilon = 8
dp_delta = 1e-5
norm = 1.0
privacy_engine = opacus.PrivacyEngine()
teacher, optimizer, train_loader = privacy_engine.make_private_with_epsilon(
if args.epsilon is not None:
dp_epsilon = args.epsilon
dp_delta = 1e-5
norm = args.norm
privacy_engine = opacus.PrivacyEngine()
teacher, optimizer, train_loader = privacy_engine.make_private_with_epsilon(
module=teacher,
optimizer=optimizer,
data_loader=train_loader,
@ -148,23 +134,35 @@ def main():
max_grad_norm=norm,
)
teacher.load_state_dict(torch.load(os.path.join("wrn-1733078278-8e-1e-05d-12.0n-dict.pt"), weights_only=True))
teacher.load_state_dict(torch.load(args.teacher, weights_only=True))
teacher.to(device)
teacher.eval()
    # instantiate the student
student = student_model.Model(num_classes=10).to(device)
print("Training student")
#train_knowledge_distillation(teacher=teacher, student=student, epochs=100, learning_rate=0.001, T=2, soft_target_loss_weight=0.25, ce_loss_weight=0.75, device=device)
#test_student = test(student, device)
test_teacher = test(teacher, device, True)
train_knowledge_distillation(
teacher=teacher,
student=student,
train_dl=train_loader,
epochs=args.epochs,
learning_rate=0.001,
T=2,
soft_target_loss_weight=0.25,
ce_loss_weight=0.75,
device=device
)
print(f"Saving student model for time {int(time.time())}")
Path('students').mkdir(exist_ok=True)
torch.save(student.state_dict(), f"students/studentmodel-{int(time.time())}.pt")
print("Testing student and teacher")
test_student = test(student, device, test_loader)
test_teacher = test(teacher, device, test_loader, True)
print(f"Teacher accuracy: {test_teacher:.2f}%")
#print(f"Student accuracy: {test_student:.2f}%")
print(f"Student accuracy: {test_student:.2f}%")
if __name__ == "__main__":
main()
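# Example invocation (illustrative; the script name is a placeholder and the flag
# values are only examples drawn from defaults used elsewhere in this repo):
#
#   python distill_student.py --teacher wrn-<timestamp>-8e-1e-05d-12.0n-dict.pt \
#       --norm 1.0 --epsilon 8 --epochs 100 --cuda 0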


@ -1,4 +1,5 @@
import os
import time
import torch
from torch import optim
from torch.optim.lr_scheduler import MultiStepLR
@ -11,6 +12,8 @@ from tqdm import tqdm
import opacus
from opacus.validators import ModuleValidator
from opacus.utils.batch_memory_manager import BatchMemoryManager
import warnings
warnings.filterwarnings("ignore")
def set_seed(seed=42):
@ -21,23 +24,74 @@ def set_seed(seed=42):
torch.cuda.manual_seed(seed)
def _train_seed(net, loaders, device, dataset, log=False, checkpoint=False, logfile='', checkpointFile='', epochs=200, norm=1.0):
def train_no_cap(net, epochs, data_loader, device, optimizer, criterion, scheduler, test_loader, log, logfile, checkpointFile):
best_test_set_accuracy = 0
for epoch in range(epochs):
net.train()
#for i, data in tqdm(enumerate(train_loader, 0), leave=False):
for i, data in enumerate(data_loader, 0):
inputs, labels = data
inputs = inputs.to(device)
labels = labels.to(device)
optimizer.zero_grad()
wrn_outputs = net(inputs)
outputs = wrn_outputs[0]
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
scheduler.step()
if epoch % 10 == 0 or epoch == epochs - 1:
with torch.no_grad():
correct = 0
total = 0
net.eval()
for data in test_loader:
images, labels = data
images = images.to(device)
labels = labels.to(device)
wrn_outputs = net(images)
outputs = wrn_outputs[0]
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
epoch_accuracy = correct / total
epoch_accuracy = round(100 * epoch_accuracy, 2)
if log:
print('Accuracy at epoch {} is {}%'.format(epoch + 1, epoch_accuracy))
with open(logfile, 'a') as temp:
temp.write('Accuracy at epoch {} is {}%\n'.format(epoch + 1, epoch_accuracy))
if epoch_accuracy > best_test_set_accuracy:
best_test_set_accuracy = epoch_accuracy
torch.save(net.state_dict(), checkpointFile)
return best_test_set_accuracy
def _train_seed(net, loaders, device, dataset, log=False, logfile='', epochs=200, norm=1.0, dp_epsilon=None):
train_loader, test_loader = loaders
dp_epsilon = 8
dp_delta = 1e-5
if dp_epsilon is not None:
print(f"DP epsilon = {dp_epsilon}, delta = {dp_delta}")
#net = ModuleValidator.fix(net, replace_bn_with_in=True)
net = ModuleValidator.fix(net)
ModuleValidator.validate(net, strict=True)
checkpointFile = 'wrn-{}-{}e-{}d-{}n-dict.pt'.format(int(time.time()), dp_epsilon, dp_delta, norm)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.1, momentum=0.9, nesterov=True, weight_decay=5e-4)
scheduler = MultiStepLR(optimizer, milestones=[int(elem*epochs) for elem in [0.3, 0.6, 0.8]], gamma=0.2)
best_test_set_accuracy = 0
if dp_epsilon is not None:
privacy_engine = opacus.PrivacyEngine()
net, optimizer, train_loader = privacy_engine.make_private_with_epsilon(
@ -50,65 +104,22 @@ def _train_seed(net, loaders, device, dataset, log=False, checkpoint=False, logf
max_grad_norm=norm,
)
print(f"Using sigma={optimizer.noise_multiplier} and C={1.0}, norm = {norm}")
print(f"DP epsilon = {dp_epsilon}, delta = {dp_delta}")
print(f"Using sigma={optimizer.noise_multiplier} and C = norm = {norm}")
else:
print("Training without differential privacy")
print(f"Training with {epochs} epochs")
#for epoch in tqdm(range(epochs)):
with BatchMemoryManager(
data_loader=train_loader,
max_physical_batch_size=1000, # Roughly 12gb vram, uses 9.4
optimizer=optimizer
) as memory_safe_data_loader:
for epoch in range(epochs):
net.train()
#for i, data in tqdm(enumerate(train_loader, 0), leave=False):
for i, data in enumerate(memory_safe_data_loader, 0):
inputs, labels = data
inputs = inputs.to(device)
labels = labels.to(device)
optimizer.zero_grad()
wrn_outputs = net(inputs)
outputs = wrn_outputs[0]
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
scheduler.step()
if epoch % 10 == 0 or epoch == epochs - 1:
with torch.no_grad():
correct = 0
total = 0
net.eval()
for data in test_loader:
images, labels = data
images = images.to(device)
labels = labels.to(device)
wrn_outputs = net(images)
outputs = wrn_outputs[0]
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
epoch_accuracy = correct / total
epoch_accuracy = round(100 * epoch_accuracy, 2)
if log:
print('Accuracy at epoch {} is {}%'.format(epoch + 1, epoch_accuracy))
with open(logfile, 'a') as temp:
temp.write('Accuracy at epoch {} is {}%\n'.format(epoch + 1, epoch_accuracy))
if epoch_accuracy > best_test_set_accuracy:
best_test_set_accuracy = epoch_accuracy
if checkpoint:
torch.save(net.state_dict(), checkpointFile)
if dp_epsilon is not None:
with BatchMemoryManager(
data_loader=train_loader,
max_physical_batch_size=1000, # Roughly 12gb vram, uses 9.4
optimizer=optimizer
) as memory_safe_data_loader:
best_test_set_accuracy = train_no_cap(net, epochs, memory_safe_data_loader, device, optimizer, criterion, scheduler, test_loader, log, logfile, checkpointFile)
else:
best_test_set_accuracy = train_no_cap(net, epochs, train_loader, device, optimizer, criterion, scheduler, test_loader, log, logfile, checkpointFile)
return best_test_set_accuracy
@ -154,9 +165,8 @@ def train(args):
net = WideResNet(d=wrn_depth, k=wrn_width, n_classes=10, input_features=3, output_features=16, strides=strides)
net = net.to(device)
checkpointFile = 'wrn-{}-{}-seed-{}-{}-dict.pth'.format(wrn_depth, wrn_width, dataset, seed) if checkpoint else ''
epochs = training_configurations.epochs
best_test_set_accuracy = _train_seed(net, loaders, device, dataset, log, checkpoint, logfile, checkpointFile, epochs, args.norm)
best_test_set_accuracy = _train_seed(net, loaders, device, dataset, log, logfile, epochs, args.norm, args.epsilon)
if log:
with open(logfile, 'a') as temp:
@ -182,6 +192,7 @@ if __name__ == '__main__':
parser.add_argument('-config', '--config', help='Training Configurations', required=True)
parser.add_argument('--norm', type=float, help='dpsgd norm clip factor', required=True)
parser.add_argument('--cuda', type=int, help='gpu index', required=False)
parser.add_argument('--epsilon', type=float, help='dp epsilon', required=False, default=None)
args = parser.parse_args()
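# Example invocation (illustrative; the script and config names are placeholders,
# only the flags come from the parser above):
#
#   python train.py --config wresnet16-audit-cifar10.json --norm 1.0 --epsilon 8 --cuda 0
#
# Omitting --epsilon falls back to the non-private branch in _train_seed, which
# prints "Training without differential privacy".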