From 7e40ad9704ead1261eaa2643ccec619b51748453 Mon Sep 17 00:00:00 2001
From: Nicholas Carlini
Date: Tue, 14 Dec 2021 00:50:49 +0000
Subject: [PATCH] Add code to reproduce Membership Inference Attacks From First Principles

---
 research/mi_lira_2021/README.md             | 114 ++++
 research/mi_lira_2021/dataset.py            |  95 +++++
 research/mi_lira_2021/fprtpr.png            | Bin 0 -> 37841 bytes
 research/mi_lira_2021/inference.py          | 150 ++++++++
 research/mi_lira_2021/logs/.keep            |   0
 research/mi_lira_2021/plot.py               | 224 ++++++++++++
 research/mi_lira_2021/score.py              |  66 ++++
 research/mi_lira_2021/scripts/train_demo.sh |  16 +
 .../scripts/train_demo_multigpu.sh          |  18 +
 research/mi_lira_2021/train.py              | 329 ++++++++++++++++++
 10 files changed, 1012 insertions(+)
 create mode 100644 research/mi_lira_2021/README.md
 create mode 100644 research/mi_lira_2021/dataset.py
 create mode 100644 research/mi_lira_2021/fprtpr.png
 create mode 100644 research/mi_lira_2021/inference.py
 create mode 100644 research/mi_lira_2021/logs/.keep
 create mode 100644 research/mi_lira_2021/plot.py
 create mode 100644 research/mi_lira_2021/score.py
 create mode 100644 research/mi_lira_2021/scripts/train_demo.sh
 create mode 100644 research/mi_lira_2021/scripts/train_demo_multigpu.sh
 create mode 100644 research/mi_lira_2021/train.py

diff --git a/research/mi_lira_2021/README.md b/research/mi_lira_2021/README.md
new file mode 100644
index 0000000..fc287b0
--- /dev/null
+++ b/research/mi_lira_2021/README.md
@@ -0,0 +1,114 @@
+This directory contains code to reproduce our paper:
+
+**"Membership Inference Attacks From First Principles"**
+https://arxiv.org/abs/2112.03570
+by Nicholas Carlini, Steve Chien, Milad Nasr, Shuang Song, Andreas Terzis, and Florian Tramer.
+
+
+### INSTALLING
+
+You will need to install fairly standard dependencies
+
+`pip install scipy scikit-learn numpy matplotlib`
+
+and also a machine learning framework to train the models. We train our models
+with JAX + Objax, so you will need to follow the installation instructions for those:
+https://github.com/google/objax
+https://objax.readthedocs.io/en/latest/installation_setup.html
+
+
+### RUNNING THE CODE
+
+#### 1. Train the models
+
+The first step in our attack is to train shadow models. As a baseline
+that should give most of the gains of our attack, you should start by
+training 16 shadow models with the command
+
+> bash scripts/train_demo.sh
+
+or, if you have multiple GPUs on your machine and want to train these models
+in parallel, modify and run
+
+> bash scripts/train_demo_multigpu.sh
+
+This will train 16 CIFAR-10 wide ResNet models to ~91% accuracy each, and
+will output a number of files under the directory exp/cifar10 with the structure:
+
+```
+exp/cifar10/
+- experiment_N_of_16
+-- hparams.json
+-- keep.npy
+-- ckpt/
+--- 0000000100.npz
+-- tb/
+```
+
+#### 2. Perform inference
+
+Once the models are trained, it is necessary to perform inference and save
+the output features of each model for every training example in the dataset.
+
+> python3 inference.py --logdir=exp/cifar10/
+
+This will add a new set of files to each experiment directory:
+
+```
+exp/cifar10/
+- experiment_N_of_16
+-- logits/
+--- 0000000100.npy
+```
+
+where this new file has shape (50000, 10) and stores the model's
+output features for each example.
+
+
+#### 3. Compute membership inference scores
+
+Finally, we take the output features and generate our logit-scaled membership
+inference scores for each example and each model.
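+
+For reference, the score computed in this step is the numerically stable
+logit-scaled confidence of the true class, log(p_true / (1 - p_true)), with the
+probabilities averaged over the augmentations saved in the previous step. A
+minimal sketch of that transformation for a single example (the helper below is
+hypothetical; the actual implementation is in `score.py`):
+
+```
+import numpy as np
+
+def logit_score(logits, y):
+    """Stable logit-scaled confidence of the true class y."""
+    z = logits - logits.max()           # subtract the max logit for stability
+    p = np.exp(z) / np.exp(z).sum()     # softmax probabilities
+    p_true = p[y]
+    p_wrong = p.sum() - p_true          # equals 1 - p_true
+    return np.log(p_true + 1e-45) - np.log(p_wrong + 1e-45)
+```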
+
+> python3 score.py exp/cifar10/
+
+This in turn generates a new directory:
+
+```
+exp/cifar10/
+- experiment_N_of_16
+-- scores/
+--- 0000000100.npy
+```
+
+where each file has shape (50000,) and stores just our scores.
+
+
+### PLOTTING THE RESULTS
+
+Finally, we can generate pretty pictures by running the plotting code
+
+> python3 plot.py
+
+which should give (something like) the following output:
+
+
+![Log-log ROC Curve for all attacks](fprtpr.png "Log-log ROC Curve")
+
+```
+Attack Ours (online)
+   AUC 0.6676, Accuracy 0.6077, TPR@0.1%FPR of 0.0169
+Attack Ours (online, fixed variance)
+   AUC 0.6856, Accuracy 0.6137, TPR@0.1%FPR of 0.0593
+Attack Ours (offline)
+   AUC 0.5488, Accuracy 0.5500, TPR@0.1%FPR of 0.0130
+Attack Ours (offline, fixed variance)
+   AUC 0.5549, Accuracy 0.5537, TPR@0.1%FPR of 0.0299
+Attack Global threshold
+   AUC 0.5921, Accuracy 0.6044, TPR@0.1%FPR of 0.0009
+```
+
+where the global threshold attack is the baseline, and our online,
+online-with-fixed-variance, offline, and offline-with-fixed-variance
+attack variants are the four other curves. Note that because we only
+train a few models, the fixed-variance variants perform best.
diff --git a/research/mi_lira_2021/dataset.py b/research/mi_lira_2021/dataset.py
new file mode 100644
index 0000000..fa2c2b0
--- /dev/null
+++ b/research/mi_lira_2021/dataset.py
@@ -0,0 +1,95 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
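+
+# Note: this module is a thin convenience wrapper around tf.data. A DataSet
+# remembers its image shape and its parse/augment functions, and forwards any
+# other method call (map, batch, shuffle, ...) to the wrapped tf.data.Dataset,
+# re-wrapping the result so that chained calls keep this metadata.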
+
+from typing import Callable, Optional, Tuple, List
+
+import numpy as np
+import tensorflow as tf
+
+
+def record_parse(serialized_example: str, image_shape: Tuple[int, int, int]):
+    features = tf.io.parse_single_example(serialized_example,
+                                          features={'image': tf.io.FixedLenFeature([], tf.string),
+                                                    'label': tf.io.FixedLenFeature([], tf.int64)})
+    image = tf.image.decode_image(features['image'])
+    image.set_shape(image_shape)  # set_shape() modifies in place and returns None, so do not chain it.
+    image = tf.cast(image, tf.float32) * (2.0 / 255) - 1.0
+    return dict(image=image, label=features['label'])
+
+
+class DataSet:
+    """Wrapper for tf.data.Dataset to permit extensions."""
+
+    def __init__(self, data: tf.data.Dataset,
+                 image_shape: Tuple[int, int, int],
+                 augment_fn: Optional[Callable] = None,
+                 parse_fn: Optional[Callable] = record_parse):
+        self.data = data
+        self.parse_fn = parse_fn
+        self.augment_fn = augment_fn
+        self.image_shape = image_shape
+
+    @classmethod
+    def from_arrays(cls, images: np.ndarray, labels: np.ndarray, augment_fn: Optional[Callable] = None):
+        return cls(tf.data.Dataset.from_tensor_slices(dict(image=images, label=labels)), images.shape[1:],
+                   augment_fn=augment_fn, parse_fn=None)
+
+    @classmethod
+    def from_files(cls, filenames: List[str],
+                   image_shape: Tuple[int, int, int],
+                   augment_fn: Optional[Callable],
+                   parse_fn: Optional[Callable] = record_parse):
+        filenames_in = filenames
+        filenames = sorted(sum([tf.io.gfile.glob(x) for x in filenames], []))
+        if not filenames:
+            raise ValueError('Empty dataset, files not found:', filenames_in)
+        return cls(tf.data.TFRecordDataset(filenames), image_shape, augment_fn=augment_fn, parse_fn=parse_fn)
+
+    @classmethod
+    def from_tfds(cls, dataset: tf.data.Dataset, image_shape: Tuple[int, int, int],
+                  augment_fn: Optional[Callable] = None):
+        return cls(dataset.map(lambda x: dict(image=tf.cast(x['image'], tf.float32) / 127.5 - 1, label=x['label'])),
+                   image_shape, augment_fn=augment_fn, parse_fn=None)
+
+    def __iter__(self):
+        return iter(self.data)
+
+    def __getattr__(self, item):
+        if item in self.__dict__:
+            return self.__dict__[item]
+
+        def call_and_update(*args, **kwargs):
+            v = getattr(self.__dict__['data'], item)(*args, **kwargs)
+            if isinstance(v, tf.data.Dataset):
+                return self.__class__(v, self.image_shape, augment_fn=self.augment_fn, parse_fn=self.parse_fn)
+            return v
+
+        return call_and_update
+
+    def augment(self, para_augment: int = 4):
+        if self.augment_fn:
+            return self.map(self.augment_fn, para_augment)
+        return self
+
+    def nchw(self):
+        return self.map(lambda x: dict(image=tf.transpose(x['image'], [0, 3, 1, 2]), label=x['label']))
+
+    def one_hot(self, nclass: int):
+        return self.map(lambda x: dict(image=x['image'], label=tf.one_hot(x['label'], nclass)))
+
+    def parse(self, para_parse: int = 2):
+        if not self.parse_fn:
+            return self
+        if self.image_shape:
+            return self.map(lambda x: self.parse_fn(x, self.image_shape), para_parse)
+        return self.map(self.parse_fn, para_parse)
diff --git a/research/mi_lira_2021/fprtpr.png b/research/mi_lira_2021/fprtpr.png
new file mode 100644
index 0000000000000000000000000000000000000000..8419ca1d9ff3a2ce6edd2836d30e502c273668b6
GIT binary patch
literal 37841
[binary image data omitted: fprtpr.png, the log-log ROC curve shown in the README]

zL=0fqfPx>k=xdy3`&&c0;}0l+w!w)Wdys#r8{C;I7X7-a?LhWLZd%=<)lba~E^$h$ z5wMH&!M$G9HhC5gvmXKIJ$_RhL^1Jl>uKf zh zA?A8jc0H^65wwd$n=31O2V&#Pm@X0C|22}ik2n7KAQ0E-Az8?FA-R74?b~6V>D$bQ z+7CzaVjR`Q$P6{Hv1af|BOUR{R)u4OYf%fZo)xfv0gNGJ0Q&J=y?+Ef=#L$?at9Bo`Lkjv@2#ZPr8WzcE!3mIVRw(A1?Cx`P|wQMRr3he-; zg3Je4{_K^|xIjZ{FQS(4gCu5_tjnr{Qx80dLBO}XWp-109cvTwye|Xy{;hT`oz?j* zXY5IZ)?5!U*)+7Yk!=t}Vg6=2dBT$Ozy4^jNd9zkc^+?Wo_k^5%(c7&zw%@zn*@4( zJ*_vj9g8IQQ6bFi=+V>xB1?)y2CyBv{)MF_ZNxo@90<_WFiG+@Nh;$t`K#Q?>z#PQ z$ip6sGPW*6HAa3lBsP?aQ%vM2^mS_*{yLw08xt$tU*|L{7zqD$TsNg+(f{QK3{D_8 zT?F!ET#up_A^+J7qQgL^5*H#%si=svhWX9#9VliaMrNJPlot=L^jEIL=S;|78qp6M z{VwzUj2(j>|GtsemRgRFgZIst-D!KgKv&Q&vEAiG@I8wn2g-eqOkcd1RX%x{ndMQ> z8Qa?9E$6MR(y5)(gP84}TNOBo(XRP!GbDc}#dCT`!yH@9&@bCcdAUhMax6V-oS5;f z`5=c;%2HM)DGW+KP;atNE@+0=PW~dt$x_H1H~_c7mbs1yL|15OY5j?rxw&)KF7-r5 zXyH+fiAf&JZ{lWkzRDHfm5oD<_XaFD8P+0|V!J)UN+w-Gc1AZGe>y$`Ck4*aaKIx7 zm?`+}+qYI+OKO8nOl)i(ZQ~Ui80DA6JUl$sP_9CluQudVI7k%wjAgTq!hub%Sg!B+ zfJ{*|j!JSc_pq|E{%GD2esoU)v~s&Z2MrDm8a9!aM*wmBa|dB|I+UU@h+!;!m6ccP z%TBEUc#T;CsT~VQSZcf>f+Pa;r1_>oLZeJz=ZT8>m7gsLg3MK;hD( zM-uRF2jFBvhlhsd<~cp%qoZejyjK*&5ifTW78kuJ{QfLB8W|Z8!F*R&msFA-Zc@_? z^@Mn1O2h9QxQ}=Oh=>5Hk!BJMj1Me#(mSqA6{Ys(&70T8{Nm)SltbzSdn z6tZA8ELqF@Sy?5*T-|INesCubkLf*=>FH^uMc^Cyz`|5i>@eOIqnW3nzOo~Xv6+Ba zJUm;sZl!F-;%4f0K!Y%RPNu~6?n?pm8*isD*EgIj(b~quv>^gYB69ArawETpNX(lz zZ#LL6ZD{G}V8_6^GfEQ{k2$$u7%vQ$u3snKmtyQMhPKsSrxTyEyRC<@M_4Vx5Lc~< z^GL$QS6p~nAVrSHmaKAMe}YbJOyv}?bdW^SUS3`txT`fae}UkDDZSrFL0%_Fd@u-B zdb_86HKQzrwZ-7c6DQJe#ObNsJZr8f%E-uE{FD}?cB7!OG7%~-oFKabOzod*)X%ScVehv@BWw(E;kIDIMG!+ zc_K$mO^r}z&b@;lKRRGbywvB+ATT*Oc^86PPBVpyBs>c{Iip(Yf$Ax>;sLRM))mD2 z=-3#^DM3*{N3&An{hdqwRFr%dRd(508B)U^w#c0f;)tGur&W9=Hv^ z74?R+yu1+fvmd4|&O_^2vN$+s1D*)l+}Q_DsIMG*b#@02&!d#5gj@!vs*BBv$Q~#V z_eUBlJbqwE@c}=g_7JL>doRkJ&Bu76%OR!ruWzid)tuc|HuvhkB3|N zZNZTmk}muZpe?HT;lqbj)YXG-T=1ybj(fBz{OQr7ATXk1V$K2#!u>j5dH7yg|F?Oc zCyB}9vxISgid;iqzxQJTVkXtpuD$-axa&Ip9GmL@b{_HR#3ITbIu?7}=(K!E@cn@e zSkMg!lqddRcmXmWJ&MLqeeTs9PVg*$clyi3!~%LSJlhic_n*{0MD$Yx&OTk1i1mTM zBYk`OFIsVTdzt}d#~)=QT!Q<5kii1VFJof^TTkGPl{ z^PS-sdK=IT9y;_4w2an;3%_#4Ju0k?O-x8Qzst;W9vHi`pI&Uo9g~J)3CLd-)(3E~ z!B+#gFSTfC!Awe0z_qdssbZPnBuY#74|+|AY~Hd(c@b3+StaZ!kzkz;K($_OEt;*9 zaQWru`jM#@ST?@C2SMFox$Kr?i`uhl;JYq|YQ<}fErn)mtgIT&&iNcS)ll4G(U8$> z14DB6`qk5Jh%mDN6u1pLFl4&hQHlmmUudRYz({Nd9SSo<=qMXFck-Y!%d%xJF3fJG zb*SE9I*3t1<>w^=yf0r)7Cgzx(SZIDMKQSH_+M2Baf2mZ_xn^fZdkyC#VaRg1EK=a zK~AqGSk|rKh0KBCgsUqzyP2-;O+aG7T~4f_4Lsnn;?mR84aXyrl0@+0n@s%>Q`ozs4`gw9P4tiWg{D2)SLeR5`T3A?Y{|!@zOI(*vu> zcJ3=yeOy_oep53czD`Buv@7i)R#n6ADko{Q4Ovw;ZH!UCm(gbpX;l92UoJ52UM@BD TE$8&3z#mOjJ(c@PM=$?BO#PkF literal 0 HcmV?d00001 diff --git a/research/mi_lira_2021/inference.py b/research/mi_lira_2021/inference.py new file mode 100644 index 0000000..11ad696 --- /dev/null +++ b/research/mi_lira_2021/inference.py @@ -0,0 +1,150 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import functools +import os +from typing import Callable +import json + +import re +import jax +import jax.numpy as jn +import numpy as np +import tensorflow as tf # For data augmentation. +import tensorflow_datasets as tfds +from absl import app, flags +from tqdm import tqdm, trange +import pickle +from functools import partial + +import objax +from objax.jaxboard import SummaryWriter, Summary +from objax.util import EasyDict +from objax.zoo import convnet, wide_resnet + +from dataset import DataSet + +from train import MemModule, network + +from collections import defaultdict +FLAGS = flags.FLAGS + + +def main(argv): + """ + Perform inference of the saved model in order to generate the + output logits, using a particular set of augmentations. + """ + del argv + tf.config.experimental.set_visible_devices([], "GPU") + + def load(arch): + return MemModule(network(arch), nclass=100 if FLAGS.dataset == 'cifar100' else 10, + mnist=FLAGS.dataset == 'mnist', + arch=arch, + lr=.1, + batch=0, + epochs=0, + weight_decay=0) + + def cache_load(arch): + thing = [] + def fn(): + if len(thing) == 0: + thing.append(load(arch)) + return thing[0] + return fn + + xs_all = np.load(os.path.join(FLAGS.logdir,"x_train.npy"))[:FLAGS.dataset_size] + ys_all = np.load(os.path.join(FLAGS.logdir,"y_train.npy"))[:FLAGS.dataset_size] + + + def get_loss(model, xbatch, ybatch, shift, reflect=True, stride=1): + + outs = [] + for aug in [xbatch, xbatch[:,:,::-1,:]][:reflect+1]: + aug_pad = tf.pad(aug, [[0] * 2, [shift] * 2, [shift] * 2, [0] * 2], mode='REFLECT').numpy() + for dx in range(0, 2*shift+1, stride): + for dy in range(0, 2*shift+1, stride): + this_x = aug_pad[:, dx:dx+32, dy:dy+32, :].transpose((0,3,1,2)) + + logits = model.model(this_x, training=True) + outs.append(logits) + + print(np.array(outs).shape) + return np.array(outs).transpose((1, 0, 2)) + + N = 5000 + + def features(model, xbatch, ybatch): + return get_loss(model, xbatch, ybatch, + shift=0, reflect=True, stride=1) + + for path in sorted(os.listdir(os.path.join(FLAGS.logdir))): + if re.search(FLAGS.regex, path) is None: + print("Skipping from regex") + continue + + hparams = json.load(open(os.path.join(FLAGS.logdir, path, "hparams.json"))) + arch = hparams['arch'] + model = cache_load(arch)() + + logdir = os.path.join(FLAGS.logdir, path) + + checkpoint = objax.io.Checkpoint(logdir, keep_ckpts=10, makedir=True) + max_epoch, last_ckpt = checkpoint.restore(model.vars()) + if max_epoch == 0: continue + + if not os.path.exists(os.path.join(FLAGS.logdir, path, "logits")): + os.mkdir(os.path.join(FLAGS.logdir, path, "logits")) + if FLAGS.from_epoch is not None: + first = FLAGS.from_epoch + else: + first = max_epoch-1 + + for epoch in range(first,max_epoch+1): + if not os.path.exists(os.path.join(FLAGS.logdir, path, "ckpt", "%010d.npz"%epoch)): + # no checkpoint saved here + continue + + if os.path.exists(os.path.join(FLAGS.logdir, path, "logits", "%010d.npy"%epoch)): + print("Skipping already generated file", epoch) + continue + + try: + start_epoch, last_ckpt = checkpoint.restore(model.vars(), epoch) + except: + print("Fail to load", epoch) + continue + + stats = [] + + for i in range(0,len(xs_all),N): + stats.extend(features(model, xs_all[i:i+N], + ys_all[i:i+N])) + # This will be shape N, augs, nclass + + np.save(os.path.join(FLAGS.logdir, path, "logits", "%010d"%epoch), + np.array(stats)[:,None,:,:]) + +if __name__ == '__main__': + flags.DEFINE_string('dataset', 'cifar10', 'Dataset.') + flags.DEFINE_string('logdir', 'experiments/', 'Directory where to 
save checkpoints and tensorboard data.') + flags.DEFINE_string('regex', '.*experiment.*', 'keep files when matching') + flags.DEFINE_bool('random_labels', False, 'use random labels.') + flags.DEFINE_integer('dataset_size', 50000, 'size of dataset.') + flags.DEFINE_integer('from_epoch', None, 'which epoch to load from.') + flags.DEFINE_integer('seed_mod', None, 'keep mod seed.') + flags.DEFINE_integer('modulus', 8, 'modulus.') + app.run(main) diff --git a/research/mi_lira_2021/logs/.keep b/research/mi_lira_2021/logs/.keep new file mode 100644 index 0000000..e69de29 diff --git a/research/mi_lira_2021/plot.py b/research/mi_lira_2021/plot.py new file mode 100644 index 0000000..435125c --- /dev/null +++ b/research/mi_lira_2021/plot.py @@ -0,0 +1,224 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import scipy.stats + +import numpy as np +import matplotlib.pyplot as plt +from sklearn.metrics import auc, roc_curve +import functools + +# Look at me being proactive! +import matplotlib +matplotlib.rcParams['pdf.fonttype'] = 42 +matplotlib.rcParams['ps.fonttype'] = 42 + + +def sweep(score, x): + """ + Compute a ROC curve and then return the FPR, TPR, AUC, and ACC. + """ + fpr, tpr, _ = roc_curve(x, -score) + acc = np.max(1-(fpr+(1-tpr))/2) + return fpr, tpr, auc(fpr, tpr), acc + +def load_data(p): + """ + Load our saved scores and then put them into a big matrix. + """ + global scores, keep + scores = [] + keep = [] + + for root,ds,_ in os.walk(p): + for f in ds: + if not f.startswith("experiment"): continue + if not os.path.exists(os.path.join(root,f,"scores")): continue + last_epoch = sorted(os.listdir(os.path.join(root,f,"scores"))) + if len(last_epoch) == 0: continue + scores.append(np.load(os.path.join(root,f,"scores",last_epoch[-1]))) + keep.append(np.load(os.path.join(root,f,"keep.npy"))) + + scores = np.array(scores) + keep = np.array(keep)[:,:scores.shape[1]] + + return scores, keep + +def generate_ours(keep, scores, check_keep, check_scores, in_size=100000, out_size=100000, + fix_variance=False): + """ + Fit a two predictive models using keep and scores in order to predict + if the examples in check_scores were training data or not, using the + ground truth answer from check_keep. 
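+
+    This is the "online" variant of the attack: for each example we estimate
+    a Gaussian (median and standard deviation) over the scores of the shadow
+    models that trained on it (keep) and another over the models that did not
+    (~keep), and the prediction is the difference of the two Gaussian
+    log-densities of the target model's score, averaged over augmentations.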
+ """ + dat_in = [] + dat_out = [] + + for j in range(scores.shape[1]): + dat_in.append(scores[keep[:,j],j,:]) + dat_out.append(scores[~keep[:,j],j,:]) + + in_size = min(min(map(len,dat_in)), in_size) + out_size = min(min(map(len,dat_out)), out_size) + + dat_in = np.array([x[:in_size] for x in dat_in]) + dat_out = np.array([x[:out_size] for x in dat_out]) + + mean_in = np.median(dat_in, 1) + mean_out = np.median(dat_out, 1) + + if fix_variance: + std_in = np.std(dat_in) + std_out = np.std(dat_in) + else: + std_in = np.std(dat_in, 1) + std_out = np.std(dat_out, 1) + + prediction = [] + answers = [] + for ans, sc in zip(check_keep, check_scores): + pr_in = -scipy.stats.norm.logpdf(sc, mean_in, std_in+1e-30) + pr_out = -scipy.stats.norm.logpdf(sc, mean_out, std_out+1e-30) + score = pr_in-pr_out + + prediction.extend(score.mean(1)) + answers.extend(ans) + + return prediction, answers + +def generate_ours_offline(keep, scores, check_keep, check_scores, in_size=100000, out_size=100000, + fix_variance=False): + """ + Fit a single predictive model using keep and scores in order to predict + if the examples in check_scores were training data or not, using the + ground truth answer from check_keep. + """ + dat_in = [] + dat_out = [] + + for j in range(scores.shape[1]): + dat_in.append(scores[keep[:, j], j, :]) + dat_out.append(scores[~keep[:, j], j, :]) + + out_size = min(min(map(len,dat_out)), out_size) + + dat_out = np.array([x[:out_size] for x in dat_out]) + + mean_out = np.median(dat_out, 1) + + if fix_variance: + std_out = np.std(dat_out) + else: + std_out = np.std(dat_out, 1) + + prediction = [] + answers = [] + for ans, sc in zip(check_keep, check_scores): + score = scipy.stats.norm.logpdf(sc, mean_out, std_out+1e-30) + + prediction.extend(score.mean(1)) + answers.extend(ans) + return prediction, answers + + +def generate_global(keep, scores, check_keep, check_scores): + """ + Use a simple global threshold sweep to predict if the examples in + check_scores were training data or not, using the ground truth answer from + check_keep. + """ + prediction = [] + answers = [] + for ans, sc in zip(check_keep, check_scores): + prediction.extend(-sc.mean(1)) + answers.extend(ans) + + return prediction, answers + +def do_plot(fn, keep, scores, ntest, legend='', metric='auc', sweep_fn=sweep, **plot_kwargs): + """ + Generate the ROC curves by using ntest models as test models and the rest to train. 
+ """ + + prediction, answers = fn(keep[:-ntest], + scores[:-ntest], + keep[-ntest:], + scores[-ntest:]) + + fpr, tpr, auc, acc = sweep_fn(np.array(prediction), np.array(answers, dtype=bool)) + + low = tpr[np.where(fpr<.001)[0][-1]] + + print('Attack %s AUC %.4f, Accuracy %.4f, TPR@0.1%%FPR of %.4f'%(legend, auc,acc, low)) + + metric_text = '' + if metric == 'auc': + metric_text = 'auc=%.3f'%auc + elif metric == 'acc': + metric_text = 'acc=%.3f'%acc + + plt.plot(fpr, tpr, label=legend+metric_text, **plot_kwargs) + return (acc,auc) + + +def fig_fpr_tpr(): + + plt.figure(figsize=(4,3)) + + do_plot(generate_ours, + keep, scores, 1, + "Ours (online)\n", + metric='auc' + ) + + do_plot(functools.partial(generate_ours, fix_variance=True), + keep, scores, 1, + "Ours (online, fixed variance)\n", + metric='auc' + ) + + do_plot(functools.partial(generate_ours_offline), + keep, scores, 1, + "Ours (offline)\n", + metric='auc' + ) + + do_plot(functools.partial(generate_ours_offline, fix_variance=True), + keep, scores, 1, + "Ours (offline, fixed variance)\n", + metric='auc' + ) + + do_plot(generate_global, + keep, scores, 1, + "Global threshold\n", + metric='auc' + ) + + plt.semilogx() + plt.semilogy() + plt.xlim(1e-5,1) + plt.ylim(1e-5,1) + plt.xlabel("False Positive Rate") + plt.ylabel("True Positive Rate") + plt.plot([0, 1], [0, 1], ls='--', color='gray') + plt.subplots_adjust(bottom=.18, left=.18, top=.96, right=.96) + plt.legend(fontsize=8) + plt.savefig("/tmp/fprtpr.png") + plt.show() + + +load_data("exp/cifar10/") +fig_fpr_tpr() diff --git a/research/mi_lira_2021/score.py b/research/mi_lira_2021/score.py new file mode 100644 index 0000000..91aeaf4 --- /dev/null +++ b/research/mi_lira_2021/score.py @@ -0,0 +1,66 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import numpy as np +import os +import multiprocessing as mp + + +def load_one(base): + """ + This loads a logits and converts it to a scored prediction. + """ + root = os.path.join(logdir,base,'logits') + if not os.path.exists(root): return None + + if not os.path.exists(os.path.join(logdir,base,'scores')): + os.mkdir(os.path.join(logdir,base,'scores')) + + for f in os.listdir(root): + try: + opredictions = np.load(os.path.join(root,f)) + except: + print("Fail") + continue + + ## Be exceptionally careful. + ## Numerically stable everything, as described in the paper. 
+ predictions = opredictions - np.max(opredictions, axis=3, keepdims=True) + predictions = np.array(np.exp(predictions), dtype=np.float64) + predictions = predictions/np.sum(predictions,axis=3,keepdims=True) + + COUNT = predictions.shape[0] + # x num_examples x num_augmentations x logits + y_true = predictions[np.arange(COUNT),:,:,labels[:COUNT]] + print(y_true.shape) + + print('mean acc',np.mean(predictions[:,0,0,:].argmax(1)==labels[:COUNT])) + + predictions[np.arange(COUNT),:,:,labels[:COUNT]] = 0 + y_wrong = np.sum(predictions, axis=3) + + logit = (np.log(y_true.mean((1))+1e-45) - np.log(y_wrong.mean((1))+1e-45)) + + np.save(os.path.join(logdir, base, 'scores', f), logit) + + +def load_stats(): + with mp.Pool(8) as p: + p.map(load_one, [x for x in os.listdir(logdir) if 'exp' in x]) + + +logdir = sys.argv[1] +labels = np.load(os.path.join(logdir,"y_train.npy")) +load_stats() diff --git a/research/mi_lira_2021/scripts/train_demo.sh b/research/mi_lira_2021/scripts/train_demo.sh new file mode 100644 index 0000000..06f8779 --- /dev/null +++ b/research/mi_lira_2021/scripts/train_demo.sh @@ -0,0 +1,16 @@ +CUDA_VISIBLE_DEVICES='0' python3 -u train.py --dataset=cifar10 --epochs=100 --save_steps=20 --arch wrn28-2 --num_experiments 16 --expid 0 --logdir exp/cifar10 &> logs/log_0 +CUDA_VISIBLE_DEVICES='0' python3 -u train.py --dataset=cifar10 --epochs=100 --save_steps=20 --arch wrn28-2 --num_experiments 16 --expid 1 --logdir exp/cifar10 &> logs/log_1 +CUDA_VISIBLE_DEVICES='0' python3 -u train.py --dataset=cifar10 --epochs=100 --save_steps=20 --arch wrn28-2 --num_experiments 16 --expid 2 --logdir exp/cifar10 &> logs/log_2 +CUDA_VISIBLE_DEVICES='0' python3 -u train.py --dataset=cifar10 --epochs=100 --save_steps=20 --arch wrn28-2 --num_experiments 16 --expid 3 --logdir exp/cifar10 &> logs/log_3 +CUDA_VISIBLE_DEVICES='0' python3 -u train.py --dataset=cifar10 --epochs=100 --save_steps=20 --arch wrn28-2 --num_experiments 16 --expid 4 --logdir exp/cifar10 &> logs/log_4 +CUDA_VISIBLE_DEVICES='0' python3 -u train.py --dataset=cifar10 --epochs=100 --save_steps=20 --arch wrn28-2 --num_experiments 16 --expid 5 --logdir exp/cifar10 &> logs/log_5 +CUDA_VISIBLE_DEVICES='0' python3 -u train.py --dataset=cifar10 --epochs=100 --save_steps=20 --arch wrn28-2 --num_experiments 16 --expid 6 --logdir exp/cifar10 &> logs/log_6 +CUDA_VISIBLE_DEVICES='0' python3 -u train.py --dataset=cifar10 --epochs=100 --save_steps=20 --arch wrn28-2 --num_experiments 16 --expid 7 --logdir exp/cifar10 &> logs/log_7 +CUDA_VISIBLE_DEVICES='0' python3 -u train.py --dataset=cifar10 --epochs=100 --save_steps=20 --arch wrn28-2 --num_experiments 16 --expid 8 --logdir exp/cifar10 &> logs/log_8 +CUDA_VISIBLE_DEVICES='0' python3 -u train.py --dataset=cifar10 --epochs=100 --save_steps=20 --arch wrn28-2 --num_experiments 16 --expid 9 --logdir exp/cifar10 &> logs/log_9 +CUDA_VISIBLE_DEVICES='0' python3 -u train.py --dataset=cifar10 --epochs=100 --save_steps=20 --arch wrn28-2 --num_experiments 16 --expid 10 --logdir exp/cifar10 &> logs/log_10 +CUDA_VISIBLE_DEVICES='0' python3 -u train.py --dataset=cifar10 --epochs=100 --save_steps=20 --arch wrn28-2 --num_experiments 16 --expid 11 --logdir exp/cifar10 &> logs/log_11 +CUDA_VISIBLE_DEVICES='0' python3 -u train.py --dataset=cifar10 --epochs=100 --save_steps=20 --arch wrn28-2 --num_experiments 16 --expid 12 --logdir exp/cifar10 &> logs/log_12 +CUDA_VISIBLE_DEVICES='0' python3 -u train.py --dataset=cifar10 --epochs=100 --save_steps=20 --arch wrn28-2 --num_experiments 16 --expid 13 --logdir exp/cifar10 &> 
logs/log_13 +CUDA_VISIBLE_DEVICES='0' python3 -u train.py --dataset=cifar10 --epochs=100 --save_steps=20 --arch wrn28-2 --num_experiments 16 --expid 14 --logdir exp/cifar10 &> logs/log_14 +CUDA_VISIBLE_DEVICES='0' python3 -u train.py --dataset=cifar10 --epochs=100 --save_steps=20 --arch wrn28-2 --num_experiments 16 --expid 15 --logdir exp/cifar10 &> logs/log_15 diff --git a/research/mi_lira_2021/scripts/train_demo_multigpu.sh b/research/mi_lira_2021/scripts/train_demo_multigpu.sh new file mode 100644 index 0000000..6bd689d --- /dev/null +++ b/research/mi_lira_2021/scripts/train_demo_multigpu.sh @@ -0,0 +1,18 @@ +CUDA_VISIBLE_DEVICES='0' python3 -u train.py --dataset=cifar10 --epochs=100 --save_steps=20 --arch wrn28-2 --num_experiments 16 --expid 0 --logdir exp/cifar10 &> logs/log_0 & +CUDA_VISIBLE_DEVICES='1' python3 -u train.py --dataset=cifar10 --epochs=100 --save_steps=20 --arch wrn28-2 --num_experiments 16 --expid 1 --logdir exp/cifar10 &> logs/log_1 & +CUDA_VISIBLE_DEVICES='2' python3 -u train.py --dataset=cifar10 --epochs=100 --save_steps=20 --arch wrn28-2 --num_experiments 16 --expid 2 --logdir exp/cifar10 &> logs/log_2 & +CUDA_VISIBLE_DEVICES='3' python3 -u train.py --dataset=cifar10 --epochs=100 --save_steps=20 --arch wrn28-2 --num_experiments 16 --expid 3 --logdir exp/cifar10 &> logs/log_3 & +CUDA_VISIBLE_DEVICES='4' python3 -u train.py --dataset=cifar10 --epochs=100 --save_steps=20 --arch wrn28-2 --num_experiments 16 --expid 4 --logdir exp/cifar10 &> logs/log_4 & +CUDA_VISIBLE_DEVICES='5' python3 -u train.py --dataset=cifar10 --epochs=100 --save_steps=20 --arch wrn28-2 --num_experiments 16 --expid 5 --logdir exp/cifar10 &> logs/log_5 & +CUDA_VISIBLE_DEVICES='6' python3 -u train.py --dataset=cifar10 --epochs=100 --save_steps=20 --arch wrn28-2 --num_experiments 16 --expid 6 --logdir exp/cifar10 &> logs/log_6 & +CUDA_VISIBLE_DEVICES='7' python3 -u train.py --dataset=cifar10 --epochs=100 --save_steps=20 --arch wrn28-2 --num_experiments 16 --expid 7 --logdir exp/cifar10 &> logs/log_7 & +wait; +CUDA_VISIBLE_DEVICES='0' python3 -u train.py --dataset=cifar10 --epochs=100 --save_steps=20 --arch wrn28-2 --num_experiments 16 --expid 8 --logdir exp/cifar10 &> logs/log_8 & +CUDA_VISIBLE_DEVICES='1' python3 -u train.py --dataset=cifar10 --epochs=100 --save_steps=20 --arch wrn28-2 --num_experiments 16 --expid 9 --logdir exp/cifar10 &> logs/log_9 & +CUDA_VISIBLE_DEVICES='2' python3 -u train.py --dataset=cifar10 --epochs=100 --save_steps=20 --arch wrn28-2 --num_experiments 16 --expid 10 --logdir exp/cifar10 &> logs/log_10 & +CUDA_VISIBLE_DEVICES='3' python3 -u train.py --dataset=cifar10 --epochs=100 --save_steps=20 --arch wrn28-2 --num_experiments 16 --expid 11 --logdir exp/cifar10 &> logs/log_11 & +CUDA_VISIBLE_DEVICES='4' python3 -u train.py --dataset=cifar10 --epochs=100 --save_steps=20 --arch wrn28-2 --num_experiments 16 --expid 12 --logdir exp/cifar10 &> logs/log_12 & +CUDA_VISIBLE_DEVICES='5' python3 -u train.py --dataset=cifar10 --epochs=100 --save_steps=20 --arch wrn28-2 --num_experiments 16 --expid 13 --logdir exp/cifar10 &> logs/log_13 & +CUDA_VISIBLE_DEVICES='6' python3 -u train.py --dataset=cifar10 --epochs=100 --save_steps=20 --arch wrn28-2 --num_experiments 16 --expid 14 --logdir exp/cifar10 &> logs/log_14 & +CUDA_VISIBLE_DEVICES='7' python3 -u train.py --dataset=cifar10 --epochs=100 --save_steps=20 --arch wrn28-2 --num_experiments 16 --expid 15 --logdir exp/cifar10 &> logs/log_15 & +wait; diff --git a/research/mi_lira_2021/train.py b/research/mi_lira_2021/train.py new file mode 
100644 index 0000000..19ff0e3 --- /dev/null +++ b/research/mi_lira_2021/train.py @@ -0,0 +1,329 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import functools +import os +import shutil +from typing import Callable +import json + +import jax +import jax.numpy as jn +import numpy as np +import tensorflow as tf # For data augmentation. +import tensorflow_datasets as tfds +from absl import app, flags +from tqdm import tqdm, trange + +import objax +from objax.jaxboard import SummaryWriter, Summary +from objax.util import EasyDict +from objax.zoo import convnet, wide_resnet, dnnet + +from dataset import DataSet + +FLAGS = flags.FLAGS + +def augment(x, shift: int, mirror=True): + """ + Augmentation function used in training the model. + """ + y = x['image'] + if mirror: + y = tf.image.random_flip_left_right(y) + y = tf.pad(y, [[shift] * 2, [shift] * 2, [0] * 2], mode='REFLECT') + y = tf.image.random_crop(y, tf.shape(x['image'])) + return dict(image=y, label=x['label']) + + +class TrainLoop(objax.Module): + """ + Training loop for general machine learning models. + Based on the training loop from the objax CIFAR10 example code. + """ + predict: Callable + train_op: Callable + + def __init__(self, nclass: int, **kwargs): + self.nclass = nclass + self.params = EasyDict(kwargs) + + def train_step(self, summary: Summary, data: dict, progress: np.ndarray): + kv = self.train_op(progress, data['image'].numpy(), data['label'].numpy()) + for k, v in kv.items(): + if jn.isnan(v): + raise ValueError('NaN, try reducing learning rate', k) + if summary is not None: + summary.scalar(k, float(v)) + + def train(self, num_train_epochs: int, train_size: int, train: DataSet, test: DataSet, logdir: str, save_steps=100, patience=None): + """ + Completely standard training. Nothing interesting to see here. 
+ """ + checkpoint = objax.io.Checkpoint(logdir, keep_ckpts=20, makedir=True) + start_epoch, last_ckpt = checkpoint.restore(self.vars()) + train_iter = iter(train) + progress = np.zeros(jax.local_device_count(), 'f') # for multi-GPU + + best_acc = 0 + best_acc_epoch = -1 + + with SummaryWriter(os.path.join(logdir, 'tb')) as tensorboard: + for epoch in range(start_epoch, num_train_epochs): + # Train + summary = Summary() + loop = range(0, train_size, self.params.batch) + for step in loop: + progress[:] = (step + (epoch * train_size)) / (num_train_epochs * train_size) + self.train_step(summary, next(train_iter), progress) + + # Eval + accuracy, total = 0, 0 + if epoch%FLAGS.eval_steps == 0 and test is not None: + for data in test: + total += data['image'].shape[0] + preds = np.argmax(self.predict(data['image'].numpy()), axis=1) + accuracy += (preds == data['label'].numpy()).sum() + accuracy /= total + summary.scalar('eval/accuracy', 100 * accuracy) + tensorboard.write(summary, step=(epoch + 1) * train_size) + print('Epoch %04d Loss %.2f Accuracy %.2f' % (epoch + 1, summary['losses/xe'](), + summary['eval/accuracy']())) + + if summary['eval/accuracy']() > best_acc: + best_acc = summary['eval/accuracy']() + best_acc_epoch = epoch + elif patience is not None and epoch > best_acc_epoch + patience: + print("early stopping!") + checkpoint.save(self.vars(), epoch + 1) + return + + else: + print('Epoch %04d Loss %.2f Accuracy --' % (epoch + 1, summary['losses/xe']())) + + if epoch%save_steps == save_steps-1: + checkpoint.save(self.vars(), epoch + 1) + + +# We inherit from the training loop and define predict and train_op. +class MemModule(TrainLoop): + def __init__(self, model: Callable, nclass: int, mnist=False, **kwargs): + """ + Completely standard training. Nothing interesting to see here. 
+ """ + super().__init__(nclass, **kwargs) + self.model = model(1 if mnist else 3, nclass) + self.opt = objax.optimizer.Momentum(self.model.vars()) + self.model_ema = objax.optimizer.ExponentialMovingAverageModule(self.model, momentum=0.999, debias=True) + + @objax.Function.with_vars(self.model.vars()) + def loss(x, label): + logit = self.model(x, training=True) + loss_wd = 0.5 * sum((v.value ** 2).sum() for k, v in self.model.vars().items() if k.endswith('.w')) + loss_xe = objax.functional.loss.cross_entropy_logits(logit, label).mean() + return loss_xe + loss_wd * self.params.weight_decay, {'losses/xe': loss_xe, 'losses/wd': loss_wd} + + gv = objax.GradValues(loss, self.model.vars()) + self.gv = gv + + @objax.Function.with_vars(self.vars()) + def train_op(progress, x, y): + g, v = gv(x, y) + lr = self.params.lr * jn.cos(progress * (7 * jn.pi) / (2 * 8)) + lr = lr * jn.clip(progress*100,0,1) + self.opt(lr, g) + self.model_ema.update_ema() + return {'monitors/lr': lr, **v[1]} + + self.predict = objax.Jit(objax.nn.Sequential([objax.ForceArgs(self.model_ema, training=False)])) + + self.train_op = objax.Jit(train_op) + + +def network(arch: str): + if arch == 'cnn32-3-max': + return functools.partial(convnet.ConvNet, scales=3, filters=32, filters_max=1024, + pooling=objax.functional.max_pool_2d) + elif arch == 'cnn32-3-mean': + return functools.partial(convnet.ConvNet, scales=3, filters=32, filters_max=1024, + pooling=objax.functional.average_pool_2d) + elif arch == 'cnn64-3-max': + return functools.partial(convnet.ConvNet, scales=3, filters=64, filters_max=1024, + pooling=objax.functional.max_pool_2d) + elif arch == 'cnn64-3-mean': + return functools.partial(convnet.ConvNet, scales=3, filters=64, filters_max=1024, + pooling=objax.functional.average_pool_2d) + elif arch == 'wrn28-1': + return functools.partial(wide_resnet.WideResNet, depth=28, width=1) + elif arch == 'wrn28-2': + return functools.partial(wide_resnet.WideResNet, depth=28, width=2) + elif arch == 'wrn28-10': + return functools.partial(wide_resnet.WideResNet, depth=28, width=10) + raise ValueError('Architecture not recognized', arch) + +def get_data(seed): + """ + This is the function to generate subsets of the data for training models. + + First, we get the training dataset either from the numpy cache + or otherwise we load it from tensorflow datasets. + + Then, we compute the subset. This works in one of two ways. + + 1. If we have a seed, then we just randomly choose examples based on + a prng with that seed, keeping FLAGS.pkeep fraction of the data. + + 2. Otherwise, if we have an experiment ID, then we do something fancier. + If we run each experiment independently then even after a lot of trials + there will still probably be some examples that were always included + or always excluded. So instead, with experiment IDs, we guarantee that + after FLAGS.num_experiments are done, each example is seen exactly half + of the time in train, and half of the time not in train. 
+ + """ + DATA_DIR = os.path.join(os.environ['HOME'], 'TFDS') + + if os.path.exists(os.path.join(FLAGS.logdir, "x_train.npy")): + inputs = np.load(os.path.join(FLAGS.logdir, "x_train.npy")) + labels = np.load(os.path.join(FLAGS.logdir, "y_train.npy")) + else: + print("First time, creating dataset") + data = tfds.as_numpy(tfds.load(name=FLAGS.dataset, batch_size=-1, data_dir=DATA_DIR)) + inputs = data['train']['image'] + labels = data['train']['label'] + + inputs = (inputs/127.5)-1 + np.save(os.path.join(FLAGS.logdir, "x_train.npy"),inputs) + np.save(os.path.join(FLAGS.logdir, "y_train.npy"),labels) + + nclass = np.max(labels)+1 + + np.random.seed(seed) + if FLAGS.num_experiments is not None: + np.random.seed(0) + keep = np.random.uniform(0,1,size=(FLAGS.num_experiments, FLAGS.dataset_size)) + order = keep.argsort(0) + keep = order < int(FLAGS.pkeep * FLAGS.num_experiments) + keep = np.array(keep[FLAGS.expid], dtype=bool) + else: + keep = np.random.uniform(0, 1, size=FLAGS.dataset_size) <= FLAGS.pkeep + + if FLAGS.only_subset is not None: + keep[FLAGS.only_subset:] = 0 + + xs = inputs[keep] + ys = labels[keep] + + if FLAGS.augment == 'weak': + aug = lambda x: augment(x, 4) + elif FLAGS.augment == 'mirror': + aug = lambda x: augment(x, 0) + elif FLAGS.augment == 'none': + aug = lambda x: augment(x, 0, mirror=False) + else: + raise + + train = DataSet.from_arrays(xs, ys, + augment_fn=aug) + test = DataSet.from_tfds(tfds.load(name=FLAGS.dataset, split='test', data_dir=DATA_DIR), xs.shape[1:]) + train = train.cache().shuffle(8192).repeat().parse().augment().batch(FLAGS.batch) + train = train.nchw().one_hot(nclass).prefetch(16) + test = test.cache().parse().batch(FLAGS.batch).nchw().prefetch(16) + + return train, test, xs, ys, keep, nclass + +def main(argv): + del argv + tf.config.experimental.set_visible_devices([], "GPU") + + seed = FLAGS.seed + if seed is None: + import time + seed = np.random.randint(0, 1000000000) + seed ^= int(time.time()) + + args = EasyDict(arch=FLAGS.arch, + lr=FLAGS.lr, + batch=FLAGS.batch, + weight_decay=FLAGS.weight_decay, + augment=FLAGS.augment, + seed=seed) + + + if FLAGS.tunename: + logdir = '_'.join(sorted('%s=%s' % k for k in args.items())) + elif FLAGS.expid is not None: + logdir = "experiment-%d_%d"%(FLAGS.expid,FLAGS.num_experiments) + else: + logdir = "experiment-"+str(seed) + logdir = os.path.join(FLAGS.logdir, logdir) + + if os.path.exists(os.path.join(logdir, "ckpt", "%010d.npz"%10)): + print(f"run {FLAGS.expid} already completed.") + return + else: + if os.path.exists(logdir): + print(f"deleting run {FLAGS.expid} that did not complete.") + shutil.rmtree(logdir) + + print(f"starting run {FLAGS.expid}.") + if not os.path.exists(logdir): + os.makedirs(logdir) + + train, test, xs, ys, keep, nclass = get_data(seed) + + # Define the network and train_it + tm = MemModule(network(FLAGS.arch), nclass=nclass, + mnist=FLAGS.dataset == 'mnist', + epochs=FLAGS.epochs, + expid=FLAGS.expid, + num_experiments=FLAGS.num_experiments, + pkeep=FLAGS.pkeep, + save_steps=FLAGS.save_steps, + only_subset=FLAGS.only_subset, + **args + ) + + r = {} + r.update(tm.params) + + open(os.path.join(logdir,'hparams.json'),"w").write(json.dumps(tm.params)) + np.save(os.path.join(logdir,'keep.npy'), keep) + + tm.train(FLAGS.epochs, len(xs), train, test, logdir, + save_steps=FLAGS.save_steps, patience=FLAGS.patience) + + + +if __name__ == '__main__': + flags.DEFINE_string('arch', 'cnn32-3-mean', 'Model architecture.') + flags.DEFINE_float('lr', 0.1, 'Learning rate.') + 
flags.DEFINE_string('dataset', 'cifar10', 'Dataset.')
+    flags.DEFINE_float('weight_decay', 0.0005, 'Weight decay ratio.')
+    flags.DEFINE_integer('batch', 256, 'Batch size.')
+    flags.DEFINE_integer('epochs', 501, 'Training duration in number of epochs.')
+    flags.DEFINE_string('logdir', 'experiments', 'Directory where to save checkpoints and tensorboard data.')
+    flags.DEFINE_integer('seed', None, 'Training seed.')
+    flags.DEFINE_float('pkeep', .5, 'Probability to keep examples.')
+    flags.DEFINE_integer('expid', None, 'Experiment ID.')
+    flags.DEFINE_integer('num_experiments', None, 'Number of experiments.')
+    flags.DEFINE_string('augment', 'weak', 'Augmentation type: weak, mirror, or none.')
+    flags.DEFINE_integer('only_subset', None, 'Only train on a subset of images.')
+    flags.DEFINE_integer('dataset_size', 50000, 'Number of examples to keep.')
+    flags.DEFINE_integer('eval_steps', 1, 'How often to compute eval accuracy.')
+    flags.DEFINE_integer('abort_after_epoch', None, 'Stop training early at this epoch.')
+    flags.DEFINE_integer('save_steps', 10, 'How often to save the model.')
+    flags.DEFINE_integer('patience', None, 'Early stopping after this many epochs without progress.')
+    flags.DEFINE_bool('tunename', False, 'Use tune name?')
+    app.run(main)
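
A note on the subset-selection logic documented in `get_data` above: when an experiment ID is given, the keep mask is derived from one shared random draw so that, once all `num_experiments` shadow models are trained, every example has been in the training set for exactly half of them. The following standalone sketch (not part of the patch) reuses the same NumPy calls as `train.py`; the concrete values (`num_experiments=16`, `expid=3`) are illustrative only.

```python
# Minimal sketch of the keep-mask construction from train.py's get_data().
# Constant values below are illustrative assumptions, not prescribed settings.
import numpy as np

num_experiments = 16   # mirrors --num_experiments
dataset_size = 50000   # mirrors --dataset_size
pkeep = 0.5            # mirrors --pkeep
expid = 3              # mirrors --expid for this particular shadow model

np.random.seed(0)  # fixed seed shared by all runs, so every job sees the same global draw
draws = np.random.uniform(0, 1, size=(num_experiments, dataset_size))
order = draws.argsort(0)                     # per example: a random permutation of run indices
keep = order < int(pkeep * num_experiments)  # exactly pkeep*num_experiments True entries per column
keep = np.array(keep[expid], dtype=bool)     # this run's membership mask over the 50000 examples

# Because each column contains exactly 8 True entries, after all 16 runs every
# example has been "in" for exactly half of the shadow models.
print(keep.shape, keep.mean())               # (50000,) with mean close to 0.5
```

Since every job seeds the generator with the same constant before drawing the matrix, all sixteen training runs agree on the global assignment and each simply reads off its own row, which is what makes the half-in/half-out guarantee hold without any coordination between jobs.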