From a82bbc593eb430a687b43db6506e5d33314e2490 Mon Sep 17 00:00:00 2001
From: Andreas Bulling
Date: Tue, 24 Jun 2025 08:38:09 +0200
Subject: [PATCH] initial commit

---
 data/CGRUM.mp4 | Bin 0 -> 539972 bytes
 datasets/__init__.py | 0
 datasets/avsd_dataset.py | 205 ++
 datasets/champagne_dataset.py | 279 +++
 datasets/dataloader.py | 137 +
 datasets/nextqa_dataset.py | 86 +
 datasets/pretraining.py | 156 ++
 datasets/utils.py | 83 +
 datasets/video_utils.py | 97 +
 datasets/visdial_dataset.py | 183 ++
 emergency/item.pkl | Bin 0 -> 599 bytes
 eval_visdial.py | 81 +
 eval_visdial_sentence_embeddings.py | 273 ++
 generate_parallel_avsd.sh | 71 +
 generate_parallel_nextqa.sh | 51 +
 generate_parallel_visdial.sh | 67 +
 main_stage_1.py | 177 ++
 main_stage_2.py | 186 ++
 main_stage_3.py | 185 ++
 merge_pred_avsd.py | 61 +
 merge_pred_nextqa.py | 34 +
 models/__init__.py | 0
 models/backbones/Qformer.py | 1216 +++++++++
 models/backbones/__init__.py | 0
 models/backbones/base_model.py | 247 ++
 models/backbones/beit/__init__.py | 0
 models/backbones/beit/builder.py | 107 +
 models/backbones/beit/st_beit.py | 1752 +++++++++++++
 models/backbones/bert/__init__.py | 0
 models/backbones/bert/builder.py | 71 +
 models/backbones/bert/tokenization_bert.py | 546 ++++
 models/backbones/bert/xbert.py | 2160 ++++++++++++++++
 models/backbones/blip2.py | 268 ++
 models/backbones/blip2_outputs.py | 110 +
 models/backbones/clip_vision_encoder.py | 83 +
 models/backbones/encoder_decoder/builder.py | 141 ++
 .../backbones/encoder_decoder/builder_orig.py | 65 +
 models/backbones/encoder_decoder/outputs.py | 19 +
 models/backbones/encoder_decoder/xbart.py | 2044 +++++++++++++++
 .../encoder_decoder/xbart_original.py | 1954 +++++++++++++++
 models/backbones/encoder_decoder/xflan_t5.py | 2075 ++++++++++++++++
 models/backbones/eva_vit.py | 455 ++++
 models/backbones/mini_gpt4_llama_v2.py | 895 +++++++
 models/backbones/mini_gpt4v.py | 709 ++++++
 models/backbones/mistral.py | 25 +
 models/backbones/modeling_llama_v2.py | 112 +
 models/backbones/modeling_llama_v3.py | 112 +
 models/backbones/modeling_mistral.py | 1388 +++++++++++
 models/backbones/moes.py | 287 +++
 models/backbones/moes_huggingface.py | 234 ++
 models/backbones/moes_original.py | 247 ++
 models/common/__init__.py | 0
 models/common/config.py | 474 ++++
 models/common/dist_utils.py | 203 ++
 models/common/eval_utils.py | 224 ++
 models/common/gradcam.py | 24 +
 models/common/logger.py | 195 ++
 models/common/optims.py | 119 +
 models/common/registry.py | 330 +++
 models/common/utils.py | 424 ++++
 .../VQA/PythonEvaluationTools/vqaEvalDemo.py | 89 +
 .../vqaEvaluation/__init__.py | 1 +
 .../vqaEvaluation/vqaEval.py | 192 ++
 .../VQA/PythonHelperTools/vqaDemo.py | 73 +
 .../PythonHelperTools/vqaTools/__init__.py | 1 +
 .../VQA/PythonHelperTools/vqaTools/vqa.py | 179 ++
 models/common/vqa_tools/VQA/README.md | 80 +
 models/common/vqa_tools/__init__.py | 8 +
 models/common/vqa_tools/aokvqa/LICENSE | 201 ++
 models/common/vqa_tools/aokvqa/README.md | 207 ++
 .../aokvqa/data_scripts/build_vocab.py | 45 +
 .../aokvqa/data_scripts/encode_vocab_clip.py | 26 +
 .../data_scripts/extract_bert_features.py | 50 +
 .../data_scripts/extract_clip_features.py | 51 +
 .../data_scripts/extract_resnet_features.py | 62 +
 .../common/vqa_tools/aokvqa/environment.yml | 36 +
 .../aokvqa/evaluation/eval_predictions.py | 97 +
 .../aokvqa/evaluation/load_aokvqa.py | 13 +
 .../aokvqa/evaluation/prepare_predictions.py | 31 +
 .../aokvqa/evaluation/remap_predictions.py | 44 +
 models/common/vqa_tools/aokvqa/gpt3/README.md | 14 +
 .../vqa_tools/aokvqa/gpt3/caption_inputs.py | 23 +
 .../vqa_tools/aokvqa/gpt3/query_gpt3.py | 79 +
 .../vqa_tools/aokvqa/gpt3/rationale_inputs.py | 16 +
 .../vqa_tools/aokvqa/heuristics/README.md | 11 +
 .../aokvqa/heuristics/most_common_answer.py | 39 +
 .../aokvqa/heuristics/random_unweighted.py | 38 +
 .../aokvqa/heuristics/random_weighted.py | 46 +
 models/common/vqa_tools/aokvqa/load_aokvqa.py | 13 +
 .../aokvqa/transfer_experiments/README.md | 41 +
 .../aokvqa/transfer_experiments/predict.py | 126 +
 .../aokvqa/transfer_experiments/train.py | 263 ++
 models/common/vqa_tools/vqa.py | 211 ++
 models/common/vqa_tools/vqa_eval.py | 324 +++
 models/criteria.py | 654 +++++
 models/modules/__init__.py | 0
 models/modules/temporal_modelling.py | 286 +++
 models/setup.py | 358 +++
 models/utils.py | 266 ++
 models/v2dial.py | 2213 +++++++++++++++++
 processors/__init__.py | 1 +
 processors/base_processor.py | 26 +
 processors/blip_processors.py | 214 ++
 processors/randaugment.py | 398 +++
 tasks/pre_train.py | 413 +++
 tasks/retrieval_utils.py | 435 ++++
 tasks/stage_2.py | 373 +++
 tasks/stage_3.py | 1051 ++++++++
 tokenizers/flan_t5/added_tokens.json | 109 +
 tokenizers/flan_t5/special_tokens_map.json | 74 +
 tokenizers/flan_t5/spiece.model | Bin 0 -> 791656 bytes
 tokenizers/flan_t5/tokenizer_config.json | 902 +++++++
 tokenizers/llama/added_tokens.json | 9 +
 tokenizers/llama/special_tokens_map.json | 74 +
 tokenizers/llama/tokenizer.model | Bin 0 -> 499723 bytes
 tokenizers/llama/tokenizer_config.json | 107 +
 tokenizers/mistral/added_tokens.json | 9 +
 tokenizers/mistral/special_tokens_map.json | 74 +
 tokenizers/mistral/tokenizer.model | Bin 0 -> 493443 bytes
 tokenizers/mistral/tokenizer_config.json | 106 +
 utils/__init__.py | 0
 utils/basic.py | 309 +++
 utils/dist.py | 25 +
 utils/easydict.py | 149 ++
 utils/init.py | 154 ++
 utils/logger.py | 286 +++
 utils/metrcis.py | 174 ++
 utils/optimizer.py | 35 +
 utils/scheduler.py | 240 ++
 129 files changed, 33981 insertions(+)
 create mode 100644 data/CGRUM.mp4
 create mode 100644 datasets/__init__.py
 create mode 100644 datasets/avsd_dataset.py
 create mode 100644 datasets/champagne_dataset.py
 create mode 100644 datasets/dataloader.py
 create mode 100644 datasets/nextqa_dataset.py
 create mode 100644 datasets/pretraining.py
 create mode 100644 datasets/utils.py
 create mode 100644 datasets/video_utils.py
 create mode 100644 datasets/visdial_dataset.py
 create mode 100644 emergency/item.pkl
 create mode 100644 eval_visdial.py
 create mode 100644 eval_visdial_sentence_embeddings.py
 create mode 100755 generate_parallel_avsd.sh
 create mode 100755 generate_parallel_nextqa.sh
 create mode 100755 generate_parallel_visdial.sh
 create mode 100644 main_stage_1.py
 create mode 100644 main_stage_2.py
 create mode 100644 main_stage_3.py
 create mode 100644 merge_pred_avsd.py
 create mode 100644 merge_pred_nextqa.py
 create mode 100644 models/__init__.py
 create mode 100755 models/backbones/Qformer.py
 create mode 100644 models/backbones/__init__.py
 create mode 100755 models/backbones/base_model.py
 create mode 100644 models/backbones/beit/__init__.py
 create mode 100644 models/backbones/beit/builder.py
 create mode 100644 models/backbones/beit/st_beit.py
 create mode 100644 models/backbones/bert/__init__.py
 create mode 100644 models/backbones/bert/builder.py
 create mode 100644 models/backbones/bert/tokenization_bert.py
 create mode 100644 models/backbones/bert/xbert.py
 create mode 100755 models/backbones/blip2.py
 create mode 100755 models/backbones/blip2_outputs.py
 create mode 100644 models/backbones/clip_vision_encoder.py
 create mode 100644 models/backbones/encoder_decoder/builder.py
 create mode 100644 models/backbones/encoder_decoder/builder_orig.py
 create mode 100644 models/backbones/encoder_decoder/outputs.py
 create mode 100644 models/backbones/encoder_decoder/xbart.py
 create mode 100644 models/backbones/encoder_decoder/xbart_original.py
 create mode 100644 models/backbones/encoder_decoder/xflan_t5.py
 create mode 100755 models/backbones/eva_vit.py
 create mode 100755 models/backbones/mini_gpt4_llama_v2.py
 create mode 100755 models/backbones/mini_gpt4v.py
 create mode 100644 models/backbones/mistral.py
 create mode 100644 models/backbones/modeling_llama_v2.py
 create mode 100644 models/backbones/modeling_llama_v3.py
 create mode 100644 models/backbones/modeling_mistral.py
 create mode 100644 models/backbones/moes.py
 create mode 100644 models/backbones/moes_huggingface.py
 create mode 100644 models/backbones/moes_original.py
 create mode 100755 models/common/__init__.py
 create mode 100755 models/common/config.py
 create mode 100755 models/common/dist_utils.py
 create mode 100644 models/common/eval_utils.py
 create mode 100755 models/common/gradcam.py
 create mode 100755 models/common/logger.py
 create mode 100755 models/common/optims.py
 create mode 100755 models/common/registry.py
 create mode 100755 models/common/utils.py
 create mode 100644 models/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvalDemo.py
 create mode 100644 models/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvaluation/__init__.py
 create mode 100644 models/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvaluation/vqaEval.py
 create mode 100644 models/common/vqa_tools/VQA/PythonHelperTools/vqaDemo.py
 create mode 100644 models/common/vqa_tools/VQA/PythonHelperTools/vqaTools/__init__.py
 create mode 100644 models/common/vqa_tools/VQA/PythonHelperTools/vqaTools/vqa.py
 create mode 100644 models/common/vqa_tools/VQA/README.md
 create mode 100644 models/common/vqa_tools/__init__.py
 create mode 100644 models/common/vqa_tools/aokvqa/LICENSE
 create mode 100644 models/common/vqa_tools/aokvqa/README.md
 create mode 100644 models/common/vqa_tools/aokvqa/data_scripts/build_vocab.py
 create mode 100644 models/common/vqa_tools/aokvqa/data_scripts/encode_vocab_clip.py
 create mode 100644 models/common/vqa_tools/aokvqa/data_scripts/extract_bert_features.py
 create mode 100644 models/common/vqa_tools/aokvqa/data_scripts/extract_clip_features.py
 create mode 100644 models/common/vqa_tools/aokvqa/data_scripts/extract_resnet_features.py
 create mode 100644 models/common/vqa_tools/aokvqa/environment.yml
 create mode 100644 models/common/vqa_tools/aokvqa/evaluation/eval_predictions.py
 create mode 100644 models/common/vqa_tools/aokvqa/evaluation/load_aokvqa.py
 create mode 100644 models/common/vqa_tools/aokvqa/evaluation/prepare_predictions.py
 create mode 100644 models/common/vqa_tools/aokvqa/evaluation/remap_predictions.py
 create mode 100644 models/common/vqa_tools/aokvqa/gpt3/README.md
 create mode 100644 models/common/vqa_tools/aokvqa/gpt3/caption_inputs.py
 create mode 100644 models/common/vqa_tools/aokvqa/gpt3/query_gpt3.py
 create mode 100644 models/common/vqa_tools/aokvqa/gpt3/rationale_inputs.py
 create mode 100644 models/common/vqa_tools/aokvqa/heuristics/README.md
 create mode 100644 models/common/vqa_tools/aokvqa/heuristics/most_common_answer.py
 create mode 100644 models/common/vqa_tools/aokvqa/heuristics/random_unweighted.py
 create mode 100644 models/common/vqa_tools/aokvqa/heuristics/random_weighted.py
 create mode 100644 models/common/vqa_tools/aokvqa/load_aokvqa.py
 create mode 100644 models/common/vqa_tools/aokvqa/transfer_experiments/README.md
 create mode 100644 models/common/vqa_tools/aokvqa/transfer_experiments/predict.py
 create mode 100644 models/common/vqa_tools/aokvqa/transfer_experiments/train.py
 create mode 100644 models/common/vqa_tools/vqa.py
 create mode 100644 models/common/vqa_tools/vqa_eval.py
 create mode 100644 models/criteria.py
 create mode 100644 models/modules/__init__.py
 create mode 100644 models/modules/temporal_modelling.py
 create mode 100644 models/setup.py
 create mode 100644 models/utils.py
 create mode 100644 models/v2dial.py
 create mode 100755 processors/__init__.py
 create mode 100755 processors/base_processor.py
 create mode 100755 processors/blip_processors.py
 create mode 100755 processors/randaugment.py
 create mode 100644 tasks/pre_train.py
 create mode 100644 tasks/retrieval_utils.py
 create mode 100644 tasks/stage_2.py
 create mode 100644 tasks/stage_3.py
 create mode 100644 tokenizers/flan_t5/added_tokens.json
 create mode 100644 tokenizers/flan_t5/special_tokens_map.json
 create mode 100644 tokenizers/flan_t5/spiece.model
 create mode 100644 tokenizers/flan_t5/tokenizer_config.json
 create mode 100644 tokenizers/llama/added_tokens.json
 create mode 100644 tokenizers/llama/special_tokens_map.json
 create mode 100644 tokenizers/llama/tokenizer.model
 create mode 100644 tokenizers/llama/tokenizer_config.json
 create mode 100644 tokenizers/mistral/added_tokens.json
 create mode 100644 tokenizers/mistral/special_tokens_map.json
 create mode 100644 tokenizers/mistral/tokenizer.model
 create mode 100644 tokenizers/mistral/tokenizer_config.json
 create mode 100644 utils/__init__.py
 create mode 100644 utils/basic.py
 create mode 100644 utils/dist.py
 create mode 100644 utils/easydict.py
 create mode 100644 utils/init.py
 create mode 100644 utils/logger.py
 create mode 100644 utils/metrcis.py
 create mode 100644 utils/optimizer.py
 create mode 100644 utils/scheduler.py

diff --git a/data/CGRUM.mp4 b/data/CGRUM.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..881e52aa92b72906a1ed9453643ebf3e4c0ad123
GIT binary patch
literal 539972
[base85-encoded binary patch data for data/CGRUM.mp4 (539972-byte literal) omitted]
z9RvDN$8&9z(DC`uD`8eBbsUj3mJ0%7mP@Izt6<~vC(tx~0-|97=}~wM_k0K*#b!a% zWwqYmS9fHuZ*w&5X#^nHox{Ap{>6;3tfHDPaE{27)4Qkb#e@VYi=sb9sR`x*TeLdg zhy*NX6oyfR9d(-}s8mPLe1A`rq|u_?1Vcz$(PFJkq(oPkfFC?Fl-d!_qVSoJrpb>9 z(FlJ;N%6f%8vEb9>g`9%6rT(T-eA@Fe}=F}q^=Ww4U2R`OUu3k{<14?`GfL7Kau-V z)>5v@y-uvAIUh`H96%6Yf0A#h5U=6 z7yX~0+dgw0o9c4Ot>9zK1b+{PFVgM`4Dd(ctz-{UZbB}EaRYlzte4M5SS>EcIAxTk zP@X=nMg))lHhRn!1k``Hsi-$BR$Ia zTYF%BOl*AY%PyWv=JH6IqlP%UFc9Nd-_8zg4I}i9;xdU1I32?Xe6UVdt>nIGL1JYj zc4JgQ{;jXlW%^?dy=7^_GfEd;4ZnQM{G8sT&d-?aUtFGP6=&JF-L)3hblrRmE?1Q0 z7TE^Pf|ZCFYkeYG)8jljrg<{9@~X(2@E(Wpe@dd~C=EN8 z>nAWeww7umlhqdci+PRUZ#GnT-i;Gal~j$vY){}Jj>z-da>!K@FLVgd-EhFiCh)70BkERXbttwuYmX|;q2q>-Cn z)q0m~>KPM?P#B&(r*VBdbfDUthmaD23=HvXV`o;xF!s}B|bXUEVTTc;1 zSpB#I5Ktdi`kY^j)JLh|A416?lyfVCKvFredZc(fXOt`=UyD91@rz@5(LH?DiTEv2 zEB(91&4))G)1zE7pJ1gmG?Jnrv49!p@{x~zS2;BrZY_FrJ|o+}wuH-4Qi!QQISv~k zE(Y{OhGLt3XUYi(slAXyVL^l5o>~6)X58@OH3!`o1r!Y(@cX})Ie=*YVRh<6MjoGh z&59KEOT8Sa`m2+h%*CGO*A216fKOqHr7e%?siF{nY~q0fFENYi^4C6w@8}PY+5`NbE>tN-^-3UOw*y4lP_@xKcrbd?=mU zR!jdJPBM~v(q6UuRVcj3SO}$Y->elX%d6f9RAWre_@6?@E$UM8I93C}5Gft5WY<#c zs^lobVUrp}=IJv!d{e4dKSZL4(8y1gw;;c8wC4y#`1(Kl?w>F-tG(Oz2>dz%PQ=MGfI3z7#+e@2Emyha;jeI zs>F=3E(I%zTfU0Cb^&mXd9VBUa!JDimeZU z^-S7l&?s74XJw3>|>6-%~M z^ruKhTpSLO3=450b*KXp8f26&AXYVAN|DCa_fnRIfA0J>3eBf5&SH1cf4*-DR#1z@ z!vnS*=n~qz3%{^N@~tVnfIM@~+FtrzVAT$-JUmmDUG0`xs!8Hj1jV2&*<^ad{vHbrxL>?9%-! z>r+CM8MWco+zNIkhY()XgAkZUTxQhlHs8u%qF-ZfSjGGhQ(0mWw;g&|x=R`)psR|1 z1hzD@RU16iXEGlYs}$y#ce=Qum2^J3JZHWH_c=*}(R9(Lsn*~Er?YRIKt~&^Rw@RK zGbmGm;AV<8lSnIF?~)n7>UN+hOIfB(yvL zIlAVe`1(SKo%Gq~3md->cs}YThp2f4cI1}=XcMgdMfIWQ&sEj@@lr-be{NQV<(nr07_drE*j9Szna$bBPB=Li8lD@=QJ zrh35rJemka=tdk{1{v-BK486au}UkhvL<@9zE6`+R#STj3*9(O*Kf|Qj#>tTt%hy2 z=ZWsNlleoO4n0Ue_g2hWywF3mnB>8`IQ!?{>|R-l(+emj*|uT4^Y6Ie_nwupr|D4%o}s_yALapDAD#5^(Pm!;%N|wCi-e}=6zng- zs6$eV5V%VzjOX84t}?c)$4OSH+JA6$vx0sK|5!Pkum{(n&>!UzSGN1&Gc-e6h%0@M zi+Ozj<)Pfm}C^_;GF(;R^Fj6>sg9oSN4;{m>V=3!3|GG!fBRkm#i&Nn!#EY zRO?Z<_Ka-r)g{i_xe@VKhxO2c6hF?A3Qv&N{G3D*6lT)CQe3znQ`J5iM3kf6tlmXJ(zew>5X~6|}KN{ywGM zosOK{)XYvJfOGI}&ku(m>1a-lnM73*5kI1{oMEI5fTVP{{^o&K(RPby3<=EQ&-ned z2BrQV_iy|T;mP&2eQrD6+_yydC)av9_vHZ2?Sq6EYLTsHY#*PJq+gc z@8fP?Us>15JBzIJfXPDnuO6aS9?0B2aoNB{%r2Vhz7m>Mr1U_k4W)+$=92%g8yR9c zv?30ZFNHGP!wbyS>_!F{FbMc6yqW%N(N$Cp2q58O_^-J&p*=$$v>~R((et^L-c>x zxOLB$?IlkAKRp5Cp0Rg{mS79t!0oLe4!cQ3n4yiwUJT(E9TaFa#$H+)sj|TlvC8^U zCa`taCrazQ_`0t9Q6kP{ty(qrh6lHYICS3LWB@_@B*&VnVj@Vs^np9wf(gg56FhR;5b@1D_M}3BR-b2*8Q4A#9T~6in_j z0;txCr<0QN-J|2{ex3QL#6FVyuO1-4nu0=VPer%k zQN@LPXI1~hzTE&7vK=DzX8R7a))4j!!^+rq*xBEA)wW$^?!UCVJUzBlXH+ka_ulAU zJ#w_28Yq!;moCTqd$XbvA5QSb)~hSv-4`15)!$7XIov=M?U}_GHj{JRH8j;oQmhMC z&@x#1YgxCKdRXG}qz08yN`adc@-MSHO)`g~$3>t0qg$y3$3fz-Jjfr~%3RvIT+`!j zcrJ`>xBsI0nY~_v+AlZy;wISeAyzrb7PQLn{#ie>f!=@pcLfW`~2RBc@V;_ zDE%CM+!{x{x1EjE9}EV>Z7$F~HFY4_ymsNfE+)JYUHy3VX8T2mFG>e~3$$I~9@RZ3 zfk{H$oB-4^;Xz^>nZ_{%djF!DVj{SdYf7|6*V)!O`;_p0uo?)!v~e;!?w`=b1jTlQ zvwK+4S_6D6nMhwv_ShcZwFCKq;Ep5}ec_~5IKYY2WbaE0%G)0ydd8Qek#gW&T4Fkw zm1RPEev~5B-;#~J6k4p#whs^iO-+jTjm!W^JGWAHc7<=A^gedVI`#*(U#rjqSvUR2~XB9xu@GN=M6K#TiAk2qPddr@xzp*>9@lqH*V z$(c<`yd%L~-cp{zBJ)obL;^7^*D{!FiA*2-@JcH1K(or_IFWEi$zh|4P#6&cpX4&L75(p4QKj@zyJO3397_@#-rc&`aWsjZ|xh)MqrqdTRV|j-r9&wOkIet zOkjYa2n$*-3LXj6m@zzD?s_RGSafR|Hu-8y7zV5uI{5hf5wbCQ8h}iFUdW=7T#FV` zWb@R=6J!^`Np?KWMGk|2;MK7tiIdbz7YSBiHY5GN!D)7A>4z>wjnQ7&2Ci=yjaKZf zBFN>y;2W+vfnK#7Ektkz^t%`W1B#)m$!;xs%d+@n7e00r`7oC`cG9}U3WYN1FZ5g6 zV~n}kn_lN`8}xI~?ick3HGVW__}6C~aaplp^NR5qUq%kuZ-7<-($#k*>E^pXSA;0;9re#{*294% zwpIY{UMGb2S{i2rJ{|K%HuN%p)vb>!^-fX<6_&&XW3A^}x*EUHv;RLB7@ 
zynx-Lfl?rK6t0(cZ~6T;Sl~;&zzR02W*$F6A}ab=sRxg!`;tuQUl`P(5j8yD`qY?e z5fwo}AtyJxUJi~s(U&>mP$WQ(ae3Sqyd~GmAd*NrwHP&R5z(ePKdq8(<(?w{zb!sW z(;KSiSvD||81FR}EYkoOoNu;F`a6rpEQknJEz;{_jn*t+^pRkT`J4sA=nw*-7U~Ik0U~2~)7jC2rz_7g?4bMzbrj`r=yC_++FsM{!lyM*2g7)eJQ_ zg2l~6nLxgPG=wy_X*96$cBt0CQS5%9cE*X=u>QoA zp9&lMfE5IXwz$a`&^6#T1a8cHbWNyvhH=J3E$&O2KGn(+vDt0IMNWXAv& zi<~?V`!iXnN&fNy$F>S`AzJV~03R@uht$V?|i_rG#^YTmsnurr=I6k_0Su27gj z?W;bOh+E3eN5l$x4gOarSD><8Nx&80qt)dzZd!aM*HAMPD$vS3;d0TsALZE~;2w;1 zMf34}Y3Yc|t{X%YdTs^eFH*&-{E-?(s9ZG&{Ng+gqqX|bn4#zB6sOB9vxxhhZgxcg zl=8I{_4`Tgb**eF7qk^2mnEIRzGM8sk62pIqZ4yTu=5|tM8|`G?l*T0Fv7(Ttid!% zOcdmu_F)S_k{bz-`~cqf8YQ44#x*$8>2D04tBprFioDeoLZC7a!8!;4B2If3u#)2w zzU_sA0_{o(_Vn)Ne<d#hmsa#%uw8hl@W1OmkS zDZ)Il+>QtGy{?q$UM~l9!4h=C^d`VDyu_V+gKJs<4U9lR+Gav$a{PPvI=X z?O@NA@4qLCLdMmpi!<1Eu%*EcmxlB6WZf9CFysaH{U z^f~7Z09+bv1Fu+Au&dBx zqn7bC{*F;tRzZbLzs5gC!rdkiM{TtBXaA$4e%b+gT_D`hD}(GWI0}@M8fOAa4l4gIaDZ^r}>IQ zqNPiu@|xSiQ;FM#WR0w779X}6!w%qDA>Dp#L!y!)9LT)K(PdxaJCEO)volYjGR>zD z)$w-QKVhhR{r*^ZqK82CeUk~Ay)^ecDOVx{Q=Bnz!0A;fHQUo`CQ9lj*TuhbAhD zaGN%Z7ev;I=m*U^;$$gqNCm&yE)IH2Dc|_^Z=8bv4-A{B(}^|f*`UX5)=pwrr=G73 z&+%uGuW1p6*OlTD26R!etC%mmc~gk^ZZttZW`@1|uJl1+S13|wmebO->oBFE@)*!0 z8wHJJzRM?6Uo6A*0B9<=riP#a*e4OanX0DsE}%h;gnLtM0jXy=Dzg$IZMRPf^!bgE znoxM*E`%HLGG9%p6EXacXr54aO+n2`Q{uW0^WUDx;iwCU+Co@3Zg`0bRqoQKju&W3 z;Y(^l#t_27ls(7LCyt&-pBX?K1DmGHM&_}lndS=mEYK$jB5$r7hwyp_1v#>Lr4YM`(?_t3@0~)}#?L0et#F6)1A{8@|ckMrpy6 z`qnp0<{Xj*K!fXg1lzA$MIOUul7IOBCT{EGi0o_8`{>O9#wiVsX~od^J&IddpnJ?p z_VXOg7)fuk2U!e~?*xMtFd2{EZ{H>zNeFup6+kqZ<`eaBX1+p#Y zyCTX{EZ|zSK!QjEz{b4ga^Dhh!p#`QY5NcrU@Xnf=Z4QkS3kE${8}U-bFJTJjG@y& zdFG=NFrMvgtkd9T2>_4Q$T%fyJEsRO1vwwf#An@lAp1n_y@liIHIuGArC`Ki#A8~U z-|$9YH2ilXdL#YgT*O=dIrUJAgA*|!4v5M}$*>VW4;xx0&gQ3)l#t(S7GF9h(IPcZ zR)Z^;PRUcCFwAeWq6K3fSj_LSHPSn&&fhXGQEXf|ZzUhQ=8!%6R0B|cp(3Nu%@wt> zw-EB+oR&VeF^4Q_urU3`Y?tlS9c*V-G_{9ZyUrP`0DLHEfOnz|9Y(UQ{(j3N8}j z?3S1$n+}_;rc!0G-o_I8VF+OnKB4f1o4_kDINI(xu>?4jY%E^aAE}hsphHZFCZ)5m zi`DLFoVnSNj;$_~PwHCwYj{{XREH65KEG>ix}_x!Pd0MlHV>*=*~+SEkZ$lEsA_!A zY(yYQc;tn5iLlIw01|^PpkvZL!8MIB9*{X5H3CN9O~1_OHe5q2S`Xw z$;l}wx6EuELP^T>n>n`sp}Xq*b86_(@>n!2i>;#eB-o+r_OOlQJU*Rq!l{;FCwqlR zen*Sk81)2>f124QE`jVE z3uHm4{TNKMfC}ek+1T=(d5lP{N2}&C=^r-s>2oWQ{Y%@{Y_)oERFA?iXMoDw7I44q z+uGXiJUeuuH1a7(Z*~nfF(ant3`Eq0m9PjNL>%EdNLm*`w^bxJiqbLN*of=c9v9^M!kuWU_4w8BvGYmUl%0YLte{{h4})>a6U=Z zN+C4Q+Ly-hab%z~BjYxI02GsQAjm2sEO2j;<+!SfkG0h^9H&~ub5@kARC<41P9gZe zKdta5m}^CvkvEB$5(Ii7%<3yk0Zl&J)wm;9$P=gO=2RPTB$i9St)y!2o3?8){0qxz zzs&_;!w5F+GK3OiMWfK`{T=!Y{r@KL?_Qy}Z}s2aGQfHB@ z78>3l3p!nFF#0$D6m)vMv#801S3!`{FUHOwsei7OC*P40@gr<=@fICi(;AK@dv5RD z-wTT-77hd9YVwyIKy`o5@$8PG@kjnNNRfYOR$4o5>(MODZ_PzjQ~$TDVoAC zvZ*5rW^P;427`I-2PMhj;KbibzPmWIa;Q@dfkb~AWCHg)bm{h#;CA|F{je3{?j#Gb z0!?Z0m^n;k2GF=8Qf=N)#(@dWs@}>8&0f++{SmFV`$x}TJ{DAra-yU{W2(axbYUpY zeMM}BDp=2ip(^`!D4-1s7+p5>UWO_?9mHt4S&cLkj&@_?@#y+*ulKLA?*4rOF?mEx z&T;(|c7~Z&#hbA1xSW4xMmP{D7-Lg44EnB6I0w3Bc)V)f^qFMT@BoEx8VlOV&G?Nq z=T9B|i&Os`mNWkmo_BN70v#t8UzIrplNotX(|vZ+3+)m~W>b5wUTHp<0J$TP@?%Ir z{COGgoS>8EpXIOJ174m52O9R0YiaWqpbcNUxHo88w+uD{1EMM@lT~)VF-3W6fwZ%! 
z)RH$ZlA*)g`?m5W07YpiSMgueqX`-gSf56 zA2$i?IZk2zyS8fJsr%*JL#8JuPV-E8_a6L4P)*l?F!xPmNwnvkCx6hc8$9+Ii1Xw1 z%DuFd`>+eKSXAV~+K^vpZQxnr!H&xHm5tY5l-R`?OKvDFq6nAgfvAtn$Fii=h`NvH zBQG>$Tb^lId0>|R1GQ$SD8?eG{mH7GMZc~QU;~YF^L0)?$V0vq;YTWsy84}D?0NXAUiw0rld4>-9Uh%8A2UoKmnzoS!wid7x=VDlhHR!v`Z^VOOQAU1DC1 z&9XV0)uQeFr)YrMQN4ubUb_TQc%uZ2z_PZBW#NwQxPuTjrH%` ze+o7yVjeYf3qn4Uh;6bnE7e5*_oJ)kP+w{sn(!0*n*aT(s`#g19YW-k*%adM3~-< z#dc+fl&+VzuZ4*1BN6g}JXfckDjpdK!z+Oea-OG9;#peY`maONm0;_N2g>r2t+7EurFe*J}_Jv37leVc;9(l?zq?&`g zch|&&VK&LtTc!Z$Jnl7Knz{1w+T^v>4gA1j0|MjkUd_~YWNBAk7C&&xhgrIparpTuOqqy&m?SBqXH zqsl*%%FVMmdf76St_DWA`a5k1rov z4*Ik*09}X)bFaSiC2YqFm@3}SjK&_Xp(o+SgdlYY1g^f3N1qk@!QMRou%2+)_t2nSlu^17tDIlD~nY=X-RZjb+vR!W#@%Ki~M) zg>HfB-=?tJvakrxEhV}`v8qy1ntHcf9B*^XUK-=)sKGMW+8h@UTvA1G>&AsxDHB?( z;5#iRu}x(fG5=N12OklhvPis2Tp%{9PeUxvR?j|-q?(Rib|e5+U1!8# z^{x2g^lqkt^~UES-Q>~EX$<4!!&|B&cwlMQLFU6Qp7m@^!GL2f8QlnPWP{7pxA?f+ z!AlWkRQgU>ATOL3t%A+MrpTnqhR~gM&~>y?lDiVK$kC$0e87DOxrkbF9!4kKRIE+a zKGazNEArW95yNr)sL}#(J2(>w*p!+5F6j+06kE&R1L_1Ik2@MM3%rQ-S7mdD5H^f)0v&g7Vi9?JPT9XS$uE6gzLL)W!HoYl|4- zD-%ml2tX9UC%Nl{;L0lTY0rUU;xj(-Zg}CJ%9hr`dW_XwQ@coyRh z6ryU{rW`x~{(Vz5yEDZsWbQhgh2Ir~uuv+?^3~2W(PctpK!6-xKb#GC*_x~QrbE7f zdDJ=rOM*o`9X1j}={Hso@gQ#bD|2~F{Ad-EcC9c4{PcI!Aapl1ow#%5Q7T!L6`ln< z+f8u)ZwG%Z+jV^}<@M*Kf!+sYqo(fo7Z+!-j~nE{mSY)YC}jl_qKlVO#fj7WAVJRi zVc{YphgdJQ*o_E+WirE0I*mEGHxfz|;>H?KC&^%M+1QN95H!2eMe&T)x_ED~;UBQ= zz?|5l_+awGVQzALyDsQ+r8$I2Bg$eT@ic!Ci#!$rP_+r&Q#Luzwbq37d?K3PjNto^ z@K)zsF+5CwxsP7$;AV1M;UL~MZT7%~1*f34z;}%6tPfy8iV~OB5+R+S$CpjI*`PRB zhR_;yw>0l%FDbzGXYL);dsFWb-GR)Nor#V;C+_qD5iwCT_XaAho_8^bY1Wo{dk>6v zSwm%Mv$I{32n>oHT;D=yXAIl>lg0kFWzs+}2d!HYI>wE%sgc5}&~u(7`NuJ;zWzkB zWC3jI2S|bIizZRwu8abA8d;|PE`ka4aB;yK&HOY)a>Fw>JGt}-?4hNGquCY#RSzkma+elEA)H>$Cx4`qI4IAr$BR_S zDPJAoy#(?vJn2Ni$|ty`baLTZ7U*z7GzVKk3;RSgtR`0mOH-EV-}(WK_|DnQnf4&aGx|Z(7tx8rG zyywAV4St!t#aWoaC92BW&62dUY>rP~L~g~;$0E`NK=zR(1q4d#TYcfbL#&%W%CdRM z;!#G?>gAY&ak_bzz=iQjIy^O8)=k4Uj;>?7v)x0ikvk9 z2gd>HPX(umtbMZTDl4x6=oCcjh=fxLYL3z!_fIN#t`rt%-Ao_OQPk{~Q77>tD40l? zh!CZDqDek}+7zaF_yD?}gAL}XJpM2w%A@6JJ~qF*oWR(-=wTW1{4shRbSCGpU4b_P z<#Vqjn#iZCk}~l>5XJ_`p9?WNiLDv0LmGay+63bB18xvIw}QcNQH6Z7GRrc4Un=`4LqHWBwK2Q+nxiqM z8J%^m9tMX?g3{aFA9S$oSk6Anl*Hy|1kks8g=nSOn?FXqF%u9VNR-GeFAwJ-OA3eeJy}1Z@dZzIP=M*mxx2907XEE0$@V9a@%BP)1|gMg1fw-%q)qNsySSU z>tNbT#3LkAF6a3BcOi+SO8ZM}FbBlQ`clcLnq_>_`d9cgouI9?zfk4QiVGBy9-$#a| z316y9o?1oVbT}~(-(5`Z!e{U{S-~Y1@T#<<(z$8f{Rg#Bkf2)!gb;VBS6=v@{UhJtvb#7?jiq?{zb60IegowdVg(>m*o(i!HbyfaO^bo;Q0$GywF9sLAlz^6)9kGdoF;I+T(_M~86 z=%z%-h{2!HKLY$P*QTDTIXBTx+RXF%U&h-YeS4 zjhyaWdzzH$$ zG=}KN!5>YdG-_!M=kE?3B<-5ZmCpP=>;^6QHRh%FAcV(=0UzV>6;OWWeLDU8GtuwE z*W%P}@36Fe2D(3_mo-v>eXZ)c{fb*O>m7E@+l*nkf{BYEx#(ew!6abi55PNhb$?hb+6S50$( zc-Bt{I@FGcs~(d$ze`_2t7;T0F6OC09C69b+kjBP=vJzv?h<(^Jo%wWVy2fcr7@_m zUF~JbJ&X{xdR+&J5u1lFnrMD^WsDpwL<>oPH-O?g!+w&*Y_}8Y!KwBF>C zuRsHb#~v!gIf|WUy^{MAQe1OVMba>cZ{BnkNh?4wB$VhHEJ+)n16uQNd3VXz9#RHg z1BI>3Rg1)tXi*(>NjVLV!=!^q9Io%MB@aFBs(ZZ-Iw&3t+M{zNFHo9PAntD^ZtShs zY!0$#^jXQWqjfX24hbv=I0m&iKL||Y0D2QBsRD7`97`cWyS3G(mcuGFgfpni+;~3UE&O77+kZ^w$G&*7N zH=5z@FsYno&Bsum6|Ey9tFSU&rPz0h(^zQjVDtXE3DGya;GuDzs`qCyQ>a)7^K`h| z!g2K-RlOxI51=B+Qz|URT~j0&+@Ag`wy>T0Nx)$pi}e+={xUmlcf|62QCtHQntw|mx{HQenozN5330YxkJ{*4FkqT;zv5d|eW1MwkLd*$l{U)` zR3k2VaAv}+@q_cNfCLMd3?QYU7b#s7)eMrnQ{z!K*kBd8ZQP%6=dOUm`JxfBWeucE z!a2lqo)u~n0SR}(qqD5Hpa^5!G|sIMfsc19<%{*W5Gi3v?jDq;C%`BAuBi_)l}MKY z^e3v-Rb?m_*TmtA#%0ftBTTpvf2_tDdsA>w7j69S(pb+T`Km#B2$Moz2 z^edANoaPOat+#Se$qg4PBXwD6QS(c;`ri8>u^^@u(IPIp#Q@O75gRmYFru7_U%rvf zoXdBE`p3-J-#T6MAcp$*cGj#? 
zuGKAWkXj-bz&%=Fh!-AMARtCtGQ(1UiEKs}L}jARwURLb&a+EA11 z8M0PXp!3m*#ucta%Yt=MFxmOulUZn$I_8q%G+tcbNp{rk1nDtx1|sSB*O5vnoTcI3 z_*sssLW91}pn|M@rK^)_)`KhR*Fwzle>)8|Qi%IotW_8z8LrTpiW6Xaa zK9F@ZUfOt}2o{fNWUKTfAV0-F#wv03=RQyN#(H5@+u(rxEF-SjceL%QWu+oB&!Wg5 zrX*;2T~A$=q>yfVBU0_RwBY|D(i?seehQTT$1g;j zWOL7nvfBIP=K~N{h$A3gtEQ|v(31B=q6XhpnP#I#TGm~rdIFw7b5uul+0wolG(FFY zi1ROkcML}yvZwx^Be&1#aSTz`Sj9e5Wp&O)66NRxE_(1hVR$X#ARO(sjKYotL=x-S z8uNnH+z0m}lV|b3xb$Yg@3hD~LcD&mWmt%?R|hkCSXS{TWr)$Wg{ZbkU+guBnR$^tz&YY-@wwc}{EjGpXJ$ABb4HqIv{5K0mGadq zWZ*E1wFXcjf0?YG(MqF)QYO!~=PK6!$J9Bt2Nq>ZIJRwfY#SZhwr$(CZQHhO+ji2i zZe~8*XMV&vReP;k^?H}m(a~Z0N{26ihrb(e_5PK=nPaqZDZ*2GP;V z-y}9accWn2X#-T7bPbP6vB^9lglMlIet5fRydRlPBZ0EP|2;B#>x$H~w}`mVF~ixr zr;W3MGUEMwb1)0@u;m$#qD^GpNvqjzafXm%p|8lQD@)Mstl>h;NNerJ<$R^S8gIPB z0(Gv0?LT#XU1HD!K<_50G>i#tYW;zhBZ_&bP(D@_*Tcg3tm7J?CeUI2zWB2^^%vU& z*a%7{`_j9{RS{FZQo9aN9z?mOn@A91&PNRKrmhWHhNAtXo)p;(K=TchFAhN8f!$vv zc){{-f}A7LSZS5yckZYqt?XUTbIbZ!23H)R8(FdZU@k*%$zgRUfY+VW1ml^}&`MLn zB$+X(^(>9rCd^l#i5VbjkPFjw5F}xaDkx*-L-}kzAET}NW3Jxt(twccn00kJr_4=P z_M5Y6ScY(=Bgz-tLQm*&+n1l(ors>e5$-3TnQtM2jVzZ%idmg-mG11gSYh%I1)o4P z+n?CSr_X@(yY@)G#1{E8lrU?Z@LYI>noKHoYk$4qnl-4K9Gi^Jbr?ENqbV7jJk3}D z3{TkJK)6Ox9G?hVoQ<_2e>;MgfFg6Gg9&fIB_cA8l6<%93GzJP81#gv2@w%AZo0%< zHx(9%r2Q6OVVHSW=uizu02CfUU``(hPR1Bl^MQOb@rQ5T)n->Jq>8|952g0M`|&fC z!U{U&%L1UYPH%{S2ckYZYeyWcNSQsTw-G2-=f>HK_y{z<>F2CNQHU!%fP#!%Ck%Qc zZFL@-l49_$5-KjHOBju%e#(^s(LpQSdWBTtW&7RVYWPcL+LxfqEJamt4?OAXyG*Vg zvC~yp6dUu`IbijM^(hnq&Pnq0w_Oc0-J&_W=##CB9hAQD|X zxEU>9`>t#_$%`1EpmCQfCl~~@P(r}y%Pz6;eh(KT_-9k1J2Ww;j11pVIWc%8j> zQKW=bp!7s@aZIypeJAx!0>z2ouW*aYVoa#2`umF+#S4&+=1XL50UEvsUr?{*bBeZK zEs&}d&!YJeiPDDMM;Nl+Q-tSocC5FGShzb;Tje~~UZ)=&WF{=^#T=`6m%s$>Uuhxw zqCcUEc*a+Voa(XKRJ)?UZTrep&I2hM^{P1wdT}HdIy9))zK?u4U9(p28!^s}l_v@`HpMW^=!iCjDPt1s{8)mO$qE zB}2)$T9ayv+YR>nb>~#~mS*6~Hns(HTXkSOD z{P>TMsm$hr1{2zg&i*2osfoZEBchW7UmlfDH5K?53#@ke9qA3}u=oFHr=PyBHXF@j&ok}qES+T%8 zgn#8Rq$P8!3wPXFCm#*826a^ONP{`rJE!cn$8p6P^57Qa1gq-B z5Qw`%2@+Bp=wq)`&o?v;PqX=2`_X8OnP{jQB| zxIGo>wR0cM{#U9%emNNbBUQiQ1l>(2r@7?yNi4aII|nV`iUd^F-<$El&#lE?#rP)z zj_h7e4&!Y>?y^RE@pbQ54$x(H8~YK9e4=8h)ZW}IrYci$e7ub9A(3@*u8Rs6$EPX` zCbaVa=I#eG2BiA}Jz6IBHJprgG8@iZS{WP33SQGPGWi+xGGWi?<@w8)*Q6 z5xORrSTy+?UDqjR6$=v*cPP8UMQdwHGD4GR;OL3b=7cowb=&UOv4o)TlI+!5Yi6Ix zZCgH7AhcQxrHZo&{G4!-75A-Ccuty6Ppa$$wKSmarUd4T-wEaSzILQ1*i1OkLMZr-aqMz zyEpO^j%tOH{SM|=)VYJ`(3&;ZZ|6Zr4GiLzx_6Kb^w2B=8bf#Ps$zdRKv@Ia1eGPns2G$ zaeXK&)ghbFb6J-j*eF+xCKUx<#7Jpus3!(TR@A0QCu|WmbKHYufv~7)qapgN!%73t z?zQ;`0TL)JC?j?D^Emj1kY{*1JXO#Pd~*B?`>-F*)*vYv_M5ROO=M#(S_`EYUBh+3 zzb=4Zp3iQi%+TnVaP!;?e9eXD(8S9Quk1R@H8?Q1E(SrGK7y|i#uSdqI75L==e18h zo1kZ!7(HIOn#qy;8ug=GqDc9|N;kC0?P0vXmL<+^FmMcrvhjmw zcZ%neX?{2_Xt@v;F`pA#T5RzGp=A}m?ls;TE$x*grFgc=r7H#vLCkFBTdzjLtJ0}U zQf+@FrfiYSR};Uc0o6oV;&bc91zEwXSUl0@dIM{K{0qIUFcM#L(fK5@JTWgbaE_;f zFY}Nw2kehnv4Pt3Kwq4Dd=Agfie0qy;yHreZ>u3`gdXetQ<@ildK{3WYmC`F2|4m_ ztnw53v96sTM5CH@4h|xx@`hEcCfifI9c0O6qMjg5u@B6Fj8&k{T{ZRnUCNaCQg(k3 zc%0$$Ca7Zd(PWcmvv>^P=!`@HiB2rLj&@%2Qao< zsNvM`Sg9Pzj=|w4a!TdzTxVNMcR=1eY+!aCteq7AreL|t(H3u-=x-~f>;o)I`6?4< zH|Q+GBFhjzPCw?y5wS9UqOht?#}x{Sb?vj`+(C6L4v>~hZud!t48AOAtIpm{P#Q84 zLD<{aVSUf=4K8PBY0+94wT8UvQ<_a&Zf29CO_geMksl|5C?hQ_9K-6OMJHTP16v2j zab~dWLK#?gN+KCK0y}MfBJFQdPw(A?d=TZGVY2XjZ!&EpANWHH6u0QqVs~!{KpBS! 
zXO%c_N+3Hdnv!$2kfj6f6qyo7c(g3TID-wcwaF;rR%NvCR3toXH_9+|nU#Z{^EOr* zD!g!`{Oh-=4ix$qHS!-x8DEs=S6uCaX~BGDdQiVt2W7`P<+v?}J;yfP0KvR4E`=ewQ!u8E5HV^^>0Z7< zsu1l=%;x4!lBnXv1(fG9yI_L6*Io(jqPdVLE)M;vX=;J2!k!APCDD((OERHHXjCQ` z$i3*9Q?B+rg@%3JysMG^>(O<_-QilTOGx^S!O@m|oPmeL?at5IWUpF@n0p7D`If^>ioh*b`ME zX=$Bq>L7(F;LXD1O=RU`y6uX6pgQIBhV^(Q%k#O$@{_cLtV_iy&xxz#8{F#)Hx_d= z*?pTF6Mw)+^1kx6JNKvnCy}F_V6v$gEv?SQKoklHoD2Wi0+To19>v4uGA_<$;*WUfhzF53rY7aOjxfI#Hjfsq+M=0)Z)3BrM3XjB z`8>vChfaNpu38vXH@n=H{?z#|<1L0<6=~k7S`RTI03#xr&Dj~q)e)04zhW$+>B9PR zS8=DgYT-ZL*+V%n;+=|YdMK?-O=@6oABAzG#qO|(kiMS(#z`7Z*&~NXD3-tJ8nbrX zYYE0CwJ6Fhsezg~A&4k%RS~1tt7LH&o!HV`Z#mmFl9n=b?ysKAqC{CAiP2VemFarB zV23*OTtxjJ-*Y@ciEWQF1{&&gv>UWKCASV#NLj?$cS6rxq`rtDKpDnq<}vq{a29D# z?#3N-+t5=eQyGadE>xR>I;s9)Uv z`PoNmlU<9AIXPbHv5nliqfWC*O(N6o z{q!g$KjJ){jXHZ_3NDRQ4c@3Oi;>iwV5(c%;WwAJVv7@yErI6i3yA4od%||#9C2@c z#fi6F1aPOLR^3}M&`2HfoQ!6ZVa)G;!x`RaD0`HijW(qG@$B^9otTp{zQhaMQ>*>g z5Adwtl>v8z8Ee1ThF6~Rl0MBI|1t#N6lH34iQ+c30dVEdg^Q=^-86y`A?TsQeNH4RQ1sH>AbXSPPWI-{kdC;>7ZUZ@ zP*7a;`9Yq^B|u|gF_FXN%$F)S6O7XGQ;C=rT{`}9ATWMLjWszmt! zaJ2Ig!6>(+)$^mHy^SdF41{?gBh3OSL5ovwKFI8rYp#;WyS+pw2H02X=<464$Kooq zTuRkcCvBASa&3nj54P2NFjy4=KmUy(*^{$9J8AZAhv@m){^?gHW z#pKn*gmvOxh_zC;`Y!TZ_ppvB>JTD>TF+A?XnDa0)Csmvneg>kdrmpTSdvL`D&5mg zp_r}Qi-tvlAu#JCS|?qL6%FJp>Xe=k-<~1}DweUqSag!yurRutM~_#~2qEYDm5_!1 zp|Bi(c4f_=;GJ1X!_M}$NHYtOa_!`R$Bu-8%G|J$(l2t1?!8qBR;4X&9YW*U78Hw( zoY3p-e@ipQ(d`DsTIWpZ3)}OOvn2ZNL?|I) zZvspPr4Ntr*u$bOcp9Xaa3vz)U~oC$#W?@;Lrj*U*NnxqD;dP&e*?V??SoMw`E(^) zd*67rnN#jGo&%RoRtLO5wmB+L;aH1<2kNeG0Znm`fB!PU_1^9aq1Y z5bU^(ZO9L|BDx{Oc|l*rW|g@m7M)d*2^Wb(Wt||Jv=%mTu|Gs0J&gbHUk+rL;R59u*g3+x zg>a`g*LkFnGvj_ut=nH@UNFTd!2{$bhCSY02M}Moh3x2ZT~R~3=j=W?-S;)o&#&wT z1E&}i7t_ttD@IWk8DNlR)FO}mN$su zN(zb3kIYZd8h?CQwp0>S`F+z__C$*pt{}B5DkKgZOo%wXi%3_s`pOIZ5>GL@t(`RW zC1*>AlZGc9=aYA2vQX1Nnk`V`&YG{q<{FHY#EEZ+X6+dv3MY$aDj{KFWjp*qxlyWNIxgw__5n$4zdT1 zmOJ;^UF(MXG$*Fe17m%Va~ZJtJvw!3vLn}=n}Wq7>nG&LEFtxtLr~YYS}k?vOq1#S z_!9(Y2sp{Si*69sz#_7->hffkmO!$;1xEAa#Jx@RKpOj;u{sF(G~eA@i;ji~zMSW2 z>W(@k+CS=Zt|_m@B0npwhGc7wuSmQz{nU-lf#%&*#FFis2jZk*?*Nd7etS_J*JY06 zE(z}y#o8RF!nhu&mXk2$V6l51dt?O=fm1r4B~NSSPF>X!-o~c&=;qIjgZbk9c3ela zTWbkys440$FU0dWxQasU^4GvN$L3*hkjs~5z@N>#uiw%Ul82=3B0UC=KBZmpm;Ays zwosZ=+wS@t%2pXaKb>@2-sQ%j))ql!3H}dY+b;@FWDnX2 z#!zr+X-||l^#r*Ej9|kOb1-O&7>Z=DyG{yPZt(xXrz!_=@?fk!u9nq;6@o`3OaQ3$ zD4i|BdH0*sxtUc_O}$wlOwjcLXz=Rh1^|j&!U6I8sMB|0<5l!IvB_2v^Q@8%Y@Jaa z!Gnw;0@Vot)I%6(N3zR244>lbDCwkA%p?3&Lc#xFZhVz(Y)B+GJK8+EgCC(5)rlL) zq@n(G6QRvZv>y7*t!b!c8jSk8Md9yWhKYCDVr^j`{iWJPv6qJ<1Cb1B>GOCqgZ96c zL+yC=iXBM-r180{3O8RfoX3ZTE9RMQzW0GND~4!o#tgokiXDa+!tlcB;D6Q`Q#c_C ze}5!L1qtGBV3F|;G~z1lEGJ!)tJ$*;a%w!FUXV_*cYmVP4Kz7F#8A&EO~w$VC#K0_ zZ@%rI0U|%%h^jS*I4TM3;OV?NQAiqkrJpl&v3R|ql3mzsUxqLMs5D8W z62qjKEDA2QmIZ;SBraV2vL_flYSBol8bzAb0ajZ8*S2ATUzg)<`W45c|0m*b=jKus zCV3>gTCIQjo76^G+Ul8nQ)fb|HG7Y$A5+jWiMcPb3GHF^N2h!Qz1EScv(qAV5R<19 zu$Vu%K*kGR!anUJa`qIO8oVlwMRk;E1sxCMWZdN5W8VjIX?_0ICNE~`;=|+kIpX-s zqi4B1-n6T++I^(A-A!f|{#`2;w6@GWP>GcVlmvti(>L^xQaVM$l$6ruCh7L zxZsxXziI{CYRtj68=dh}dLYd**wZ~1vxR^b`?Q%-Eo<*Ga$HSl?f&WVG0yRVb+3Ty zlO&gm^Es^V%24wyo|uK6Bk)b1z(KVu^apX~7#!M{WOG8wHR44R7PB9y=MSfF|GB!y zLjh4W-n=2Zq`!`-8nr%-#ALYU1023+XwkRaM>g~B6&#A|*r+*ExxRwsny(T(IY8i1 z93q>DdhG43b;@Uzbb2BQp1q-*tzRY_en{|~awMO6sECz_UOy5SK={)6_saJLUqUu$ z_|uR#M5J`82(YbF6#3g*NF^A!HC2PA-Urww9b+R7KK<+87$oWuj}&v4pDU%5m6JIgi5KtQ31z+H`>|4#qYd@EOt1=gQQr_-N$ z!jVm5ce*mn%gji|2*BcatZaU-vj{rWT+<|4lV-g#V#}A2;y-Um{%Brb??>P3keE2d zr;TT1mIcyRXQS%UpYGN)q$pSX7c7heKkp*tHkrLF2hTPCii37;VV0qotJSH_Jy2bZ zC8n-@wfz1*dKF{&c!ui?G+*rD&h_E~dX#twMx5@&&Ad}hRVXXIJo1mcn0m~m1e*m2 
z%x1oOFOTt3b^~rx=>%RPZg1gXxpTOIP(uE=EUODMATHKZ#r$2UZ)X?Al^QNGY8W$^ zL2J}#(}KY`Eo{C6Zzw>+eF0vfLyZ=J*qw9PNuhOk(<3H4eF zNbyoGbE02re7tNh9G-L|^d|Zj(Mn+MWkR7sgt&^}UR5vk$k}*-4EI z!G4o7SAfzf0WnkShNrGD+=&XiqzAX)i7Nlvf(ZI56Ah}X?S?=S=(VP<9Ga)%&dV|8 zoBE7*ZBRTEw&4$ci%g4!F0P*A#;nH*ZEt{`BMBGIXwW9;2+m%H* zvGQ_d{4mc$SRZdRj8&f`TW=Mt?`&wM;^w3s--Z)TVHRRF3wm>D?o={GH{AT@ zpFD&gW$)2BJrCQ;I8MBT^7fjc1?If!`iDast!x0v_mgycvMbpt>4 z2HH!1IT1Z!;n%aereAB~_HgQ3SE%xFnE+RCgIb+K-6|E3qC6~vl4{O}JI>>P_psL#5gk$SE0-SsB?vXD*2ga$ zt#ibC1KiZe3(U2cKXNsBG<0$;bGK#7zxSgM9#!l)98$oYI#|e=j2&fw7&9Ut$ z!8j7(OEIx>A@PeCiF}Hf&yv#Iz4*8{?(=B8e|{X9P=`eJqXeZv1f4pC-Ril9x&Fki1dwwEtGHDf=Rwmp;X@S~ z3aI|&N^bVJq5TrH&%fJ^OwJ&d{2hwA1$#f|IS%UhAfN>)2gEF z)xH?Zv1h?`BGMG~s@Lx#@Pz@%(NoA$@MHVF zKwQn=!AXtLKD{VaCjtYwPsObn)PvY<$abFxiAN-ARoBPm%{uuvekF28dN=3>1hTQv zy&fS1WKZ?9enfV^p|-!4B^tn#oLz`nk$+zb*KAI+T~g_MD|SGlGu*yWttyqY%%9gX zhHxzOMn9npC)>*f4F&NqhYXa~>e=)PSeoDgeUiWjMK+jBS2@++Nb*MN219VdqdGO5f}Ft`(kih3`HjoXsg z5NEO%}6BNe`x=#yQ5)Il3B8cF(Eo4fT zXnQo0pn>V^m#Yv%rU2Z?Ah};IZed0CsDLEV%Z(lb@G`_8>$WA2j=_ucsE83;*HA+j zMr{=|NFuNoafqS7tkLf_^Ry%ZR~&(}I~PU>(t#yuq+_!XgiH)6K{sVEsbmBm71R;lvw0X90>CVN3)mhg(pvRfCrCRHAhGZ$X8gv(&{B$q+xaA& z@aSpg1}?8R=$SmDSXix$Id#0j%AJgE5GwbJ2w_f_|qO&D!Z7#pa8 zD^?|AJ*cLk?D3NgIXO4pB0+dkzb(y!R!^VEPGV(Gt=%jG6}~T9z={e7GB7{lCni(L zKkGrCfokWM>@35zY+2a}AGZk~MXyfh{}LHxPWQb8-67rg*GslP6`{XeOJ5YpF`{tt zWt`A=nOCk@k$pcZKZTSujBX;bwAK?5Z|B?|8bc!k-E?7>AD+<;JQh)s()00 zzaK0~34vFNxHgt$Ekfke1de>3?X0Ni8ZLM}1vK0mtStnMX_2&V*Eb%<}n(bhsuYGR+lMsW(7MQhWR zv_UaM*2q=g5T)QlF%P$Mn*H{^4EI3IP85kv@=ob?DQh*G2Dz6rlKzXQ&h>S;3olW_ z`LhA#&2W9gRA~BP6mHPi=|%n0%|jI*!?{#nGhmd?cJ8g>wOIwsM{7|W+Tr{8mVtFfzXg0Y}8WwoOkxtH8nk}dJ^qW_L_}I~=6R`ss&j9Eb$@+QgiNdO8L!&%5 z!tU%p#_bqX8t>YQiiEf!oHT^zeUm@H?Xs{o14guhaXW3E`mzXhDS^tXr39TUqjPWC zb26{?T=6MRz@S8kTFSnd!I(RQ9&q#%N7IZwHfxSkf+O9VnGG1T3!Ph#K?+of(n*y2H@_@W52(Qne`4%sz56*?iunmk0Lc z4HNuAO>lBIOh$kT{}Kl(IQr{c*IQ5uMC$h#W2H)-0QK(k&?#JrX@_uzmnIPDNGwD# z#H;AMRH`ZeqydOp;FL{<1#;gkpvn(BT^w%;APz5KG^(WIiiPucV zTIqiXX0qnviPeKh^s}`EuLbwjNAd2Wtbz+?v{PLxBwK`rZVc!$THccf!h|feHhuv* z*3wbK_1bXSKhg;0N%K1MsG()gt}C!fH{$B9MoX`_^eH%ByRPnk)2dW?>bni3F?pDH z{V_MJSx}sea%ho6e^3+5;X}R<-zG!~%(x?zbxTNo9oHZHqHP0u_>uibzM2e702mPW zsU6izGbO?++-k}Gb%fitLLw2N!EY+{u@V`T8s)#5@FHj%tzt7IB>(PMGO%g|-KeGO z_fSTT%yiVuu7n4~h8*d%+>LIN3cm=zkUt4B$;_<>#B27+G@E|4NX-N`WPUPtxesmo z{(eV7vIhVF{yUIy`JW6G-_L>)!nOnw63m35xMC71vzS4uu9tvIl07Cy3v-IVKnAujIu@1<>_DFd1~=cOm%Cjvb)PSL%;12G7<*ZFEF|{N?inlg z`6O-$5=&Au8vUi3GS@_iIvo2Z%ScOi$I?+4Uh z^1~W(gD6F^qPTC+IhURJ+D`7lP`-~N2V_8jt?dL9jmUo?>XPifkm1FRTF&q4f6Fyy zbXHs6Yrwq+9W+zge}7YII+9EjP~jSU*}3@2kAYO=6*yDB(`)mB8(|uA`D#>Yv zHnz`noySEc#~lS)zYGWdraBY8xniBvpUS8H*O^M#+}`f1M3G@sx|}Ciy1&_mB>W;b zZJT}kVa{7qjRf`@g6&U0wdU@<_)9+OYQokijKW#&6*(9-t?p1EbGAmfJ~;^QN3pkc z{sd;Uigty;?M2jT^swbJjHAXP&<)HvsbkEU!-I}eh7MBuAo!`i>Mc_`S>2qX{tAPsS_{$C;TuX^3H-F*0 zfU{8@O*1c1Li+PR6+Hpu-T$5j#E*CXp;61@F>Rvc89F?zB!yc2OiD!R4Fp`CTm)-C zH=wF~E%j+)!iU`Ak1+GO6Rc9bv&eiM1QHVhlK`ZbN9CER%|0yHGMOE5MZ%lv&jhuh zOJ=(m;YlWhVHv8-)}K$a#2Wjn$WNX^SZjg-HlQ(k_E0i8u=$QyXFW^TM&0!ULPpkB zY>7sc6VX6w)KJnE#i^f#jz_iV3;6{MsfJ(mVl>R;{NnF8kLPfJs_rIiBw#~8n}+*m z$U1(3u6Xk`L`iGSt_#3IQAPb#RROp|y8qjUNi&gaAc3hn*ZA9gagw7Jsu>?NgMK^} z@;Z<2J2k%^->XQ3#~x0TQslH%lTB^?YZ^D6Fu_ctb!2DL)n`n3UEzRD{kAiyod1?f zBzK*N5+S^GWv|i7b3)PQ|kb=(SCJET%L0TxxI zjM-5$ACmlB+8WIO@qjpY@tD@((N$Qb(A#{;l`BqTQIviCeb(9@W40{OQ_IC?;J+v) z6Y+a!wcLAtIwb~1&60+&vpu6k8819#FlQKeIXO^g;!cW}B&hiop+Fz!obS;~Ai?y% zhqIqY&zVHJhT(NVMg4XT#B&Go+I^a$tSX6%WPzt8QT4)cA!BEqIPa~X$P-#hi-r4J zNk<7gv+Tp4N;34&5~)A1Z5mC$Aw~cReT;gK9TjR6YBB*okxjeku&GPH8&I1Dk#L9| 
zK5rSlklAZ5wnun%(y`q;Ly{>tRd`m%t0?b$7>ZGG9^i|^<2cz!eRDVQ8>b^43UmEA z%UIjIYwflrm;$?Q@I}S>Y~I6tEpMA@8DTiC9-;-zfaAb(K4#M6w)QAuM*6HHf80JE z0=?yjRXEYb-wa>lRE@qVJ8%UIO{5aR?QSs8^x z6NEX}Yf@NJzVW82;ME3>-H6oQR0#X~QWis$@Tjw-@K6^1v(ez^&7h)soe2u}DAes? zAdMfohxR&5RT_8?T8BX6v`tE?c`3<`7b63nR@P11i2S36HSZ&m*(jCvtQEX^??Y-(3 z#rkd}L{p+wNWm~hjS*U;lPaQV^{wd&-<9$@#1x_VnJSR94!`w-mnGwO*pM(_Psy`c zgLho42w^jF3Y0?i(fv0D0>(K$do`gU_=x43{FNO#(YZLSy^<{V@?xsmAJEB!Ddeog z?6olz#F4S+h3F7HNI1c5m1+r_IE&~&cXtT+EhLT^4ozOHO;I7OOUsNH%cYgx;Jc?E z(r3!hzekRDQk8Er*o@^`&bd2_?WoLsJdF+Ou{Ic$+06OB?y{vAloS%?F0v()SFnV~nL z!s6>)jDKVkGN>tzR!;vGMaQv7+hG}JSw%O|EbqjWrONEjmyxY z^?L*bFA0JnOS8fcrm^0M$wz2GsMy|phI+^<-L%KiejBN=+mil(IV?&MNobY~uU$F@ zdC%5s1AprELnPDeKgHl52EEU3MF6&H`>JGhW%ZV=xZ(dWzOyb0UBA>2S4UatkCRx-UMHCM}aeV^O2ra$p!K) zpy{HGG+?xhwDBCSv@HwJ_#%((6!;DMq+B|}msqgO24@a{Zy0KpX`9F6)EmmacrrJ? z+M+oZTw@u>ktQ(Ugg3zO+-u8=bu<)?1fu3np^Fg)U$@&yDFU~>9}J}*g*&Jm$qRi( za@g%6JSp^wp&;t`vh6du?fJe7)#=0u7mh~~U#=MT%y_|k=r1|iG6;Hpl;2m_cK0~P zgV6$^pZbS->zATfn)J)&)XA7V70+~3vR7@P~tSMM}r_M5)_yoc*@5FO-bIODZwZr&Mo$;RZtHYr_cNJFAAGabJKTY19W&g9rfWxFszC1@a0UE7?kz*a25mx}@}N|zQAWf^TAYZ418?GuQl|B#WFYx0E(rpqXh z`B1}fX2e1eCGjE}(rM253c6_u>!uIQBo(fnairXvXl58V%C*ezgJv8j?1*XgnjXT# z^W4eEb7@~eA`9;_l6@NH16|PWKTtFx(d5_rqO_OO!@l+ojfJnnxlcv6&*ci+_I8UX z)6~h`IAfX`13XxJqB9OPT(y8k3?3SDR)7>wAeM1tmr4Jd0Hq)cNS-m-7qbuqk6v;R?` z-ZC>G9`F>`VBl>w{tX^PQm@x2sT_zdMIWIR??_&4kTR9%?@Z9pX9A+d#_1KksYoaE z^{ZY-s9}ew7RsY+@`BBoDTWBUd#ln|6PcrG9ZLZl2NH%c%5kqiZyh7K9jSf+Cf$S| zOAr3mbi>IlkF;Qt{IWm)^5M=T(!>F*^Zc2bbUZ!5)sH4@iqrWVp{%^Gvx5!%MbIhv z01NULi2+EI$`jQi%Xxj*v;-IK6tFD~48iJ^fQ2f;U_#}X06awL;w0N&w#}r1c}tRHo#b&t68L59 zA@ufK!k>eaA{v7kr#q8yjrUOEt|c0mj!aVu?j9)}{^uxy?MybbB2q3PJ8%4deO-3R z+RlJ@n?<*JFu=B;Uw@!@tq*WoCKw1hD7GcDk?9WnB-oXe;a=?~z8599NL?gqas5L; zE6&YyY3roE%x#d*rfIatk3Bi~r2q>@!I@kx(<15YG<1*Zf%M&g_#N~ZFH9r?29#Qi zah+BMiWk>O*xa(KS2|8pbxhnVNlF{z`nh3&l{^{WyMkWfA2(Qa?t1KmvIU z(WrlC6&x_gqtHJ#_@xG+L=v-KBU4^OyC-R~Fu8$huBbmarAO!`HY|7dN@h+cP@q!1 zdCykJwP&Kps)&ai#Zyqd2+#8=t2G*FIMq|mZWcUY$^S4+hLc>U9Y&~48Rha1Mq4&G z6?B;imXkH`3e?x7E}3Dx2K!+59X)BXt=$)JYN;QKe9|PW+$E-mWF>3p$fV0!};po3p%pfO0LyQ||z`>emPO2<9K#bdzlKJ7UOt zkIDduh!=Q7h{`C<$_GXLCRFI*ch`uRV6?EfOjbmn+dl9PWYDF&bXP7MY4eC#mqqhZ zYj!r?H$OT4y4GlE`U{r?jgmr8)?CL(G8o39Neu@ADYCs3%`)DE1h%M)OTYxfOxN*~ z_+Z^2#r-PthwI)=0XvB)uYa(J2xY+y^l+b^Rh`lD4-e~d4Fqn9uWEx%>!yb`(y>9{ zgE|25${-Wj$+1*z0i$ix7G>dNb182=w!`x2FTG;oh*poo$0uVIJK#7-kUj_UOk-N_ z(x@7l*H>s$=vn=d`~0W}<$%ZU-f~E!MBuMQ%F$mbLBj(m1!9}t`!qTpqGWB9teHbva+o>8_ zV0ab-`0VnhOkJcX4_7>?pw3Ztx#2uJ;@0--3bWZ! 
z`vhV0RzB6|7_?c%AxHfeDw)WHt{*%#$vYp-F{_7qTA;W+U|`(nMZH?5{I;|f><*f7 zL6^*>^mDM0snRqjNokgY0$g6})#A{Z%MnPF!nbxF#Z6|Rx6cM&`j!dOmT=E50xJ3v z$p1~JZ!o?hgUB3Quf=C+^_rpuJBV^`kOb!mU1NFQlY)g=^9%jk7vivJ z;A?Ix1A4C(RCvW&XH!K5GWqc?nih359Xh-MPhY^NXd)Dht%kqheJn!T!K&@Hi7LHd=@i@Zn{IiXu;Gs0lC z$2=GUobMg1oqL1SR-Ez}AZKF4OWAq_xv){hK|8u=+(!{*P;N>Qcs4DKRqFWcw!v)pQl+xw9E6 z5#A6&x-8wr<)2WpF$BE`(CDz;Sf;3lZkBh%Z;=)QR@?6zF>Sn~c)cNh|F+D74`S!h8 zAA>J*tu$BcT0xu=$_STlD;5&j6l~w=-ld)>3R>1us09V{-})`_s3(yp#&AQ&)3;~) zOMsJ@ex>Z6JM#&O1l!X|RV|X;a>BJvj&HRGc{$PrO8#~i7OaiuCbn+}l_e~~wPog+ ztAwlSWangCa_Oro2|l+@Jn$Ej+1!8ycc>v z;6SHRMlVR1LT$;$xNyVUS;#8n8thqu1R~${ceXjOSgwY@vW%l^zuUeImhABmfFE=$ zuL&LF^y{6gjl~923-xD7nuu7HuGn-y8kg>-OwU(R89Afd7KEaZjMx@&`P%bV_Wq3xEgZ~d-3`0f?q(axqWI=AIy7+y_i&^s zh9HY4ehMH&WIsYF69jk-;@EcPUf-Rf4;@i~k>e7-&2T;lOjr-S=C{n@YP9pAJMsaUJ>q|-du0a}y-dCzcfOmF~LKV`nI@>EY_b7jgH zjI8{Xf(vC6C&zewvbG-?JRr(@={DLDXs#gzC;8B774!9 zTkN?IFp+3v!sVhie*jKDY4a;{z(NdOAhN@{f|?%!laZz=Vq3JMt@VWI_xr|X3R?_r zv6yXp4i{aU-Y3OKcNrqRqBLJ{mv^eI+nA%%)LfBIVGmCIxjj^s|(gKd$DUd^Z5 zjl`ZN|7eMO**`CkVwkPTxPWf*ZaOO02LEG~-}S-I(!tX(c5Xn*F7&KrX57o~6ZiNYMi5Tr0V58A)q$EZc4VG_$QQDNvTf+4x^W~NenSxk6SI!IL%+P&jFG z{C!U2w9IqL^|1b4KPMUy9v}?-9{_7Wl)p{ehVG0I{X~A9R>4rQeyZokktw$+O5_(` zSd5rFQOgtg55c)B1NHxlzko>V=%vD#hFc5Hf^we6b0I+`m-2^UPoXsO!w!z`#B`ZzDyKOJ{ox(A>;;<(qaNc5fgCnDN>xJg4w z*OOXBC4~{}$D8Qz{zhSLT?KV-QR5Ao9B92e-04H=vW;}Qa7TIq*iu(U$m}lNt1B+a zFaTRAaBK_45k*dUZGxd+$D@R~W!B9eu4}OASix!@*QEX+8h*W`a7?-nWsC8y9ReYLCJX-<^EDthewdjOTOz6@V zGNGNLSf_&dAi_m}t9*ApaZo|Z1QhVZnIKpo|0KZkIFZ?cvD9e8C+Y@C=Nj&2Ju&HX zz_zGjoZ&}6w9l1sb&C5m-4rpaQT#!Tk4n}o<15`*bTP3!Yy3et^mTnE<$_>`7y(S? zlq}?m&E67SOERjco8unBQgiosVqZGCHqDJLc{Q6&BtOR3o2M2Q_3W!R9zfaAbgz39&q!rkvgr}z#ELvSFkB#+hbPI0$iJw z)dis({RCeO2rlg<_ong{33Dc$SLlR%Y5!X;WbS1#yc}=bFbXh~X{VDjE;5_yJcoO* z(4n<=%r(^2j^bRV;_kyKqNhEF`g}k}m-avVCm4>P-{N#kp#Aq~{X%Pi)0UnwwCb`` z&H>z{n4!XGALu7>Q4c1`wTkp~hRaMIjjC1P_Z6_fi5+GtZh-SS3`5{PZhKDh_yQGe zEmU$K3~{vSA|tIbZu$T1bdEC2^pI9WcVHmVk4(@4sV>3|wtoCr8md8g@DMSO9Y*_C zIDZm2BLio$+*H9QrY|>U1OiW&vW$m5Fu>;jL|SZXIsMrqxk(AJEGC0xzEEu2?d+kd zIuOQ}@^Qv5zqQev2L8IU2_>#uY5cWhwhRS450_j0N;3)b>oR)JR{5Wn*jFtei$?>1 zE>u&0Ndbi0P}HQ8(eqYp{|oknZ2V0z$qk)vL5N6i-7-=RmV}?=K}T%}21ToVuaa29 z8B}}iOF)(tvC(qUEtypkvo%AP5bpY5U2MNKB!rlyhxhVexMz96(b)U<&YP3m8ck$p zq|dkWpGwOx*v|}Dq!Ge=*>M<_GIFC?dUx((qT6ljH5;~T>o?1p6kQM6JM&NB|5a`a z5}#yY?-yH|@*lUT_g!!EhB+0ui&B$WA2ETU%u`>#cnZnIwI``tP_H3U#t{O>0003m z0iW4wS6u_ETSy9z{3->0`mQ6bpWw@wO1~|^C@r}YaaQ?`r!PMPyk~gg@LPw2=^$71b+pXo8-ZdPB z4wF{Z1Hxz~aq8U`keJFu-HmY60sHl#A}-dzK2M2#@N1@E9GWQVP$ZS)LK%)ePd+=b z78F+6-25Pa@+f>*SE3`JK0iqum+6LdV$vUk!7+fhRCgZMFo{3mNV1&(00{X(o7_v` z2t&!!Q9x7w+k+_q7^yC&z{>(ppIkNJQ}ccbfFGTqB6LgXmfeXyUuWTAii1)3_F{7j z@k?wkL{xa)WEZveRD22T1xg$v!cF95y0*I)`T_^>oME5G)6dnu281?+($U~iC!9d& z4z^TLX22|87G5chkOvWyEljLK_~^NqkKfT3a6B8ZK66|!uPV!484nc%^>N!r`YHVL zARxbYS_|_|Yy{Y~b}SoWDZ8!u(kDo+;d-{|#tkqLl_B%56J(RaV3{mWr7JO4Cm8NW zE9lViabuYKdc}q?@&J~|H)IoY{-?UL)%vohQ~F*(Pedy0u0jMR48+K_iBhw9IBCYU z<>k~W29XSrd)}nn9q0yyE&=_tq*ND}0SMJ|SA1Hy|Ec5eRsS}x%(y3L2u4z6T0miwh z?X%x~cNY$OQsDkE36@aMicUAo_u}kcysZEuj1bQ#JmtdG9_b~pJ@wV#j`hVIx7+!e zrX+t>GiY{kGeI|&9*253N?txL=VIV7Ny_3@G?}Tl=vIO5aVHFFH`u&AXqCWdvW4We z%Gi>=z*!Cb5eW%-vs}|1fssZKPOM`YtJh@>J%;AUKTeG5M_RdC^X zJxBEu4X|6?nsVCz$IXDpIY^(Jn+jz6-GWkYz>V|G!$)^)^JY z-AJW{gfoSONy{Ch%DTJS@SZJ`{xVPgL3)pheII@o2&i9BNx2FDuz|6;pu`1f6l`wa zlCa1@@~OwhR}jBET?ILveDd{@>#IK`V-ZWf(PTrWzi&*kz+6c5z5QBcd)CnByJHyX z%}Dbc0Q*=L0<&ymcNUow>*f7}Qw}hiZ{EOuJy)PF?G{cq#fU^fl`>wUIJeQ7l#$T- zf_$gr1K}X_GmGK=&Nvftr9a&KIYQh!ZzDfs{nD(P+>Y-vISN$xu(7`C5bf5ahOub8 
zJ1i0u{R-k~bs>+$19Q#mR2Wpb-_(*isZ>2(3cT!{YPEZ?%3XSW7W{ae`ARxpq2ovn z$h?45nRmoRptD-Xx4TmUt>v%W<9{E`yo*?f`KCZr^^ju#{X@20U9A#iE(^%x&+ca& z;#D%1-ugsgv{w!Rw?wxffm~nuch~cP7EBM9RyZB3ngo=M7n7Cey70VIY2vW<_FdoC z+drLKa*D7l%Kg;41n%YBn}2Y<9q%tT_wAqXq~CuQ)Ho;XUA-XV%#AV8f<=n%XFDq? z&!tExG$QABp9i;w3_Dir`gRSG zG*I|MKD<+63*dp0g+ADhK$5eMjz)>$IhYVePj!5J76*$v2Ku|8hrr)Bw!`wQer#=~ z>z|V_C^;?tKo2!KpC-KI`72glLT>*p?yh)j(br^F-28J9m|~gUYXM53=`)}8Ob6jMewrAJgZzAw{|UHzDwei#W^a5gD45`?Tzw`gDYePT3NY7m|d7n>fQEvXjN% z@$JEH5*!MMF{#~L*OatQVSx}3nlf($lFpo9^k*hZe)6Tv3N*-DwOpNbpzqg|eFbIg z6Z5URn3h}4odhUib@hUQ5T2d}f`xPt2pcoOY!cl&`hC11Lku0A?&M^NE#R4Wad^N> z;ExXG_DBiF#2QMd^1ErVVVj@?)P2~W>DTpaT<2jUE8S8lk#Eck-p$1fzf%{?)LBo+Sm`)Wu{ums~-|GS;Yq<-@-B?jmXj~#1+hB8prLEDCrgn5v| zgp_2MWu#IYCtCDhsjR?$Euyblvq)WLF~`w}uMg$)eO@uevq!Y=+K=Iq1KvU$Jx8s{ zhKl$u+c;xNLc_dRSJawKt=~m__-r+6c_0;c^2>_>)S!)b9QoFwpnH=6s3FxUmf}*i z16~qiow3o=&({f9jIR2^X505TE8(5R{OV77%<*T=#dkQ$rH0Uo#Qr#ig>e-k)c=X} zWAo7^Q0^oDL|@t<4&A8D^sCamnW17*8YA+a9P)_bL}Zt$>hK1b%q~GS!c!sUfJWBhj6$i%W(v@(WD;AN;;ti zGnZzM{-1Rs8?I|tSIAIwdQzc$DNp`XDOeaacNEhI68d}Sf3C=IX>_wWJ0G0(U>J)T5m|84rk(l zj-jSa0004W0iXM7e^IVE(s^k7qj(90i{;fSb>c4La1v9SzoAnAJ(9$fgFi0Eb8H=5 z==o3>3)K503v0X)@C|VYF!t^we2t&4iBRD5$z_mCNZ!pvwcKMwy&uOfQhT}S9rB$Z zpD$%tjF$8lwPj79YT!9U;O38vY{SCh@w5zcHBu3hG`szu_(1`gBlVV&RApv}bNLA( zJ|8F2D~oPYGzL;EpDts}nX9hPaZ7A$RBF65_umP5S{j|5?gcf@l${2Ys>8_)B7U6Q zCCtRq6^z4zETTS&XKPb82;!q76k-TtC1pPuoJ^%1kpfc8VHSiH7vdTVyX`IE-9oCX z{TRmOVZ$%i#~eTJNjTbpnig?L%3*hxFzn->zBMTs#qEN0Uw#t}%1)j%A6{Xdy2x4G z2z=wB#?eQ)UQ56ib9Rvg4Nx;<-qXJ~8hTM4YPz)Ql+I!f1)^#+e~vH|SHshK8h0X* z%@@Qrzi5{~)#=cG&t{9!000XVL7VF)g`yfrhE zrLSe=+o3M?!<%M1l%Q@q$Y!j8Lw6}~K?$Jq5Swj)VW~@uJ2q&jLR5}zYkH`{x+saC z5&H^w0T|mOfOfCbxGYv$rx50D1SCyh0;I!&u{jeNLTSPi^;(^rGS2w?vW|XM#n?3K?ty3~Aa*FXtR8>cZx_O*5I_3+>eC zE^OE640M>se15!h0mfEw>`pNl2<|p&jON&eo?TBdi)m0(cta&t-itLqF zm^|z~+ZIrh@r>`Pj8@??R&TmcS;+I250yl$9Lvt@;m?9J$xn_R7Z`2(2<>9Kc>gK- zg&)nL_>1$(Pof=(cPc>`K()&sYTCsiB;K;PrLjXsM3lWt||Roi~@iOpgj26{|su zH96qpE29o&i%l*?gJtD(D!@_il&$YXbBX`Bbj}-g{kYbq{w8a@WMq|f;mnlcL_ovb zLHTcJ8K~ca4f1|iSzA@V%vcQeS9+L+r%1SdB5d8A#LT9D&Va{rY z-ek!RS*g37la5{}`wxQ^l)VrSs}6qf>j6z9SwlI`qKbNNP-dj?MyK4&5reKG(Rabq zA|4u_cDWD5NlM`z%Es&Q%Y7>oykNh5L9(p#DS%YGDUoE4-r*T13^kT%5tWaGkF`kQ zKf)GlaEUF8UDDK*Zj_cf7mlKtWhQlI5!cfbS=zsFXR5~{ zK`j5SevF5L0!u*7-4BTKhG2{dl9fSBzARz;!afK~6*zgj)yw1JjX=8+cl>?&8}<|9>QuDtzLvStnuO><&N)O2`k{%|#`*qjF*6Xf4pp76kRpeHTo8@E^xZ1a7>mIsu zVf$>CM2@z6CfV4+&{L`_qNUtQh(_TWrquqgun-}MWnfy0%t2W~%>CT0@m9pqLDDY% zYNNZIN;F0jb_p|F(0`cfWoEVB+oru+m~AC95?jRwW^)vm{CpTQwd5;lF-&_qzAv=5 zZF{X;r_^7nN;*=Ovv%rF&?G0O-}4qE?6l&&KdvfqNGe0j0OGT1im=sSF6YlS?r zo?Z_g$?WvX4iLqWur>;V5W4>MS1eGE04HAT2H#r)xMLtUiOsc_!GJbjU(2><(K>t5*$ zDZK#-l^G!T>i(P;j5p_mPiS7zkndC8%^gBSyy0aSb_6<0|2&uH<>&eXU|mj!9kTs@ z$3eKYnLh0v@!0M?62iax(*!?U(*!iUYWQF(t-ejsLLvvdS3--yH5>XAM(f?`=KjtV_{bPKTLxw!Fnfjnts}j{N73KgEatfC9mu{x$=X0><`Tw3rG_*d$bgqD@J_83NO+Qr@aPpVFehLAeR{ z4GGC)s9b$TSzak37fJXsKmf4n8l{tLm8P4GsAwCRXYd&uQ>o8R-kx-i zT`{|Wu0GWo+@4Jm)0nimFn~9toN1+Nm?;%W(o33g& zw)F#2E3sEPT)iQGR+4R;b7j8)zD-L5)7T;whxm=^v2Y$*)#&4Na`1+FTOn9B2SYID zG1N02)&;r(J*W4u$|PIM!6-tSeR&z0E~?(}3Hz=YgUn4e+dRx)=ss@zq655M(8{!?a(>b_F{fH589=_ z6hr?R5$J`;tR??^MA{ecQR#$MWL@bU#JIB*Sv(RemeB}4;V5SI)6de0-p$^GzxH5< za2j0Y;H+iubww~>FTk%M#hivQfp~KnT48&o7ujAHhWPaVk8Z3TnZP6(DOZW&udGVT zL4Q`_@G2bPHQ?^=P&TwTbyUcn*Qm1YxBd$Xt0Uz$P!vTIv~OIxu81>SO&5w{q0uZ> z^#he1jtNx#BT7{7GZ6urrUNpjEmw`-CqoiQJxsSukZ{Tme8Kv*GxnSg-fY6sB z)N$T#i7WcWi5OL$_VC)~AbqlY2PENpm?5I5ESzzVC?`~7@+{N~J`4Ywm@Ffn-$eTP zb9uKWn@XFraFI3}%gDIVSq3VRS4g5`Ku;xaL%-V-Yo(b(tQ=M-6%_p-Ef;idX2Nlf 
zKF+e`B1qsniM$Yf)mKU0QqmX;%A7^66xT(3>-{1h2(kq@Vc~ZfLF@}!<7+dLKrARw zKoX?V{LiZTn5Grj%C)1gmh*V%79La@dHZ>7o#k8=wakW5d0;{kM3FZ6PkI|(K`wSr z$GFLXshUTv#I7nvj3n+^5eDTxuJhrcG1-oP&W{C3(5rgM_S3$ZOVF!wz0zAkf1vh01{`(gTfl1s_U`fdz@4T!!e z(wodL-0B43C5PI=w8cZuyh`@wPDDhbcU=V3jvA9Bhd}w_;JzcwN z16|vxH+A_=6N1GMriRKlf*H=SWKp=2>(VQCsfzZm$xG4l5+#xiL8!ZS!#_dwvRkGnn;X{{c$Nxq0pAr z^v-Dvvdy`~3d`&6UCj>vu;Y962KOo$2lP08eKcWS8}C1TZ8LRDin*A8Y;uYFDu2tMfWbKrS! zD2G~ku9e)xR#}3mf9RJQf>$1%)k0fgxSQvl{w^ug0L;tjn`$|+CB2Y+9y7@(Ltzu2 zn7aBEicP#QA1kzGbcJ%>;e6UZiZ40{OV_lsHyFhY0oh>6$hH!%P%GRM)}Y3Oyq9N= zccKS=EkIs#2d}1Cp$=s01zGEvK1t;#x59g?k1T7#cQ!oBR+wDQPxv_ zf!?*@n{(EeEh{300i2s%k-gFOG5a@q@8b-z7Ls)j78)SUD{XiZ3*MPPrc*U(UjQgS zUrRDBU}E>x-YrzTIl2s2^ff&}k!Sse#F_6yJ48JKQ5EyreWhyG-KDxRNs_wcwPQ<( z_M=({U9v5Ske?5skj6RNJ&cBl6n*E@i&qoo!Ey;fK6ci!694zpuBpGAgg5|Hq5GPc zOm7C_N}S*Mgt#QI?PYt`d?GDj{o0R7(vgdRMb}QkZI2|yIOM-y?=bB69yg)h#TzXI zYhd-O(*zZsEF9VE-@WANC(1>xZVQ)qlt3 z-V}xkBRd(P|5rsU04C1A3$sBkSNHwUN=Li3RqNs%Mo_`uJeE_}h)fMtyu{tUh=p4x zosgN+QhFZ(;OD+IEtwWkvMb->6y<7WW9rswfJNrG4R0t&z8?>S08vRR;7Bb{=XO$7 zr~-^UZ`p>@&WL-*%$%|OWv9Lnsky#@btNy6Z1*>idvM?=07Xj!NAY1TYO{TFqS1k% zBM~HCtu zk2k}t=c@v4MNMQ+`LqwE&9<7ad5-HH3*ys|o;jFk4g`pUNDhg4O)S#42j;$ zkiPxGgy`7o>Exk?EiMb+?g`)K&aiuhOm&tXfk3Vd=FpqBRzM0`yj!((CaPmh$-W+{ zx38^^#7Fa%ThTMBtt&dg)Ik2iD0>TViY(%^Mo=|VsPuCos8f^!l^7rd1t|3e?}N5Z z2!r_UOj{lUyxyU^#<)F9#buyo0w$OT5swzt$Uoh|ynWL}kZX&+Q3zsc!^B(f-HWoLwn_Pq9*$QuXDAqZ70-m5 zk*)+r&q?taXzIebEcQ3Wm(JMZ zGl_u4#ko?|L^b5w>&!JMn&N(^b`vF)l89D$HBh3K-*)mav+xHP^jI=qGtWNwy=mLdkqe@?4PlrU6MrBET85KwbjqPJ@ zLB(H(2J|%hR7clXlg(vPSYO1lV%>+0jlSvOFsM16<0*RPPNeSc4x%gE^F7XV8DH^{ zO)pM@9nkW=v4G7hr6y6ks)9&7RHnToWz4&l*}+Y(W@hs=jt(QkY` zm2A=0S4jEo?{Xm;HE-PIrMAPa&6Cxf?xGC{DmB{kaoxi{t1*FJ6{cJ6LDCRd2&Ci6 zU{eE(`mhp_O$^tEI~2sTb;1b1y|d-xMAP?^oAhY|v5nx7ThmJl4O)@Bi!(7dS__+F zHQOS&3JR-Cz$JWwg^(kRYU#OV=USY`Jo0(%bf;oHv4NR(X2vfZH-lBved(M=-$=sT z>@TrLQ{UR%J|cK8P$7Wx8cgy0_*QuN+dWeKO0zeB^h>=bVb*}9pQg~ShstxxG~4YS zLJw4K7_eui2z)2&qcp<=v5W_ok>|bvp7ptYBL?;|IQT^+jU3(|d4~;iDDjhVNXDQ1 z!)RcSJ96f+1bx49q?CeGc_vleCDZ8rN=;BOKB&Ym8Pz9R)GngDt0Y+K<$}7v@}&5 zduw;nOJqo=?Gf+2W&xNj&5`*G%^$C}85{PtV`p4;8@gwl8hS~lIqD21h^NqWGZlzN zzlOzlbqqgKdi-@()Vr2J@LYBrX)w{)^p^kz245`aeyd~<1hjt@14Yr4)|oFpRNZvu z!=WC2;Zp3t7Z3kJfI0fJV~5{ryt58QSi$Y}M=#Em5#{*9)iU{PMr#u-7DI?*rx&D& zq)wLFA5*Ho`1wvm58`|OYt#}T;>D&r@hu_!A7Nd{y_f(8jI5tC3b+1b4ggdnx!-TEs;JGMf2oz zMOBz$NCQ@34*hIR^8`;FfdR^(j%2EL8;p}J@}JEY?0z}G_G!-mvv;Xi-I6h&4>yZi zp0FW@okYS5khwJZk!jb@oO)Hx+v1nZQs+xLO*!qd52I9}sJo4EpQXc*dD zq|4rlZD;K2x>A9q#sBdpZF*hQZCJr`c;bD$`g`?a31(hXw)wBGbzNj7$0FkgJf!9# z(O5(TfaVkIe6H6YU{jAt;bSaEriMa;s3k)cbaX8;ZVrgCmD&VtT!<5x zva>ca83gXS0AY5&fpedAyn&%A$d=^e0Ca~-jGY=QH3W*dG=pf6t7fP(jL(Q~_n)?n z3l@~!G}|D1jAxo{1Jsjc8PBEuu~!i@U*bS+pXpKS-JuBmeHlhfWUThB10<0bs)r_l zV=Vo%MP{GT&TN5U+ePJXto^OEnlO!4+h$=8V=V5V3>xpU@Ff9`na!?-!i;3o`in5DN>OwL=b@Gn|(98VS(?Y!~R1xAUY7`chp4$P( z%H+=PoBvsi6;_{Y6zm^~)dL;^TfE7eEgtz=VKEeQgH0nO@q^;lYV3UBTOoO+(Lb6~r(WeIN%;Ig ztD>0EZ3(g@hT8fgf}ID)moir%S$3sz{V{sfO~e*(E;)(^gN)+b6@i&em;JXNGdvnM z7aSzA)V^aI)tqd;Ha2|wKAeR zLxu6oGl`Y*uw8rKjp9xG6?KiY%l>`;!QaUqNY;e1bBLQ0Nj7+U9Gu* z4fz@4w%oOMGPE<8 z;dgQF+Tis?VVi;t{{&yYyw{#K_yacLR&|Y4*P%ujkP2DpQs^EePbE9!dsD?op9=7B z5#=eCOfMuhA<_Id%e;2RP#iJN`+VKLm&mB4m#zn5H%fBzQzJH)5=73NjN``>pMEN- z000U?L7GNMs6l9%Ob`Fv{UQmFr{hBfC(cF^1h#ocCnG#DYHi;mNtx^OFGI!rKs?h2=IS%D}Etr&OEmFP3Phe5$ZO>p~V zkb>ErZxfh}yWu$Ao~JE?HE+*~BvFx&wD~XVm`^oFE;7hwVE=+-joNYMYEBfnw~0gV zE$;6aRk8j1H{OZwH!jwZ7vpYKg=ZGKekp%Lzgfgd8JQYux7y3PeM>}k>!6~}`^cRe z4AE4<w5$}rwtYp_`4`cObtjl&`2_&RlTYfoASPt5(>gUDF%5>UB^i=OktiXJoUCAGFQ<~vj 
z=}b@gfnE@mUaSWIWnWoJy%aQLQSvl;IAgR;VihBXuLX`+sda+b;Cz;tMZNz0y^86` z)|IKYYGu%<#0eT>)Pxw6qy=!xB1&Xu&d|mh0LwecB_AbcQTM6vCw0Fk>zynE3&~vP z9y()nzZJ#U5;AHoZ&@U!nQnH#Ftc15gsTWU5-EN7MP$Fv@wI6R8`K?MW@8fV`k*g- ztC(4mdqAV#AQi!;hkEmOPCirGTpS$(2@qoP2f@0PL#6%20d*uY9rj#zhfJ2?6ET*1 zb&AfHO_E#0ukd7jpW3(%6gyjlNLV&zH4^%aERs6zvyRrr<-2m;ckawKOls1Wo(IKh z-eBB=BO0ctwEmc-6t2&OG>~kwOL8D8QuFa^C`*iThtnNWBtjV0XB2Yz+s~8S*2I~U z{}$K`oPjIrgc@rBT+DNyatNn}m%{0)MlTRio7(RRf1(iR)O0{?hK}Ut&MFn&geGUl ztStjXEJ+#5f6ilEFBdZiPhn!P140cnH3UZ!sU7q#LtO)-!IfWIbOzMWe^d=$&BZ|; z3z46<#z;X;ugf@3Uo=i{Lr}jed{tC}o}v6amSwzj^Nb8K$^@x_U>@7%d2U5=&d%o} zC^$+3R0&9vr$F4IhV_9U_x>1JODowG)1^tbnu6d&lPA!>8MUM<4~z-av9u!_$DT)4 z4plUlBes;wutZ(H(6#|hR;w)(w-8DK8CuxMrc}74Q$&m)mH-I3gQH#d9Rv|tVO{ed zqCUBg1siKzViJQFMlRB#%$)5g2$Zs}+3lkrb**gq5#krjEXznt=i>XCvB7X~=>KLn zQ7;H*fha+L?`knWD-YVm(qV`AO5ILrYZeV?2P%@bxROGXTixdv2f3|7+^r%n#grw1 zozFQd12Fo34rh&@?Xu|`pi0tjs=AYVa1)TLf(QVP$&|N!>X#iT2Gufaw7^lf?aes9 zIIOU8T)_~I1bvUfwUf9%*?9sZBSZa{@SDVLyqHUCIvWwv8(7(TAI%fgezu9<{I92H z5JDk=ZPk=001OCQ(Y&l*f&{%Pcm?CW{ih0zaGWRg0CCtu#)z}a#pdy-d)grpDE?}_ z1SVg`+GZr4=K%fwD!j8FdjKY3%Y%N`;cGNSaLo~WOyOcw0J#sE(;+%4SR73@A1!+d zv~F~r`!T+i8Bg7^!tf@>+*C6V^~85ea?=wM^PgR&j_^h5V=UvQwMn{IC1c-@$lQ-9 zBH}xa0DrSlwgjecteMQl9VdQJgOU!tEJ-iv=IGcD;k=F^9UXeHB}MVc_7TKk#gmZC3`E^@u)FrcyAKn*653V*y4BhbB!j)Sb|iMdkVGl#h#@M zdPYM%+Uws0nddP%K_V!IFU!Xea(ent+X{^(MgoHEI!AD`NMF8yIr&ma<3Pp`Ff-Ca z4QW`B+T@)&L_Q;{)u^^CE5qr8gCQJ{ynk4~6hcIKlp1!PXSnL0xAgi?(5y&Eg1L(? z{1OfE6LIITJBr>H34QB$?E=?p8doi?`zCGf7+7Z7h3{)VY4!3o-<%z*4Xr?_SAfjf z0h}EP4JN;P&G=hkS5g!NFiqx_i3>R2!{Z!ileeFaUbi`Wa-bJMINVtvPx~JT7DrC< z8l5>>g(4ty1VQ4l-bbRBnks)Ku6SrqDJXXwPNcK_V>yr_DTGBzwpY6v1}% ztT(o^!OcOfKL0PrqB*>}!9EJJQib*P;=W8zpAGL*iQBz(19OUPp_9lR>xN41Nf34| zGhC1!(b2)Q(=Ly{Q?wTDICcl&Z_=i_7N3s2pM_mO1P6qg9u2caU*f4eR+EcuU)YP3 zk(<9TrV=~liP(ryNFM7UE9g`8QhH(0!vOI2UobzDdQ=PQm^7=n_QFpa{I7Y)dI^;+ z21J8Wl6y*HW!!T}@KBRbeA>X{K zcs;PE^x(d>EW3v^_-i0pfqBR4rACo9AAgju4_6t55+VEqCbtZq zX;p&^+{%0O3X}d2V(Xa}t(Q8#gx@#bP{0te7Fo@5@Qef`s`>)pe^UR)+_-e!VSq6a zlA!-Z3n{aD8+FZa-*Di=>Ahh*j34{7@P%`%QM0uP?0u%QHk(-l`vf8O91TP}A|{!*mu_;{m&(4W zaj##xmV&XiZM2M=#wLScDQVV*b+%XXQv=oWy>9}!j0$97Zn~jCoMud%jL5rrVzy#T zL!5N(F1Lq{P$!ldFYWd@NZM=kLnAy#cufG(>B51E%8q+y5mOxs-Od{@xH^6he+yA7 z-?c2!zH4m+ndXVNcfvCXM-Lb~|1Fhi-C zxeStZKdDWdgA1YGj37P<;h!4$a{;O>zQ>Jp!v z^KX%cKR<5CV1gxC)zb7kR!3Wft&#R-IW0JnUfVgi2^X-{d`3&xWmbQ_v}eY|&?u-_ zYo0{`%Xj4M8WpbV1FBD0EI?#nYM!otxvv~F%5$?10^zT5)#H@Od*C-ng`FQcjG&cw zmEGx8VKa9&eMSta<)lrvv1iGt-DdX$5A34IT!%&tW1K(Oc?e-;O3vTq%3!Z|O@}#R zCkPt6l<{Cxg!^)1jC_fv-0Q6#dh<{-ZE-AbB%+Q6JT(wx2Tgb!0N!0-*Z&6wG|Ov9 zHHpAYH%?Y5hzo4Q8hMM+sFF=WC_j*Qq1&c|6wl_f+#vt}0XzYogLHpe+F?ZZ$@5_- zycr}k*lKXg9kP7H0)Mtn`#*c<=H8d)Ky6B~f~6ll*67opIO1+)tY|m}9iXEjN2v2# zj6th%gggZnbnNN~+!f8FMg8Z6-j#r&N<4E_nLJ^WA(fH(mA7xi|4pQ#U&9AN5~@il zf!YGF38Qgi99JasyX|p|emD;CWQjk5C3+(MTnOpO0dW=chC!uAG?};3br4_#w2Fws zp9=ot8z3{!@X22siU178%I{Sex_xLmb2 zFyl)|#fZW+JpcR=*tW6D`TUJZ2~7yWm$g4q_9{i{HkZ65tv5>25eWbQ0Q3Q#g=&B7 zWkXx$-~cofwvv;}h@OjIg=fDt2*lJv(vGleOcIwMgzu!7N9n~19ul@|1A%ju4kf0NCwSWq!l!$UH}8zn(hJQ-@V-HE<0~p#x;o6SgDBFD4lIIEeqHcwIzt z*4@+=iC`RcNv_1NAucS|P5R2bjw{H8N46GkD3Ri|al-}cIwl%TaFKX)iIRYQ)A|)C z>(CSsf(1y69Ku9Zcwn3jZqed!{+Ncr^-BTre8G*KQoT$591K>u$r>$Ysgaf1Fe_ z64~~Uzzq_;Uq3B49-Lx$53qt_AFnTN4RNIje8d(`{wrQAWpgk&9W{l|kP=JBP{RY@ zV^u7n!0$h2*rh!R8l#@ecP3~is`rp2M~5Ch_Q^lnrbje)lr`@W`jC+*{&JLef!&TN zJijtCBaI|+?}E$r@#9f1ELm4V4tCKQI65s&z-8eQccQ1wJjg3D>Hk;k*lTCu34`za zHqyQ3T~?&GDsi!SH9GHv=-R-{g>9&ZbfU%~aPZk}7gixA0lP=Jw&9NFtzSI&`w11t zlKT08uYDs9zsFCOJ2-n_!Ergu?Qm?AJrdbMu`${Xr06?Gm1z7snAGc?*jx@1*Z^_A 
zFpVTBRZupK-ZBTdj*c`2srO>*#0j6Mk_PAOrZuCo7DBp~-ftWF07a&Jgi&D`g`8vb zE1kw(r}<|KW)6~P*fDtCBM0#c2mA@d=8U0qG`mzu0c)DCQz*WZk&?%Xmf& zVA>VDx?K#-TCOcZ4G~Pn$Y1=143KBwG|Qg&QiTSBhO2^;pjhyvDgh`iIXouir}v6% zA~Yfl))$d3u0NC7+6K&wcK%#-d;uoKSYA_^2df8`?T!!lh);Egs?%d%*^L# z){=mG#u4i3OrD-nc$RJTNB1D79#SA{ZG_UL)wYtWu;0oJTwoOQp96_1$ImD;5D@Pp zDOf)vHAdOZRbn;uoBdEE?HaD@EDjZ6soDOTv)?g^bs1Ev%vI_u`)1=)tC&1UuAjV6 z*kGMZhfnzR!+_=nuUK@Qaga}ScLre}vo|@~Soum{TbC6J$<^$R@@e;lWum@6GP{w5rg95rDRchtb$s=kIXnUOK_V`KL|Uo!S$ms{rFfD-j;orRD%$`Nub-2oH|zlc2P>{66hEh=iN&n!)M<-w(6X(l=?lhWN4mLmuW}o zv^lg&6ahZ{B$Ig$9z$f*Fe5f=0J9TzsO}gmD@F?vi0G8VX01j7VaC#R(QZjLiK70w z3LEFjfFKTktnx|D?8;xPkPz3dKZF;C^2MYd+I%olspa-R@skQ5qx`&w=)g4YzQja> zVJhrtzOe5xvup`?Up__vPG&mkcNA!w>J^4wPN?9Forvc#HNT>2w%E+yN@8ZvM#9*r zb1K~jiA-1rh5pdpC{(zDf2%U*lEvIaO7FS=P#1tOEs!z-y^D%@vVO`Zn7Uf9yzNbJ z6^gPnp+Bo@0`vwUi-a-@C^9sC?wX_NnmrP_YW;`NfQ~$^l*G?+G^v$S-*Zs@`OuF! zz1f@*)R{HBKXQ`Yi;=I1?^RyOVh>DH%zXb7-8ncmWbevh#!Dd5m)7e(;ebCkD&SD>VjFa#sPuF zp~%XC1={NNGA4D0*Ri@q)w?;k5-0OXLQG!GP~3AHT!Wg5P(f;sWoXaNY|~7KZzx`V zJ&?$V3Huv*hi-Qi%v&X#xSD8KHz_@ySO{gnDWOyEA6d)_U7P%mKv}Yd$hI=AM!a1> z*rYIcGqtnL14?oPL`~ukeACXAzQqbEnZ)_+AScGwTtEu0ms`vdny*OC(w>Zab;fB( ze382s9Rb)&>|Itj7uc#PxfcTeeVUf#*3}!dju5^-GW6^}RZ;UhSr_KjY6f9X@Q-o! zz)WYKGHM{+DMCs1S_U{$1iYSf^^oRv3#+V<9?XT3QP|6MRv@h~mXABe_8KDiEDjAk zSL{XX4zV!BRi?`dHu_fmg;+&WuuOSItRC*E&133d+^V}1w&wqXBTArX%omm)l3eg* z40FOmnAtm%WZw36liiN%w<+Lj9L#Q$PmQ~3wqk6UX9Hh!t(OCX)E^??$n}tiIo=BU zaCzn={QX9Gc(uxEom5TY*_r1ujjj% zY;dje`5?5*BZRzeTh;t;C$&tNV8f^`5osyVZ#aeB{Xm1!ejmx4&(%2B-j{w<yDgu454U1?QEMZ{~J)|ZLXIc1Zhq*2b{kYmC zMD0+0`G6yeswDZkHH}Yg`4DWX=_=JpA2W>;l$N&2S?pbXiXtcAU0r(LDdoR$xSX~$ zED}W_ZHvvAVn?-0a&3Y&m~&eJ7UZm1+$vwDv{SR$7PnY8ft0@%G%_*3?1|11uL3SklZpMO|q;KhL- z8JY1~f#_q@9bl)1XrLoo@rNC<_cF}P+oPu3DH~25_ZSV6!48LEoXbCB*E9eYyrf@p zCcE`>d(KHmBEK(&(SE6H`*m;gEthS?xu~>M2^3$=UvBU+&f~HVcRejKWaoC4DaGr) zp@jLNQ@&Z89h4j?k)<0AOSJ5fZEsKcg)Mcb@~p`C68Z(^*r5JNGXBSKzsT{xGD%h| z;5E^Aq1W;#ZH%}Sq${^h8eD}Wh>0jG!{4G=a92l0n2Mtvzzs1O+4qe59LXa z3Bd)S@Z8s@E^3QNY!!q`4FWqUO_s*t)f@hm`jXA$G}g0J|KUso9XA4>tk%7m&=REs zkp9Wo!#7?nc{AY03HRNlt5}Uo_OI~HzYjw?RIqXFrTg#_!vXNAKyXa|dgz6>6geAi zlhSb^;brStNS``I&<(gF3et%I{EGkp0YyQcs6`bm|Erw2B@4aR<4o95NA=-PCKg+! zZV#rrmn{Vp9WI$-8kfFgek0=S0+pQR({k*g>s(?aCzGpz&X&;i77d?g zPS2qtVP43wpBvAmqF4FYEUc4ZvyM%Fqghq>fR7>KZUT6HJkV z7@rvNeNu<}kX4+|xYwXI5UHVS^2N7h)L_gG73K&+z>+xtNVA@ z=e6tNAGs6V_(Fm&WLW2HMFTXWgF4GdPC-0$TKA>qwFD90Tq)W4T=n3r3d8<-PCCr> z5g~Oc#qxgr9Os>}_5^ro$XUR3A67~W+bjXpygU8`eWkBN%B#MD-ZyVhzT>Hm|G5f} z4f@xo?(vGB;hA$)@|L5rAdDCIh3x>W7s`jh8rplX(H`y4aYNR2h|k(#YW^dj^!5N+jOg z&e}LK7*S!i{%8t>hx7HY|@LFyH+#J|H)4rxd)u&=9~(H@#A44AN1nK5^F(! 
zRnbGf%$6=kTppq=XCTy;h#Yv0jf}?G^JtT(l72;F z+rY@8v&|>4rBCi>f`<8lw5V(5PKy&&5_?9OvTi%K4R>-EuWNu%v)~G@-D)4}N{Uf>@TuM`)TeOi133#c zEwh&_MrEQLQDT~68h0N%9l!%mQ*x|;}_cC6$F_spg7r(Gz;<3!O0t~$8U314&vkc8-Dq#H=tRNquN|eKpEb4yTWEKP> zW^SEo-Ub7v;)1sq)-;LC*+|`BE-Ej?j4mnki-En5JhUyFaGu36@sxR0#R~Do0wL;N z#ydYO+^F+Hjc8%BdTFe+VXbLk+Ao2?d zyv{r7XM;$|NIIRU@J^D=#JoScqZ9yTT4NzTK1>q28MLLVGjErfP&66IsTOoNHde1v zVl|hUiPB@qE9qDCG>tXt>!`y44GOnCWlqF2TQv9CQ7#GZdT25+xG1vFOz0_Q31wnz zufB_d^O*_<>K5Zb|{xNrmR8}N@~%7e(2i)P7x~SM21;mWTzaHeOFR7P9{FDuUY?QsW9ev zXAG4#_a;7(I{Q+f@ynjkuB4LN@)ce-FMGOURwrSNc#%Lf#-x%9ynC&!6sL>-479e3 zTm{D>-(fV>DqBJeThWn_q&Xv9T)77;k_^XKV`8sDJ)M1Nfgay#Q%Kkn5W$d)FQO}3 zv-O4e6HTCliyDCyMpU$A_`Vu&tMN|#iLX>_;cMZ*fA%_nsu(?NpO`-i7-Z(fY{x2Q zHTA)yv1AWNQ|=mUFuq1<*hCsyCBp3#ZV`?90*i=&`*!5x)oC|R5XPO(u{S@RNw+Ju zdb13}Hu)28_`JO2sb1zHyp$0Nkj?lCA;nFh{JWpIAcY{x?v7E8iPcXRHc#ARp+$El z=k{U{dc86T@ib(MJaDD=knJFhKw-cE1pGacZNm|+11dfyI@@@ccSI?+x)^%BW>&CMo{RRHHm!#tF~7-M+ybu^PUyedCMjjRl;;u3B#L8|?qJgPjSOh0K{^q9qKzst}Y{l^JM5anHdfZ|W4)L?Zm)dsBcd5Xl;s)H;VX}wiONB3* zjWEk1$1VDV?kV4lC42!wWLeLtvH^2$YzmvsYFhIF$-D~IMV)9(liAy1@qwua)q3K^ zd0p0_7OA3`fyw<=@ME>dDyGMT>&M^l>+kd3a+K~?-*j8_azE54v{KQrN7^pUkK)~M z&nL+l^p+wwVhR1ua@57)2MU*_mx}gqI;HrC<9jA6=bSIg$=pWR%&)j+K!`l8BBMQp zOC5)m>q;g_T;02eAIcPN2weO3cM#;8ZFmATa9vN+ z#63)4Klp8?3kVZb|G4XKEieAbcstrZZ#AJ;tgg6wpWyP2H3#Ws3fR9zaIEc?KjvqS zQnzcQ-+LmHn?&{cVyT7BKoMX+(~<6S^qx%iR9RtpolV}8K;K5IbeRpC;19A!Y9PEO zM!6HxpV!S_z;^;gT2X*;a(iNNvF_H+JMBfTLa8otjLb7)7C12HanCFt5950DXeb{pGC|-It zDB1H68&7@=hzS!Rjt+zCJ#GQ4CzF*=XCM@T3xTfhqlPkYdw4(8AS`;tkuW5=CG*)N z|1vhDVA$_BQ}^rTuY&){C}NNsZ#;(WP{C@2E!$i{s{%^@*|9kB40s!x-iuaeDC!0V zryZ5Yxe(>;6W1!Gm7yzl)A|d$+pMXTKdMe&jn7YrW$-J5V7%!yu6Vg3vhXE4hm;P* z`WmXZJZEI_5a8JepW2*G5FuBWJWuK#SKJk2x#aQL@orr4F@zR_I@y}*g}|!-93+-T z=`2IH%>*Axl3}a+9#qhUf!o6bI*z!h#c-3-)!HuBq(HMSVG$vGkG`Zp39CFGe3YEL z_ZfM&T<)Hu9>_f9s?cvSH%5id&(pv7q+BL1PC_GU0G?lgG~!;?@ZNP`^|Y0tZHFgZ zBxWQ3_9ZC*E!5q~d9|e`(_rrt5(Q0;0x&- zg2h-m33AR8=TA8VM3F$5U^o7+(KDxSu2vNlYBY3uKJU(0e<5L>*$C5TeV89h?GER; z7jW}C>%YxnEK0kIgRAbDcbx9XZ{dJ?J==x_^&SpRN(+hRSu@tiPE$UL6j#PsRTMO+ zTCq6}sQGqM;T=GdHFt1KE!569ru^n{0)#Ww;BW15cFC5A?1&A97G451v1oY(lEQ}X z)9)z!HF(`hr?S7N)*|5(tUcuaxBiYZ0HKCayC^Q+r3RWhp7%z)4E{B$)mMSAD;SGZ z?GRnQ+iazp2V#Jxtw*u9P zBPHu}ZI7%zc02`_{ZH>R*)@Vt{VSc2VdV3i8zQ8r5nGTkLNj1I(xfI?{A*1rNS3`f zhP2q}$|$=b%wO!8ZB*YL&uuPo4UYp60jRnWD3ur7eqtR(s(#Eg4_kVx$_P@P!2R-i^mv|gyp2@ZK#jx9sm zv;s|yQKY%a^s{5~d8>S3KHW$VzE0npd?3vF(9$hTJ(>?&W4($3?5Oy7{W3)jDZBRC zE(IlY_+|B2@3+&98JEAUJtNVU-#m3o&Qc2KAqYu6Q`XC=BEsECybzKs6N-YusiFqJ z%UOaA4zMn(F@#=^ZfaeC8li6rCog4YrG(z(zGO{!YHQTqplcNYwy=9(Wc{Bo|3M{^ zmi!^>=X{;mjPN(~u98~>33FO8IugX60o|A5skD3wt#=oAdl^DvQosJc#Ya&N}UA-iZ2dTq;c=$O%>m zo_#~=hHl$c>!3Y6=o3b=OBhGU>_^s5#Qk4b*}%@fClYo!4~E z&_w-IB|Tte7DRj{c|PFn2#GGk+7r+G*gYCxKa@db#hz^sP=27QL1Wu#yx8~aMIn=t z`LDQ#5s_+X1}+gOfz>c%|9}7h0jWWr>_rtV|A?eYkbR2eo-WICRv+dabr-fhRL3=| z$=Zt6dh2pW2cr5RTfVGsqEa!$G6h>tBkaH}Q<;6LpmV4H6r`v-NEb;Emqvv;C2}EV zteAt~fI8GlOZz(fu8u=4VJ<9FBl| zc^A2~2$RXx;{;qe6mz!85_wgcxjQ@?L}@?Y&IEb6hO3EI575wKwA z`K4V#{o296-}+xYn68;M=Ahg?{>dJ>5QBUd&M2qn--kK0`KVbuWa#)A(H86<|KIC= zMOG5}rE=UFP{5wDLVvv?3mx5o< z@N#15_Y`nd;g3cURW!Q6z@?DpaxoR2sD9IH zYBh-0*J59(FCkG_M|h}(7v>dnj7-K`7)Bn|*uce?CE~JT<@yB(bu+Oe-9_Q~OpmZ7iGV}<6z_9PewD~GLaJq7zaGs(Z3gN^Ns-uw`nuH&e})QOMuFhD3#-gwN^*_s=vfM;*e0% zrv5O|K)He4Q{a^(b6Je+D%g1kN*!Kamgk(4UnPXzR#&P4vI?N|;qp;os#K~IOI0^= z{FSXZR80pg1yU`S=^bV%gDnOduS%gf}6`TbV7&_E}kZ6x`Vl*Boh9-R^Oh?yW7 z8>bv5bKx*ih&g#4xynefwytVVDOl<5%N5;V3Xl>1C6n-L)HjSAouk@?3vV`lJemXn z+As?{rh!TCWhk4a#xB4-5@FHcFBsf?NBqLocd_~%YgdSssL*z=f%Iu=mdp2}QXErM 
[GIT binary patch data omitted — base85-encoded binary payload]

L@hJ5Z`eebb#B) z+CYLj)^O`kb4b?i7LuJbL)z%{S||q%Gv#uQ-F8ob2Ugd#1zhVAbcTY||KDP0Uz>R^ zS5eym;!%#eXzRHx#pOrOa5h*anxd|1#rQfP9OfmEB3J2fdM$l1(e-;}6@Qe>*=+%2 z;zaW*0Gy!{MVuE5Nl-O0qG*5wBJq8|0v~jxniGF$zqeq%Y??B|rrYl3+%FP>1v4Q34+)Nbx}M;pl8jP$+9lT-R=kt0K*~S+ zhd3XXs)|Oy=ew2M2{R>$|3H-fO2clu&h`qYLWeZ2lFB$Vp7`J(ML|G zV~u39;n2SXO6?(z!*~Cn4uNIN7R>x0o=tUcvTk$FFlG2h{=qW!SH(yyz4r1ww9v{G zWb%Hdu7qi&8@&WnZp9d*sRZqe>Q&}|mYrrx z4&`Of`DLFgf(U4{CkaF#x?&Bumqo6)=eTVDfPRS(XalCP zL!rhk$7$u8g70*#EByq*#5%PvbMr*3ImjYnZ#~(31)P;vBt(LxVUjM}Zbt zG;T$-(rSbsZW}z8`@>cg?ni+%lTLtNTNeTORmN;)y3HOARbw#c{}eL3*d07G$U8O8 zWR9l#uoO&kzXYk8aZvVLJ$5^cd=iXgHQD{5R7?IsU3g{_)1sEDShrUN!kcOmwHUqu z4DL^(3l)X*9!*{KowWxy zt9O-RVj;V-1f;~G>jFagT*&6fAsGJ3lHU8%bxzPExn%hXFn zgcI`49VYA7uKl?Z2K^}yN|m+R%u$~Z{#xsdc!3FTq1@;<58N+a6wHrAOfu~kljYlv zXB=_DK6@{rW&&U$p9Ffsl*`?x^09pZC6^9wKqBgM_fD$Z z#8IhW?)8Bi@JSkxZ>#1n5A~tioOw6jhu=?Hs@hebtswXXh-c@~Yp(=sVxf!f@+azn zFMfhv-1t~*h$mn$vsvC>OC)lNs-=G;j&q9kX4S!@~3*2?-*jkGgavB7^#QLO$tp ze)uG+oi{<%fAbn*M2b1I<3xY_nJdE{oCP4^nuC8g-Mk7zkXf z{YKF@z)-cAm-gGGj+lLBXZ_`mVRUKZ)rF{k@vUmQ)Na%s0(6~#vP@*H#rpVz1&cxm z5rHvc3l8qw6}0hb{=uw`k-m`~ZLe;k_ErK4Z7v2@ogxuxzYX)w&7}9zsgF4nsp%t8 zQb<>9DT*1%D)Qq&>3r|cAq-!$ZuzZ{QLl372o@SS{`;n1>$0{$iU)N?EuLo$$dp^< zIGvI*-&4NJPo)WxlE?XAq>)%^a{u+y^IlsVxu&9@wSXc}YC-}XTm>1%a7u^vwlyUS zcHxPDqrE(dh!?dF%rCK%7NOZ#dm|{-PToX^v<}pP71G%t-cSPFGE`P;I}QEeOeF z0uHIcq2P{N)S`}VL5?~B(?Nhuq1%~hL7gaB#lKTH=X0wt zZGI-rs=YU+h-zxXW6vU7i>Ouu(nn!CMoO0V43`L#wJxKBeeKI}gvUoXR5KAF3FmXl zXvDjTqo>O~t*Hb^--lej=S6buB_~A3g-R461i-8J)U0&-ujrSp55D3WZtya!9r#La0qIJQ@#%dz>^wRICVQB3$22dXrRHttCnwykr;*ZH}n2e|WbZ&|CBV}JN`xIJxqp@)_f9&(@kZI)T> z{MN*OB41NiTji>j_ic8R?N5w+0`=<%_r3DtNFd0)#o>t{HY6FHsYzxT1)v@VB;?oB zCnrhU@4LuEY|oXCCvSLH<-?ZDYMHH?cEPOFXL-|VVN!}w_#VzY*fy{it`YL17NbzP zoYji6Lhmp;w=wVxq^?l;>w(xP;@0Db7(;e}m{o}b#gq+hK>nv_Zi!@+Bc1%QXKvCL zE4@LJLB`!t^v{Y{-Es&zv9wa=^XTSnfE=bqS^|IkWO%qG1!^9k z%e#OUNjL=WMW`vrmpA@cH=_juYV;S16biT8z-s(PA=3j_ZY_4qK#AdFzNJqnrnDw* z6e}W`+89E~f5)NpwN+4xdF*#b9)W-NBk9Bv5+t6RQOGfN6)w3HQ!z7JN1gX;4XHXkr(}|!FVi_bi$`@8XOz+*Nu@@j5z1fj>x2Q<$2$S zxq1|b!nH7NU%~04^Dn+KN}ms37bkdc$caWaSG3Q_#TB|WGe~k}P$}g-oyV1sL;_(Y zo@hM0+cOmi^zL6=ms)!RQ9_@wQro<%qOnk+UT7l?oWiRwjTfv;^erVpJ*nrixE4q< z7RJ$DspLw^N}G_*5d+Rzr_AE*PHGzAtl20r+()&E^NQ{S@ha@;3QU7f601pSNVcjM zH>>huN8^10p`iH?ZY6kYB&_K3BhQlKO@h5N1*=wuUgDAG?BG~Ntu{`2caJpGNjd@wuED>9>#KFjB*rZAm- zT^}hFb}BN^U>?)Y2#(&n8i3B&Y3@&XZSdFQ6;k$9;Ocp0GH={=;8{z>hmwRo_z0^R z%ptwhM>KzYTWU3D4t{8W-u8Cm(fDbl&}g)r_>~##pG7yU()3bYa`e2UqndHy%~M&% z#s?rN06w5RWC8HVpZgxUP(5S8*R1<{KrkVaULZHoO3b%pF2;|(4=nHfk$$cq*R#t= zDTq+beAH>Qd0pVUc-J5?f*LbZ<8Js?)RfPuT%<8FCLuU=xsy9!7)Ec_)*sZH@??5R zmh6Aah|EWyEHe{gkn)<3o!Ndo@-NJ`rVH4k-A*RvufGOv zrUw`w_fyf5Tlm>2 zO~A;`Z|cJI+VweO?+-jg370fk{Q!PII2s5f$jOZbl~_G^0e#qLH-IkYK8D0VLwyxY zSR83mlYH%@H2J*T|i{!?q;22$KrhENFFD%jamhyNd`u7@(vvj1OXtJ$SYOx z1COQ)jgCOHRz?TKpXNmNUKPF?nMG4RKeZ~TVV+r*RNXX92pqxKjUnO?9SKdi(Q9gp zSr&+A_6uPIz#PlpgC!CRT0W>05D#}0P+-k9hcPMQ0|2@{QkSx;a)?@qzKSN*TA=M$ zA746E+j1sA8qi+T%Qbm?olBMnQGo}Tz$;Y&+%5{Q6S&T$-ef_I3o5VBuySAO9wfwL zgJfudoWCkk!UZ!n*)J8*mB@QUrBC}|$8+9`W?v({Mf?Y7qgMlVg^&Q(SpGOi$h z%FHl-F8GH1ivAFyOAUVx^^J7S1?0V+q|I~>UXeqV*2RL~^=*Xmij2`s^7Q@8u0TOK zJ*7kQdQca#fot}S5s9V|!LkI%^*0ybvY0ELj*|iryWGu7-P^;v>`v3UTcUL-wH+m6 zk1KKc_TH~zzm2jioON_I#RHOZ(i#tue0_TY8$45?%pmcMHN6eE5Ybw6N zgmxvOYO`%r8u^BcTv1wvYdgY0J4bc*7y*H6iwzh5CS_~T2mvwDYm@5`lee|8xAXj3 zwM9{04roP!yYrXmK)`C0a)2#0@dwS2{tEgkkv?G=^&0n%&NHDPpAWTI%$U+GF@{a9 zIQCsR&fN!X+&rMGzQQjtT_NhV&-*TKqs5X~2D~dvHz$(0)3Y^%_{g_6z98;7qq^DN zV}A;TTF(-SN!q&7x7xfXY7D{6qzyu?zykj!csw2 z+0MRUu+(INKDhwjW$}fN0OT?c!o8iw2-U)Iad4>Pa9O=yrvE(MWpI-nJJ|`qAacvX9}@^g 
zBM8#cq0g$M_9ds4 zi-S=DP*}zURihb!ylUE6T1c4rMW$52A>YgIjZJbr^Q&YTYkkrVwZJugI*mIRjR2;N zRqlPSrNu*!#N;iMuG4ZS4PBh%Wh4wi*#Pir{X@3+(FcLXiRz9)sFxdcTzQb@=uTd0 zlrJ)=juU8tE3a$h`A)9@ylX~u%h%4x-%kw6<61Opz^QD)(OsW{#V0Z?#Pz3^Bvmy3%!E~4`}v>q?A_SIbs8RK;6;s%P@@N}?MUj{t3j0>44 zzx%E%Vml^6RKh`>Wum(h)&Z|Ufjo=TsfeGwa zG3GWgm6}xj34MmxqS$Xyd#+z)-+&M=x5laM`9ghc3B%V2f?j1DHRGNL?0i7PB0>%6 zvkKXqs>lHn7g2SlulCqUXKYlUx7s3bwqOZg3^7ei;_Y_wg`KpaQ|4w^c!fjc|6#7X z0eeldIqZx$@wzXFWQ_J8SP|@~zmFFSk8#|tbhtX&MOBeT8MWW89|bScWsyLKU_*EMg(=#)t|EDwUu?PO>+_iF~jy!rO_#2VuT&2ZRT zPH0V6A^)3$l{*s4Vw?`3SG{>%&M1PY(alvem3*b*WF~i24n7TkP0$mJgEZWmm5yN#`!EXtLy6oOWj9?X4gsRCR3rwEfA$p!1T*6Pm(sjG)tJB(`_%!Coj_BgUb!`QB*o>c5#AcSiFFi_Z?cdmei#I= zk*=y%#>9MN`50`u#7wj_Z~`%rLd%MI1^TI-kprE|5|W%&9eVvJx%#!mxt7IVEpfc% zTAW0N<0-X(AY8jC;^T+%tDetJQYzKvJniM#&9WfE%P)2w@73ZXC9Jc(fBDSG&77RL?(Q@>`v37Z4CrCKD(UZJUU* z-_#J1X=hBf8$?rn-WaGdhzV>2c$D^Eh+i0xavOv-^m!8NXK+6DcyNqnEBt23Db>ru zlW^Uc5wKe1bo))&oKrNceROe8%g9 zTtHIDd`Og}ZV?fJ@wH|r%!Yfrd6V@X8M_sF}6?TnX=Ljc1 zkD|ifV%u$mrnhdUXbV-$<@$r9!`Bz*?YtKlMvSkvmu0qY&HU8Mk?G3)bdbu@gC}jc zmrZ1C7|q^X*9|sFnE>IZG<)Q;IS+o+1Z^7v!}QJdB#+VSefhgv`9O7a`7&s-xo1du zB0079FjZVO)&cUrGsk~*V8qNQPET@=;X%D!e&%G3JAkR2H>OQugz*E4Kz%eWrZPRfdV=N=1eK2SSsVOxn2$#E~Md7~c^XJC`VAx(eD>H|&+&E-wk<3{O0!msG0xD+lzR?Ua9PR!jzt-UWGx&hPG)-LMWmILr%EViE#;RU zX(@^T#e-KntUJXl*>;j-RN$%=`A^iM(sb>$u%5*hKw5;mo4{Edc308sfO!FS&_tLb zTrkcnQ3FBwz8Sho=RfVJukuJ4z8To`&-1Djv^B#W3&qbNiF)+_23uY38zWw%S&G-8 z!lQQOm8XBgV*vyLPgSxTD>X>ic$#d6!TmLMDn3p*X>o%aGEuUz+eLqiT_v9jQFg$W zJgRqkga@_NRc!j(7KxvJ0SO|Q(eNK2yF-wIk{2?rzV12q06O#O>h<6BCYZJCrT`+b z)xk+UkSNtN6NleCGNsG5Q+=goQR^BfWNl!9PxgFDX|etVQDOBp+OX-mb9Bk5P-6`% z`wq8Oy{98(h{Y*B=}ewASQHxfMz+z=EibS`bG^{Lpyjmh25 zob$~s*3J+lW%6{9N9X|wy;p*sP)JSVtzOG=K6+4P_+hy5eH2WrV0I7H^A4R_)|ri% zpeG9uxw7_wr_V^kj9^gCF4Es{YIsS=;8sGQ-8bjUWA+gzX&_di&YWg7jfEaQbMvJu zuCWx;z>6OXzXxv&IlFzl%p&OQ z96HX@&wB-7v?R<2rI0l$DSY3+`V#8|QhET^|I+KgGKH~CI@(1}UgATQj-L7IM-O6& z@fJyruE5Hr$ju{E-xP-Bi(M(~S?F&K31nTOq&cJV_%gRwX~HH-kIoYM_R6ZmHw)sb zTj!$IYW&N7sq(qrw_oYRSvjM@twJ%6Du`NL@&m+n^cT^)R{vakFP{ya^i=A~iw@Tt zQW^B8Uw;s97=JlmVOM6NlnSgE-S-$wP;QrrgpYc^qKoYl3Iolb4kVOX39xb|t8gz3 z-g=%ojdC8i8zG(`(6v z5`V}s`r&c_MKLj?K}~c#3Jg|jP9*31HaMAwOqtXIMewqDJ-8E-ikR{eCGSpCC`epy znpTHSf2X}O-MEFIhB0gZ#}^!c)6zF?4n&{3B|GuC5WocJCoNL8biglbyy@?GXy2pR9T5^KY;Yj@IY8g0p0tXlhTqhZ@U5k#a!sBZHS*nO^vK zUy{ib)<)XAc!$6Mn0jcxF##K#>(tADz>_w_dgOYW+i&&>%wbv;`;XO&9j;{S83UvirL_G0s z@e5$?_KNjwmB0fX?Uzf|LtD*Z`dlo-(s!Y9`A=!wtO|T*#-!VtU_gbBO}Oy8y9yCU zB~8-dx5g)CW~r<)?rXYfIUb_PH2SI8E1lIrimF{dX-p*QWw(^)wRZ^Frx?{bF0?#X z!yAS81Om0jT5DAOZoSuDPtbiyP4O-_B%^tnVBylMs}56zCfHVnJC+g*ja1<(k{0BV zT~UCu6Q&h}x~u#heVX+E0D$@hGnW4A37`W)AGoV|yPTgpTUlTIYZhStGrkB$L^yKP zJmv8mdaP_fFppI?t%jENhB)qQnpG_~p$Jv`r}%ns`}{?m7;yIb1%cp`k&is$q-2>l zsiD{!JLkZ8nJcF6_&5a2NPXZtVDF$XOBiAvOVd{L?E<&4zpAEYmj^HZJnY0R37iC} z3oPypjmQKr4Wdbs`=|XPJ{t-s7Xq06FXB7&xgm1jF_i2PV_BnpTd)Ce2SBH7qjr9M zn-L)^K@}|OeyN4~W)@7nNmgjrM?|Y)t(M$V~$NoJuUJ2I&fnBOWv^#=yOlm8O8>8Bp5a`!!=Me;&ca;;D;K)DiJ*e zxy~YrpugDhRLdo-cedNuD=Z|4>nD5ZCTnm}t>41DkSvSS>k&At)s(k)GgH&B#VKNZ18CDiJ3GE+*FOu1(1OV?L75L6Ce0%}MTL$Mkbi{}1N{ z$L#zMJfFbtyAw=uF1mO+oj2x#q^Vd$_bIOT<#uy_XD!Gx-|dU|!o2OewO>6{518}# z>+yMgN*_5hPs7?{CqxF(XAC`v=zWwe98AZr7c~LTcmep-i2{>t-Rc`czjX@5f{LIp znv3VgacxCHR@y9^rw#A);Y3C)4aV;gmmI4nXStlOPQ%d|=Ogfe4U_Em1azJF-q!rU z1y6SzMz2wz#-uM{7=kFodUB_JJmek!QYrZf!DKRox+(IqY-NVR^2&um8 znQRM+x15zJAj{bVc%bV6Tl=fxfUN{GUjF;`NE?o8nQ2GKm$PWVuq0o!l!f~yZU``a zl3Vx9WWw-H(8~_UR)#eGfPaMT6QpmWKcDu0=YWd-p*V(_3y3U&muerSi>U588ns9P z&*FL|P`Y)Et9(>Kw~;EeP8VFCzJ`A!9^yTFWYDnZ~4&(`Hy}X<&Kzaw!!bX 
zj%FP+CHEfd>R|KVeYR75J-+t>Xw=a^LGCLR?&1=lf#vqh*+U^!n`HLgDFEA{aF^gHiTgu58su%pToPt%6RO860w@`d;$ zig@A&bfU9V(Kpk@z*Nz>jK#fKVPLe_Mt(2EaWTo(U%HZvbLL+b-4x}$z){g|m9 z5h8en1)?y!Wq!`}Ejxrkrba_|hw4d}%S|d2U%!L!*&a;>f$S-n2J!rUU@Hz5jfP9b znE1?4^weC>5;f@tg#P<>KMcor^C|_;bH?sz)I$86X9TwA2FjNp~Lhk5j<9 z(*SLfhZB7iT?l6e1aTa;ncH)dg;{uK&SMk->oS4Hx^C}Rluh^|x3lehMd95XhM0TT z9`Qwacph4^L_&hs612B1FNjQfYphF!iZatONY@-csg8{aM-iO++@b(#y0*dmSSkPl z&Svu;T^FtYWG~y+mnL!CNZRNBS8B93XBzTLj^*((i#Ms3SDAdksq`ridsX z>6rgl8vPKI4t=fLl4zM|;&C`I zFf6GgOMzEF$2u~#>-5PtZ)8uZq5zPvI*n=qXAR!(v_CWzNf)9>MG! z&VYOErF3=KL~)BRI)_~qeJ!`PLKl|{aOQc<4dvoZQO;|YpHJrjwr2wKJ~|W*qj-PC z{`uUBtmroF<%I2nqPBs*L<4_;NUPf<6->YDk$gUlZ}d0^Y*&H3Q6TgJ6YWIk_6AQr zKG>>RFe5*>f_QZREbUH)Wy(fwT{Z+Y@t3C74OH8A&MCTuf??!wX(Ixuh4{NqkIh>z z@$rt)Np{5hQFN`IJA^z@HO3qWy>FYQJsjgX)VBa(yz{%>&<#*#ZjjH>D zU5(?38ETeZSEY~QR1oOHF34Sbx*QQ4U zG!&p#Cr@+qCo>2T9s6_TFttfVE$u}AbAI` zWX4I4tatCT4{#Hg_Yc{kmTAV>BN4PmJt&d~PmzR&Bd}ugxMBD~n?eKFS&Db<> zH!_?kk2*%#Q_4a|_4F&6ogGF=<`T=MicCwv%d; zH-;hl+9Yk+#oS}re#jyO{_+OIVfx-LAJzf0Im~@rQPk@cF&eict+ z?;DNDkT)UVHaC?`i)FY1>a@OiE!-C=<>YsCPGlc2b&qLE`KV?I$G$lAPOt^IjaOvb zZGTrVQ{73!&0YmSTua$sdB z$itiaf+}9y3dWU(j6&6Tu3ey|pwj&}R`QlxznwmO3f%M?1#-3nsQ%+)&YPj)xIAj< zuq@oao;e<()p=c<&-NvsSwqGCuj!Eac$CE*tO*cz)zYMr!hm~kSw>%Db$o4MB+eg} z0M?`=z!F;*fW$JO1!>e?i=G*;FrQehk)RVrY*$x-OzgDKp+YYYD!ddVPeg{6-rmYo ze)gw4z^&>D#u=Fgb%Wdyvhw)c?l{tw*XXNc?lD#f>5I(_=b0;&5v1YL@M}aR@tY9S z8ZYGs?lX>PoLX_J3u_3{R~ofoEuVGD)=m?#`pvCSStAd8eU-|EjBTp1R@Is$MHmX-!3$=zcE5oGL@9mzbZvFYbnQ|HQ;#uaB!0{Cgvs1=3O8`MJ;_h{F+C_pnv% zLCF)T+mCap&ly7}e@gO=++0hWH7)A>d@S{3%%6Q^uxm!u=Z_Tm97MML8F69{2CWvP z9wjIku?JFyB_;Ec$oAr8IrQJT{>*f~eU^!{VqxOJ$jiU9E8vi7v%)S^&1>Cx_oMmt z2=@(`a{U7YC~b6;fE?)&V&mj;L zAfL2+lL3Qg&g5hDt3t=$PA(9fi4@Xv@bRT~J!7|zM4#>zHlW;Y+p}@*z-8h$KYw!4 zd55=g9%{Px6B(sxTu$7cy9*}#h$U9~Se=36Ry0|6nr@j>)f#%TI;^ZSf1TDh-+0y4 z^Alv~U1}`M@QbxQz-)DMpV8HV@!ib_-}B95rqj+UN=ql_ z;4vMP2w-ZY(8aJcQ{E@|vuPo^wpgA*~UK$xJlZ=HP(Dc0MHS`^bZ6nd1hks5sz42<=)3#+9*E9P^ zn@<_Y)gcXg&>Eh%7f5Ph$~@%6pspBsq@Nc}xyc??2-7)bk-xt%>oQt1vlcprY*7N7 z%M=sR5ZKW-{l@%xu#0~r;P&-;;Qfo^@vUU|oDeRK*baspvsCL*>VQ>#h%IF+k+l>4 zO5o4(L#FrRZL4={ik=N(_Unp{Fa^5{B%J4DxTg_d2^=H44|RQ~cT-UN4mgzd5GnY1 zss&fEsK3#A66#L9N)N3E*Oi6p{l+6w(+?ODjfZV-Bo;MdFrFd3O^!Re4La-jO+y-- zx#u{Og4aZ}7_tfbmnvA9X*jAW4dS-bOk>aUVIn%1u@WDvug1#zr1J4&=_T(CGDe{= z!6dY3NtY*|p=nZyQdVMaGgj1O0GNFQbWCI5pWsv56m)uR`2u@?K>3F=s_7q^v|R}* zRI9bpz>QP~b8|%MX4089AUZ)T2K; zNZv!Rl2um1n;okMw^JwKS`9~9Ri7q8xRPc2rxP27)?SW`%hI`A0p=nB7PWeMx8H)} zd<&Jwy~2&+E|cD}$NgDb9_b+?y7A&o_t)$9iN1G!y(bkqPr$0(D38xn+I6m7Gb*MB zVSU^W5fr1%7rcS$@W>%_`$&TGx^^ZAX{QPfa_>IA1PWeShq-U3@{1u?A1HTOk2+B9 zKi@`+%kl)pJS|)&vu+234tvYC0R4KrKwpTMKGNGCVyCD#^G>cK>0SZl!WaYQW(|G1 z5H%qqn4`~*%n>m0oAuIVoH)`S=nnw`DB3t^)XGUF+b8$!u4~k?!M0zm7Z$r~@s=HL zKsrbyRqGCd{s{XkgMCR`mGe1VYh0{py(`={u^$i2)1I+d` zSHT)}uc;o}hx+OKCrdnm$iL-Ltb!d*^xB&gK?OO|f>PHZ4{4bqAvE|HJ*|xjwA+!9 z9V|I{`pzya3m}P%S_H?04Y&tu#s{*jXBP!6yNlM_tE$YwBa*3KCrVfqM0ORQowB0g zJ2oAd-K&r#2&g78l{n^s_TF{L`o?n{M-}>SO1_pqRr1hC`z7p zwZ_1bSKs94E%En&OUUU&ZQp5ch3P2BYo+5grp-rKg!mT>nu6&*{~ru3)|)AanO17_ zD|bBY)5~4gx4;u#AyrQO*v>Y9DUM^|1Wb|`a9>jM3GZwl*N57o7m^_*tUmgmWYZwX z=g4<2VJc2EVyXU1FtmwGXjf=bFww5FX5oc7fd{7$a@WDKJMO;E_D>}el7aV61y^aV z*SY@=T#QyQ6&~p%+nlTn6&=d49aTN#cEu{Oxzl?sWptH6{_y!e6=r+7@=j+*85tnI zm<1yfpgeYDzI0!3PUvz{_evQ{cWL5f$)8VM^_xdLs>2;p==;adzs} z&j{!xnvqt$y_l(vivCr^^&lSIm0W&hdGh*Hm62FC1htVuw)oMdH|AnXv|naXecQ%z zQuuPhdSYwOL}7m6jo)E@NF?i*lz1!CBETDGyfona{l=yfV z60k_`OISnv;?+SsU560H3TOYZZ@aC}E@zIyN6l+$3__}W4hOWqzK#6R;#nK$5|>B- zWCyC}cCx)yAUAZ+CDz^y*19pO1! 
zL~(%ZWH*SpkI!e(iX*MChxoQ+3p?bw%K|C{n94K5WdSa>V%8}7!)TQH`Lik-Hj^nO z?u+HHnF`H#HJUk@w|SRi5*Alga$7AO#3pI)9%XsZFRsbLc)^UCYIlp^Zw%3X*)X-} zIdmi04Z_}JB2j*6Y!3q2$DyZkHn$7Wv|($*H!_@ORZ`6t&^Me5d5a!J;6g)%+l65} z?@4&@{WROl1qkjr0U{lIHdbX#z-_>>QB-CV_PmECWf-{O*@Cx0EH1CKF1DbXky8Yq z0^PE^0A`WmV?u`}7KTrz?7E9O`JVl_nXDQG3PtXuR+F#Z5H&&Kku55eBU;qs!-5AP_H81*!(cH!sOr8Sui)HcMa zT+y9f&`aZgdav7n+1chkO4ib+PYcDD>``oL&{yphh#bLhs1WV_giL=T@rO&9RG5Vy zZmq@x^|oU4cxJzXFvzcP)TAmkBJSnm1TEu(d82KroC4WUWniy3%y8BZts}FqQhS6k zrBqE0T_OEuS;rI<|KNJHL(oi@)x}i``*9VJ2p?*kgax|&{d9ysxf1JVSjQN| zal(W{%d%q9Z!uz7zvMzLQHY=Jyo2yG0187Mu>`;Yw zjsj(HWyJn!um2Qgw(C|otV3=$z)DcE9_R;Lk_vZkh=$}@7ZU2ETpyZfynpN(vnzL? zY1rwDELhsqZw9CG!%Q=9$8#v69>#0ZIs%VKsUc`8nZxXYC6Jqxa4w=C6AKq<4fm0; z<7-Jz3S*BCLTHtZiprd*dR*lu7kAAJYsehpbENr~-C%Kk!=!TT<>UDyFmyZ8vE>fT zZH);o*gr{%{U-a(jrbY()n(9-&bcGH%5R>T8~4}mj=q_s0ZV4Y4e2ulDSQhDz3Isk-ez z;xtK99r+>Ev;;HRp}mx9QAbmO;M0T!_HcA4-c{2dSmXok6@(=BAI9P(K9kT5QQfHG|rOT>0} z?LC~u*mFo;0Rr7E-ik8{5z`76_(Kh#&Ym(@sI2VfDj7ruE%B%Sn2>E}D9=ICXgB~8 zH>LDlFy!iJQkpZlLZ!B*k_9SWn8)Zgq_D`N<}})`;>Y-FbV|zu$yWAOrWkxdy`p^c z0Uq$A>(}4wf?;%b$7iC(K7}wB2haK&UQEKMuBF_+%l+G$r+*R;33ZPwST(CxF?xzdI~q zn>oflNj0?qHDz!nlt}BmfN8Ic+jc$BtG2tu14*j&nK4b^(qhK-k<+_`=V5g9U4MDw zDwfUijv-e|e*Q^OGkih@A2*Y6nGX_vBm9Ye?MP~P1tgnqy`bElioFo$5r$@7W$DL9 zxf;KAQ7ZL%rlTeIiX&|M-(taSG1hMlL5FtRK668KnATnUC9ca6XxJE;suox zq`)WGv&c|IvTX2IL_v%&W!FAhRYBNtTeNlTtIZCS+P5E;2pzF8?|HLNN!$dseUb6X zls*Z89;p!KYAWBqb>g#h(y5%7lHsrS)-L-7hYGCTb8`zqh%VrZvuZ`SSvA>QWe$%u z(A;aMzUlzC+t~2LYtiml&Wt(D!ZOBNqm~@vj>0lh9R5ss z!xx5#O8O_?X+Tm$miv=hDF!I6+fe5&#Eq-=ga2tS0i8J};iwBq*#UnnmSna~ z#v#MLp!GOdg+Sbc@d!*~I7mHt2KA$xEF4KEB~y5uiYOP58?4QXE2Ri=!hlf*%;xpa zZg(8uH)^H5rOS$DEi8*%6gR}2&2pzlqDu2kHmRm}+b1@hZLf&?vmF-%V>p$T|Cz=K zFo60KyJUq0yzLr#*5dEMeyw~Ks(KMpI1N1Srq-OAw@OPRK+$s+*{drtJ3_$rK(WPi@*O*@qxq(S)-!=7Pmz=0Ox?+pf% zPc2<>aq0M@Yljo{YpOuu5u7r?dSJ*b+$V3W!!Y0>bcyKA6s7e=?Zs=E zKF|^DXcILey0Oz4=1ZTD?NfGXvw1G>=W{vB?FRJh#6%YEa5&H*khmq zwn_s>XuV~$dQuss70w1-dnF7d5y|ioOYlnft@Guq?#2FQLTx*`((j7gHWoi*Z1s)& zXQs6+6-NiF<&}89mmgjT+(wvzf?)p|)W?0COXB(ee+ier8Y)-8ma13CArEIG5zaGxi5GN<+*d}Xaq2M+J+ z2!uEl$`eJN!s64^44Y{@Usj}}=&FEr44YyG+k;?kN zY%$m({`guR!GI6i{bSBaREgZ)%OysuU;Z;mwuuAyQ&8AE=#>~k;R3S^~fZ=wMp$Pb^XVFy{dg!!leV&Y*-!rwhV8 z$|pTm^LYT=5O@C+R7?t9R5{|%mVnPUKa{&$R%SfV`(c;I|_;= zTXiS6M~1ULAJ6ND=ZlTyZIG$vZ}|UK_;q7T8Ej!@a+$hwNJ&T1ftu3|CzAz0 z(U#~e(3BE@GBEK!KjaerjFbOYI0BB?wKco71nIhfIsJ_Fjd=*_lYwgAL*2ZDtha(G z85UKijeFX;VSe#!t@PJ9&KUN+Gg0S9Jf!UZS)Ri= zN7$f0&WYpS>gY?6b=cT*CqK0`DqbM=N-c9ZPL5_&~m_Q>>*U-cPc$^ew~_^KFzbsOp`s|(II(~g;)dtoH5>M$g4Tzsramh z4CHTXRjLWHSgdMjt}l?F$fFQnCmlt-)+N>Ll(4mbc)cRkuk%0&h9Oxm)p(RU(?8^r znHz9>4w;WHgp4DYfk2Ck0N$~hVlZaTV^v2_{ywpwYz2Mi(#EKjNY9ycm$SnW=mq|% zv#ODZ%IlfIl_7sCzFk^&C=mo;W$|`>bNlDt1nu2hveHJVk1muOsYep#z%7K$cHoLv zuD3@#({18)(01aSBJ>&-bV6PZU(Y(J;ux;j0VP7Z{2S;g6=k~IpQN9e%SldQtxAX{tN zL82}%;FvWQXi4ns1Bl(TIyRV%YBN&D`${nYRbvhH2f60LL~>5DZ! 
z*lW2mV6G|xbgT(uJbzcLZV1?r^^7|7l%RLZ&kL$;UO&xDb%lWxS+lx_4Aus1E+25_ zFMd-$z{L~(3Tu|B7qBGoXQ)8vAS)MgnP19fS*3>F1ro@+cx6Erq-1}3$gdj^@6r=M z8|Qo{CB%Ya=kNnS-eNWt!!Xb{9IiDDD~Gqz1lV21mhh_ph63J?po{~z`4g0G;vF}t zBT(yCJl6_f|NF?37%&&iLN#Jni432+0fUuA8v6sA;XGI*Q>ywW&4NnGv+Rh*@MSs) zU&w$S+%R37l5-!}SdQvGUA!KHmu*QaS+6!K4nLNH^>$l|`7K$VEf)y*my|6c!@KlO zV-ndjwJVhe%#*h&EI_2V+&a7$g;fu^0(`S3i+kGlxJdjPfJxpk?md~^)tI8lde`Jt zJY7mr!*Sk1l9!;qpnJ}ILl}~%pO{BQh9P;S%B1-Od&Q636rd&+dRUOLGc6yKR*Dyy zQ$LCPnNfp>ApO3&FnJQt>-(J3{Q`!*h^v!4pP0ODk-UJ)oL17Rqhsuw1p`Fnhy+47 z!F2>|MUj_-Arq~b<6_Z#k*qDXwJ|N!>vdx$chLll`<+^~yqmHnptSX$h6eEetWu!7 zKTd)Ur>m=`#tz^5kBMu~VuF7>)=yp~2c;$fsT)p$?zdWWAq4=szgehlM$B*A%L1vC zHF>ZA@k6Qz(|pUph;$%#0a!tq7@m}zJ41 z*5#Lh)5Nid%{R`CJRl0QJ99rHOE2YPL#p9+Pc0!9NUlTk>2ATVAiqBa8y8U&O#N=U zAGAADsvP2vO~79Edjdu7ouAFvkZ$dmFb6s6vI??l!!G4K*uKY$IlTF)AEZ>2KU+_I zHbH*CPA5*dGC`EGi<(I0)4DA3`{fU0pmwuwZu-ND8prEPn?t8HMk*3j(PN7a*Wi0( z;@@6bRom;;VA}Q7$tVQr0t=yX;%!u;_UNU1g`rx(R0-w3^@e;>5~@H~+C=p{ z%VkdS@pYh3L$k6VDA?E{`^AW>tA1`G!tBD|d6?alyGGUp3?p@*pl~)U+^{_?zpL%t zf|ppr3?e598sCTCYrqm}t}d)riR#bZL(%Qe#t5+;t4~cnmEX_<$d3DoD5+$6XUp}y zslyK&J`ZPH%rDs8cKYn`N##PCJ(L2Rn$+?Fm6;29Ol4lf@BJ*Eq>$_pCESTY$0BCJ z_r3$fnes6SIv#4P43+tF49HH&7RtY;L84>;^M9_nZ)Ag+I%4|V7l$j*06Nl!v{M2L zlA(|?DthKzqMVY+jcjpwZ^?3BF$JshUr?+hT`niqaPp`ns_AdmL~$dJ@-zs3nvFr@ z7zP)nUW++vaN4vHd}C+JvrBCkD_h2QW4}h@t-o3Br?C-d(QJe_k09*5wbfBQKk#DB zEXBGonzU7h|7l^P4lNpt9X&$)^^&NeoSC~KgZJf(nuF^ju@kghE%^XmR`Hnqa*&$C zVezb5pp84_jai?UW@{m9z`{lCI`m3;|2Uj60=oO1Lb3sK$t`^GnvG2e2KI?LHWHme z_UYIw&rJX}dK!IFNn=y_t5|J+zZk-^arf-FGf)2eAX_m9OeIgAAb?ma%jDPTZY;ql zgfdngo-VTFU7n0<DU6aeeRqgiE|XjyDS1_!=69 zxIZe37@$O=`P%>iPLWf_5t!;c6+`UdB9!2a=XM41Ni7GCVWuNA%5Z_?nMnw@X8#qahBFwb9a=l5{9Paisc{Ul6(7 z-F(d%APxz#N^%eeFS`ei@H5+6xmC@a=9Gk$_$@mF%8FfBBR@nHtuRzfke>v{gWR7T-U`Z97P;+48mq$SnF7V3p zBGUXT{|3N%;`!Od6-_{y^Cw}}5ah}gjGNv>ng>PNB1qvLckCK6i(!?vYM5N>*`Ifoh@hnSS z{FQq_Jd<&atjlHQ3J};SZ>j)0Jt{i$%indo*+emzWLlz<(qJ4|lQJEze2-gAI zEE~>e|H<1o>ag@_NvuFf?d25z3 z6gykVRkm}gTOfSWe#q3cT^#Mt4wJ@P?7!pLUSAab(8sa$f5Dh@jRvmKLBsB9faqpx zT0irQZ4Iy{g4Ct>ogHrI-=1}Gg3vHo(l~6wvb%Pf{T73P4;H$*d=99}Thjud&XAd)#Kf&quA!&YK3Vr_Nl zK>#(%sB@w&Qj1uV^4tpPBMqiULg-elGX??lcErUGC_65TXnZ_IAf?wz-N$t^3R+ai zfFq4708ZkOh{loUu7+H8mnycUwxssz*OJu(?lC953V-wqI9qthXL6W#YS3p^vbcV5 zLC2U~o7>9^o6fh}I9Bs}ixp)<{3>mL>i*R26NpY8e9&x)J2tu%4Z-z%$b|CKStHeDvvUitTKI7GvR{R{ zYZ8QNb#x+L%RIr2ShO1-su*@r|#!!aKj7=M?JRO=`xgjYthq1VVSLI3c!! 
zb&~O}sm%fy7hFzZgWE1vO~UPM;daO5yqLtyoEYlFQbnmr_(bJcpbshH?aj#n=t#Uc zNq0bt;)U|M{Y-3ASU?AYp_We$dp_M{14{hxR8I)*ABk=C>R|1^lq-q zA8~EInR6D9tqPN*krf{#Ey(IF=>(2Cb7a`a8RW5d%B7C1|NKQYcXu2a8~_~Bt-}_t zrHw5_ujiY~WVD(XKSfIU?VETIH%C=%308*H!g6v(#qmdR^D5l)>IpAKc?Gb94<&Vo zl2_)F_yP4-YP{|)ga5Pn>ir*;$sm&xAdy^fm7zuBjySsq$e&ed-&@IbVUd}xI&#yd zsqsTBq-(?)A9AnZq6SPIZ4a1d#DZ)>zK`T<*n_r za`hiB#lRn=O>@uVCSfRK?2Hfqr8i>8`-ob4M}DETZEURUAg2buIaZ-9#;|Q_*$I>@ z;eMK2my@Vp=4u?b7bG2PYrtCY4WCXk%w-;IlrMs z;&b12`l{m~Mpsmx&?#T9Rb@MAWE5tfCU>F!dy=FLEN#`<9g|@a2bElFF^WL%qGn|MUw1ld zhpPamt?c5>R)iE+}1Nf45cyw4v zq>uxkoXwmUUi7evECp$qp)cnTB$l;BIsL#%^*kA*Yqw~Iidn`ga*Ex^)2JT1IITH9=c-BojS52@}(A1$1m^7`O z9_kdJV%e6c9MqkWEpTmmdKHs+#bcutXd$PH1JpaWMl9>8KhiZ_WV19SvWh1*d^EIx ziaX)9#5e9BV5B@#JW5%mGcel`j?C>*!UqemB;yX*L6D&H&)C$atMU-3yb>f@C`76^ z&>eVWxhOB+wqbD2D9P&WVO0T5TO^ijxA;U&muKQKD*)4|Y|HMq27CW&w@CP)xm&ZR z>!s6Azt@>U@eE_FCF2(FpioXftGCvR9`C6?fHD7@-0yAUUKvR#v3{o38A_X~PpqAf zkUBA$kpm{G)-8{u1GzV{=E~oto>G;|CBAkM>F%95MktNt}S=;I`aVt$GVCDlo8U}JLdTBfdUqyv5Zr_h8Rw*Oo`48Wx)znSoYH;I zaLq)n|F*jP9e;VNh`TVb6zN)l#fLP8U?NsYFs3E~VajGxMrq7DZrqiBvYkjBWgYVB3b$#`7uGxOL3Y=4WUVecb$D5!9 zUIKSIx1AjpCtulrEjX=>LMOntZIUgQc|PkAjXH)AK>>8vZit&&0zCGmRNR)oyIZSt z*2So6yUDowcGWEIbr`26$jM$OjwM=6rP=ZAq=yZX9B?-@s~ze z-Xu`BKhzz%A!E@IV2ipBLV6soGHXVre6GmoZJ>NBIlPGlj=9a!)x+OcbG_ zjqEm{-Sw$$cw4s#r0ns77+*kJ(2{a6G-7r1(ef*5T9djwr->mV<4QkRuUNX{It(>8 zJ|3+Ba`z3p1kt@V-{bRd&duaKqE|5it-})(-hocUqX>3bkE&ctPvf745)(;yb~wPu zYSI7v+oJy~N0_0(evd|(%c&qa&O4r>YO}i+lQ_Q+9Y5EO3a>~rV*YkxR&a9=TS*{{ zME0XWl`ryK#)7AyBH9STy$niWK09TqHLH$m%Ix0nkfq3`mudW)7YSbZH#l(cgZZw) zqX+W=b%de9^4>3aQDfDFL8l6j7a>0}qbq?njrWS91j&QD2Ve2&Z^mztRQ#W?fcIyo z)69wEN~4xLtk5nXYS_xisqH8iwFhOKc63Ta9E7&3HIuLCz;}Yw@1`aF+-A}n`qY8! z&5>BEK_deg3Z<&Ie^T}}yP*ZYe}+cx>v`3E9%SBi!qr_~|IR(P$vf2}K{oTpXsBr< zk`~=#=wTNF8`!c-%F7%!y@KWkqIDs+KR+kees0%s3-lvf=JAwn^iEGP1HssCysTW8 z?U9ajJFCQqr}SK)=lZYx>@J|dNQgWAp~p1~LswNpS&fAw-y_Dk)Is!6rKhI{SUxEL z@4GIY>6F>2dHZTB)Z5z!&85qJ%5upvC+HaruR=s#82fqnvB~=KvQtKG zA$fFhj;sLlP4_ON^5E9=^2iJm#EtJGM?3|<_p=V_I?aUIf>*b$J-ru97c=3o%gtl| zipHhe7S#FM{KF%YU09sghFhVT54WJwW-}kx;uO(YZ>X~R)%|;NKL13@MKkpJt)Jun za`igBf#+J92vi~Yo+595mi_#f2mSBF4Xp1p6Q}^&vYt`)E#7lfU;Fe~V#B==NYxs5 z*Cs!QJ`5eX@MRDQyFpKJIcg9^=8N3@)}?S%ys`CBjK4lIo*D-$+32%0wlGzSeG59_ zzBT%(bBaCBUH5d!-&pb;dz81uC8jbYt zQ>IH-Q%k)Ez?g=EefJ%Y8?$ksg$@7_R?vP1d9~gTAM?cSSaPOspLS8=W>pfw5#`1UwaCLqnwi2&`;0ODuM0HtM<)KcUbO|-z95` zA&s@$o3vc*kWYRabLCMw`ix77Y8=W=vuJB(-FpQ$?)2!}DRW*eh?|!WnsZFAW~s$! 
zf1Myanh(1xUMq+IRIq-qlkaF27ij@4gM{gdV`4nMGsu@|E4Tb5pH*CCJ#lpeNs7Q> zj2yj{Bzd3ci)GNklc$DnV?xvWO-57XX?T}p=UDWXFD_AdGw(jgMC)t%$GH-l1mZfW zawhY#WYUKzl8Zp*?rPbSGmC^4A;|=~>TUCPCL3JI*5f5On^*@|*229qJm&VrHaDzu zh_CLHss-((Vp?6cwo&MlAw+G*?1@rHAj^9XEYN)Z2c7AqJ1}0_>*izd(Pd+USrO*W zG?=o^W;v-rzbwuqRg;jK!Opp(5%Qofpeph`0OMsAE`Fbc%C~<;z zdW3fg32C_U2~X!eGw8*>Z>`yd4z%q^k6Bm&R5ePn`cjIA- zATue}RRedg)%H?`j0Ym^cp6#NQCJC@O!k^F)zPXlV69*x^RZrAV$EEZV1-l*Qe@%L z;o$K0G`h9E829+)Vqf?@jsrQq=%YrXxP!%S8MN*e!{(ZeGq_)74HH~AM5mt*k?SM0 zXDmM5NST{G8lqD?yK-)Nbrg>fQ!1M^dN7DwCfbm3E2E03hNUHYzqJT-^EUlWW^IJSKMTH{+S7y`YUhJWhRU=R~+J1WU< z3omY`pNc*F8Hb;Z?5WQU{;1<~f@P|6_Wo@F<^SR7RilKKgooC=TySO-Z(;G6R0lkm zB*&&;&)-IWgUW=){Iw~4tCVwvc*I#rk4fW$r;^(rhIA5IgKwpqr{|L{Qpew{L=T7b z(~|3&^iN$KKLZN0gc*r^w4N2hZpPKn;~CGB;lnfr^qO-xSRj79T0vkX&&weP*6Fvb zLE*hL7R~WtA7bhSRLC=w5W}Zp+0;t{tkltM9+GAKu@g8Rq#>k5H2-rLeEA=ae%{-w zXWBtt3T|s30k|7mBqaQ7QT0~#yvyC&a0vWSff5!s2%2=k{d{jatkPE~-qiT%-VwNA zyFSX-youf=jxAjJ#a}x$^t+aV@sIuJ1;;lwy-)DKCQ_yH)An+;MW#t_1YKwciX8Zy zN1I*gum4CZvxs0$2h66Nt#K^GHWFepw)4$kh5qFS8BI*KZsC<81XZT|Uk@i;!z8;_ zXXU>c02xp7Wb8)bCjoO&tp)g+m4vu!13AM<$(CvBI483qA>f>P5yMjp!voJVx`35W znHq-NF-a!q2ImJ_iPBp^)2&)>ajwTEKnCBBo2b-am4$+$RoD=s;q3j=j=MN{q?1=1 z$oC!qj0*6 zAggF5fp83<_{9p7eLYi|ibxUi6}xVYe-r_1bRwgg2ri~NZ;xxBhxC~&tyIJaM&YHx zi=XouT-r!;Mjw6B)2}UCXQvxq017+5-*GH%n*_*N47)eG^&B{k7W79QUmS`00NGFq zL;%CXOYQxa3v8OPT{ufjz<8s^robixas}0~`Y2g9vq$S$qc%s*&ct`bscjZkYYYv| z6iLtf`K>45Ny19Ffxa6Cf+>g*G=h)oDQ4b@7bUx1yi8r`(kM8}=bfn3S?!4Jx8e;` z+weGyEzi*t!Y z50l)9MHEhTk2zaoivh|^$Tg(;Ht#qv27qyhliwGMbg?89Jc=OHA2Y7kU@kA7UMqxr zi0h5YPMn9yay7vN#XLWh$_LU0(?n&+9b$mG7q%*wh^9AYRrJBC<-1-w+jNYAy$es5E&$GFOpe--)qwnuo+ z0R^E^!{6YWA{GFW;9fVjE(8Q7)r1{RISZ4ILH1L`EMBB+aa-68mN30=yfrkh6MqT- z(9zeb+d$j3p zWAHZ;6PV=xp!NTZ$B9j^@;WYKw_?e{lvXhwggz`b^uVQHU$E;yq>oV<0MLCUn=P)9 z_<#Yp9)!}2NZ}?TY^o{To1!KXg`W<@*p{dP5%r~G$l&+WuwjR%Kvw%@esJHz` zaiM9!6wJUI`PUPFY=(fL8Db-aAOtgLNCkC?=ETgl%om`MhkfpXrP~oX2m5aHLZgnpvq|BY2aTA}+FOTZQTk|vFc zE=o*wai5XEpv?RP3_#KGq1|s|EQ*z`fM{mvC~u~q=|G$iknRT?s;l1$Ex^nksnBvP zo=uLksGVj+&Zg!R2jT({90zn=pRhKSQH}KswdV^eUyu9O%J|V?B3Dm&#u;mk@3BBh81*0`eUjnGtYwJLCE_m!Pr(rj1`{S1Uur7!{CSb z(gB9(CIXt@;R1wN0k^P=1vN_$mM%<<+JOcN6WuW0R5L7qjMgPzvL`}3RJBMz0Uj4A zjJ80lkWw82@}8^-qQU1weaI>~1q()*ElKp8w7p}TREd4v9(be4fV(3*8e0gJE|@RD zkDI&0jw8s2^3x>OOz^9di!~rBe+rVUvu4}Prti(EE5PIGz5Twy^{@Tdv++S8FLcNb zf9SpSm^(T->RaE{ynx{tZVJ=+wZ$H5o0?H#WtVyS0$}7bG@A8A!f`*_^<3k8CuI$L z2DA4IBxQz?NHrcqc-*4YiAh4kKklku;OqO%{5(BLJWqunHT;urBBq zOy9L>oZ1^ap~^_Tbh^x$!Cb%pPs6cL6uRpm#E&UnwAAvX!{nKRp-RNK@l5g@?vR@< z!9;8>b8PfA`={mz{SW5$!@|Vic6B+%?HZIIAjiD71L9=Xm-axF!Nz;3m&4lcW(*ke z|48-Uj!@f`(&U0Jz8n=NFHY0rC+Q2A)%%$5{7o!F;HwWg9sJMkWb9USmA?H(l&4D( zyu(^+?(`gvVBCcLM!RsoLn^?%Cwbr|;cydHR~LtiBv0ZIryUbvPU)0keE#NhMP4kd z|1P6B6Rcq2jlw?QG#pm{uU~e7v%_H4RGXZUYF*8OEC-Z#UXf&*DEQH*KEhHU1^1n5 zzkYk@8&wl*7eJ8r6PZDen?K(Q?RVmJkqlQxOAfvNr)9^8lgqk%s&G^~#Dc_erU2kw zi5OI`z*?zvsY|!L^k}|NeLk&$PL*jsLY}Pdy?-)nID5S%U!T90C-!*D8K#gOCjx(@ zOJ@L9=v8KmVl0Pe4g}iisG&cc{Uyh5Zc`vj)RA7S%ng6czYRhe@*QvbM z^vwMOi$1FLK+EP=N693j0ZOWu#n@tc)v-fwC%7PWi65$I3B4eLY?aFUmW{-&Cfba; zGMuO*#n^Uo`ZCFU*7OL~@P8iO=Qz^c+KvV_2(0f^YIe}-9sZ26f}os*pp3*VAcHs8 z-NEMEYwReNe0*#kBvREsS96W5{>hQ!K(eNL)~~czu}j}V#~LRYD^-ui zhf0hMWVkw)5-XG&{o$KXm8~7TngLR918@B(0+&qdIj1Z)xKFcoEDs z1%Rc;K6pX&ll)MTDqyWH65)?~hS*Q#*B0D#?rp}w4%6m4t{D5;YDg^qmi_*J>Fp{! 
z%1!i3J%T;WJ9Exl3#@mIuoRCi8!4uCG?;*or%VJc_0#FiR4sIfma41T3&SV;nic&z;!X zkY+KBst=?_9@V0npqKQ4=fx1Ry7(YmvIleVZS(3;#m1;de<7&2efMg|gf`;>qY@&Nj7&A$^!}rrz2Q5mfwwEg z@@X5D<-FZ-s7y_Y?-D2<8AH)A_bEa$J?|1NZg|eV-&u4jm#A&x&#dNkMRF2_7Ndx0W*(qXYdP%L z8a?C+RR&e1tGZ@FGa@Nto**sQ=x*lDbL@9y{evY_ZA|PMLA~P5H zyIdY+htIS%BzQQMd7^1Eano&AU;Zkj!<{{g_FIT5e}xsr1*%SbH;%Fq43IrBrVcNb z)CM;B1&{&QT@LRFbk!XJDo94KRKDYV=64{G9c=!}y>)CO07k9xqKLiI!XS3T-Sgp} zbXt;VMAfnojKrqTT^EO>boj%GHPCopd3O-6#DFg{SP*Qx;W0}ggpLq&MPPAx84XpZ3+-TKsny;jHvvb?&qEXU}7v5pBF$ay}ZRn6n|FmTR`s-lRT!MM0>Pl zdd?;yn5rRxSdoN^b#5mD`c01~M+8I-^@zmP;?Bii2V^0QxXFXP!bm?(ixg?x-df&H zqSYg-ZdG>UfU&JJ#DKNZi0S&XT4* zK;EGfTdqylErpimCMTPf3+&x2+jGf}>zS1u4E zb=UQ4_FI29%qhheZXTz3KSDT);vbMuGHIjZt@c( zHmlV#N4~z~KP^bX>sG#w*&RuD5tE|np;!nzq3Geoozsf~@{6`t08gu-VCYQR+DogvTbU+S?xyf|4@5(CL874h&hZ|XIMZ}_8x48=NBLF3V zYIwd*!SCUmwUUWVyYdX6H|XaC1&*hw1jN;i&bU2{Zg8~>R$ij1&0FJk&C zRW@e`7!oZVA&G|fEc7|_&P|KP)^F1mZy>31#BcU>V!W2G!Q4EK#I5d3ty zP$-{^sX`7;_#^DjiBQ|3Ag!#KJQaIcpx+Rzchv+rmpytbnnNXojxZH;Te;4o(1u~%#whQ{{PYuCeQS&};c(+1b^+VliV?;5EU zt=7LRAajC(&=FHH+!q4VnNjcp;fp=R`$FbzO=zsVht`IllwRdexbi|kCPBiH zK>5fii{eCv+UOO=?$j~=q6Nx4S7@F7@ctFP*NaY!wazrHsrzs2&3|BQeB8;AI_*ih zOaV_&H*E^3$tCIISzY*8Sa~e!w${n}rt$8L%5+mQE|iKS5jT89)4w67jeS6hxEwES zi|SJ3u3l8F6 zDk2dhU;34$@8S32hn+sP5mS^?jbuud4bP=zf_epXfBsoD&9E2`=B4|COq(+o77$k1 z`g`hH6yD8$ypgS4mOW8ymA3g@K@Kk5Y3IOENj7t#N+02-)wNn4y zXZ)vg{2#vURmCnDi~ZMV{GZ$_18z2Ry>Zj5*oVMMzzf}_VeB9rCGqM99rFBEOAuMb z?4OTi>8K^00Qm3D$&Vo4eZP+geL0#=eq82~1l`&l_R3&r6vipJFn_LcU%MJ3#909S+dRUEv@fHC1cU|4pGP!F&_D&ihI5MGQedQM zPt+i1r0%c_sFMfvNjKNh`^FG#P|6vtg2<^OLskojCTZD!rW+!RVv)Sp{WivDP0MAD zpFnqEpwcIQZ~hMpmv=5iao?+p4ZeH5^0>=eBSckRYLi1`NyFbJHfJL>We!oA5c2QU zb8AL);#{)r_T$==9k!zeo-WdY()SErclu&!9w;HsTEI}UIr;8R>J)Po!N7uWCa4Ys z%sQ0G8TC)RN4<>Dky(+A+{F}jd#KGrZFfl-8{2oqq1gNS+}N6Yx(uCEDy(5Gws`)X z-x5n|WeFKo{JS|Rsmd(3*RnD26*zqN4JaMtBWn@h3^-n2;%twsbiJEAh}X&a@q^a+ z&^HY+y#-CKhGqldG0aE$i;vnd*XX0qG9vXomN>3K09Dbs4Kmsi*Up_T-p zYw5(o`EEiHv@X-W8%(%&aLXhq(@SI(b%oq}bpRw!wWJ*^T)uCEr=*m5WNb*z&1?H( zFX2lx!7g&SLbF>^QtJGJCVzl~Xje^=9~Z9rx5KKeSxyVhy5$!`7_h|FUVl>Pw zwmOtYEfd~$QtFK_HbOByx)Z$bcdFR<|8Z=pvO&pxgrK-+qkbWO?!_ckj_@NEXRELg z`HhUR{S*Bsg_Il>3fK2woa$j*jgPkJi+x1L-6Sfz$cb48EsgMjwVY1lc4Ww8of^1C z0Vv671g4pJsZLNH(klTfStat5X^8rSfcCQpA8d~KTy#DVt0xj9CPTtvTRVE_s??en zAn9yft`sovP6MH;Ff|8busv;jfAoAYwR)hbaCsl5=ZQ<)1zVN0VaAJLqIo8r)ZPdi zCSBBscaDwG#ej;wkO^eJ9H0wUej%ZECH+`P3Jfph-zISKKM1xe<(TW?D{u2KDxN)@ z71Zq71ZXq<59XvdK)fWSSu`8Nd>xOFau3OSEITjiAa$T^w1;0A+$d00qO)ff-%15z zC_J^Ht~y76ySzD}2nQf>Ls>LBA4F46_M~=B0c1!HjjW;E2u~&y!)DH+R$H}CSR)ku zGO(6F=>+^i9qQrRqHhrh?sO&ly&ZG zu=F~KFDW=sB|y$0T(wR-#?Co`mOvqPzSAtLw`-XaS-ARuaiwoMwr8bxjfGDViu&H4 z^B~M0*UZd+%_0evmz;!6*BtcxNTafyd=-JjOOl3=fgEh0xXl;!q8CZ4Ti{>-+Y(lsB&E1t8P4; zSY|~cUP-zwO~J*J%J8w?e_^nmTuH_mUc~aNV_Kp5^8<)vz!+$GY6azI8T2w|d$))w z%9icm-RRzXP|xE+YWV~gIo z^zqi0CsXfMYQ}Zcq*`eApYkhYccfMaC-X}OIAXl0FZ}&e-ss2X-jYaPr$QD=qs^_k z$2fqi-hz<6x?FS~3f>q`P1ZA)%E{-RW@HNTh!U0FTj4MyW$M;2+i{G*WTAp6baWJ| z%Ma!qKzHvpmeY*Z=CfbYB`=Z|ATQRJv!v8p2Mmctly?>wObwrn@!=BBGp<%#qi0@7 zWYz1)M$1K999r^GbcAxE#M8S9V8?=ruU9|tb=VPT8=T1pX!{S#-L9-rbrn;cFyYkC z$a`oEOFXOZ#C-%yJspn4Fk1eaM^dF*hWS+a^9@LGc_wM4CU~IfJCV}OTxB31uz2D? 
zj6)xMd%jUw7OvXWst2l5f89B+UWJ zCuwv=4NG!J9Dd=dk8=&|XI@SdTu+(?y-zo3)^}$4<^dOaicKMaEF%>NRVwoSk`(jR zmvxpBD^U5MMXToy(Z4-t!vIY8OzG{npvdsJrFwMI0OwV3Nz924v8PdVmG7~oP-x&z>m*ceXb!Opi zApBPul}fyqsQRqdt366>F|@~V_CO1-s3h7)B+v6n(?M{P(b{bax$>wXciKReNoXkw(sE??yPSN}`g1q82#5nfFnPkj0o`4|&t*cpLve(pj_hyu90p8`LH1W327qf<7eRnVGAfOlrexY(67`w$IUt)uz^C-=y-$qK?D+jk&yF})HCxM z*qN^&op~HH=W^eUEiQt6AQ~FNei~^1rDL-^9SD6kA@!MTMPkFea*zp=)O~9VoAjH4 z0|x&bCBT9JHroYy?1Tv5k{%$ulN8kW`&Z^Bz$YRHpOOD{`8l48RV{2*mO_ga*cni$ z{{N_Y2PQ$ZrU|oc+qP}n?rGaLr)}G|ZQHhO+uV7-jo7&RCo)f+s(ey%g20%9h${oJ zx|@Id$fXxu*Fy`VQV0xT2-a|KN1r;>Fs!&;z%O{D8?itUuDJ?;RGjA|QH(fSs2isoBn0p0OG>#UG_c zEYw`l45;KT7JLmGa4O{s;`;Lgs>-#z1wZ-8?vUq4jTu9zdMiyo=#t>|RG@E|+%lv; zh%NtSd=+tB$$A3!J z?5Bn7#XA^{y^voT+}f&)+3RVN4&e3zL3YRAGjo3=Z^O#g>g`{%!^E>LIow|I-U>4R z4c9>BkmH7nb zUji42-Wb}>)kh5yqS>`iuV+SvA}u1t!uOu6?--Ed!J2-Tc9PSx5fudN`0lcE9^vAv$ZE52&OeVHWlzD9#GZ&n+Mn_lSQmf`@NDN?J3Q73_b6fDi7@*Cjo zm!rD-n;tybHLOeKZ@NnN=GU*FFA3ew37ZofN-LMf0LeysJ3Z0#_@TXN8wRD0P*i}J z->1m23xv1}4~IRGW;L1{Wa~3T>o8YZOiBUR6UyHf!glU53zx8oWHnU@JAWJHu8)0v! z@9ljLxL~cN(<7pzTVRX39t<{vwcCgUw5Ph4YR=)%Z4sjruJ| z)BkQxg0K-M*kdg}V%9We=KIye zTt^x-D}YJ=;)cgsK{;hqEgvkJ(nluT`C+*F% zYBt&Ptf>DS&L@ohw3)*2<)?}HW08sq3ZBecjntl;5QTfiSv*pK&l)8#Rg8z zq_Mt7vG_ti>9{Zg^(wST{5lfqYZZW zuHAX~{=|s+MB|g}>fi3Fv`mzd2<}LdHRb^8EG&o*vLP(CLQ}@*jwIY#3Y-dTB@lAu zt6CEPvkQW}SN@$vIxU>y6@P|Pm4+UMhM(915&+J#9Y!-6EuuFr&}P$;guNPdr0phA z)&ge91F;kekQ~TxXSB^az2Ju$W7ly$R?67rm zGk07`cVZYesnsL+C!pu`)i-3!g6+%u+}|*f^P##CDLMq`s%%I8&E(7F{<&c09?OJ#d0>y^DbnGVWHxe`N{MYsS59l(&~3H)-9Nzumi$iyeZl)h zQ+hg&lg|jXEimd1VmUhYMjtRvqN`rQw71hr#hM?Igf#H0FcPb#|G85szU5fG`CyLP zG~?J7^w$`MenPFbWTAKCJzE(W{dHpw0eb1sxY9x>1wYd_`T%2zy%?R2S)Tja+Da46st0wIu9xW>@c zPUx1Q&>m=t8MrM(iRKQ-5eU2AdXW^3QAlrSjT`D;V)vS@AtD>s>04u0;`b5@JZLq- zB1(ygn89ovzID>POjqSaSl74^eWr<|Bzn2>C$^0?J`kwtPoKZ>BfJPGmOpNJ<#?3P zC_4h6tqaqZUUEA%*a8J-K6)?qD6_|DoRsH*kAe(*$|5XuX+oX-pK*3S7X9&*Y2_~^ zvEuPH42;l+86Q6{1kXD(hcv><QB!~hYQ-ceSp1pjq( zS{YoyUIFEEu;_9}AhG>B@JAzOVyoaC6Fn<>x6U5|Npm43SFg2Fqe*33=Ji(QsTqG< zbH^Z9(IG=KG@GPY?2}ZNInayXU$-gOM_4BB-svE0OMAL9qmwEwnygTi3-D;d!y93W z#_5lbHq#21txWMrO#^!f3Gd&A|L%MtS`YK<1GtneTfl)1BgmgSF59{%gI8fID((zK zFzcd|4lOY5{6@Rvq5F$XCzHbs9c$$G^qpWmc=?WD-1Vb23LUl!)ki_c+1eT0{Wj~@ zFQDHKHvg}Bhs4OrvNLkFu-PXPG~f-qjQik$Wr|3tp55$6nf`9V@M6n*RIK{Dbr;8o zkOnCTw%rRMkWSJiF*O55Ceo@?+TAsadhG|>8b+s;8C5&Cwwa0v*qB*a*TwfA%^HyP ze_3|5329E04EzhVHh>k$xwQn>%wRHNj3cZuC-y>cX!Unp$b4BXG75OO)si1TwN&7i zX3wI)LN;T?M01Ienkrv*% zji`@igP3n6*dj8Xr=B70UAV_U8c3^}QBJ3w z&rw zgvec<`cTN8?>Oq*IxAWPbZAqE)2$Ecz$gezMx9nx`3w$Abt{D%n!MY{?uafi000n; z|7PqD)tZmlb1~&mn4O09ue!yer&R!X*jCB2Dg8PK>w@o+P)AxWME3`kcT6^2hECfk zAHGiC;f;NSG7{r5e|7!I=J3rwQ=+!3eK(9VCrU+8<_pBm=_j7XgM`n>Q0hd*62BK+ zyjX|f!!rKF$n<*P8Q^X!1GhLn`Dl2S|K+nH-wh=o#*ym;so^@q zb^j#9j*w-TEd07a#%P&=!AZbEr0{G%aAs>rJNGMSV*%phAJ6D6uyY}?J@Sy zq^W(MAE`!2kYou@e=UU={I?6}%jmy>p|WxOAnF=z?pe~L*Ej?(W~;kW_1^x2B74&1 zPZ|4s{Wn;^peRHq@M`|=<31~t*k38B^<<_a?#c*iT_(FXXM+!bNArh7--WH_JgjD8EfU()L%H)Xpt5Uyb#jwT8G(|dZX^hpIt2@-14_%8nvv}pL0PEKFiRAPw9fMYp z6xgQThlulV`JErx&MFR3IroDCb0@U`z~Z6i<61*{6ko?<;OG<&F*k`CeItwB_Wn0-YX?|0J$U z;{!Oil7=69B%SDv=&`80N~Gg%1I0+%?}ke#@f=iEguJr#LyJytrhk_tCXPf7_wH+l z1pa6v$mT}RVc7s5Y;A&`7zg7p{Q8Jt|J)WJGlo{r=>IeWOEMwUNXz9~y@`bH1up|m z(YXcOR@~-&U^hVQlsqL~tlKMzENbGr8#uUXt#8~1yD*fLr$)hFot zmKaHA%YE}qR!^_)#Vu#d*Kf}Olk{Tyd)baAx`K*W=`H0}(3@ z6TooBRu`@KReU#<%=vqRJdM?$^A5p8S$7Vcsy=uyus;@{6OUY*{T?P5A(?Eh&qh2ReM;tQ8?62 zL9_do=Q+gN%}JC}-tTT`8LfQfJ8)}>dg!N_r7%fI2{D1voT>h-KpE8X^!-kD!F@x_ z(m`P@xZ|veBSyi>_Wt`L|M(x zakpLiHK*(mNp!M7%38vHIjYt8ra@X>73gsG(S$1(5JLc)5v!HwpS(`Yq1JpI!Si(| 
zc!OknTDlBAgtLb7mwhbILa8Bj;2xkQ2eOyTR^H(t7OY-T(%VXAZkcT@7U!L%kFA7B z!gMJF9LXLddphMKdKZoGPn(jGeM8D;6c z`VC8ltCg0`$2=v~ zs_J2Pvo6XK82f^)ZA!4&9}mmuQx&(Cb({+Z^tuVi5K3Rt(>Y?`bX?Wb9oBJxmLKXS z9jQKIoD*ITwdtpd#fglr36vNyTF!F>nJkKK{)5EFv{WtsdQ=joFh(OwB;oDLvl6O+Wz6I?&P)^6r#5I=@ z#+^`-8hMQSy$khdv>8-4D;sbhY-f^RLjkNxsmF8b$ph@|PGsy@yvT3cfDvDI2p2oP zjuSJLN&=3^Bs1Xc3CtU+@Tv*a_EE7hAD+5pGP1}F3TC`+I*!{qUQ$t1pi^^8pJF3d z3y!b#h$BeNCtf5>*Ked$pl8jleqi0Rap{oeai>nWe05Ib5uRo%x_Ub_;`5$1_?w*} zeg04cC#4-k<;|1tg$JVSv8?2AEI#jNw8Zrw-EJ!;N#G4%4 zGAjtAj>4I+V}Bb~2k1pw2bi7BbOqsUd@ay6&t+@XKT+N=uQvLN+t*5&VCP|Q254d! zxcgf=vd^3+Yd=C3zEe|%D3d2sAC`&73qbwtxc!&p+hJO-|F~4+2(KzMzT7bGjzpNo z48CNR1@WVE`{|qd$Zfv@STKzW>p*%^4xY^N4KX&`Zs0fQ+eo|~ymT5>qQ%`OkcGOx zVad&gA~j)T{PXi%jfTBT4_(YCUx%$;Ph1)x`Na@J9DhbM3Md#fK*)|LV)()Le_JM0 zT)3#BT%5eyTUUQQ%NoTek2&L&kHpQ5AO*&KelkzSr?Te;x12{zXkN+Nk zCcz1fS_N9sU-+dwi8YgR|7%|zSCO91?ry8jsg9kQ-lkYEB$UO-L7X}$NkQEyv+Ns` za#2}tH<^7)y}o_DYtqfCs8uT($c$&n9!eEx;hdAa!1Lc({3ZuizPu>1e8onwTNhSwpbDoP~#J)MEvK_TY#w(Vq%{y$;f8b!4V>hBx zfT#O}PI+o8J~{dm?V%cTl|x}2&-n;g=_9FPe6aJg7zyoMGt)58n7Sd+b=0zJq=5d% ziR8D+F^`sT2taq~V+j{tgEWVY-v7R2JSN_EZZoc`1&c&~qN}6aTCH!BL3IPV@q=Ub zV)A-?Wc5mIFGBOfEih`UI}jgCNJuvsZ$_SiJyr_b_unHH`~M&lqg+}@f&+1JT?=%b zB@Du@n{>(w{}#|93|lW-4rzGUjh(S8G)KjVQ>LH}X>2#X4FZ+Jcc7^Up^HYIZ>#+6 zo$&ku(%wAT<1Cm#Ae;2CoYn2BvMy4FGu~{!eXqxCmG2Z)<1*|wOcJ5&4MO%KIzHlL zlq9D8cbP+{2xKzQK5#=wKLh4_0AqU*m?0R0Jhv9`ZthMf74)2N^8X6MRVT?PygF(3 zS$05ml7_g($0?5Lz9Nk3^shHEWgO;!@^WWA#@AJ$9VPelN^vJ^~?7+aw zztJZCg+2bKry2iYSS)xanK4=3kw&hSjt>A^wy*JtN(F7w{b^RJ9u>Arv9ZkZ%TDDf zvnRkvV~pW{DRO{q$9P4<(R85Bsgv2D@FCDiJCUj8chEbnddH~3zk6sbv-s- z)qKdE>is7{F3q#9uH4Ca?u@gTU-`E14{t&Y&=m5mzDIZSAHt_}=e?>(cjD(&JQrUNqYM%-EDk-%hWYP0$mikc5sX{7J&nRWDp0?*pm)m2rfvCdfw<_x9fe%SSV33?|lMn{Ml|9=@0hVi)^nxQtr(h^4h}DHy$E; zFK92faAX-9SCLAfVAuLrJ$Py0EI}>)i4;QnGu5S&v30-7XD}2810a1kxY%dr5aorip!9zCpp*|A zcO}8Dg~tmOD}SIxokhgj_3wLvFoEq1ugse2{`^U>$6*(>?6=7`Om#oOg&U#4tZ0{>4kWd#?oE{A}77u-91E z0o&6VbQzPWyWAS_^XIO2WZ|5h19c6qOu0=<*!AAt@4*aBZmt5USm-xL?Qq2E4!O=v ziSf```?cLkj+MJXj;2_~KtxxEwf|Bd?@U|h5#|D`y{k;<+Q6uMH4i4o-qsoRV1sx+ zWXY#R>R@Azj!mfD{fEXa{!~;=Eh=slV)uqBYbI&Os)US!P_iHgR~xwmD!I*ODhiFY z5}3l3AUWJOCMmgd>%iYSGxR=D{_J4E{VIc4+-EO0HZY^J=v!Bp`vc0M?ES zX4<3R5%}QFnzrO;X%Lk@`Lr%1Jl&~X z86pBnLRjf6Fu#cmzVXV3s*cq<8!fpN+DNMC@jm$*vak6m^kpFqKq5!6Zp-lC=nMv# z*~duFB!out1lO=DL8*mWZ?_QQZhX=tL`uM2^3BDoz;c#hW2&o=RSr*XZlB3fZb3Pa z`@gu2%3*udX33Z~4S>Sj*ES@!#g(Iq|NKn(Cx3Cu%fY+rR zOMj+%(!*30FQiAnUDeDLVhHIZe9E213YOIZJ*59_o*vJf6UL|oLn@xfV=|+EX^|Y-nS3G!Anvhi~&aqHm zWm-WGYQZP-4o9J;s&W(j@N`Z?Dulyax*Mj^->(}9+SJNV&z%ZM?wv-}xh=q+B_QlP z%Ea5^riy(gFn_=D^7V$I(jbH`EwR`W0W9L*3ED({Tb zN9vXg&aGD{%a*<;J|dCMqJbgCF${d0nb15Xi4IB0bP#Q_v7J5zQjaHFhZKFQwMINq zcTod2+EaE;r%ad&W<0L&ir{(7_*a8oF~Bpmwl6If$h}0TCo1o=cS(=D@8paKrA!Fku}>A+BuHkkQ0Esv?-1dKj@0E1hF3 zLmyy3b-_gY2m=3b=f~HGs=rJ_&0%f!9f{B3Z6PwEevY|A|Lrq4K#^Wh+0qy_@KwN| z;WZ>B8QEmMc4QdM)vNplq6E6;M0EVL0dq5Ltj8saK1fgF}XT{kb$4JdM{Bt z@C#$KdI_uH9$P?Qr79Y!u-!50p|)f8pI_LNMUByZFwm;#aFOJE{IU#DPT*&!>LE?~ z?Y`e#FfHILKPLJ?A}D=md-oArKFZ%~wrDjy#t`K^fwhM4h>^J6g#2piDr5i#=7KEO zsJU|Ri~c?! 
zIw)R&3!(nG5-AWI3J4HY3+c~(QEnWm5A0QRS*0(E0=WAF)@#>Vum+7&4>kT)GpskqCxnRzhrNN(GF62q0ucABlN+}GKf+ZYP>5E2?<6LU{iTQx@KcNk>>S} zPY7uJKN^nOP{^0I9)-Pe@cQ{DSiY*|1~K6~o}q_f_*x9uw>0|g`Cf@IYrmUkyaQNcNG=^|IQ0IQ*%FRmyu zThum}9MBh9kjobWz7Pnix}S!{4;C)8^@rSQIOG`a*lC}hj(vyF0To*rsK=i{$3Te~ zgX~~~Z!~w-@D_X9jkF9p%}2WL4wV*QdnmEPZdJ`Y#1XnAz(rCi566AGdYf^$A_>k|#JQ_2^P#61_QF z{5QTccgVlbDd~X@n5oz0ZS?9eABF7fRc#Fedo?xt7(YgGp;}TZ`%k9!J zOVr{Xq{t+lM=>gIaM!5_fQZdTAfCD=MTIZr7lL&F6?gZZW0hO_1@dG^{11CEQ$5|x zp)~J9{WJ{Yzk{wSWF-*}1%-qigX&R(pyX=A9~iQdJ?LMMzu(%Fjh?Heoe&SjZAK+zYbC;+5YqZ7SYD+y!gGn!hwdF0o(7_V1gDwaU1?jb@mw`t zCgJWiDLJrC8xZ3Nes=z!-x1)#^XsqZ?X6#YYX^$2#qx@2&Jh(coVZ|6-@Qz!E z{fdKfYVF)&+1H>gVG~ghE;ky)4PNo(qThn=lVZ&q>y>{ z?v6Pa6(s6Z?&l??fm`r3jj1)e^>?$=?U$idE zz`wI`rir}WS}?5LGDY+qyKy88!d27m9F74lvp$vw)qL`VScFa-UBK4l&wVjl9hLAZ z#vv*lDqT(VBM9ChUylxP)Y?z~=O*JubZVTu%rzXH2%z@YN;w+CwR+?4EM zNF3gKP$Zl5_3lBM+LE~!;_r`ZH^a%^@c@ew1?e6Fc8MGX2nO=$@Ed}<5<4?u4QWv~mI*JDc+~WXJwW$l?EsHRzarV_fFVT*FQXoBX!2W; z(EjQ|O4>vvD9IBzcy99aW0KkpuNUGIVF9ZLDA#(vPPT1l%m#o-zvlCO@buLU|3zuZ z!EPCG|0+Xwc8XGj%3wdcG?|08tuEoc0|+-WX&pN*T)?kXWbgUT^k zfR)m@n{twY*4RQx@#X$k1bFd+@e$FpHYt5=4o-7}kc!YHvXzmcYLD>wwvtUEn(qwY zEjUc0{cs9Ce>k48IH;ezu}C;;y&=)*B8WzQ_FzP6NaUsST#2(JO>;l&bC)rf;8{0+ zNnbEf4^hx&p!a0NgN=2-Si~DhJ71>Ry3bo#hkN!NjlqkBZ2eOsqrD6Vg+e3g8Y=)1 zwhrL5S%{Hehyoi~1k1(fLz8II8f0gi{q>l+^Xt0-41(G{R80Q}R3Bu_wLudUXBy%b z(sN#yYazY(k3q5x%93W$*%6qQ0C#p3Jzu8BVkuBD6O6!yGS69-OO0WhO^lbBbnbF{@>oQw@E_iMYo6r%>LUj* z*>=Z@C*zN!p?Z3A`hp@5<{DD!CVCvJm;Wp(bhnanU2(wmXG33Q2cIAq!jUo}bp}kb zn=a%L%rjb*e(3S8w{|gG(Ss`e>!wyPQBx2DshflSCaubw3XS!~Ir||Ag4XR0#9$n< zwSm5%;x}`6Ah-&@^^b5DSy?8fulHKDMYdnK*EMB^;UkTrDkg!~sV-~mxUFDcEAM5r zc?7Gvp_PJ_kTMD(FTp1FBp{cG90u0751?kiVLqEhj~Ie&G5uS{VIj${=c!gx!W zkP-U};PF1sH_*Rq_B!&A-h6)SjcejtY968@BhXdjHAL;9O90=bR9vCU(g4TknQ zE}**Z?{k}`tC^bVmJ0Vwm$}8>ZX9_hPg#toTcJqF1%C1B#_jtUVE3&b9oG68O@5y7 zmaAsJC!U^Ce&^crw*TK*hr0ND^aAd*!e2gL#8?Hby}Rh-Ai6=Ks+xtj`xeuKEZ_@j zmYsG7-DX-QA{oTfya64|V`gz1dt9gnY08?a|Xf^gXfpi z3Z-3ZU`h#8-31;pS$Ir6y7o5|>^TL8TV4&xq=#G9=l-SUN{#v;8}|oz@vzehlx)a* zXXDTb+6+D!ujs+`>X}&)%zdr4xa_56r;kXFHzj?2@M^sU*3wZD(e-(2)knvXgjRFt zJ8xVNVjSEV_#f(ay|COq_$g>t+F0g!V8%kkyp*zaJDKCjaA9xw=}BhGWZrbWRVDQ? z=o(&_;IO5cAf8fK_auY9e$jVZ#{~Gh4Yn{lOs@^h<}fXw`DNN~>b8V9qA}pec<6?O zaL?_aPb^Q}gqG;5G})a7qrWE{iOOl8xlS(IFZ$xQ2+zvmh{-nopPw)mkbm@#&yz>R zcLCNs=&2*;<3HKxyRI8W=VpES!_(npZ;UuW34o}g?lXbz^Fv=6K+xcPmiKemZUg1v zW=DUoBA3b7iq`kKUjxOQb3V1M%9U&~2lJBEAbZ2;70jIBaAnB8zn9J)7~fXQX(1v8 z3n5mZ4>wXsCF5Ixy)42@y!(Bh1&W@(_dCBvqNP+)N^}aJ6GK(86`DtD2(CwPY*yzK z6UOd;DFbYY8V~Dzo(poLX#7J*$}p@%!4L0P?a_j~Cvr#7AtN)r2;D)xeslE zc^H7|@6O+_od1^0G_&&ONC4#s7RHk`!@*{9tVqAaW9PkO!3Ui7yP6DrXAAM{xb&YX z7*}xbA!Qq6rSDjPJ140N%(CHWot`NPw|ypFE^RPeNTv?UI(~hFnuLj2ni6ps2k(!? z#TUzT>Z@tZKV55-?}DHqw1*0D+Td71m_zW-xTSRi8?cmRMh8I*+zbOc?GuZRsXV7{G%ypuL;Qm^9 z#veGXof47G{8&PXLWkOQncYGc;STPi|MOnXeJF7+#=-9XlYv<4z2ywI)IOH{IbZZE zM$l4Sq+PI>)FP}N9M@f{+Mf!5B}8stqT$RHujHilKC3tUEgPy)C%h^ENx-q4gkW_b zKx_pp5)7$-8g?NIyR0YLFbmQi9QymO0HY(hO;yqx>H05YV>G=^s(3+OK*=>4ti!sl zY+3VQUsPe1cgO>o9L;im4yi7^kUFA0g$rwqWUSJM^FQdKYv2MqQ*$G|a>|am*7XM( zs*tCL4vEC+fMgGD{!lE%@q_)DZiH?m*y-tM47 ze0Aha9`Ez*C}2a-DfeR28(6?6F@amhFcAXfpC24|gCKmles>AsuAo<9xu>cOWOC0-FTf;O)vrG1sn1Rw0n%oAst4cIqI*I z?(m?*NUTCnUdx8`C|D_&662^oYgsG;HP*! 
z_+XXE^vT#SN+@Pg1sMi6-qLj=($Q_}s(E?IyGPZjvYKqy7?MK&> zX^Kr?De(W zb}4btx2q`9v#xxv?~eseu|Glami5A{bz%=5n59EPhq_o?Y(i(l-!ay&b8lkq%+?xd zcxTD3sKZ~F^laRAe*{549_ql6|2yb9p90QO+7O>i^GKuWOMei&Wp+nQr2`z6@U=T# zn?sl}8S6EFzKd~I2Q&!G9UBv4_RSL+9$aJEqy~pye8b<0qH<{38hg>gQ0q>$l0`;Xl zN9na>^Mo0|Z)dCNQT*8x3YKM^d7hoOJXRD5#ZIf;-$Y zOGPhNA*HE!-d^NQ``H;PMp737x*wnNHLe`UTz1ev323nFOeV#)Rji#k@*`r$-#`7> z>D~3lE2{T$l&SNja8;8u%apZOV=uyvqZEppp%iMAM~>WhYk?iK2QCn1pLX%FRTW+^&X;9gA3*jIP_Hdf#;We5i>*fq}D z5{oCa%Uu^D=uFisC>9ztu?ec!pVXQKr_8Fp{c%hP#(#b%4{Kn~Y<-H%j_z1gA@4M% z>xkw%)iroeT(|U`CER6}>nIemV*6XXno%|lFpW3R9jxMDA(l3tA2*@9eWP0kr@9}uJ(%QsAXY8LH#sU719C-e9p6}3>3*F|xy)TgaQ+np_d*qnB zqI7icEZ=9ei>^*~Rf747+<~;4pb`uD^%ts0faNf9T@3zufGg-^KQo1)4$dedbyg6` z1;vM{+%sbf|LzAOW#%9`FMzdU+Q0D|Qg2`(ST>*$HZ@v-~oQ| zt^JiDNg(>M?XqN^TqfW><@B9tldTV|`XJX!Bdg<{PBGbZO3qh-g^@%%g@o0z&!>#TZN&=CqP^QZxY_#OK!gm+6gA6V5^<<%?M$ka)=$MZ^c z8Z5B(Yz9K})vrf)htSQd_XCjhLt{{Aj~0|lINhIamU^Ykk9A|r&rgSAXGzZZi%MGK zv=I7T1>LDh;&7fh5f>msvk59Xc~HT_K8+??y0m-Bz!m=BbY28;Uw&m&NBMO>bJN)* zsBK$Mpvv7EBgJaU(CxnA4Xq|fZ(nLt)gJcBX>nK$K86`}!J zg_&p+-J4f!@&E|&6@TeEx@O7jxI#OdBhoo#5oO;aG!b4LL%~bSf)xmL8NPYi(={{| zpy1OrYVLKsBZiZCuv*DI_p8R+l9F;7N(GKMqYzhv z+3ex>=dIz4+_xI13*%U{24wq;FArR?Y3;!HMj4Z_9&iMS);V?kazH*OsY)Rn#JAxE z{Zp#4_ZrtqXAz^f0a>b1zX}_}5(td9_XRSwRq4TGn+Vng@qTufy8!L9@mmcCfM7B( z|AEa^!wAUMKQmyWbZcbL#vfqWmam#rpEu1olsY*QX96K!$?4#%iA*1s89TUc5! zkDyN?Hg{c-*j^EFSM~FNJpaf+FvjH}j12Om3=_*6&q%m_W!pg9Z6OW6Aet(Rd*0dL zPIP*mJNy%B1}Zi}6?LBxs-%n`6?lsd6l%xVpG(5(pI00L=> zw^q&1yeXuBY@1d5F%thm3sOLapOj7jmUGA>u-y@_ji3_=-`mkqP*Rx?w~W^vEUvVN zg1Vu*>Ld|tt6Tk4dDmdiO#;p+jP{vi9cP`&{rESK+uaZv$jXubC5!o06yQ_X zi*v=)xIb_hfDzBj+C?wT4coC?AvM%`)$JaZywysH9wGKe4K8qP3* z(fX9tz0=pZK9p$T2{HZreNxOHq?V=iVzsm=Y3C>7kGvn4{JaCMt#Y@r>Un9#=RQ8t-2Sa@nz)K`2jco3RXT zN{Nxi1`7`Y<`Q(G{fK22F9%zo5k-OfS2)Buasj>~ZA6_5{zkY3h6K}5u6}>2F<&T2 zJ`X#ShDpYJJqc3=nE$qG#4u#LGk$|3xdTFVe_?mt$+Lehp|E3gKm1h#2se!Bg;5U- z{LkOVR7WhWd)Jmgl@o2OxTshkY)ve|jZjuz6O{`@r!{~fjz-%nKH*8fpC{Buybf4P zfaMX!ffktsh)tjn@eajrHy{tB)Yl`Bqgd1%<=DU`gI`T8Umi`FH^!IO667xCxQ)CL zAbBAV-}M55xEl+qR_BBTI7Ve2nl{VkPXF$5P%`P62xOP7mnF#z*D+S$Q*_(t(6Gq9 zG|Ox$>8M98@G*1pey=C^At>Z%X+}m%aA=pnbnYoZmJSiL>I2~7&1cQ>MOWEsgN){<4vuR_Fur@ zzeZCP{9`S-rPA33BC`uh-iruQ!_cl{=t#ei%fE$mC`DsdLec!J3>;=j@5CWH-xoea zV=<*u2E-i6b(t^{(opD_jo&%EKD0z3GEfB06&yoPzHMmLP&3ImTVwfG z7+d;QH?Q!GP=6KtIvV0t+fmSerHZ-NG-wfhs1-RNo`jF=I4rEiW?1-zU4__`&Ms^5 z@BBXd8oq@0Jrb!dCiJtDd zYw*j?p$h($#1_=)^diPcA7pQ_5?HkSKuu$$)?DYZE=Zg$tU>jlX7GiudD`))^1es1C0E0|H1l#t+I1_Pr8H=aN3!4!$(xY8j^KE6PQV2cOO>~cE zxxu!6?rfdHGNdAED>=LvSY3z1H-OMS@YrJrceR0gUi^} zN}G4g=5b0dC2vB&`YusCy`WQpqLeF1{d*!Xt=nLj^FM)AzUBp|bF&}uFjK$Y2?^^T_mt4iHxerGC#;-wCB%^<2z&rc~Q=r*cnM`ib zZyVLAtRmJc3bgL7kJCz3=sSJ|4-UPqG9YbGxNqBQM_k!(6foo|a5JmkOdD|YyRKw> z^WRMs=VTp)`_ifC7uvMPFh2pm1C3i!Ym0{HYB6%i7T#afrR&Tpg7Q8YW5A@l!mxz| zE01nYSA?P5d?pw)yCrFa9x2OPB5#g|g(&7|**08YpMJ*bO6WmcD2QECl&xFoUCoRTpuVHq zE!j$@6JCW(47O!U=Sz6y#q=glXAxj#T3{b;i->*2C9FUP>p=#SC;vZ8ol|=t(Y8fn zCmq|iZQFJ_wr$%^$F^Ytt`d=p)2(H#a z0cVmkO7>hTy{cx^KqdL)g=J9uhz1LgK5aliJZJ~#JBuAE8_TbcOdh^c> zqEXU$Q`810jSJCNcI17~1XU@b7N|MGJx{dNy?E;`6Rt??RDvpy%E6}BukL{2lTcq1 zm@IKw*))DykMPH=V0sD1ve6sq;8++xI$mF6^pCA= z&j~LPT+5}CauzGL*u(!W)!F&;jV$jX!v!-3e_rjEGUxYGjLJliH?#rnohtlwHIj~d z=OK74<0AG*hHLKBvz}8dP>zj~288Cypu)_wvB>4ID&s=@r%Gi=I)l0Iv!gHB!kEAl z;s3maW8_=kbLt|J^IVzDiz8X!oy+Ob2k3%TPOapKPjnuka+Zj%1W*p8ATx=#ks!X| zO5fHL=Aw~X?npa_7a}Z8cW7>=4D!u#)~`4nRkTYXyYu^@cP1h7ZDN3%Pu;8871WZNRCl@&aBA19H8zFTXXX<@*A3ynZx$3c%B^onV8JzE&URCxMbC znG+~J>^9$VR6Z)iAZ{2-CTBE~Hf@-gCL$gYCVE05#pV6Ik&Zt8Qt&%7jqc3zI2m`* 
z5hr&KADaubZnEJ;__BA}pe;V_(QdNx#q`utx&sE_gTp6w_UdIrMGNXOqIn>x)T>XH z>?D$9`ztDK*6lXYP}Tc%1US9VJV95?R-2~kxLL*8rV~6nKnWxgYDUh!Czwh0AbEu@-RG29wgN0sq_=aCEf63wi%`q-Rm_(ZwNi#NJWFqvsBG%3h z-F!W6d8&6uk4}t1s+Qch3Oj2 z-G^VX@GAuX%=|Ag*i`0IV^&dP;QEcO0%Ev9ROY-gxuU|y^IOpB_2z4vTnlITGx_Zk z>n^FzKsbggb@5xw#{9bZU2^`M1um~XwJpjhOmpbrD~AZiJ^?35Mk+DoHL{&GFt3qc z=yBm6uD7FF0cI)l%s0){;9iZUwUj@VUP0gveew^S?zAOiU ze?JHnU1_7|vOq6AI`c_&p`uhvYiv1;cx>g{8DRp}E946l&F&7yLAU`Cr?N-D{n7FK zIY)x{YuMfG5~l9@ko-hu6E1X_Yo(u#-^!Aq!;z(Ito2LlRZ<5DX@qRkkb%LHTB%=B z$h#7B-zrQ7=!zl^-+qz|`a0-s@IO*kSW@%>it2{=_ zCRHqXvc$b@Uu`NKfy*4%hI`^tdp4wR1+?DrFb)`8C~F$9&cz~938K*P^_BvbtoXTNpDd!sUF~sNbUC&~BL0Cok`CEj9ECHVTG@_b z7c@u?3i@~IWEV%gq|o8tuL(3(&>de-C`kM^6iBWUv_lN+RSYAIFom&$rp)Zr53CHc zW3EHwspuFzH~75j=k#>T)?02|ck9E%41HmN1$z9U-N@$9YN=rk79femg(|q$Iv)}) ziFfAR{Oi3&%7)r7jr@y8y~^F89rO`0<9FcT+)Ts)RFS8rZi}QN{WH>Ol?uW%j5YWS zXqluG%`NVp3_e&qt=&1ZqWf2^Wr9E|?ei(rGfg3a%wB0{jQWXU=Hst-C_jz zgj8?Y4f?pMGm1BDtC54-lhY7T!j0%5c&WZ|OhJ&sMs-VMdAKvN3J~Y2i8VJ%uMg>*w4Ite zaR@66tXe1>)wQ}b%8kEOREzz)SFyzgStoEkP-t+@GOI_z`N1@66dB}%+&e8gvaHv0&E3cp#tA>g1&N}&B zPl^VC5Onh-CxZsuWySS!TMi5_o5Ngz3XR-(CGH*kz5$J3G`P@=bSD;r#_l3Ne0ozV zh*ll$>tp@#G4Q3`jlnqSMtaV>ezSwguKl;pZS^VVRTNmdHi-RU^$kq}5oEHvB4xNg7HC`B9-i`y63W+lTHX=4s&YzZ7LKj-_{S6%1KLJ!oN8llzZw z<;CyU_^9NqVMz10T`Df}1W5XcjDgvJMfoLJlAWQA8#N>zJi9a`_I~~fm(aVSI1{?> zGB8ibmixFo&F7)&7$C2-&%?m7@f>4!S$Zy1{)G97P`i_Os=+HQk9%x2nHCA_Gcfl9h2g>?-f~)dL%@#`552$%zG*1)MnXs zxni_9`vq1xQ@LKKO|RrBK4P2J?s^iSYi`Ye1p*G~i+@7eS|X<6CmOMK*QJdJ98_4G zoiu7(lhyRm_vmz|%*XyuF#oVKs%y`DdaRo0yb+6mB*Y z9h5aFoPljKuv%6rJa*$t+(@&ZY@*y~is1wa_h9Krq8n3kEV5~rVs0+3EKq7G z%4GYU6nj`j+dji<<<<%~$LlJi0XN)Mu7x>)+O;F8DZ^}BHIFDy%FH6(4bnMY$6F)v zXqNf))!p%t**3wuPGjOS7r?Zk79+Fm>0nK2IAdTEgP|DoTgm&;8X#w5rS6_Z<`G4v zOx-go+%gj$&NWrMfo4Ij7QW@2CbI`1=W;(S37wWYb^RX5Z~qT;e7ReIp)5b3iD^YX`~dzMyOzH*GI$(TrBG2VDw(@ClO@ zr@i;!rf6Tebe|E_AfyN$F1pCo&52dPur*%~D8E17ZLrHX0epHGri*UaI%PMMW5i7`53N0xaYYkOfvaLvT=PiV5;nS%j1+@wU~d#TjP}W(hq*Uv7K&)0hjG#56Cb~ zK5v6PJ_O0R&E|qi!W(TqH~#PUe)lfc^5=wsR8#uvurg~X<4Nu!F&C41t}JK!qvjDX z38EPOR>6-jx^c?@%~KTmlNy=nltx~OgbD07NP7IbdOJ%zuDx6lIdWt_M?Sb@t_2it z&T@fOWab6<<@c(`M2yVqOH%NW5heU^^nv9c6#&jhGkWZQ-C{Gn=!7$Jd;> z`@C(qXVQ#lI7ATlr9L`T80@KPfwlEtxtRH@Pi5bp%rL3#N2PLQZl9eSps%lO2<|w{KgrLn90(bAQ{%hg7rLcfXhFVDJGNc5g-e#vj^@e zy4ELt7!n!PDe8=HML>bA{MxT6Hy=F%*Hp)LRvWs%%+oGIu#r1RX|>Y<)gETm?8#~e zxTwsa1f8K&k#2WaQ)XGe|28uDc-7DUMpA-h;4_!N%0%UM=^JYm`bxoRA&35k1@V(z z6f)KZp#fESL~t9kdMa>55+?^}JMr>3ELT2i@g5A1DYs`rc(ai|XybUw_>*9;;bLK+ z8r=oOTDkxwy30~|Y|T_BQ+;FqIuyvdc_Be+Au*?HeBZ2p=%hh17V# zjULr42T$p4gY9C(`D9&|85eAKc2bBn znF4E9JjF!o-2J5B)g!OLC?odZFU{bPxEFT}wFJoBJ|c4i^8ZX- zcm3V7zjY#ll&oK=$k!@_*mAa{GtWhTeJ@;~c24xWEUB#$|RFEX`xg z)X&bB4+nS-XQTkJz$8{501DT0E;%-Bg3EG%+d*0Zn6sEltbohWUj+^n5e9rrve#g^v$tkQ5h^da7)HoJ%92fRK zi&_uQ2>&bSy}Wg06rJWOfPuje{rgx+`P@az6gu?F-)2cY&&0No=%weU`ymR{7EL-Xq`?dW*bUzm`tq zAkLG^!Y;eA3t;`Ubk{v&fgVTL$SV`VqI04NB1OZCxa$L9?X=k0Q^a@A7 zRoiq6(#G&ka#0Re;#u0Iv^Z_Sa}JWzmD`Uhp;P_Akd@|krHsNqEP>>~S=u076Kv+? 
zCF;R2#KHNH_>Lz|M%YgJrkuqI!oox5cj#6C{s)+Uj`w%pS`F(b*%PH(uU0tzT(vX- zJFV9)q^#-?l%?J7^v5iC79F-ywUQRn?{iit7cb5F*f3rrl>dWR9Lmx`^`*O-9^P`t zoMGo7>ml@C(b`Iy~s^8gUdW{YA}Z3`2L80mhakM z%|Qpaw2#zJfuOAoc>;Bdh2z~Hqy8Z!k09F9zRz?@g=31CnmJXbv40JxBSrb&sMxl< z3)J-`5ki$9&QIxw^E1cQQfMpXS=V!WIx?BhhlfMjBk11PDcjZc=q|iio)eee15aUs z(iTlR%8R0V?!8qSm;C*D$&p$TIs=i+xK(Ooa502fSw0C2R8VoHm4!Rto8gvOEH(fY z@31{znmOqd00m9mV z&+_BkPhNm?wLklunuRQGz4a_Os8%4IZ?MOK!O-O35gDC=Hu5f zW?;q`G)*Nq{e*nDok_3UNW*ZWcThb)QmlMGzKsOjHbe%6vj~ZTHW5mO^!w!;(9{=H zxcCunMvI?0Xf6)f3o&~rT<+mcFX0uRWPW*Yu@F1F{=Z1Cw3|*ulmr`X0QA&{aP2>h zKs&BM$I}tBMEavN(mMz}Sf?guz-xr6YDs%&d+px|jL8Ef%%j%?O9+9>zU0rJf7(#) zKlTwZ6}MIMBGT;aE;9lJN(dBQ8gh$wRGr1K_zu11!hNy6nLjZTM2Y&jovCAl9MIlCEt`Yq>;n4T^r#2)jvq1L_fi+6 zBvLTYZqDr;T(KD(qOV&Z=?|?N+_x3rqQ=(RzKRM3SCy*C**7(W()l%W=b;ywUFjF! zzi_)xwfZST!?E7gWBTXSv7x1-CSW~N(&<%s%tT}3;ru(y%d*n+7q)e)IwuVwC~y`v zLxZAuwy{0R8hGvziNINixAo(_WH2J)<%Ji`Y%px~?<=h-gb68$xaGd3S}%>(r({MF z$Pf!o>Hl>Ds6DE)wlV`wlEQ9<6kc^+tPkPck`5P`*aRAnL)e8F5>zT6o1Uhsj{5rh zn(2rcWw6KQ;;y-nhD7v2&bTR2KaPdTxv^w~KAE$Zl96@Rk}KE5A?M4!)o9 zeL3z49#gCwXv05{{u*hhe~Z@JUZeyc$Vzv<9Cv$8telgb7s3uRU8O+yOK z%fod>;a?HwHq&5c_znPx!{Mzq5p6)$$;I*U21Xsz2tE<~HSOCg5@gd@5>zxa{KxuK zoMj?JES>?FflqYU@YZRk5y|eb#$Ti=gN=TFqd)>*%L?JK7`P;&UIUwK9-RWTKz3Bb zn#ad)3u_6jKFeBS!M#fD{SK`{y;vVfGpe}&1#1v*ozue^8KV7C4co~IX@@I%2C0PM zNTWg88=Q$FfNz}qm%fxCQD(0j8_~#GVKh}IA?i?|kP0u^PhUMD+acMuZOW>z_ctjcyWr&?Y_3lb zZUYs?fEIC1l@xE_7Cpa=!<2iF)#T>-w;2?BAn29p<>9PW)Bi~a7fsMd4R`F>>YdS} zt13~@2MP%T;Y3~}Rb3;_D3|;on9e&0#vMJkI+=--VB4}zHV5ORAcTc1 zUCCYun*}1RpH!L3az~W_$BLzkpDVX%oyP}|g$k0mIZ8NOd{%PitUCSt-G(t34Q2*@ z=sSIUu3?@o*br+=rc7^adv0w4LVPu$0r8Ft%@?blKnwM*+H<|~?Gmn96Z98#IuCwv z74m2Cy*H2?tph`++C%iwdjnM?>febmuD?6}44NvxB;(BEK4p`|hHluey;0Z?UO3(P zpeh;8+uu`ceiAYB@7HNvX(HZy$+wrrjgl}Cv$l~HYa>D6@iIZ&VUkD*d#maVL8nCs z7s>&2_91zsI%b^#VFZxI|KvmPW28^f`%e+v`#+`&wQ=&sd{D>FnII)MC$XD=(La<` zL@RhdDKrkx;t1n@Hm9dxr^{$Cek?oxG+ndk!QleN=Q|qD5v4ybsN6LKS^a)YanE&p zJXrb(wiIeVBN62%eL>MQ^z$5_{jL2p?%`xADYcD=#Uu6cV(x`N{ys4{{{Yi^k`J0} zpKRAmVyyc`$P+4=H}ZG(@I@a5?w-^q7tmt@xJO#ZMe}A_DkSFN^EC%sl>6RC=iJ483&cO+7u5s+1`Ve@(Cv$kmG($cA-#Sy3R}ZQxvkg5?_pu=bHZpHe9KjbC2Vw~9xAb5uJS#TZAPhpZIf`LcGe_P|(P@n?fpXG8wB9v&tVvW7yne zW(KZT-aH+iOk2@<;>xreh&J7tPm?5D8IfAD>zjcfJEr6+u2B^_M|w*Eqld)Y);+G) zbP15))-*CfNzMq)MXStlfswZEYjEJ;BP!`Y#uhR{AAWMxj`UONJ6NZvI^sEOBw2$M z#W&-#gwPXP!>jT=yF?TTYQyEa(P6g>eDTCqL2Z4q(KNXi0vGJyFRYbGyQ=62#y|^^ z#P>fke;u(J+S1XEDj*A@sgcT?VYjZNvnH8M(NvKQMFDMhRv5u|DBN-fFwQtKq5=o{ z?w-sY14$sPU3?VK-gqbrsmM4<9)AY5yB6D+KDXOYbxizEEJcpL>oSFqO`}@o?uWak z@UW=^s&fZ_>SO5GUY@ii&GA4xPr%|d4EMKf)ytBlQ^_xA7<%;;r|30;<)ogPeX`Xp z)~|A6ZY>%=ciH^S z!KoivBGSFkbJG%e!DwST++e82%~L#7&-#LBy5syR3DU6btThf!!6V*e0=j`(?UhW) z6*hsp*UXaUeIkssV!jydY~GQ#KO0vR^6J#^yO94qnNQ_0l-Js@Hz0?>xpqZ9{!`r) zkUC%eFx{>KS*1Y$3Z@*5nTFpl=B)O|ddZlOc*#d9JQ1OH_*_1ufg&W_e`Ko=rJJn# z6s3fZpVHii(jVbuSH{GBtG>P)(hHl}5HrAe94nfq;2*wdp>}yvH&B;T@vM1Xcej;q z2t}03*agt3@};Zj$o)prz)xL!~T=P*K8eGBm9!F0G5IqYgTreKx z1~A0(q;J1wkgB?;tFa3tJecH+3iQl{}Kk-wadQ4>dH9d?Z>%jUnkjwB1`l zHt5LF5lE#W)^xURd7vxD!%3xehH=j3;laLFluE)DAzr4$(d!M5|#z{_9Ms%m|7A#Fjw;w5IVx{aJ0qV5@Q^0mEPhMM71gzQz6o zhtP-!m*vDdgMR!`?rr9ijNCxS)L-M=Dl?*Yv3PGHRv;8A5ju*RZPd++-yhHD2f#cD&+)+}thrL zojJj0!}y?(F*-Id35dI>Y6A`$gc0cs@%()c>}WcBt*n}kyrAJa;=%r?BSz?g+vJS$PR_EnI5Ca&5cou<_5y5jY$_M9N9-k=| zG5r?Y25u>S`!tzyxku1Y%D5PsKnt?qk6KnLAY_@VbjkElCqrFq8H$H%n<|i8Bmq;u zgjc`>R(VzB7DL2LR||*{^Aeviu-_;cRPX;4Ex)61*66Ca1^GAe7hZ6wX~>|exl7nK z$#SS6pP?KlQ0;}I9Z(Mtp>^_vePE;2Q#8j0Z>kUkQNBmp_57hlrL4^_a)K83MpI}C zS`v1B2+Qb)Iy$Felh>OqBR=Flwpx0k1N=*wA!GGYI)HstnneVB8c~ zB|KLwaPaT9wYr 
z4lMMz2xsP-ZAUFu@BQ;AhQT$j^?tTSjAAc$7*!2ivO~+Dx4D+T=&<1lSwOr_H|cSJ zyPRCjv8wMnt_4feF-*7Wjgw9_YuKiLf2wQ(Q&?(jmCWC{QH%6f85;L98n*k$U8US) z9qlHznBVZ4Uw;g$*xzM*f$tiB1qI2M!7d}yE|5)H$9Dk8(v7B_2GLm6c zhL)5Bo~#Yg$%m5(VWxVFnln`gX8QE|F5XlX+OO;ODx`jTQ4V`9G0x1qZ&+|bjvZA( zs#yv8t`)C%da$^v0vtHqXBq~CZz*U4l-XXJG%gKV`fAsrH1Z+g@|(Pl1H?STc^qg%{4VVU9O>(B;2z8Yb3K%t?Cb<3(=p*LU!loeB$sTr)6$E z(!6dMFJg{1z@5_GA2kXb&#dr9r;MECBeY<@GlLF6_Z#(WX2Nebro#h;8OdJmKH_Vt6cf$&{UsK1J%I> zt%VRveib2zr=eKW2CTvYpVm|_xMLYJ!#QdVDsr0|XxdGO{=>@cnN-Ro;rR=VRpFHT zg5@`6L}FmF>;th3bb5LdQj($~%|T1|s`{+zy5p^((H9uKx8y_4X}m3&2rF#Km-C(B zk#G61rAgbG)t+{S>U}@EXpwu(`3}TcNUzy;URMj~l1Yy5{GrUA+u8B{*)X@paI_Rb z-)~YIMp^WhNu}1+-bs6$oBBGQ&2=mEWnUfN_ij-#N0kFJ&F80q$KdfBXTQYG8_h(7 zUT$GP|B4SqoHDSA+H)ZmoODYy^(wSM|5{G{nwfH@olvsP!3YdFg(cBG3=_zViTE;! zx2!`pDc>tP)z(fnV45^rJ~X)nX7Tj)X!cblvC<_?h;~S)j!t{?&4(UE;*&t0E1C2N zNjnY8M3jYgV(I}NI?;FSI5oxjl-vu2#0IR2?%gXDsA~byPTli`e^=2Wve9+YLe{Ao zQ?vV1qQkEO+>L>i=A~S_2!i`vnBf;P|9XB`x5WEe$ zz%~fAUX#`!z_jU)knS@o*X-6U$lwdZx#MJ9by{X0d@PcOlB3_V4arNztCgkvO~eME z>4OzESv+cETudaZX_HLI{z6_po8kDL!YGYOhBdm6&(;!SAX4q{nc66`=FHdFe`Nwj z{KTGXnT|zdKIZ;~(qb|e-c9lS=^%cjrf23|tVOk&6a?md?*UfHqkwt_puDl^55TQ1 z11m+YE8fRNrn=Cu5aO`jq-HGIiA;aKba9c}D@*@qTt zcpr{!Cbg9F>!Q3GXMPN!K(p~hE{tVFZ#-ZUpiJBH!vuu(pCm!2qf#^Xvt81K4I_E7 zDs0Wm+%-d_zTPu(RXMpwf+-NwWk!Sw{u8*zZ9gJvXNbSrVyXmH*T^T40ZKJBz_O|; zy^jEWQDNR4?^^{pLZqEVs}(mMp3kevru4>J>JMaSNjPu{&TFiI1=V%!k)xiIg&f*k zyQCmU?=Ej>bMb-d*^#YnS^{=@6H@~91mJU;u{73RA?)*zlHPq++6ctJKRJZ7d^NJp>X{x-KA4~L8?n(blP-3gq@pu5W)>QUJa5;w{Jq|nQjG8u-}#MtLg9(u@znZ1 z!k>BB!;?!@Bt%ZGwF!+oukd*lzx@v6hyoGM=q#ge4_J8SwNyU548}6k>H@&-YDRXn zR}xw|04{~mcIl48C@$8}BHGO5p~n3bmJ<~N3!W}+1Cw1rWbY(yjl=Z14w!7_Lug&`5|HWM-=YqB}d0H9;PO_AFtDL=t%wp-k5o2Y6q z>p0$uFcvjCjQ}L524iyPF`+2BU6)VMt6N((TA$i3=)hO^b!X@k2x$5)%5MmIpu)&) zo~}X?D6oFEbUU=9s!XVW*jnTihT_GHgqmC&81WHevqd!}!UCjxQVe+^i{-IHlzq)O zxhNDiu)N8}1_fFz($@(8c`4V-o~dNZ!WwNk-%w@vrsBYFyQ$)h{r$%l&UNhurA(IB z4H7=`#j)X9%6I*&HRex{8PI*a!!7;F)}J}=5IKK`xHiWQ%bUqC;XOAz!_Xu4dY7H4 zKg&XNOlUwtevk+UNUQ>=TvHOY7%TjSNO{2D_Zp8><~>Tqrm;M>OFYe)<=x-z2nnq~ zM$p==L5hBhCKxSXC%%8KVE#Qb=lS7hoJjh=0pboelz}q~$TGFPaLmPa`?aG2P`l%_ z#uY>R91;M`=ydh)3K?lkQA4o$c*iWsDH?i=SjU;r`+gk)b9P?2foFrpwk3zY$(L~O z$>Q3k5o~ezqq|eiGS)_(B(UHk81lu0IEMIke>tjc7(&*o06qKD| z^|s*#XLw_GmXNzrQ0Sio->vZTy(8B590q;sH{A;bF>eb|=2mA7j%7gNh!-x#mPopm zKn9PW>H329Eb_Eo;vUfw3s&*H8%C&0)XUoEWUc8Gpvlk6F$j}c7=3^W=#50?ld&k^@J$C2pIP7pCQD3JyR;ee zN#53aco^TeBG8fnb2)%7fJd;A28z@W94*5`!JZr2S4cyUjOCe0gA172vVc%ECY&uZ zVm+FlsAmp*%vY$tT#?{HHoUK^p0EDUQXUV!`fwdj57eetE{2>bu!-e?? 
[GIT binary patch payload (base85-encoded binary file data); not human-readable, omitted]
zQ!W6By;7Bd!_WbLT$=|;1pa`=c!#tK@V)8$J={a#eqdh zNY@S#1Hjvr&EdS76Rrt#_E9e`VG9V>N1|2XtXm3d$Q&->VeW`$Zzdi@qgT=&7&(q3 zQ|18S7saL4m{H>w`!2kdR^g)~SN#PGBQv$z0Lt<)x0=XFbO}ae*hs$~q6F(`hh-_e;c*T~i zbal{k$38_5wb)b)aBhh9@D%ZZ8C?@XZQqst6z7dd;9m5ZnCinP?FU9UfW(V@n+5=S z3p|D5BS#0+Ak=Amu%ZYptKctkyIVM6sbhCLZSt&jyTkO1FQW(+rv8Fe#)YYS;+Ftw zCqSs2fvbKMHTE7Tt90H1&ZHR(&f(WdZ&e_LnvCWJ&CTXJuC;RWKX;%L$&B8w$%b*I zQ^l2;aA8PudwGPS>?$ppN!%QDL2*+IOu(_JCn;tA{4G&3`o&i+lNsy`kd=azydjd| zeM6hIv0@Y_P3Ay}ot}{fAkc=k?$=7Mw&;C3sQ_yz3?1aXUUXh|l~nIx{V0q4p7WPK zv4bnQrnW0y(lVk(^#!|U?&GF+|4V>$?n11iKz1a9tNH?rjoDrj+T&BuPC#f65+-VV zOup`P;}@fdjW{j-Clyh&tI!z5l&05ni#$kATu+n4RpgDx!5*xKql<_`2T`i>muVG* zfzA~e1eC(WJQsT91aQeE;JuFEYU6P92?oI}xc#-99`g3>#9NF9xf0i73^wC!>D^8? zr7%ZXraP7H@E1NMy-(vTm{Dg-uP{93C|8U7FXrJcocFfzF#+x-B%0wA72_xp2k_}KYZ4| zLGE{?&Xdu30G#dMof#_Zk!Us4|6O=NTOS}+)?(o+J3^0`2S?%(AG$;oQ`Lb1?Yk3O7tLmx^Vg7ycy8e%cm=WF<)qydegcHg~cPbgh|O3E-Li zgcGS>wM#ROmkko~gKQzYH!c(mH^0T#&Yc#ib1Rn+UdV?tzk#}a$A$G5BEfLs{yX z@ubRXsYe*+hNso%b#2JjFaz>k-I7JwalBcnIa`ITs4X!_f;60BwE$=(Y009Z#JN{* zjW`8OQT+Cyk15C^Yv+@v?c8LMq8n0Y39uqe7T{UN%2;;~ZK`ES&dVuUMTT%`_^c`` zV6=<`RdC+q?#A&-UpQVXR@L@|s>7Hn&#owghrK5zApR7&4NL;SLz(GF%IXyznVj6*&rxWd|wDpvyBEy*pa2o-@dBAxPFtD3?zxfOxi6Uanex!9qlO%JgD+ zg-L=|&8QRnE=|Fapj2GwZ1+0Z{BzM&I!23)g|vN!9;gLJSo1gMzTB0N6`| zEg8o=?80m<0|ukbs+~RsjuW`npJpi$L_g8By@%AUK>z>-K0%rmN#PGBQw0${0EIw$ zzkjQu@YZYd_dJQ;QXt`PHII0oXkz4bgc)+4LX2s8UmzSRKVBw+7S3y@#{|-K!%Tqu zMOnHGfWaH&AO5xz{V%JNo$y#I2MPr)y<%mQNGpIMG^v@t5>Q5RMKiAl1C!qf7lJV) zB?mmj!Dfewt;u(vG}zi&nzA{frn5MEnFqPOA*qwjZ!XHY)5@g5946)DI5-WTNtKpx z7uEHq1=_e(XlRK*Ql1_soUHpo#Cx*PEw9wmPBueu7hM~HYXJJjNSjr@4Dh>KD`RZc z?(v>WO{?q^PFhr$FxY=`NKpbq-%QvzKFl(~<4%)r7#6|kVAb7{K2E@LL}dZ+^Lj^vt*_ffXm-o1}I zkS-#dBuUFi!%`?seDR%A*rK=R`nMrf;E;(wQ|VS(@oD$3s7&<{6i0Hz=JuG+^xbqA z!idA**`3UB{Q?;m(FVYUmNkbQoBlZh{^bJjNi*a{pUtlvKC4kRd$FUC;PxlE>t?(i zI;ID+5;f6tL>J$&{2Ku*r<=?V1w^{EHk@cd3sCdt;?Qnpn3jw>vTTlNcj+!$MU%!& zj>-bC^rPKG6YT8Me!P6qnGo;Fq-j6yp!o#v40m#5WY1?G2Qil;)y+vU1=;?}Q9WHW z!qdV6*Uh}3&OnWoWQ=6r1naVQjA;-m*E0gaCrR|pJ*)OEYz#c=TF&`L3tSo3R&|}| z=#Yh6-|TF48F&F!-yk(G4{U^+w$;%;kD0U23lXr;(td{?&MdvKp3)Q@{U1y!5eq{{ zXNCxYIDRtz-h`AnA9y(j9Yf|^_mz`MhvQmbe9L(DjEq>Wi4S;nQj6(5SjBk}c@4&z z$iy7AT$fUDUEB3cKX%|zJ_X@}^0|J~hFA~CHNDE~yF>9h2&1e<4q5gx87WoaV5rog zB%#3Q)kLczroZylaw-WP%~7iJsEz%?IR#6n4|!;3GQOYJSnu-CG>w2OtVlp1tye)I zbpwg+)OQcgP#ZZ?f;mMnSS|J3n~R5DiLil@DIVC>NG9pS18_s^k1eRcO30X$WX*T- zC}`1m#w3Fb^DpiJ^m?lYn+}z>)F@ujDG_)F@(=*iHeIN6XDsb5a}#e9Vq@n}&dqkq zznX84MKio()Hfvg8YFpV8fDi%#+_cp)(~!$BBVoH+9Hrro z;7hq(1APJBA&mW+DFH%tp?lcKNi9BpNZ1yiN;0Qj`ZLAb z&ULn(l0QKVY|L$#A>LnG^(x7q_@jbM(24?_#3JhB#Q-27Dhk12Bg@l8vws5OT2J4% zJ$y&`b!N}p-aeF8g`%s&4(yjId^}D>? 
z1*Ug*L;2=OlSW`S01OkR@(NEuIk^Ag&DwiqZzsG7hSrcMwm8z+uq|f*c{b*1P@k>$ z5j1=)rp>m~<529!ep1a^=*YaXq_ASdW|!M+HG{tLR-b{B@eP`-oDCE_Jm@*iLn967rYk`75z>#*E76{5fC?KTU zJ_aG%Ohqcry(XH;+-uQfv!ixV0*7!Y-5J(I1dR2;@sP{ux|8oXfG z11*Yr5#aTWHHcxD+n&%s8K5Q6{FZZahpnv0rnsk8{K!)9Q6c=Xwp1dnz$k?eYHKQ} zaPzWPwgy+3Ix1xlC4eT}H4z20b(!gh+;Rj>=KqGIL+P-}C#cdH>=0P`hu{X)mYGel zD^XhI476)yuM(D<h104lXnn=TDXYKcs@y)K4l4ZDOgWl%{&aV zC6NxuDbWW@QWRILz@n0;UfpcU=KkUnhR|6r6L(&Xmbg~uVaE#MEmN!=u=^z?5#cz^ z(tt`F%u|U#K(Q+x)w|+iviDCH$XT>IA@O)=JVwirRS{uvg$tfZBgN~TQEuVHyI39m zQsR5S<1vK*zESo16@l&`!h46z--PC%O9Ng-Njg-TeE5X&Qb#Hue|S zLZ^4Ks**TUK}|}cg@m{O00d4!nm0+|4<=Is|5kSYfv%F|mW+ahEB2X?IKE?}g~TzQ zA?bfy1%eu7zQ99^;R6hTG*Kmo>je|p*v`X-w9xs2dq5jrvQFp*7_QldH9;tSx>Qjl ze?E})?=qbFqt{7Y3cdr7ji#&A%d;kGvufXUF+QnR$FdzLd3&uW-l+pFg}AI`#Apwy zAwr*FDaP8LN45QE6vWl-l}ukB-+akknT7+zzRnYwK{$^dZs{~AQn$=tQt6linQ(du zFUMaI;Ii=hxIgZ9&lMcfu@}3V-3ebk!N|tiWO%K;j!xK70O4x`Loq{2NVp`Q<`uz# z{AT{-zCSzjT&XvT2E6xU3rM;z8ZM+z-Rb5}v8s?+l9xeC&&_>HlK2qZ-Zi_AmFtz@ zx&)J(lo8>u5e$p$xDA{Gw23|+=^lNXR9#Lyxg&Rk5YqhE?j!sZ86VW!8w}Bg2(bGnJ23%K)`jQ)1yhl z_BcWks2wo~wnMFYT2eKzhSDy!_cgmqm4<@i8p-noURRxk;FD?@*8D|NCqnrQP^kId zQYW45wlx2~g5o?+_Ww-n(71P^4<@_~Gv&tqZOsTE^0g=$$k~mJ#3t!&v7maRs?BlY z_`)-Ldgzz64h_Pbpq~3IM?<`G#^e+5)*uMI9AEJ{@fl=bnzHUd$!Fk#A15RE`Y4{N zbs`p|5CV^ni}zY4L=s|8Wn4yN@w7G8VSsb-j1qd+Am8v{-s&~oJ0ZJZ`-^Os395HftpxLTplYC>P#Ib=gf>5N08vyz9_g-d zG<+=Is=JP20huOUt3{bBw+f?AP$+I;slil9nn9&(MvxqVWcaz|cQiHpbr3<75-`wB*`EEN8 z0S4OShlt_02Zhz@c8l=YFoIv({>A|ZR;0K{M*(%l-MYl>xXUBjbth+#h=SoIll1fZ z1U1*BsS~mzh6#b$kIAp89jFJHKRwv#V}CI0umAuAK0%sTN#PGBQv?4O02QPetQ^Fd z5K5yICC|=%8_y~m_IDUow+|ad@y!AkvP)v#t0tbVe_%IjHL_z3nJykb;Vb>cb71 zIQevFR%3F5W-uYz zdfBxl%781IA+jyO_)KB8QoJ`xM{X6HwVrI;lBs%VXLd3~Lx_nZm%sgkVToYJ)Bo$=1>2&=>WPn@zHy zI9n<%qI_VTAST%G>R&lw;sLV4Ym_gpWX+eQGCI_3D=DSJa*1B_3BNw*$eLkCtAe@C2_q;LC}*UldaHQVK}2cV7T5WWs{|n(-CcIpB>pN8f((5$t=Qz zOd5(74~eu`kX|zYWJCX)Fq z`4JIP?F{<)!?B*vR_5$palFHb7aK3^<(`{%qwcP9zI1rB`eb;LAo*nlbBRgCyP%bH z&dqsweyBUJ1oZckLpmlJdkU0`0Z46WqqROfSXDaz?k*}Kq0hNOG+V)qR|al7*Xf~Y zn6KY@`iZlTUTCwEM(nW6f@=Ni^kIU}i+bmt(#o2&IoG0QkL=|`8nAxlEKpB9a`YP4 zx*8ei^N0$)Fh%{1fueyg!-SYHn>Gzlz*y!dcT83FSYsb8GY3(pK@)4Yjr$%9bVp(Y z`@=td%CK%uhj~JS&Q6JHy69mD)nwh#CyTGSzcnOYp=MDifT=V|NJ|`cS;_EL8Ih0I zk~&jv>Gg*N&zG^PEKiF~ejM1^m_wkqbY$;*`3>n4CYeQeW2%r|KuMFt?dsHzT{=<@ z-@G~q<2g@GAbHLfiF}ZJS@J2yzPHW24cRRN-%UFt1n=O}uqzMuSA1WlB z$M&v}KD6Rk(01qIg*em21C(JE4Q@c_Orj7;-x~_wd(`SX-aus;CU?^%KjzuECozJ2 zRhN&0z!2ODUkCEAd*X;rPmly|+9(D}R1m|g)yp2EO3`YRdp@gF!ueMezaYtI(3?yP zsASoaDVOSGiPUL$Q221sx@#y9JCSd>nqg)3%g|@*RboaiLG@{X1*nO8UPWE>(u@n! 
z2(;g%S+xK{Cc@q|%-IiQJ<~MWs-q?Xyl~J!wM6S4bzcEU1cde+NY(t+Y|JaY6wPaY zQC^qo!t#hm&Z$yh%ZFlYcV}m708^~a3%uly;KIwg@l`A-R))_@)W+>D-}CX8b&)!P z<(=Xx^$)Bl%6u)t-o4W3kpPI!#4J>SD+fUU00bmKnt4g#4<=Is|J#5p$@r<5)2+(F ze8ye;(MHJ2KbgQUAWfO6V0v6hMR*uzLa+rXWB@Do@_tkk$Om6Qd%TP80I3sjpmwpEszxQZ9kl%qlHDUosLiiXS7GyMiGJA?Wf(W`!k_7& zLX8p$qJhM@L^DVJ2cUOLnSz4$CBHDedbQ5EB;+6i^-ah403B2zd>d4*b0wTNwEzU~ zzWvFfL$15voZz6ps0Nn7V~!g?B-PHSu08cYsSw?;Jv*P}X)(WfzeC{N7qRnsEOaDj z#XQu1LK3(|v|_cufN?ve_a!P%ssJ48`hn2tjY;Qw`w0KtuQL^<{}+Rspq`QQ-GaL4 zb`LV6gI7{6o>=81Mr4AbLfk@qF@W=x^&vuhAWa1@Uly>Rg}(x@Jo|U=jNZ0^hGs3&CQsW7qx+h5Hsg zR-LhKNNxDk5E%LhmHZ2Sw}9p?<*~3eoV4QRGBx!f_+q9N2Wk~MsyxD%)}t8eDPd$S zrvzf{*m=Wl;`C%OYD-?Kcj_K49#3(VewBZJdqa=JOfKAeN;_k_c2&5xv4D`${6x@0 zeon%WnKsNzH{^30YP;F{b3GfI2k;;=Y^-PCBdq<^t^sIF+3hHiMgqr6r0=x@sfdsS!?YHBW1Ba@O|fQaV)ZC==X!i05x!5 z^^TFiKVB~p``8?Hovg#D&UPN(g3P2brsmw1xL0er0w(^}k1a=PR{ zvdyqvgUC0o1oD!V0`@Xvn>^5kV&j=uKJ>NIatwqrqsKc|WFbNi#YwmSZt-3(Kf1oe zydNk8BS-u^I9B=B-@jH-r#|a3OdfhQdh&;mzeo>T1`;rx6U{1X_Bn zh@zG)&G8BdWe%N|?t4e}rPasA^8({kr>{*fk!zDfyTPq4GpMy54LA0K`j%yo$+;w0+53_o+@^;ywA>qxU4BBnoOLC-6_bmg1OONP95T&pGa>Z}_j@E_ws4l( zojtx%VMx#}zKMS*jiSY_H>go^T&7J@oy)u#=GF5@8sONGtx2Ci)(G>(b70csmEBoK zc>uGpZ_iD2`rX-YzBGCf1q-UecNhQc|8ICPQ@i+Twwp#gQs~fxpVag6Jm`s%<$aIX z^AzhNUF#7~`_4?-W0+bt{xKwad}j7o#%&}a8oi8sYLB#mGCbbX zBbQuziENgh>uKfv#oUZ(QE{Z~ZOKk)18evl$HFB6XcvP%`fO7-Q336lP0wO^ilzB2 zgCN`1#h3RC{}Nk|T<7Ht&M?yS_I0y9;2hCSD2pN^$FUv5a(YFVO934-qA^cCt7jak zPKyr7l)$@+DT#XQJZ0v!vgP_AUq-yFcBKyfIPg##SA$WSSYMvhQykcmqia2FYbxRO zgl8Ul(}U&qeENE6F>pSEM+FfN)h$tlzVqS`v2$hDdhS|u^vPQt6s+A~0u)Lle@n|| z&6`Z$i=bm``yC$fa)SeqhE#96^|PVHO>g8n10dS0@}1%Q*aF;+GV1nj8>MM1<|vx* ziJnW>mKGl-OFpvmxM^Os;oX9g3o(_Ni`ggFe3g=YXANk@*ShWVd=4Ba_#|&%%MMtd zo?%s64Prc4H*g&1v(b`)1)GB?MG~8geZFV%MW=1Ie((C0EShGhm}IZmVndE7ivE0s zqUT}}Wg?*#4ot;FQIr44p`VG@$}`j5-Dr9nx<1H6Rc(+k3`Bnn9%MKm-1dnRCax<3 zMG?X7@4o;1K7s}iaXBx|-U-;b!yt#gxHac$w=lWMEUUw%4U*fMWCQ-i-@x^dThyJD zfHVB~5Y>vQ+co&BvFGT^P6qINL-te=1-wXD1&H9v)yo|lbbRJ@W^T{m$L_4HwyoY| zn^(I6P1EBHE;ryzQ%sdxvhd?p-Bjy1{_OH5`xy*%H`0I5r9Mv0rfFPXxreJ(@=|b8 zV?EYqs(skiDn8nu;d;w7-R%6H& zaN$lzu@Fi7Uok1hN|aCw-6ZGjiFc8Bb0xcrq6Q^oSr{!@&%tPZ87v>Q9H)mL&f4@< zy?%e6t^Bs^0A8Ty3q~OZ7pth?gO$ZY#@M-;zL1t^o#hoaK3r-Oi%Yj zn8pJ_+YPI`v-m3dtx^;Ix*u6mGcs`t1U09m$vKE!poDQ97fEDxZ&}*x(&KO#ETP<6 zbgk7N+>@v|J~=*`l8fehaGJ9mm*fBvgh1cMTlYUkY_G}hTW!XEeDj@jqnoytd>0#d z&K5}Hm4G;pFEh)o%SR?}5%LLRX4c0o*0&1Dd^{+ZDUQ#i@!f*KWb?$D?e7;ERFbq=Cx+Nf+AgAX$lUd9@-^X!OTHS~cV7 zsEBU-Z+QH|<$P1N`cBmQvi0sYGq}hV+s-JTrg{G4@Si+i@Nq)G&Hdw_p0Ya@WAt&& z#u)Z$I@05t=&C>PTXLd|>wa&W>E)m+mebV2qGF^I3JX+{(u8b8WOXKb|(*9wHmzoqd!6*RYyNyZu$6qb;|G7eK9s z=v#$TkW2%krP=a1+giTsRr}$K0Nd_*U)^Y?I;TO~ML7%If0y599eW~V=nuj`Pd2ne zTf4_wI_Y?3rn<#E3a~bQ$C`xX?v_j(qeNxK3?7%SbdnW8t2(F9z9bYU=$TBz$7wDz zu`tLcBg`l|fX1GAbeDs8?uy0O4#)&QeH=ybnaZhpk=)lX);q7VPl|L3khy-2MAgpqvIS; z+4qNqA*@*oN#jXDbhAh2T+I$2X}iI=1iyUc{ZIe(kKEKeh?}(XcFO1sA0PH9HiDtt zS#k@Au4DiDs$zplAcIgSzSCKVxs1m0NZ%ZZQzaA+J!yKx-0tZ1KUE}taMw$Bs!8oZ z)Je=Z_X0kTpme*kF}*fYw{P;8&CP~DsO19fN18rC;|<`mViR^m9%sWJ3A6HgsL+h6 zS?i(wigldN8`NqqzXkntKLOS^?y}BXtlJN>{X(G2NQ7LRVzH2wB{!lNV#iiHzz;wV zrb5|21^C|XISj}FNWDQqB=w0%DwOrfn3~%@N-os3$Afj$67-U?EgG|z(&e$#*1*i5 zEKC5`iuao|P4C%@%Is0?rnMy*dA;pWx7qtRJX4x^F+9p08hFbsogg4M+#$AVQMj^Y zj*h9|zuDd6v2xyy<=-{cDmlRC()BmZ9zBU_|7k-h@Bv5tUrUI05Usx`WEoseZALDJ zO|EAp>k(1!S$Q{RtsdyGvcGc$uZrBh_VfrPPB6MltuNdH=QOhQuL(Mj=CxA`$4lfv z+G1oQ*+=4qjn8cTGIaFn&MAcr1Ii~5vi|ijb2kMOFLrke6qdj_?cCV`im$!UMJ;zt zu*f1;Rn7t%X{)k?buqOsZb?1!BwSvq$ z#N9u|*%#c=|Kdw;Qub|s0y@5i2$j*Op~*B~BC&twV4BlEumX3u9=AOPA-C zO0?hKmMao$(!5#8nfY8Zs9bZe2E@GV2d~To7jQz1Vnj=QDd+j)Mh9-t>a_|lyC~ck 
zTw4i<57htE$_9mP8WqrmG_#4oe{hE+er;{Hi~UAa#+vHHJ~(8=5ItnIe8{Pp*^{9p zNXeI&UO}z|1<3u&ySWQX2{*r*LRgHDI(qIt^MUq?ITkKwlcg%m)AavbE9lPIyVP(_ z%WHG0({OU+#+#&NSmF=d!3i_Hfj6AUX7zaREp)s=T$e-EO!0fKgc|Lp(30%X<#Zjr z1waYwI<0R-zD_b{&_#a(glPp8`q4bRBV!6)3mKmaVL1fjqu!{PldlX6TZB%pB> z##BTVW>}3J_rLq3{1^IQk3<&TscM zT5G#n8gb5mdv%V~4&mM6UC_bnkdSBHi1?$a1y~`i0c_D(nJ5N-h6MoiJ<|T^b9J2) zmn!IDOYlf}XEYJOgyMHzT)Nomwu|9C)x{5sn@uU1AsjHDcHHzJ*jCnu zi$MKI(Et-fk*xorcO@Bm(=6 ztPxVXS4z38`#4H$s|7|^6d`=yMhtP}Y7c2!O=1c61f`3_l(8t%OjY`b^?UXR?A9IC z@IYLr){xR(33m~W#MuoX*<~#zvI66#v>9#0jUIM|Y09hgV|+xs2B+nSp~F%HOwu3){J zm~WS}eN>$R;^zL*1&LYHte!8Q!h$wnxfv-&-)cMBs$z#;WJ6jQ41%X%7p{&{siLu- zq$Ul&Wu?sf;66e3l;jN$YsXc;yS5=!^*D8wK2>;VMde%Y`DC&XiRjI6seKaa|lL=qJ8YpzK;R=sBgKt(X$M{4G6 z10)-Ryi{AlpOj~hy`{Ux+J#3q>*5jcv1fYgn-9+vN`{tZ8fRa1Wn}Cz)AHL9=qEUv z!)A~&g>*&6S}4TZMehi=`kJdE(({fxgt2B_K?N9?4M5ggAn6kZ&I!`MxQbq1Kz2UgIQXjx?3c7GNrgLBqag3V?6w+-)N&B10lr7>Eewt9^m?N_f;lMO$4D|$V~1& zo>l^1NV&!Yq1?W+_l7~T_SGz}*}PU?{-}Y=qz^hc{ekQpV;r5g{FOKq-I50!K5Ou9 z)q6q14KM$c**u#jg^t*WVnGPm2``ZYwP~O@YPu{NNv*5!M6f;a22mI#XPxyTvP| zHF<$3-wGF7*{=SWSr?*gs`);6WHL0bB>HZ*T{>zSIf>=v(%N6%oW*&0Psc?jw7$r9B_IF*1D!#dA#8v4 z`*m_F=7ERE8)+rni^%2UD2Zb78YRz$$pNTlB4syASy||*IGob7UZ20LHIB&O%`U6b z=p`%u6@Kp+o}N5pAetCT(mSxG+FVBY9v1!;mA;d#NWvN!;ej7_af%?A)rOuk!h;-? zZchw*L(hRcfKbwZ#(;yn)CJ>o6PZAPI-(Nx5Xp|{@9$~qEbl0bcs*oNtZ>D^$WqK7 zE7Ak8-G*MU^?3t*e2@&V`+Cm>R%^E09{{i>IY)E(cQ0`%GSzE)zcRC zAfIv7n?^!?^<**xS^iUFoLyEAwUk3RGOXg7mC-6R@aB`YRtHAVyoWMxupF~c9ftjA z=?{5BO>hvOXBgg)w;<$dN-1}eek7eR;jIVGPH=Ou$528NJ^q#t)0Q8VOkTczcxE);030dKfgyS`_PX82Jb$EkMIe71f3jsXso*5wSjmxIzC@mW`sdrwV`Q z{CyI5o_f`Xut4z`Z4&Ma6phvxN?Q{#_s>bAbZcS*y583z+MBVf-(CsQIf$ZC-zDPn z_!NfIz!fS;%Hur%_TL~DqtF&ojUqqOq7d4up_7xRDQv!q8fWz%Tz^C-Cnq=yHn%^#5>hJs z+hRSeuTbZHQXbK4cW!XMG0Eg6UAg+tIX3})+Lf$!>UVgJ0j zS*t)DpX!f$nx$|_BR(@`=VR=?$?2KPGhRav^2Z|3lM>E$bR=^L=4w9pVX=KH;^@}~ zO;%fMAc=-*{S1eta2T^^w6qqCCgFxU1AzLRWp0$re!5eY{|K`JK`LOQg|y4E+HTsGt{es#%@)64N@1?^DQK#8*834wuk|r z|L0$SNjsfIAlqFx4h`I9uG!Z(g(NbPEe`rSHZf^qvm5SUCjvHDtP7xr)oHzHdO%Z) zn9t89ibexGlc!=Bn~`4>&L*Do{P(+9wtq@he`sk*+=!0*xSA+4TX2@ZgjjYDEppu) zd`k?L;Z>>vhgM%aYpVu5mocb}C3JcoXb0h~PT5wZOi(UjdyDj`EY8+D0gJS|az`*b zCrA1~Dh_smZ%$;x$Mvddn;J=6BsdbxXUdK3+y0xaDDwM{Yxd>Os#d6RfJpZhurUhszC!Zo)fe9JyyaHc6QtzYR2#rU5f=2Q>Pz?N;#N{;BaKOJJ<00;n99wF*1Q`A~8u|GoF)wH4c#Wf5+7;SO80Guk&&i;VVQDf-VC z#XQHd&QuyVN;P|){0~|ccumZ)PL|hTPjCD546~!*H*#ggAM#&=KwIxiDzwjKkN{4d z76=M^)8~jvGw@ySRX9w6%Z9Yk`Bti&QO|XgqYb<);z2Ja`4KM(0L5oWox(aN{P$vf`2-QzA7z?NhBS8xJIBpk&y#7L9-v7;r3S zTbj>_DRk!~s;cPBFVu}ke;hYY%m3m(Dzb1VL8xT}SCm~1-fu3L5-&>R_7^-oi(jp+ z8)2cll-hakW*mgg`4>#&8HHs;n-*G=t0T&Mg`+lX%DpVHvW2cLoWn_CN@#Tu*OOOp z9>uZjSyua z+=jrq**g&c{%+2000EjL7HSq z;SVNL1wa4N7lup3-d4hOzLk^i*E~RY1(zFfPM~n+kPAf^l4{_-6;5rq;l=v)=8Sfj z(|~ET$?f;5?Y1YHP^Dn)5=PB$JTvhq3}wsYaJSQM;xM34T)opMwpQw&S9(asr5v8t z%|{eN5`mbNuPdOzlOxSfL33fH$4ZcK4N|H9tpsFj?Z76fwK?)~u2 z{n#}p`&Rjq3S>l%b>d5Cb1|TXuuuE)V(hrBz!K8>JP7Rz^i{$U*Dd+QA@z7fR7e;= z{tzn13H4a0s}gF%;)G$aMtMoCn@f%ij?7FMM zgC_+oMnIWrVsT=>SKM`3zBuCnuXB2-%sf6m^8F>!qRS>PA93z{cOO1Bw&|u(?@1F= zz~^&#MZJGkv{C#i9$>hqv_Kh-P8CkhM)kUqXNt#6UnHPNCETSqZ^??@OvxJiV+-%r z$8Y~+Hae?C>90q7{4_&Oc|hDc(-)Jbom|G0d(TW3)}C!(S?@SO9hep8P^4vmD}9GM zbgryJ%jt4ucSNB$&+J^JjQl1@WQqL?%=gJ5N_&}_r?nw)hD=7GRX2ghXvUYL=?^s9 zIIc83yHytl_jb#!#8fmN2+lc_e5v_DE#W>N!v6`a=$q>8c09MXYZ$2(T~JEym$-j`pzq=5XZMf>feFmWt|8J|i#)F8d2>-!^L^AF|Q9IsjD zvvjge5po=ZAi62c)z`HSLPZ1L)V=p6B8$$IzISyfpzDUvF%9!`qD&cNG=PJ}V`+~P znd=6Hw991q$Pp4(u_8 zB^LWMH9Rqb;Va6=Cn@Dec3DAM zkrUlp;`p?+g?3}J`W-Z?3>{^Oa-AQ*kff4+qI1X0W~Z@<8}D|#MhHE^uL2CLY%8ij 
z(*%+>_;@e-fy)?hq+F-{gt!^eG(X&gg?`xSeoCgKP*pwjD=V@?YLC1DU1`G2oWi>1 zzh2q#U`%j`t4=SofQ~t#nel zc^2YLYqgr*T5r%x_-pBQ^b`Np$~J91NH^tSM}V}!?siZ;6_O0kC}h8aJ?yPBg#_E2 zw3Xk7^RUp4ay0`Mqft5x^!<^o+^(GTjW76su{|*2crm-h+%Dag*=u*E?2*rAn!gO$ zBF%TSb%gcrctVBEN*a`{+(0O1?AR0mZsy<*uDNCp76*Kngp0<=LgEcZ|Le^?;FU7YzIH1qwgRF zaM)llb2qaF*bsS@Z0cauuaqN~FPdGN775PbME#k?_yXD-@KKe5Ju>(;w5WhT%2E1z)vua=ZYbdduvB3cU4j-&%acj1zd5@c zv$tyHo1scbyhYxW`fr09gdyV}z~#LwdCs&ya@y`61JMMs*}Z)VHJAoF;gv6MbjU{I zFvD4MzH+IFuCgla-dVyeIXJ2sOP>!>{885WwbozEn>>Lgs++qpz7V;h#}V4cPk#_| zd~qq-_ufNvsVe$(<6bWF1FrBwP!g0)4BMbsCG>z?&3eDEbl+iU$l3ldlSn-Q!u1;R z4xIArf(cMd2$0^m6v=Xgu|#ot{*eFx1^Gdmg-PKLCQ}3d+k$=_ZXlrLIuvq>qL0^{ z=GLY%JvE+e^Sr6Z$tU9N;Tj2V_PwLVEx3XmtB64_XP#OZ&U*5sg=zmkf=Ih{)mRhV zq~mOLUP5VsK9Rc=I?Tm}v9(+wovL#@+1se4DZjc%~e|=UGGpZ!B;XeDd zzx{p=%si$1D`!}ASlBFC;bJqM{VETuKMWoErM6KA)qcv0O>d9?yiB-gP4HQ_nQ_9z z4suufpsgRZ@K$1tdafHVvPMEnZut$P%WhFMqlfQ7tdNY9)8;pMV+#Usjp$Ag1-0|| z_*e4UBfdFt!6ISS&P7m`&oBxk2nV%SAoW@}B9Junoupu_iU5V@prDq%WStue0x9$2h#ySIAU8lIK_HZ3K~&C=|u$J`;jiHntYy zMFm(CpM@+E^YD|{Cj^ZS zJM0(XJu9}h}3=TonPrw$;JfmBK8es)$9GVGu~ zPp^@R)1sO_!b_*)o-oS2Bh#0^R`K7bjnz zaD${w>^Lc8U;;L82&t5;sJq>;Q=8bIkCXfN$-5SEf|%f)?ES zHelNuW~0zgK`9r8esC?HKk*k+;zKQA@%ugY!2s!P&PmuuY*b7z!IQVo<|r3Qhf5wp zbRpmC74RE#fBA6UM;tJ`4+tIP3<>wQl#o{V{Lg|%zea^{LyH){JZ$VhU<3WF8*nQ? zc*FH*MsnayqfX&+R-ki~v0vg;YD1K={jODT2j?M#{($?ExSk}(;S04#RFuB(#z4(l zlHKqBU{~F|-#$W8+vn{JQ^|1qUl&-Q;KYH^1Vx!H)!{(;ihHUZ!{%S*XXyKty`Y)o z@(1OK+4khYp!`p;q*XnG9q5)O*oPr9>_VFRFR#7YPzRaHTx8sxSJcXSDw7iN7%J|& zKU^wGCw?`%2$Oppt%>FD+Sycc!;*#98QuPiO!DITX^nqF`b!jMrXMAU`65b8PUIrR zt&@}tmd=27mfFV$dZLwGh-h;w{+{LU-kXklutC(*fKPs5Di{}}nC*1fd7``N&gA8~ITzh#eBp>^Y@+AQ zyBgL&KLbnt`Tc3FtdONPP6CKSm}*_$c-G)dXY(+#1H7kQ^*wsLG2IZ2Dun~Z%3&+| zjK%f%9KES>k>!uNj=xJ3jd=lc=`3V6!bGMubuBcUBx#B~7e{H?U$w%o^Ne`nJ$?~E zCcN;_wyN(P22n3v)~!HP5F90ADsfG7sGeFX{%aj}0iL2)muVg4XG&b3W}z9BMq{Gq zI*M-u9T&perY(b}P&LN%Kv)h_l~xcIb7i!UIJ~RddX!mWc?fMjgcNMMXxlYf>BKqU zmxK8Pyc!;G4Q0)|O2Z;$ru@-A42LF|QXZMUrc8nLq1tCfZYhQcy-)q;bL7d=?8ie`Xh-Km zgcQEbZV;F5b@cL2Xu&=qyjb|c+$R23lBa=8<+ngrpr$@_Eu;-{U_5+SnB~kyxx`_D z@C)7)WXu+3wGY-#Y|d0`_Rm>?9#mrZ?LF3(CODH?Anqq$Uy3|{tB&KfwaVn^5Fvht`t8SCiQhS$NpSaH9EUT~ex3+zUzIu2+Y!EqJwwCnJ zUqF3Eh~+`ndsnz%ek%LZA~7XwHu{#4%4!o zUP>z(Oj4|7n3@{x2-xSS@34Qr`+_Zy5?X8WVmL?i&;+1aor1&_&`4iN?3G;Aie|7Y ztj@nKwIp)?s64f?Sh3Pbu6>H(QZGw%a777x>qOM`K5-~?K~r*!7)lTS{;L>a?S+g; zpYoCQDU_>(S>lW~f!-gMkh;+U#rD4-39y~MKA@*^duN4o2gMm@Ch5qGpLJ?k_Pv`? 
zDR?=5V3`bji=r^x&@tVUU%Ny#fS@E|&wD)RJ7!e+MCXJqmbln*JeXK;^|}T>zBfs0 zXE=C4D^YXWPP9N-rUg0iEk1hHJQg8j`$h^5o70W6XKXX~4-8VAgvFU=O&bfp$uGcrcyvBl~E2QsXF z6vm~bcXtzclg81zYyiKQRt;#r+70kxEb~PG9U!nmM9t1rEAdfT_|QF{+w7}A6GkSG zPOIU%34e~Ms?U`DoU}A-vl`!n47wV%V6w@O6`Mc!u)9fhX!^#QraxlX>cE*J(JlNk z#EB^J)TSo+U+U1MUl&%G0LC|>Oxk4&#@G~+2@$&~>!|w+y&B+hj7ID5{B1>TDFP8z z4o;$%4H_ug(nJd8s{#FpBk=kqXF3y_0r9Q$Ur8gRwMxTX_At-YH8waB_1t>;EU6D* z3s8nj(yQ}!TDhk=N~#<+H-696YyC)ajRXqQr_*Ab#Q;zaGm0pb%um!;IbT6WXNva7 zVwI}Ib~X$u$Ir;6CD!imm0i83_6>ApXRj^^P{+wHGUFinem+T=X$P;EsYn?&_Nyh!r3wyJTOc{ zrFwu+P((nBGy#C7-~K>`nz-%3Q5d~!?|Yjh%(xiPz8aBm7j(`JZ0kcx<+G^(00Bq= zp2li_Q|j5E`2$-7ZsWM^gR4zuu-=F@EUZlh0S(N*5aE9cU~2n#;h)bY3?>h#4mqdg zA{;OwF%|e~vOwlc6ydf)tbu1QxH@#QTv8Vm^_*yB!dI3FBsC_OaL+M7=u)li&+|Jj z3lD8NATj@Xil+>d@=6jad?P^7G}YAQoL@WZ=@cA26c{v}V|OS@vxH+i*|BYF$F^;Nk?)&+xxV| zivEK>;>wAB{`D=)O3P8?fwRzVk#M9#EooWpc6(27 zX@kkQ`1>R^%bz~>-;>ma9rFXy7_46!crGQIN#RRGMMHC&PXSKJ>E~rpJ~uL?EYCP5 zrMdF}DUu60adXG4RyTgk>yowt8CP6mcB)~oVbs)h>kH_$4H&lK;}B9&XO^>4LK(I6 z5>W=$%CjDyFJ4CPq%iTWKm36rC!}=W=1mKytBJT@|AKDnGS5KH-bBdz5ub<$S%`#31nRmcfP2Ta$6?XV^yHN2te)G7l<_;f~$3%<7;UAJ{ znZy_|mHWUqAjD@=Ij`(~ky}C5JkI<=oP4|b6gdN5k|&=;Vix2rY-bYdO7Qj_lyX@%KN_M+&1a8dUigGcsL|-^Oxt*NPns*-)AJ%H zDrN$czXS%Ao0UXNW)zn3sco}VWmfeEsB9espkIe2>u+txvC={_`fl5X>C6zmHeh

Os5Cjb>TU>Z=CPt%b*1y-UDsw6GjAzdPM>krWZ*q z(4}?jR-p$?n(5iP@G5EbAb+%zf^RtdQ@p%Pq{=`*d*!f}1Ze?~wy+9~WD4Q%#B0c| z<^1q~UIDGj2tHjP5>!CXDGyUDe2%L9^FUYsWqp9Pc(i3U=IVpeBSAGwN}Su)ka?w- z(ENcB6+z{-WOG4T*^+3DavEUT3u}GAsu;?Q?(d(dDJ`s2AQnOq zogvCApSq>ZGi^d`J!C{nc^m+4lso4Zklwy`ZdcrS8M|Y>sHxx49?Y-K?tqq<#f&0) zjDX|j@iRgF$R_|s;G}0^XaIaZr`NL-s9Ga!-ElsKkUM)@SSF_#C4JYNRS^Pf_c2{j z6%COi&ME@QH3yTL%mt<=Jw*+^AjP4xlPlQe&~T*EPC0R7+ff_3h9*oRG?8bql{gI+_AIVk@dNKzJq^`v%nSCII$(D4RQGnepbbk) zz`v^ryX9?CV+WwTa0^Xd8R+|6!X*kK?_pZ^P%1|SYqyx7X2CbS3PvbV(cIhCP`^4J zH+pUe%SGb*#m;sRZX&f!P>AuLJH|ol>J_xM8p0NnEKpJFmPieTUw?oA%s#d7Io7ls zM{2M`4lZ0)Hx!RiUVF<<^T4qc*mGr3+4vt*yXz~+GX|@<~048 zk0Zh#pWh87Il(>eGr=IsTArP`EVvc zwN%+*o59vaZ&>SJVC5Zkm)tokM^U+|N7eul2aiq0Y5qPL42RmK3_75n&~CDS9+QOu zYQq-|8FiK4!4v8A(YK~?SPj7XPnC@gl4lyw) z;%4P`^qYy*G@JJ2)b~`bZ;G$prj-+G{~fDOVjof^!hpx00o(7F0?f1Z2LkQmm+ue! z=@lvU~xuIhEV1kn!&L*1aG$e(al( zxPp(Zq1Tn~&Tr=)KV0kAbVZ$y8x0gyv=TP_1UMYgDB@1NPhQi#i8OmOhYz+4eR~M8 zGAULNMlcByuTXi*oJ@!Fvthp@kG9s?9vK{2E)-5v+AxP1c&Rdb@q0(OKy^ao0|2$l zFF}6g*WlUSgi5f?=EJ;n`Q)T18jggM4k7DB6zR$@%7ueF`7;O_ZAa|I@}A%ttv^6& z$_bz7OYZ?r3vI)3zKVd$@@3Df5uvM|=O911j<#C+XG0I2E+^PXX)qN++q-5{prv#- zR26*IA5TIh25Ob9$$+LIRhgz1qu>h)Ncly(Ogr{zA+er$g@0Jvw@a?Pi^HV_N=`&A zV`i4ZH!$zaK>5Az1kEOj;c1^%o>lumG98rR+fhx2AhB)C}{4vzqYAp3S^iAC)^d%r6}_7F5Big-waRn-*V55WSnf zN8I?<8)+z|a0c(Q-jBt?=@Pf1B>Fzn3TfPem64Lu%djIH=;q1_?DK1OQJYbG-;B4v z`6}o17DrpTN{f=Opj1+O^)9ZeqvXJLA7BvROV(ka0E35T>UCTPVHKcR#hIuEbUl z(0&tXV;?oy+m%>|$6YYb;vB_95@<1?OC{<924}tSPdP;+l(u^yRl6Qg`TElL2$blF zna+kSFDkXoYE9I}iAj+itSf>qeI(_wu!tJ77DhAy9uW^mgRmsGsx=cINv)t0SB`@V zx*@crA$fNjo=iB zbw6?>8NjVB*!eHlc6~ngQD!NXIgNqe1HqUp789Mf1a5K6XX7akiq6+>4kf1qkcFhD zsd4X+Y6(%Ld~OBr>w07;J~mYZx4nN%MMWRjY=np-+ru6^0HsM=K4XfJfn;=h72xAoOD zR4=UX!xL>eJf$*Bhl@J>UfGj)LM4z!o;)!mY77_6-qxb9W>nzKBQJ(>g&1Ky)EUD} z8mF30#6$k3$oI88Cyl2$1UAEDLVdiojoK-92&*HPlDl{Lu6edD;@>U{#3t3+LcBWu0W=TJhmr6RRdQ-3o{KB*m;BP!k<7s%-bx_v!0rhS`D~2l`qp zb$Ck8stz+IeM0#hNnpe6+yP9&amhoHofP)^zt_#_x@yeu+tqNkxX1*I)sC!O$yK0d z=X~V76&P5k^$nY$&R%K#S^zI#4wQw@h$a`;`Etb~&*+!9nNkkLO0!B*-*m9&q$JfP z1D&-O$k3ycWl{~!zNVPvNaSTN2;l^Dy`!J~sj_kgTKgurM2@s;!>ocnsEl<$-%lfqrH@)*0QVR*xPqYp93S3uq zK`!+>h4DoVEq2TUt~dY>i&q$UyFq$eN_3Ivbt5c~Dd*jHL(oF6^&Xw#%BS=n)nSVOcVD86Ye|&H!{Ihv1vDQW zXQxWEiL4n>vG>sl?V9wpG$sI0K0iKc1qZwkc|inc z9BLf+_~`f4SIG9BQwWOhho9*O{pahhH>F?a*!s1S|D5AlHfglnHg<*euRr*Y+nn=M zm*!u`5&1c5=497%N~)z2Sh>JVUT}aAU15>tYi$tikx;C^!>KgFI1PYjLqB8jcgz$< z+)fp9d-Uo1MSW}{BB{F1_6RI1H}~!?uwR?+p-}$bE{R{y-~$gOsab_yylc&y$OCuI zo^U<_o{GE?#Sx=6gS&EXJI1CW+3omEb9(X;+{O+taOq4H_`Z0mt zZOO_UN$6|3%KfF7e5hWeNZ5`~a+B(=-ZeZO3&x*MBbk6CtrH@l>qtX{e?4iiX|G&z zPAus##E$#(;lqO^K)MdnsM1^**sincz>(YA$oKG@W>2QMy(uyaF8ZA_vBXb`MxN#U zE|62=%Cvo>2&KBvPxsd;-CoE923Mm* zVJ;FT>4O{W8D@nt-k4F8uxC@VZf3f5HVakkO5OilmpYZ-ci_@>^Cqd-kQxRD;e=>L ziEEIUuY8_870@%$^6|8b_|c^6FJW5PKbAys`7+J9CE}W5f1bLBy?c&;aI-1Z9wMt9 z!FH;bbBu*`&{%3^pJzF5@UB6g2~WltyKy#O*W9ev4x>isLBb1_Ej`}C!7HxjRwUVt zvjDkcUFbKtsxYocUx@thZMWOsz#_u)i(at2GNy-{n)eHkx^DVJzv*-U;S*7`Q2aLq zAYD+QgdMp1QD3mr^inPx9qEG*#(NXU%m>J=XdO14;OT}82!kR_n}d>KP>rm84j_e~ ziWU9$mA8h^@GBVv+Q-doHbzG^JWd%MT?qTl^&-v9k7wwi>Ipa1U3VbCYV-*xu1|Up zwE?&b_+0a{_<1^M{{S~H9XWV5E1>rNi=^vq#w4e{y6GQ-^<_URV1lj?G=)@QhfO}bSbhXHjw zc5;~v#7^#%{;ZQzD=#t@VcHP%BL%`)@>u#Nyw++(T}c}9g73q;8|R=r?4>#5U{G3q zQ7<)G%c$jdn@MIheCff-D}lNPjwd_(T>+b|!D_-F3Q0UVKyVnV)_=TCTeXe22lcgn zO=-)Hm31+Hr*;q=ItiZ6l0L}}aWfq6k;~rXqJc&yuduMpNjc0+%omvm^`S9`?Lb|1 zPEI;D6_(AYCf%N6H!YWtG8aLQ2V_I~64pF=T5>g8`1xOkTgh+fOZL2G?R_OT2 z9U{P}7Ecgru}{y_`|GW)AN@z+3G#kDf)6l<`nOEbxv_%8*-Qwe!yvLwN?9GDdEqhY znn$k**pF*NGOpw9SIo``sSbCi8j|MrkkLfI97Z@%utlo_xISo2{*8b)4O)ymsmtis 
zl^x&T-W70po}WF}jVW^ya3d`f$NjfOIe*O(js5b;1CA82>qZq(h@|bHcuP zyEdV&R;6uVIQb(ZNLKj4r{oF91~$TEUBm!9WiSE3=zkq4O9d%0qQD z3XW@yEM%=k%GQM+VZjp2tg}&U39_55-j2KEbo?1~|2-egD>)fWiCUYE zAeQ{Y2n^OOBI1#TMzSQ5u;x{tU6pdeAN1Ju$9eDu=up5mQpHIi>@q6MdPH$j`ldW! z-wOe75CFPrh0^qCHd}2Qb?>lHqeJ3VAWxahi#Wl#X;Ao$7Fek@9y=Slk))Yo6RRGl z+z8Z*tnOYndnskexE^%xcZPQjmxL|0d&B^lN@lfNFnAxf(3wO%;=gYr8u?5xV2tKTS>UJ*D-8e_5 z16ojex(GsAA1_GxO{x4A-^`DWq9>TW0D69IWgVWH*X02WhqRJCoB4RgIN$(#s%$+lKyuQeH45J#hUeN;=klFro+w! z)=;OH?tXjNZB+i;vOCKVJ+Zb|t|1ge)&{4N+%fTeRmBna9*_=87L=hio6|-IbMv(Y zBu+1@2z$LM=iS%|^J5(f0e9vdUUO5u z0qs^CCKc-Ca~Om_r*UYs+jG~&9+nbn_2zDZ2rtxJF^FQ4rN;LmyGXt&o*-;rX3L1U zMlS;Zju0SWx#;JA*0>1H&Omy+=Yc6rxIcvh^dgxv0FPR+tEK1h@HDrZaWI1;G;tL??*{uL8U$%@g6H2%$Q~YqP-p`mC_l zdS!n2eZy4YJ73Pf-O+%AfH^W2KGFuTkzG--8$sYAsG&>}0tR5x&IlQe{SL(9>04{S z!p5Pu!SIVA7v?ZbLEe6Jagz6+aPllTpgt3p1v(?{rZ0wO5(eufP&7gCbfIab$-2nl zLIpoAE@lez+mrmr!nuKX_dKGdGN8%FM#O2|pz4?oxtjHx@9s3fL#U{hg+f5f$ytmw z!da2s6itjswI_{u7Cik|%n4aI_Q=W?H&+O8@Ev9;xZHK`53*Ik8 zZ(vp9t#j}}`nm92g!Jb#3?D72YCfkdV1^IR1Aa@;?}>>QrLTtGmJ1@L;2o_Tap`qE zplTdC?zg$e8ntvq1Hs@13X{T3k@Yj>Ok#zaz$eV@x?KqD0p@Eo+7Ly&p?-kl#;tl` z5F*kZQs>9MS9 z#wwmBBQ{0?`jkiLlk*`HG1WQT>G$LfuGd1u6|d`dC|#g>Juq*75c%=>K99J}UCcE; zKSjDc3OvEe*FZkkGxx6Otj59KDbXjsyRdKC7I>;7axtcNmE$e}u7v=i{X8;TvCc1< zz3-7ny8mu{Pa~kYG@|@P!fWBFN4-nriGmt26<+UPmO#A`#PJ!hE2Q&SKWw1ferJH$ zRf8oUQz+cBpWIqRo5Pj`J$B@Yp@o@EJd$-FVr+mXAwHoNdV~dyXOAp(5{P{fhh~AY z1A9}bORK3dxo=M}QnITfXK$b{|V`+s+xM_95Ic z@4lIoh+ooRLTtf%KygtSB~=z+#QPk{q|0?K)E3g0M@i_LYw|e!;4qp$)qM4F1WGE(D zJZf%&%tXts0c=(5PHC^LadUx?vF_DFZBQ9#bx!3nX6-y1Iqdqtu4D~yj?!KRztfYX zf4s5eN^NDP;PI?qIEnbYUulqy(cJdTEX>48?yWRO{V#XakF^L7N;m9VIE-q#B$Jln zC^FZQV!;x6mvTK<$#C!16%5F7u(;2er_%2X;nxf`SV^@qFvkR}?l;RyaNwY#=LOSU z3tt(^BW87?za{btSbJA!xOSJ4&R2hsxlseU@9eI}+re2>yr(e9^xTTu!om8`x)M36 z55=WZ2ZSOAi=+tBlZb6ijb7k^4@W+uH%K3`LP?C|bta#LvHKP2@gklOb8cv1P_bg| zII=`Tr@c`dbDmP8!~PAd3flc`50rTjx_NvtXLZeTd{^@ywyyG_hEH?SHu$!#1umS+h6|<@%_0lc`R}~p^s_y9-99-2 zuXMIjKR%yGoE=;~XnrI{bVwNDAeSVq6+fLq9j*8cCk~7cU9G)9Uea_>d1PrL-4H#4 zeG_v@ks4(zu-md|El=zBZ6-o6sixIJoH^5>a6>-XGA)9Mi=u8dfFOz!j2qdx!8+XO z4r%u^nDfB~e~cFP<_KeDu_Q=)jH!`1euI3GqZZBZl<2fH8YMY_=MEr2gaJ>4mir95 z;cWVeO;!qMsoN^PUwhX1;ryUU@`4Ye?!ls3oSoBp=cDz#6BnuD<%-18vPenA%SeJk z!5*=WJAffDnD4>Z?@n%e2 z^;Iq$6g6=Re2R6^Ho6Jxi+spi%`vBb46EpHi?`ek0W=@N-s*yPk}y338r{s3Gn$xx zG+va33StNl+3T7{eSCv%8z)L{TNLTdqM_9AS0KHwncVPWLdVfD#H$j=dwaKR_W@zpz>Y~??uawJfk#H4` zH%-Lr{3`mt2VQ!77%}^omA^5Xr01N6I&#hHbvAw$Urpz`%<(VG9Mv^|$s@wC}qPmrTPw(-f7O0TLFHn>Kc`VBvmH{h}@h~;vWtL8^5FqjeQ@n^%2 z4PInQd#PABX;YJXFEujDxx|8RwC6k!>lE&$_&%oia}C@6_3122}rr3C@r@<8e> zTP8sTw+M7Nt^+`%HCY}veE@~vtm05Tmm#YxA~4>@a+lvypN=FW8?17)h^;9)9MUwb znWyZDVJT32`l9)MIl+1!yBqY^hRPeK4-1FPW=bv?qaYt0 zsQt_nz^26ux+yy+iPR6Yh|lWU&^2o9Ut~ZO_F^8E$l0aJ!__y^jfY8Yz{+&?m_e%T zl=rBkI78urm)c25;*& z_nHx&of3ZmAGa=nWxSqR0Hwm^^f&S=7#1%_OyuC&f`=x?5UJ?*zfQNpW#ricjfWPb(yTLa|p5viJ8sT)ZSbizy+1A(yK6;@us$Zec8RL~X8w zj8%XhIC0A1-R*l3D^tZIBzb^#2aDqj2UT%neI0Juj-Qr85<1XO^t$wv=>O*NMSFdu>LYi3S#gxAa! zoesI5qJ6dmXVO23439KcYoX7Pm)UWb6|9@10`PQRrB=yh;2;&2L&^_86R^O3iv#r* z03yi<)&bJmG5kvvvMMb!o7(kP_8K$6CIb+|cVsMQ7juW#L17!rexpyzFTls4wapf< z{?JubPRpX^j#8=Rs12Ny%`4zPU?F7ZHhPILBymvB*cch5#3a@ErhcxL=rr8$VfUOY zW@i11@OidC3GHD66qRZuWC@iCH~yJVblw7BJ=Z&;StEh<4xE$6$CbxFov34d#`d!4 z+Xye`Z4eOeBvK4JD_uFQt0Tj2q6V|bM1#FBDuI;+a0!omiJD$!scM%KD#TtN%C$;v zntxPgE_HXHX28!&;lKNvFhI>_*$f|0>Y5AGoqK|%6Q7(gw{OjC{SY%mgnXY|rm$L> zu#%8|g$o`OOqth8B0fo}lTY4M{BbJER6gWoh}@6hKvWJgRl}=xZz(Wiy&LS-8GZ&d zT$Z8cENwEvvdqS;J}%U4z)M`@NvQ!j!WoI4>Cic=Ig<@X{2s#ZL_vmM<*IoquV>9? 
zODSA+D#PH@HXz*ltEV2ooJI?uQ^KG}w*{wnn>C9-%+thAH-_BE8R<|d-F8bewm;CT zOk$$eE(dr{#-w8FuG+o2iY5i!L~Qm>7e1utmquydok5h zqR99Ae6R&HvAwK?{4r`jhN$=+?C6#0vDx_udqeaiTM3pKIh*=ye}TtT~_%1vQbC5Q>A5 zuADy*4Id5-cPonu6oh+Uj?yTa7TzO*dG`-};2%Fg(jV~QpoMa@?AYnAKu)CNTW4~u z85#31+GzHq-!Vnr89fd{n_o`>X%yo2^KO`HZX!1Jw%{a}PL_;XP!ai(=fu+qi5OP)(?c=DXl4xo_9@XdI!LNIJg>82!PA z1!uL8Icvatsa{cfm^Ln`(lATQ^GNBvhU3MEv|6%DFMd#=^`;JiJQtD-vGT4IA^QpRr zR~WW#{Q=0xJdC$mbjP2H+9RdNwn0TgYGacZ-gVS0ll7%5F=X;t??ffq@?5Fg-y+}- zd$_3G*Fv?@`51#{T0DwM9K%NJzjDsc#q}n~^uM1Ajkgn}4Y(jABE!9k{^@LqhaApf zF0#R#MCb@RA^~t(MxTY@e|1*ep1;m5NDMSh?CM$835r#^-=c zOHliOUyU@H4W~32YwbYV9C5L54NGX@gZT+^0h_&#Di=(Nk~rOIjkDp0N&PFGbR~{$ zFef1b9ooeZa}okAV(~OkS98mZyVxlyPUr@{dqafAwF(ymEMhTiD(u=b7qU!gRWS-18|` z&#dI2*3#}JWd*>v(A8$GzBQ4i>U2Ok?+$cH6+*I)_34jQ8=+T1!s=#;C*zaoGrJNp z?>{tGn$_**?%YcIe;s9It7rz7A2r{FOO!%ZwHPHX8;J7Ju}qk)vPS^mApl#R`g)cL zKMn^49x*C~QJ5hiTS;5FRvBl(V2t z@9!K1b_zoJXLGc^>`uR_s>Fvd`G{UzG*SutB}eBunqj5RzJzU1Hm>Z{bX9wV!Po9P zp~;KiO3L1{SW7B(@OIzjL54efrWL^JZ8;`<;C~Ue1DLaH;d5=AZaY_W{OSf${*7Sb zeE55&!YkOa#*g*IMWE*`5SBtm_A|*;&1a@9&2&D0>;)m(KZr~_@$Og5j!RXF_SH1Wr&B2oo?FL>(NFZ zrtQccSNTsI%mQ$}nqwk;mpB4Ud!MP7Nui$|vB_Br7nzoKE%bnYPW>6zYRE8u5m&&U zvGyP0c51r;R?hY&6BZ^!VVor@hM0P~#DGl}Q27t&j+>Ow@D-&ReY-+ki_e?E>{a97 z$#BP9lAx3x_0l!CLGa17Jh6|Xs&L~$rX9;piGqhXs8){1ADI+LuNvKEXC&FEZ==0uw1{nl4yw&k+%q-0!= zL3e%xp2AY;R^wfBJM8GdWEo}mZ=I$FqUGG)TaPY5gLdg1BR3b{?$YwhZ2)*>j}_qK z>ygSPlOu;r869p> zoTR*I+>(w5CY$@aF}1kJ&*UmPXcw8N9!J_+X#dqHrWYE|G4XbT86A2d%IM4WZZ+Da zCt_gN9=bEb|4dzJKgBP4NAK1Xc#Dx90PE(XbEC4Qdae3fh;Lnoyq;w9_Hg)EqOK<+ ziS3Ub3l|+yRLCtT`84z8JLPX@3Hb6id9$zo1p-dok8(9q0_J8)ccMmII!N+ zxN#4BStB{)9HGRMFmFS3_#h`N7j0q zNO0K_YjCD!ubh=FX&T%99Ia`0d5UE9ZJ`>xG zG{+m}0p!CM4=yWIEyA^^gEMG>a|I*fj|F;L@|Bh84_Ln^v~fHL)ZP?`p0*(}0U0WC~l%{k#X!obXW-*SXV85w?sCsJzZ^9VU@AkDv3AZ;+L~$PsQw{;E4S z?omVtbSLIz$jQ4tILBHE2b<6LJy zs&>1Qe35vAjalo@d}W%e_D`FE>OvqB>|N6x7L*jd2%7CCofyC(>11dL-1%n=L{NRM zPss6WRAk2GBJ5dQejCXfSp1?O9ToW2xlf=&S;afrI;h08i+GMG^%m$Y%Q!onhNBCb z-zc#%)#>#@gj#com=_P6-(EW}EQ|&i9qFj&q-^YL=$k);QT{pdW4WY^-xge0ty3{( zk@K?4B~q|+RlL>?N`77__f<;RZ?@N6wmJQI?KVDxX{dK3#u z)5RZbLz9;ubBn8~Lz`Oi&tZkmZ#98nIQT_z^ezGbEq?tYB>~CqDukD(M2$-aBTeM? zS88rx{fy@DO7VSlWeiSF{uIs|CqOQPYt^BNeI*omr%2#fS%3Y$fw%3Fk%f?frM_Y? zs5bEDeND=TB6sMc(G?4+=(PhsTcdU~>+Diiva;5A%fJFp38TuceqE+?E2t!;av?lR zSvI%>%yV&(A=6}OREX~bGu-w<-{0bbDQq#;IDJdhctOICW(#6Pf42Slz*f8@F9O*= z+iut9ZzHHU44$79yom)w@=^RZ=!b;|=&?v(_B@!HdBJ()==$H&6Tm;&{sr3eaJEC@ zspUOm)NeO1ytad!KWtK=6xw_p$rr4&EVE^C&0rG*V@qw88w|}5rNTI58>VWvS;3}9 zjILkv+n`tkqvQN`X$_pc0uRg1Sh`I}5=T)X%GI^9ua1Cep2=vQ!%DORV}!N(a&vB6 zN7K>+EpNOG8)&dL3oaqb9Uv!W0ki(IRAc0aNN&B7*o<0cT|n=hL?H!r+Zb@WdQ!Ew z#=zynV{gGhGAn=ito`UdE#<+nYmau*)^bg$1&k!e`N?eG7p?(iy4dbHQ#Ob@pC3u} z?9C{LlTy>Z83%61vHsOSoHs_OCtR2cm0WwX^J)-Ezh15AX^%P@<`H;~=7teZVdQe? 
zx~JvDjeO|Fep-ZVviL+XD_)~J?IiS7c!p=-B0WKhF7(W^ew|437Yc9^dRU-%d?|iO zNxa*~&H-^v>&Ii}sG-$yQ{UQT(ZF_DKZ7Od=7$B{7^JsGW;763~l5};2w~xiZ z`kyo=g4uLsO^c;wVKX^|W z2}zg4BL3;_VDCkF$BoyK6GW&J5HWQo=H~a04FfP6)50e(WfxHH)G>bCDIX>%tNO-M zO-jO3&zy?xsOV|-hc_QDhx;J2huqVtT^plRL6VAwa~W574T)M1{h5?xb;D^*>gAk= z2@~$Qhl($l_cW?}5Di8J-L!XJ&%5c&V3iuZM*)9qk|$?B^;`tRI@w>Ikj#J4UV!=o zn_VG)EFOP6yWMX*W08(!!uw14#&`}3DB|TT7g)hu8us-H6uOr}O00uE8KUwP5-yS> zhYlNCp%<&O&0fU!ubx&8hG!Tpn4J9G;u+YHgfLMf-cwG0ZbU}crXltWm|nyQ^|7pZ zC%Mo1@&3q&E(9P>`Sd3`PZLnHx4?<9lFD02JMt`n)E~%#G`>v@URy~Lk3ZO@@f&Nu z_;TS-$N3Lmfbpe04`o9p{k?!fsDATVf^XRnh4Cl5%x{zVV~r39e`~FuV9B`6p@4q= ze)jHIzj}-P^(2(K_<$XUq`t7?C|hlFMJZ3yq)Ge1IUPL$dQpGys8Q|tGIp}M|2Mh~ z_jY50%M9kw*E5?j99$0cktEXrUIwFJ9+wV{gvk|wr@v~v0Xv;kLg%g1sd7ww?P(EP zjwgKlabK*0Gx~iK{DsOcnC*$?Ym;9{>4KK4Y`%uXR5k*@$bHoHG>FR=z!E%^R+>1k znpML-7)t_v81bUq(WlN*sxnj=Th|z$MG5PauGotjWgeXOgIm*cq&MZ;3=h+K$~0?y zNE*5(7JR(P8!z<4So(P#l?d6=a9du+0SMDD=VwpTwkM(PRl1jI2{#;W_*xn!-b+(C zbvAMAW#YPDZU2m;Y>1>%mHH`yr>P*M3%nrRyyPrEN^?X9z!fvvW^+ait4EeiHk%2m z4COgIK#F`T7O-|r1gCP6J23{qEi~BOjjkf&D&^@(cZfpv3Sb$C!xl%^L_Xpg;qu`{ z?^|j&L2NEAcHg*~bwGn-yzif!sUFXEx@+B81qz^wwC~k5AK(Ph z3}L>rSYWmZUyQuX(e=eGUb>@CLm!fXPuCLPoVNHOQlX({;P5RLX^1ryr4oXESIpv5 zvk>DmeDeeaN*&1PYAdgp0Vz|n)BD$=)@szkZ#JC@}IS7O|$NN+y zKsSC=y6FC5NWjf>VU)qpqA7V8@gn*W1}iv4Oc&vaMH_kO(jIn#cE3Lu22L1WXg%AM zCS{)2S5!aqd&k98=6^Cyx-c>esvW&E`cEu{YcS(uZ&_{^2a#z ziac-0Mr=3&W|Z2+QHOz0w8KC|#7&)WVymsD-~B9+G=QE{X(}i0Jn?y^696%q<~`^C=h#zNlz zu(M$s1E}9SIIf0mA;0LTt&C|nUIZ-TpcL&bi>NeK_BKziorr!>D$;x^lqHjYt*&sZ zd~&0HHhGcicYKxc;71~GTcx92&p4o>HYAY)+pW)xpu}WKs&DW|%gU;tmo2x1A}wjI zxXj&P3$bwEYk{CK;m12?Hl4?p(f5t^ji~mMn=FJVHF2P}ANTYXt-n|&GBJXRP65_k z$y;kx;+t)IGoHq^2XrFrKCtRc3KUWt2DBt^{nLO;LD%YDhcM%zj-#~vT38=kD~dLKT&uB~j23&}xu zI}F^RSN!ztjDm(AP2L*S`X6gl&((ZcfQ#jlfS?7y@Y&w&>T6xI?e4_#Y9GWNG!C;?HCw-MrvEz9sS`5j2E`2E`v(U zmL8+fG)zb1YFx=OpOW>6WqXI7eKshNl1Cb>gVHY}%ygj7PtNLkz z{bMctW3-R8SevFu3Ut_wEYMcSBNm~-h?3^yvA=|V2e-Gw)U#MBT6e{b8|Cq}HM8;Y zV~L1+muXsvQZGzb2F?@UVK6TyvL}7@%q@@Q6VPFpTV{&G{ zm>aVM8iO(DUYSyKOMuF3@a8_RG#izp=%K+n7Uustsv>C!JDAt&ciq#xa9wh1)i zHHAuCtzR{>ef|^;llF1it%oLs&NQpGnI?-ERZctF5B|%X5qaW8T&#|mQ7nUpHhi-F z{bw@gPv`%SC-dI;_x$utDC=*7=$0TaLpL1cBwNF<(6c1I)R7Lk|3FZ`+Tth{g4pybo&&t8>F7 zo$llT)uZ2U?rw;jYnoWX;f4|#A%_BLn_Us?i5`0Io+AH-MwJ6-O9C-%IM@)4hen~5 zGqgp^^p!PS$U5Br4R~;>EX$gs!>79)4CY&Fcj28qxzvNa(u)N?%P!+0A7kQWv^S() zd*$RII+?fY^proRYjCGA)vKxnu*|AE-a4Fm-bsjif!m$GWqHl&$P^Vw1KjMr`NEj6 zL$bhIA__@`QO=A*!bUhFX|QLQT`PSPgL(bf4=h5-OqUi@;ZQ;fCn`yJl#`aY&aXxEC>%^_6NY4jzVcJ(-k^d*|~ocJN)i_8B6kC zfy`7RTc+sVYOLqHANsd~X^<84!(3fU>d+gxNysQR>(E@;yOV399Vt-ABKZH8mT5B}|?AG*lwX z74QTZ&^iD;BnOyhIiEak!4AHCqz4OI==oPAjm#7N$R(ln4kG}HQM7iHLTUxR!yP*O zsl%c%(+Jh2`8_g6uKCfvxl4+20E^&f_y%!HE1#{*cy;3jV@YpRDO6<$=Adqr+DesH?g&jK=y zs)%;asCP0REjN-Ymp&-Q@hU1)f(U=rRT*Ubo0k!}%qH%n5l5Dv03E2x>N3uy{;oV@ zOEz+jTLkt_aQ=<-AS3jF6PV%giCM;e__w#6B~uyRU#pG-_I+LfYpN1*u8S;7L^HXY z3BZVR?}VDH+JOC=nFVygpKkb{I4C_=OJt_HY=n>w;UiuTsx2@TT^Cs{*bu9>RFJP% z?n&Bd4IAY)awTC+A3X~z5AD~xI3L*W_@Os95RpB=_AfzlFJ_v;+i0XO)B;Nw965ss zQhbD`IAG^*xu*=hG)zzp$WJ-ZKyi9?z)0+#oty$2*E#lOyW}@4RVNOy6q}7I!x@|P z$fgWMU$3BxFB7)WYv(seDfCDIm_aK^D{#*}wbyerHKP2FsdL~CEZUZJY}>YNJL%ZAZQHhO+qP}n z?670Job$%L&{WP|{y5SttG#QH*tVvwJhxw>A; zR>xt6lH?uTV?R$rLKI6YScdtIsb6jdr5dx&Oo?5nP#d|#1=)yQt$7FO{C@sY;Tl9n zt!4Rz88&(DxvRzgQ1SxaHn;AIM)*9S^{j$z9t)2e8V34W)wDIX zOteS0hA>_{eG6Bqx?}qBm$kG8yV&#bHz!~8+HHZW4#rS}CF-Wty(L`pEB0Ct=aaHV z4KEBl5CqIBOqq721ZG7z*&AMQ@IqZe+Nlexr7n#?Gj=vX_VkNUR+I!MXWC}hnPnM9 zhKLEeHuyG8!8d7JKxn6Et-*39+^cFG;ZH`5<$osk4>a-RV`dHs%TN+I?RKE;no5_v zO4Af+ZX?>6p{aR`Qf>1HK2Tj`FYd-0AMM2O 
zm|GV|FtF6k4()n?_bbe9Yt5bl?f)Ln3&7mKUvHVn-|6GC>_}|P!ZBQa8g&__s?kfl zpY}Hpzvna5L7VD2KQ9Zyfr^o`Cb?V(L`0|j8b~f%t70qxic^G(iXp(3fR~Q`hjbn< zUmecJAsH-$UgT(FqkrT?qSl+X6>jAlv(KHuV&}?|!r=++1aMK@DY+6pF)v?v_mYqO zYH)9%*}J>UcYn`dv1L3@(%3Vfcl>BY{k_Tve9x2El;YD~a~KtXP2QS{LX$KsV|@0I-EM*aMdbbZ z%cA~=hX>04v*q7lBif%89!9=uPOfDJuS(oad(g3eI|kr_juzaC&%3{y2Q~wgpLlGV z@rekGB$TSU##*LIS|UE*|IGfk-Vsq4Sb8AvUnyO4)J1cZx3+(d)sk~l$wnb~4xuKV z7iGm0)^mp|Mwt8rtY=+*hxGOl+(9ohe!ZE;>(PkXM*tSv0)sL!2pR*c|P$8r!cmM_R;igeRgZNu~jh>Dwk|HM+6?@fd6 zG(4~%T~|Lrp`3%2ue=fz?<5}-XHcQl2^@?>NYoJ+D<1mq)>_iI;`cQJMI(nEjF6pePRiNx$lwat!t*s$j3H?;hj=OdUd`zE1}zd6@JrG4xNul!ms z@l`xu1v2xTS}>oQrSNx|P`Zy_8+PCgv1{e%x@~WpF-TdPn4)39m?YM2d_@O1a?$io5sgUQ^>EO zs(a<01r}B&oks&394T$cKEVc4y(zi;m&c&j<4+gsjS}t7hw! z)es9iFfwPA2Kwc019R?J7h}?D$}Jeai{@M{s=*Dl_i_FElo6>~GGOpX2t>s7>Ar}1Dx)yA?=e@-w%x?G zTio(#LWI6~j`tTVig&KUORb=I5X{Mx3}GdwJzaOYHG+YBSta9Tf?DF&G;2sFK~4K1 zYHEBLK5Ve5M1e-RhgKYm{P~+2*WS-|Q}FGn0D7_U^yW;L)S9krHk(2YumE7Qa*_QuoTauvpD0QU0IQwYKLdzjokJOmk4)m@UKpr+#3phV4T#q|7IGOgm|UCpe=n3 zRhtsC@;U3&mU%~&mdqEW#U`2{h_PuSwgCMtY0lf&ZG1)l zu}M_{-(J?bpYx0y(TRH4|Nh$O`CmvK|Ia?a;Ou~(@21Es3SFwOC>ZinAa<$lqxn{! z)K_`W&RtE$Qw@SeZsTBLU*B8i&K>9y-{!`?wHc={>;RY;zwF=9Uc=;2%b(+ww(Ex2 z$NYn9mnJn4_n;8Zhsj;B_bAnW{)qIaHQiEW7=py+qk!a3PPJ_>QJM@v2r&I%Z`99# z5F@#TaT$-jt7~oGX6RW*j#uH^YzextGe2^5Lri@F46%(ydRV-K7)%KaMV+Bc?p_P7ygD>YFf7Tr@DV?Wu?OM75us*6Kec&`h-y1mJx8r8+5$)f^#Q~Pj)G%>ld7C0rwExg_$h=6xWQ8 z1^oeS6OBl=e`CE*bZ$T53}=D`g11$K@nXC%gLJQhUp;$Awg0MC2vkBO!{RooJnrM9 zbATQzUBQJM=*|0jvgYp@4j4OE+%7xJTsIZ2$G3*3tb(naVE|()RGMLNJsWt#cQ2s( zN=RY|COd>deA+9a79p!{r8A(sWxd-VIPp!9rvfI%8G|59d9HV!0pspJJm;~n7I3_} zGT{6cl+@3#`1kb!I0&0Q*b57u-$X!&F%-^Le{eAapQK0-78wUSu*q%xY*XnS;2a_JJCZ8@FGY>_ z#~LDspb2_x2zln0l)v(U{)mkTBNdMs3dG@vTsX%HjPBNPRhFynq)?F$_mF2?=p8jf zJYnIvn0B4&GqT_s%BU35kLsGB6y(Ki)t?5r^VKkPfbj$zQXqsb$39|r*s8b7^0*+$S|3gvQV&8>70?K};D zD>3$Y6YJaj;tB0@me?(r6^FAYTJWJrvVd@G4wlSz`(;bzJvTb}jzBVt_Y07hwnQ8r zRwd*g2MiN>_=E<7%9SvC2pI%56JDfl!@ZVnM#Hjy=S5nqlM4e?!bxCdshQ=E1qSwi|XCUtFL&iKYDFm9xpyKh_qjUb8PdT2iv z=7k?P4V3-%A`9RM#`4)zeYJX#kx`bnjYK=gs)o@rRQ$R>Sr$^f*O>Bdpjy#9FyoE3 ztcYL%0cWupM0%ecm#>X`N+=p`DIwKQTztJ*zd5O*A=ci~)OWhVy*@uyp#W5;;l{n= z1RGm#i^Nr*FL;>lG*gEW(cM8+9q@{h>g~%LF z^i!BR1t9b$>J;Z1T6qp{zV@mpu!*0W?$qZ$hWsWE*_stVa&`A`P}apTE|xv|ox)Z{ zcmsn;m9+d^di1uvj1ip~-{z!xkH8nWah4vd{c0C}Z!3|Zv<=n%d9GLip&`@pigL^u z%8eQ#RBhl8u*l)7X9*Uqr(;fUGlw5qMbLHaw(X0~X7v$O?Qz@D{1v0H%la_M4l~0l zCmRWSKtl2RmvMcJmZhF9y5>}XF`??SqhZ2=?({}?S9yxatv1>wEmFD&{E#C#ls`uT z1(tCp6a{SrIYD_b(QQ7t^e|e1h!zpbTHT4y`!QWuM_#=XOLf7%ysN4s2au-NXa8c% z1C{})c{ik!9gVjj3w2*}bsR7!a%?Q5HRQ**oI)M*+a<45 zW(RxP&ensPx+Fu&B1;OL6$QBTi_PK0(zRyQQM;l-B#BR728^7uzZ+b^8?yc(->i|r zw2kS;idOyVveF`B~!X%*v z*6I*W*QooEwJl4L!-a<1p_?^TCQp+R$XyCKxu?i0N-N{o&h zcCmGD%2jC`)8=9chcqP6r{(Dv8#V(EID^tceS#?>ijBCmQF9+?ndd0_^%WKd!%IOOh?H*F7(J)B55P$5vq1^}n*jiY#ry$9 z^kUo&C!-}pT;rVD&P>N=uoTxL1%BshG1~*gm$Hn)xrC>Pfo3HfzHiUd8(f-Qj1)yO zJ|PE)V}7))B99?4A{_R}kKAXKgt{P?JKH%9MwlRAYM|U$3c4nZTnQSaf`BF}0yg^U zDD2}Hu&fN`64yBKa%VkEOamZFz*8u&0&gfmn)!Zn+|w?ucr$b%#T*!`KtI{eB7-nz zeYCL(I4%`YCpu8!UkbjoW`0$A4&zA)I`LiTg49E1UDf??Q?7orVQ8m#$|;Ff3E~e* zIDgGzX>TS)53*lq%vboA8#wJLKqldYV!tMxp&gUWwy;yPu-i90;c zZH7J|R&opE2UIz9lu9%b$i^9#z z-}H^*hiXi&7MN?XE6+%Y2`iZ)U~GF(;euUYvNWCk94!2%oJLN*OoSGvJ)SUHOr_5@ z?@s(p!G8*qHQu&xDG89t#7@35q>WNJq%}qc+%Q%-u#>@n7#peI>Vx`L((-L|Mh)%x zjr;E!s~Z!ICoSb%)N`>?zK-5kV2LVO;Mmz0noUUCoOak}(KwOghfZmzbe}`lBA=fq zpE^@*=;MxEI&y*`x5j3Y1$S7^wwM7yEJmT8x1k=%^ba>RDP_kI7EI8gn{dwJp{4wP zpN-Z3B~(ofIz#xuTWKe)4dYRBVP@jp+{FyG%H{jI9ATfDwDpAiC~li+&y@j>xKf;da48;CASw~FYN4=FEHZKQZKKL 
z(3y|ZDAv~T>)VTI#9bI*LTsA>t?1Z&JD*wTNL>B#r2_$te}F41V73Z|NQRV8@Z+9b zxy|X~%m1S`0oyn6Q~W395a;)e2ljt(P@A*wTB&iSixPF&HtzoLqnh1GDKC$Vik!=+ z7meCLxQXIWEOM41LUWyd`P?n_yf?>X&^yLvYGd|(*YCR~EpysDfJoTpQpmN`#Zl|7 zr#t@KB2%uh7j9k21gh7K%#xRTz`Wz!BAqgu*jDw@T&iR;8=$>U!%khSrcf?QODEE8 z4{G0=_=mz!pn|myWx^l^A&fAld0s) z3-`s&Gc%SjwWQFhrpX+6ay(&tC7Wde37DYfKs}35djkMX9}r4YBp1w~`tfDAl8%Sf zqt|+4dFlBzAfZHBweO?}G~hVUmVeawPrCRD|k z(BeImRw6dCs0kHLnjy}Y&)h6&VH&bg5|G$w{;iSU(FqM0p_gOOyU9dyowDa*G-ML4#!tp(OX}+?5yzKYLgr+U0A!3*bkrqg14048Bf!Fu(Q0Z|N^b`! zW!(DJl5P#1oi#v;LWgyVgfFapUc-BNN$Chmx&4=7Z@9Kv>F>W90R#oh)@}ONWE8+Q z1~YDn(s^~N{52BTR8VRqN0}PMAWajYZVexI^U|9*>S_qpD*KrNIgBPgGvKksZ=*J{ zGjj;_w3@=DZBq#n-P2)WmW#M6sWPIz)PER(6}b;Q)=J={x(cneF>kFujVmP6KsmP( zUR}^LzuE=N`G2TCd(9pnY$#EDDH7K*3dO2^l13d@$HxL~t=>>nXPgI%s0_;ay;}l; zvI85+D+ABT0BIpT@q|eiU{Qr|@;p;%#;yHD(>KEYI3ky3dQx*_yniFKS8D%Bn)2xC zSk-KIQ9$)`n(wTQ%#SW*TgF7sf)*U0iXx@sU-Ay=13~k!R~z?Cd9ZgI#8FGQk&d^{ zt+)@?dZ}swcm=QGCN0q3oj?sNMsO5bTsS6MuHP#Jx&{_b^BRi>-#=N3^o9_AwNG6OYnAE@!# zh`~at51U9GlKr3uu-)gMh;?^?88eduEdmQR*pnx;fCXHAHsQ>#IB^cQcKR7^Q4{J6 z92_q61W(h+zSA%``k)d+>mTbjyh~#ZVKE%e9i`dm`PeRBcx!jjZh$KY341RR{_>6l z1cCbEG=X*nMJ3&)utLb7)m9-Gs~QFu#{aGn)2=RR&c8@!huSz%`HrCu{k~)sL z6JkwfvQ2$7KQ=UqG^~Snm&vd}QBXF`S6n)3vT29S;nAd1p zM>sI@tkm9-desWCa@}8#C>H#ce1S-)h{H4Q0AG2AaE~0;RXtRl ziKC?0d{48mPRXBMnLAHXX=gKK*2uY(Y>Je8Liw8CwR{kN^GDIAO^#G+p7;_rXy?YE zvt5|yAPwhxB~OcG7XsXYqglWNNNT0Ay=8C4`lWzR9AuT;9@B$Io+~j%I)01}#FvXH zsCLG@y^R;Cx}6;R409q*uUkZg1ZbYE$)s4{wkj;fgH6rd`3u?sp_)IA&^yT;kxJ_e z!}PLbytd6Neg%qr$5^4YpI=_rO$X^_B4TC5jd-(V-F%EV;7J9QS>xz68}s0IrYHO_ z|7ON}cgiKVbU_fA4$4N+UST4(>)iC!gCG%p(*DcB@Zoq9Rs4+U4NS$x9CrN?1nZb? zvLGNII|qN{kB!HbfV;pVo>K>dI>UL9QD~rFET=WNXzV!X4B1#01;=n^Muf$a^S_oy zGe9gmW>fp&4d4|7_-_Gj%3mROgbHM$nL z4KdLgMGvp+VKzQHoC-2$`cv{TOJ3&4IR}@`f^VpYfGx3^j~mRt_)r2X#4p~OPf6w8 zv)&}M-Qy#g(cOVZ44gyg2hKb}sOv=j9;Xix!wvIIgalT{I_>~T=qk&bDmlfMlk0-f zjZtidCIy!!v{{-7LNiqJqWA8b_X*DnOL)Hu1N(pQH)vdNo2^p&lWPA|jvB+Ne>PWy6OgQP8ZU5- zEIC5SZ(umOXl=1>K;QCnXJ7Vyzmr=b;mG*Fne(gFR5ac`mQ=iVVHgOC4S+4*!%&nS zW_KOliRi}(61S+iS7vHPw^hxDi~Q)OoJl3%5Fbj`8^rp7se!OR7~U=XufYD$FdH4) zO_TFuI)~0(@a82Pyi9|*@RSTFWrLB$TaTOj(e3V@V2V3q03Y+tdZ)Tr8rhBrk!*qj zfgHJXp|ZO@UuZfWJHZ7NDd95+K~o9?CfNyQ-0gJ~T>k5Qt>2}D$2DHZu)|6W^(A9z zu{~^7UtYk+0=_pCGP&B?5fl7ZAp{2>@)3vaWl)}%L^0Xkz&f`5THkoN3xM^01yjG) zPTPnfG~voZ`8aAAm=igDEYNv4Q(PlCCB(}?tUGWg$}lJSSV_wG4KA8luoFI4DF=Ky ze91IKAZkN{N1A2U)kN>Cy7r}+`+A&oG*@TRv(dIdl*CsDHfU9zh)VyAB>wr3ndF}? zNQOF`V+u8gtPvr2eyq5K^)Fu6hm7^D+F&uuWmgE939uNrbxuV&^@6%%eAMz`s&SQ%fa4S}B8C#gL0Hdm>KOn0@O}X2f4&71`<9pNt^ijj9jWh;sZ%wulc5q3G0lOQ#o^R-B%a!lp|~at-p6_)ZLHae#AHXP_}U(P&~3f;v^*Y#i7X|3=OBRv z^B1+gZQXdY!J20@wz8jYZL7@^TfrDR(7)_k(5az-9>y z8-{96lWv-zHRZ2|#N8mr?iV$ZJ4{Cl7?y5I6F3@z=oB#T7fe*j>-l}h5>i?n{?5wa zotO@rdGo?TktAbpwO-T5CA1M%K`yzJ^YzObJ2*WVdmfdf=ebh&bR||q0+sl!4(?=W zM8&Osjfni4KW2w_=GuAc7~y`4IQ}^DkcZP_HsQeD_(nYVa3>LvmcD+r$3y+B0g|Vf zMM`=V;EnRz*ZQa&=_{w@IYvq%y%Me*pOy}Wv@VGacPT-AbyjyJTY{8$7^ZlPyr)p^ zU~S@2_I(gDa88~w21Jj)xi_pp5e7zg)pYaAv5H&fo*1J}q zEHlvbF#wLg#>`2R=;?Qo#p3ha;<4ayJFb+pvbuJCl){tT+FKvoh${Hhc)0>qIO#kX ze!dPM#S5Hp9rm~kRxB3u1>FMofDfgPbL6E>`!VWuTeZtCewQdnHqqnMBO~ETo6M6#0`lBG&b-q&ZK-7-HaeEM%L$gD3olSl*LVlpJNQ#%4m{qI`KY zh0qH~iTGZSSRu@IDh#GLOX6=vrdo<#q7+-t`Ve_mqN0JLa^GN4pE|-2Vr30mGEEleK^`O9#mEr zW1Bw0dRZVeLI_|bkBdOvDhbAS$@lqyBQF>k8wU&NEOzS-NqL-c^u>d*9;kvsHEh+6 zxdFg_)~kw1_E{P-4+ZnHIiQcreiZ^le{Soz?@P4K(B2-nOB#9DhnM#6OOUSKn>rfZoP zbNI(A;@l04l~Ky$HL$iPcEmf344SdXY(VsMF!VR!0($d^B5y51#X*uOFco_h-;rV! 
zKxz$#w0KuABgId65oaH)8j@g_>BCIp=OVi;;Op!O_}Wu4=7Et(h5bJS?|Mh=37$tX zQrI#{;?$F|2$!5>;gs|(Em%Lo7ylf&b`VrztU7$cX=xW49Q8!M=V0M;SETuev1Qr^ z3E@c@aB5d|SwHon-C&h`b>gC=_MTr|p#3i>#U#U4LGcU#AaqU8On`pQcwVT_R|LBS zRpVvWzc|I3x8pFcV^IkamhoqWDmFmWjcSQaCX^@u+}0|zl6WyjUG(0*tib5<0{MGV zKNh4BaRhhz5_~dx4Cj6YUscBzhak^=X27EYBo z!Kn}S5}Msj1UUSe@*;Pj!!ZP@Oh@nThmqi`U;ggIZ=CoK>XLIAtJb-CAc0O!hM{%s z){Us`g)!L??v7qtX`1AiUB^bzBG{k8u}BXFIfU{%*!T3(Nq=<{?<6>+9PTaU9xXSdUm^WZcrp3E&OqQF467a*boPUI>&(aDr=su$mUy(xYKmvL(bst=NG73n$PsVo-RS|imxs3Pt<^16%dS!j%%Cf%qgbP`T319 z@YYp~{bV&}=%D-I>*ut+uiaI2WTd!trpMC79e$$(@FaMMAc4MV=%UDmx*!!t@f%bW z(i;-Y7vdl;FG45d8C>;L zpqvLr=XVd9)OmO5&wsG>NRAsC4$HnBxkAS?3-O5{WMuy6{dcW$9lN5(1K~JMaN5R2g;jAY@K!^{0=y6ko zX0F7yP8TKKRYs2)!k~P8V~&P!?n<0$BCT5Wy1XPd1sl@LfJ9L9SJ)TS!DCVKdTps0 z1|l&s1=vq25sMD*PQ;+hVB9hkd}}l%<|36Gjh#vK99V}GmsISrvH^Ivdj}y5u3&E7 z(Zk&Lav|#%sVo6woW`3EQ$)Z^d*E*l9^T)QXTJP3#*8W{>(&OUOUmqwT z%f@EX^FG+#5O^88KD=*bI;-Avt3Ym0pd8I8la`IzR~>cj*(bgAxBZWiEXlMj%N@Bb zdjHYhUnb|B`-LE&I31=MLEZ=6)FNn}NkD^*C0n(w8*TXj#Wz zb@ozdj$Dx-1t_@=D}a<_2&u9JD3ZDZyd9VPD$$BZDltzc<8QELqOw1@X>w$EPUSL7 zAly8-U(?NMC#y0kb^S`;?i7GD*DEL_hOP}?%_NE|8#Q2obQJQ?L< zdt4W^c>9N&cy4IBfkR~Al}zTxKy~=HmsE=+?Tm5{=7YY>7CDMIkFUvMgE#ggajhB2 z#;I6wO#4E5PB5A z0sPHQdNbX0s?UO2qoIG4V-kU0zUPsD;|p3Hxhkz6T0KuX1SUxUX>@fR7s=3Kviw6i ziL$+CEtGx||BEDYEp(E*Y0|@mQe+&>WAkRW%(CTqYEREDMSa!BaqfZPx`9eBJs1&o z!#dYTjlj!Aqkf5W5CK+Fsv_939&x?qM14e(8K}!PEh*(1889eo#X?%E&C`(^8yr49 zNbej@yq>Q@Pib7kp|b}hW_64$aGTU7ZLKooq}6Z)qrtpskdR)aZLieJp%-qm-s>h+ zFe!Leb?F!DsP)WU=`LmecF{}HqlP>xi1`G>hN2b}*8GN>pGG+EvCUY|20qAomEnh! zQsE8=;Ws5jMuIhs1FPP(z+`iM8h0_}ux3?-LX_w8rx;J6MgBw}nj=7t$2{}yfwoEm z#ccDbxNU~0gDX$!>s>N{;ZsaOijPWMDj3JGLr8>nE^oM|6!`-0Psg)4*|-iN-y691 zjB!?+E;dVW zWiCUPYln2PGW31W=h29yL3 zpRyhH3VB_^-NB;#C4kPANhkjhSFwz34T1anul<|$59)u%CJMm3mH$MCphk|Mzz{)H zjR@N#94W)F*yvgBOz296$Wucimbv?E-nIw&8tHZ(@Ef2kWP-TqefhgQ-5k+)V^C4f z9OQC<2*EO_4Jn$&2rVi#O@b4hQ4jvc2q0ZT#dVNzx_204>W$y5mEMg)i7#YvvCgj0 zy{fDaiW_qJDW5k&rUKL>kInKf7M#3Mri8LmD2E@9y3&K6f9!c!+1XNrv^nvrNg^KR zBq5#zC(~fZHj2tsBT94k(j6$@Lx{kzwBrF*(v7l_OS|FSD5P+#R@RUpwLd6w9^;j0 zQslRAa~`8^yIuw%Uki?VW3AYUvj_Ehel{OpB6#;?#~m#c;!1WJ@@q867oASWe8@i|4)`;uW+-LnzmNf=g>6bAY?ssi`=<< zhNK{h9-&79f(N;IsWMiq*qzz5jJsb7(3{!WSeVCvo2%Z*L0Z0gIw}SZNiYo+7ENz7 z-f3e;R*Di{Wmk> znpd-aN~YBd-4-1#cD-BvTzt8}6}tCFfa+0NVsw=NSAhax5=Yt*_1%8$Hf$rh-1IN6!g z$aES318|@%j&D8-Y#&WTtFRX{iW}N2>Y4H+TQz+ z2%4`{t&?w`a%4C0wwj&gnsN1`UNU6ksHZPcwa5iXK6YW}W>|MRW(^G%?;Du)V z-BioCnrx8oft)?;Duo0#o}PfjU+Zoxtlu~@eR8JFy$o0$Kx6{awSX}GIL`hZ!y=pq zVacQs19${zCavO&woHbW22OXUQqE(Vp(FFgZBFvLMftSD2x=@>I|-y9E9FQBsB$a& zKyAEcTZO^cO(lWHnRkeou8mw5CnoB1l%FG-n0WW8TpWmu|E-4?BQjnx`5?~R-k-6; z`@qZ};sU?qOfazaqt+th1;S@#Pf2lrHHT8eMQ3b+RT7taUV}NsCP^);cw;SJO zK++iS2b!14%`4M@n|oDSkK>}BwrZ64)#`Zm58d{uuBn0+a~~*Vyx+Fn3iUm=;YEkF z(TT=TmTNwq^k)G6NH5_!Li+lQsN1+yNrz))gqHkJqn3l|@s<)co#JEQsp}Vpw7048 zzQB~p^@5&sPY#KncHwL7Yld1Yf*f^w{Q@>Jr9amue;=)L zSf_Dtuj2o|kiGnZG8aZ;D4NYsQMMKu(+n_uw2r`BNqw~kWDoiw%7TT=X&0dgsVALV z;;E3}htDlxkZGF=g6V6f_`xWj74OjvlqKQHpA&2I^fo>$(~JJRG!_1=*J^!Ek!OW= zV^lINl`Ja);%w91XisV$RW zMb&`OoI*N<;Sf9)Km0RtS~T4`UL$=E09Jq59c1}=R$QZ-;1Ii`>-#L!f64IOz;Ktw z!`}~`7)R^kT+T;u><7zy6)bau^IS60q3%gM6+feh)!sq`@`vwyaSeQK=2lK`M*e{v z4^40o?V0p>WoXEXtGI8i%`uV|xOqeY@4!IW=L=7?Z%<>yn~Fs|$BJh76|!Hv*8Bem z*$(M$7Z6%6W%~|_vima0SinNaWS?Gv1_)jeYq|g! 
zkV$n-n7^e;@opyx!>L%!&^b4v1pfTHXB)aYEyU4^Y_;A7ma(Q0Zus`rCU3*W{lN%d zv)XJf42Aeg&LWg|(`3;)CUR8_$POuwSY;i0S+M|(U;E(Lodb88*}d}PZYe6OG= zOYkCe1rI^AyQL;*V8XY+!%qVWQP%urx>RWS6jro|ml}24?Fix1o-@OhBHipIKSq?r#7%iOHmjiSG0$Pa`(}{HPqInfRdUb*F71(5 zTjmM<4FJ%(GEHu)O@{?$LRJ$&N3Q|e>REU67pyLhzfar3a{XdwTW%F$h~<+#0kRl) zrVr!o4*x+mJXPo{S?rn{kz*OfjMgjn@XPi>+|)%$>!{#j!SzUj zdxW`d=B^7wTtT={|Bj*K*u=3^7h3vPwC75R;yILP2^X$`_6Qm+%!;EC&IAEr{yJJ=f3cN7Nh%3>oeg*sUT@G z#3(`03y1I=IvuLmNIPbkgrCeIt^=L;pbiU^Er{+rvv&U&rdPvMv?fkE$l{-F&JHAK z;yK)WM`6keiv;wFvo3Jxmi3Vp5+7y4npU*4BHk+0GE&Z3)f%}i_0I*k-8Rlw#AP4St zEI_c5XLb2Jp10Ksyj9G54cBYZEA?>5NU#aVGR~#f{!_ngGUaS~N`eBJRh??YarR{c zl;W(m0FfHrZP zdCDo!VTojvkXp9J4CU&!4yP35MVTY>x#ewXyV+5p(*coBHj<31l|fVAn^J^}rGd?> z`j06|{OqKO+h8Be38F2zqz!wPf@mz->NXgKLiXKTot2BPXTv`%4CbfYP^`OMcK(^+ zqInDAMd!Oow=3~gMFmBYV?S!l>8C5o#8(K&H5!Hfj9M+x)Ewtzwkvh0xv1_`+6*c28` z!2lT2O_yx%rc0V`SM1NEB$Ez%oXLp3I0249JL>LJ3o*l~DDvNi?6dhjU&M;lKpJ|K zmgI}G`c5m}c58NomGg68fGbp6wA6k~4Hpm|dv4zcgAzmN{OW#po!60P34rUB>@ib> zd{&8KMLXhnh=fV$7IqCSw|W(gAATcZNUm49?S{eRV&wX0iWNYSy19Nl#7~4%Yz4PF zJVoF{aQ^60eEb|mu^mM7`Y5pHsZ5r+1`$9z1N=31j4yi^98a-W#u{NpUmaw#MYiFi zI-XJyo2 zOzcCzKU2TJgQK7LQK8njPCL68u+4-UBbjc#TYZh-{e1Uk8G zD?kv`anMh$Q=kF7Q&Ie-Fpn*QMV}%0j{XRXYWrn%p~Vs|uxZg7qvn?xBjdUI0cBmP zb8r0EVn$vd3j3bO2GzTtKd?59`wGASKtKC;9UZ%RMYk)8?0STeS6N1+_dh2jiy%f9 zeQ1eHc^G%y@Ky}I=aY_8M*L)yweNjL1t+`SdP{rsPCx~ebEyBY?&a;jHiXT0=UW74 z0ZmG$q<)rrG{1?Q2)V!-EJHz>Q(F#wi8=fd{RfkCS)kZR9V1QXCc_n1c_Lj`z_4w) za4F)?shA!Yb(#s;jr=Zuygmn!8zpZVev6XKU#A3F37!wdqV=6ZLZj8&qA@J<2tfIj zzMZ1BRt!GW7?G!y{kLA)IGr7Y)D`K{A?t0h8yuWhivg|JLf!mo5;g^F8YjnFzS>(Z z+?x=S1;~2ihH`~PFh8#c(d0WAE)Tz2LgOqv*BW>K&V@C(s_vrXSMjK4+3r7ZVv!A> z9(sL*;;Riq5*J}LNda~FEU&USF>_FiY#80JA}|zEVK>`TEiiINww}R&?$YQ992|9P z;npyjB(4m9GR!xyC@l>7Mxn`gE5Z3up+ltzpcEyWi|oVG+oj_x z4g&0y=FGh)0z;87%GErk+X|0|c>aa0Z3yA9mETJ^Bjnvl4jyd9;|P{0qcxf{3APGnTSv%nQ($k;HKEam3C>7l_pM(Os$RnhT!`PF#G3{QEmo|mivyA*{#A?GE9Hpgt{18noUHJC^Bv_ zwlMfw5rQixWagU2w-CVa={;NF#$TA;3vjZ9*NOTB)ROW!R|=Zt*rI950dWCdc>V&u zVxR9XSWI}Y1IO3!+H#8_}6DW zc1sub&j)HCW9Acp%t^b=(nHx0j~J>{sO6M#;B6^lxJ=WL3QKESIih^JrFu9cnt9AL%R4xqu!feo#}dt>CpoVsuj<3d@dd@_S5qu{nrqUDS;SrskvFZ z=vN(Gds2YK!-6#ed9X7+Yn9#F1OOCBAZd&Fi3Kb{y7OqM@5wzFfG`SSS21K$Xv0mw=_Gj)Fvk%pRRv`WPK4fqw!BIjvRUNFFs za(Yk}AJVx=WAcdA2xEAc$u5wEx_#ZV0{bpXNRbNIt@VEYN9rNi`ZckG^sG+MQ*7pB z-6UPy{#zAE>mj+2p{AiF_PUx4IQZ%Ml8?eV^CKVqQzH|UlH)xf+hFk2!Q5D)In(=7 zIg7{_WaQz%#+~vuX4}1dlXJX561=_Its6H0$kbAnXZack>$A}L6cAuQOZ%CipLG7z z9)$gM0z_17s9T2lUw{gCS_!f*H3lN&$En0e6gDo zZownFy3_zX(p|vsHD!S?ZsM$2@?aKheh8uMEYzEF4jUsnhe3e-{45vp8dls@^d$#l z1+inl8U-KGx)Cy z{qt%BSfG21-q@K=RP&4K2X<(VH^K0QGVr=&|R;>**6jlm^e4v<^7o1 z_K(G~2MOYgHomS*dcwl;HLX?!GmdzAUfzhdgJ&b^g23X?WoZ1auH?r@_ zLiVylg5}aEt}P~YHz9fO`9x|r&+6vDE+jSLVw zqDxD8{mJ!RurVk9R$r98}>nKQYmk~cr=-Y;hrB~VNkY{L}rl7Z2_jfws|8d&H> zD}GVE_()CND=oDWT)_#%@NF^L#%O*)iXsqRipVIn!GsPb`D+Qe$Kr^tD36VOCa^<- zcyy7<+oZnzHfnca3IhIVmjGXy$3bS~l~<*45Q{I(y{%H;x4IM@#L_rwvUt{GR~% z^jLuKK!TedR~)qFC^F4`gbgZvqYo32xqU1sY3p?n?MvxpF`Y7@>Ts7bQ-TqdO%G)2 z-)y0OXrkPnW8X)#B*v{mm-$5uwulOooue@(Ds?8Awk(8ZCt?(2&20qi3ib1k+9B1tf85y${@CI+ zwu$-LhAG<7?PgxImw0b>fl4d>$%Zmfu_fO*0vk53&Gqz#5jFg-P zuz>bagI!;9U|f?`Fh_$kB#hp52)=Al&cHB89X(Z|OXyG}Qm1YbDgtwmpzNvz&+>%I zf}>Z{!S=(qEhHKlp45%&s7CbkigS0G9A+-=YoMb_fH`YL+E__W4hU#zzsQ^d4Fs+u z`LAL4IrPoJI%n1f@q6id!ej0?enPH>TLkus{bU#9_%l7LR~Jvlgk3N)yVQy_NmRnh zAsbK_V1|}uStZz?Y}u!%@XLEX=`lj?qZ4)>k2>7f;01mv5(I2kr4L{q!3?i7JXux@ zG(uEEXf*y7)EUUhmS*VSFDVUsztx8oV2+Q4@7OX6znR+n9li#38Li7_?)UbXPoVqy zG-{rUjtSD$VogU_ir*-_P@qA}v^1dE4kp$Zn$OFMm`+AWtl{T^NV$upY2flf(WRXpGMQGB9~ 
zs}!xbU^~5LQ!)aKYa`XpxH?n-mEEsM;Db-N0Dr@6ox%7374fgLdL4%dymcUFBVgob zsRp|&BZw`a-dfm}bQfh*H}CZploQ7ajGyk$)g_X}4J|S%YE5KhY0Quoa#wt?wW-;^ z`{9`2J=R~Gw3NH5>b%vKqpztzvJX8(OH~nU+5|Q8$sE2X6ft4&kvfW$f_L(_LBK>5 zGf}j1+@XXKciVCf$5ZB}ilj+4A0d1^e&0@7{tUhUEkhvoPn-EZ`CFh; z@pBscRO1NSQ_;uDP=7@IZz0k0}`;|bljHw zw$@9CV8(ePLMhvPi11bQQI8D!N9Haqh*cZ5mF=(tcI^}O(G;+8H`Qk+uH)qP{`uWg z9z~>ta!Yhw(}^I+*9q5Vt;ioLvd!F!4>qw%uq4cvf|ejWm7^V%H8SsHs^}>rMAe^C8w8Zcw3mE;a!Wy z3$Q9%M(;tUZxHhgPhZ(MpHTJ1C#TNsmiz7;EQYZcqf*~IK18kyq1z>#FIYLE>tScP zDfKsyb>6slEUE<*5yPYfhpJ5*F+R5WfiDJw4yl(p3m!8053Xn+-ZMJS48=6cCVaSM zD3{x({qFU}^FJ;%3VNG1f2I!{^wB#*-}PXZ_lh3@RK{j+<}zb&-VT@74$+W+L8uKH z)CK$~?|gt)0jYr?PLrBIFy8n?@vu<>3-+LwDkMjSV0Ef1(ZI70Uf_+fn?gi(+MX-I zOH^PI!)q?kSpxGYhR9--a6*q!doDB^VR}QnUGQsVy53bc$JhsHT7^|UF%`vh z59-M+5;`cFe!R-K)tx1f{FG~aYXG3M*h%tb9!nD9pj9ic(a8@G$EbL}eI zC$@doNxQ%-x+mE6dc}@_Vds*0Ihg0&XGSaiuvuxI|GXbm{&U&+dLz6=4Ym!_v^v_P z-H-*9^^n8(zL-JzBhcU6fvXMiY5fF6#m?jQLd_VVat(L&YaOj%Hz^sOeXmO=PKuhG zH)ZrJk)qLa-z*Oq^F+;Yv_2R}t@6CEQ)&`cv$wLWWg@FaKx!8GOKFtvQ}3Zh53~&+?9#T9D&2R*-XG?wnthXz zw`qnusZ=5(4I|Rc(eNwrEQ2Wl&;}u$a{o#C zQvxjzhL)uHLa8}2sJszc*w!ZX+GlfxGY?{e8wyh%Jzv|z`yEg^!EEuU5Ir37O592L z)flyM&dC1#;D_KVYMaD*m1S!!_1)~pp zcm4{{X)@x!OFhLW`)h0^9-Jo(pwrU_@{|wJWOyYlmY!mufzjzu|nP5~)zlVt&Xl$LX(%$lwwGEV% zk?58EZqS4VJ@hw(ApY~$3u?>noLFW>i zhvNSG3U4Y+!~J!pR6PL~@_p>LcTW^oaMBiOnf@8cbjOfmLl0iz`M9v5q4z8x*k0v>=gy zjhycqZ`)n@C<#CRLs8j82Y)DWe+V$pL#MzD+zUj_!Y(Cbx|_NQHl z>Z#&U<5Z~cI*6nz2^TefBNd!{rp+60^KYkN%|10~V<<{;a#m7Y*_ZK2mW1UZ#9rHXZVnR>kk<)bHrWL|xtG9EzqPlB~nOvDG@o z3Ot=75iy--JtY@b3_MOctg=j@7wYZ-N*&Zhk~m#el-gN!Mq%8RlZ_kq-PIULerx|{#j3vK)v1^@r?1Ny;p;4=RW zQd*H9!?Sy`!P5xN;!!8WucOiC@pD2R>taGX=7BPJCNIYNOXTVZ>?7w~%rhA2>T@as znn*9sLlK_73PCbY*3Ml_&wLQMr-;*4qHi}lECPG$`_$gqShZ&(^wPIyJ&u^m)>w27 zctgb`-?%Oems@R*&F(qSX1!@)oy3$=BFbBt3<$y=Y?VAOjSs>e#ujXGVe+YJFut)T z)Ij&t*9cw2Z4G@n*q+McJS~T%TuUINuQ0=84SyEMa*N2%Vgip_JGE|ul{Xarwl~S<2v^#-MX0R5yj{cgw%_0ue8&cQpY3F zeQS7VZa+S%&(}u6Xey5VE(b^N1nvrOM5IgI0cJR1#TM76P+-v4zz0_;QzIH??WMR< zheXZJl}aSM^p@XOs;wRHWLbrwIQe46QQ#CzVE!DrfL#>6ySGz;$xFjFd9(z5CUyt%J8BXiY8OCiw{p^3CT*reLkfp-Rj5^8!YBp7il;S>KWhO%_LkY-W)N@RMHG zEGXNI1-^ahqx-w?DPz2eb0bcTqPzE&Txi}4Eps`ZP&c^AON~+-EwJWuACDz=tz$^% z9!)O*Ft)HHJKE}Q+V{4hyruTF$BQt_7bgOsqPe<3^(tFlyP|H1v< zx|0d#MRb*-_Py&$mi7QK@9&s~N&xb_mWMDrD$a`76<#HZyOy8LAgsLn+-Tk{dl)eV zK#bqSzBC5)0p_;N;>-97n)mxoU~B*?(sO^obS<6JKJY(Y@ib`q95_@5z>Zww$IvsO zg7!)Eu8rTkmtTv-?m^Jbmd{GCre}M;g7dBeX$}MxcYj2oM^Kmc`$9Z@zYq_RqULm3 ze-_ln^7oT)c#k?gUWB368WQ>b)svVf6&*XjD->WcCAbfj0GF4WZ*hI>#3G`FyfzeN zZR8nzMP8KyqnRY2vs7>W)uYp+1%ZrSY<1Nj41Jc32#ylVA3%g#j*)GPTIDZ zv3U7tzcCYB^RThTNu{NEBg7wo;(8*f3!vy5*Pi_ex~R5^axH+>X?4eG?&zVLk0cx? 
zLwWl?=X|!&M)!#*uN0_X18y}hcQEFF6eeGjFiS*W6LPZ0ysmPnoyS3w-c-YwHT+Vv&=% zAB@TkYW2ufCg|i59jj>Lkip$G+QJKPUL9lu2By#w6U`oGk|ohOGsiwnqx%$<^?I%e zC30SkoB7Eijy5jz+3{!%&deZwsrimbm-)2B4{w{6^43v=S*lvmbH=Lo!2qM)rKaC6r3{{5Lndjk&(;*Q>V(Ilf z3YetCEUqIa$e`40#b6n~lvl6kbXxus49!kh&Ct0K7@F}p!_vVs^Kd4GFt@}0fS)g6 z79=*|WxclrCRZa@hn+m zSSe`S^k_FSsNO$mHfItt;7ZLTXV>9@6dlS8Kbvb0Q=l4!fC1k<7>Kd8%c}1CU?w6I z_L%__AE)rg+Ps$`cn<7tQ_tkY{?#8cY*ZC(aWQSe3AW5{2LGnspxqzJLJi|xi>w-@ zLFVi}VQ;HXYRU4_d{ou^s*4*Ro0@;OT}R?@^0(J<>4&9gk68zPD5{JkfId!~J5A#9))5}}q134r>xKdcf{ke|5GvlUjL z4?2SM35>*_vr*W2L9iW<>!#&XUIGw$37O?U9kz4E&0)rV5 z6Vfkp0_ABP?1hVHQ9kX!ULHx2Glkm1)B-ywn$w1tXj}KaN!zO5QHG`WFeTlzS%NXO zZ>DrX!`d%9j+Coe#A@%@1YgC3L%oulct6Za{DVM;M~(LZykk~4IVZmf zi84J{)|NQ#8M?mKE3o=CYafY%T;wGyxh7#EBOsa|hHm-Y_8#igF$ZF$HA#Io-d>=l zG!8DE$57iw&O2bv2)ZZO+@@a3!VMz?S4CsQ@VJ7UBQ1dTU=t`TQgL+b}C*vJ+ z8zoEFi0w@pmk$St*!t|09nd%QcBfW9lJUpg&=tE5m=DT}p>FHiZU|kgg$fntME=p8 zB3WOh%jV>uKx3#Jhup-+S^w`CXkrkNV`Ejr;iFEBZsmb~lIMydo*-}!RVk#=vK{~Kt zM$d0tEBlQTK2xA`iGKJ6wEL^KiS>}fMhWKj5`kx%>{@>;F?=EJx0M$YOGxse%VJT* z%0~>0Yd&hP!Az>QWw99`{Z% zs%b{aO8jNs-YFYwC_|g<=5+>D3^mb(X!Gia;6^c`Wn8*he7=sGGIhskh#qhdp8x!jG@l1i4L^7)$jQL?^jUF&of_4*lv#m$G~0h z9>nPWO328J0)#~ zpn)yK;tyh(wQ4~fXoQwKbhnwP_1>v`6XYR|bJ5K7z?vXu?D~->>f^ODAPB(fTCxa} zSrrCq0SddPb}8Jm6-MROs#sTEDSbgQ-@c51BsNs6OljjVzUv26vVJ^{S~@l z{*2@Q2p#Xw1WEEboT!pjR+z*fsoZ=F$=jXU|@%Q(&EGN7jdHVmb$A4?$mVjH#f zJEY59(xyE9uKS)@%Q9Q3W}#O}ksV0E1IAC`aQ=(c^Dn~3@1vRXXv8aevazMi4&zPG zx5#6sOR-y{m?cpyJreb;47oq}{S$wEUaP_~r{quxYgya8NtLghM&>HUb1U+lCES5y za=tC%{QcVGxUTqmS**a#rY+rLI4^FGhTkr?l1{!JDuTPT3fiYv*@ol_7yamMo|s>{ zdisoZs27k>$Z~27ddc+Au=fkK-RQiL(}UJBY8R7&y&Vp}WU#ET3)aSl68WxLvE&esGLi>1vqsO9}hnIvc9^8~a;kxGq!J5RZ;~6xoq?(1y?AROtq z6A{sr+;N{^&o@8ut(|A(z?zgwrXBRwL%YvHOJs#$ED8aEPZFZCxNi<^sPuST1Kk0| zl@2L=?#sA{QVJ(Z7SCwpgtO*#DQMD}dmN@D6a=>z-`%NG3+>F810CK?LXbS^v(HTE zPv!YHf*NU`Jo!f{SeJ_H1HRHR4|qnw>UVL2WSB2Gwa3I@f3QaQRt95iTC?8zBRXOo zZ-(9txjpooOc1dUQMDPd<0z}p`t>C-qaGgfPhR!g$3l)!Fk185Nv95ut(}_+p}>3u zu}PAvE$bH*+3jaQZ#|9`zaNm38i_-Q-W_5WZ5Q-p%O>B9M&o3vp`647K7XhrKTM~v z`DXo>yZB{~X{Mf7YBF%l(Ex;jcz4 zJDl^2LYk*qKQ;3FwD+k3HHFxTS*!Pi$BK6Hp)XccBSk{|Pg)J6p0=|JV8a;~Yborl zwx~9a%7S37L)gW0hSV-tL^tTik?;E3r0?L6x%Y`Dxk7cD=UcH7d7;q4@I3Aa~JhTiVLJL;zh*_ z0d$?AH4F53*pdxS%$K;;1KQGD4(f@NB*-;mN&p!FHyYRfLN@SQ%!_HyAL>fu-X3lL zF)h9VIE5)ewXO-MUHCAPZxz0+xMo21y!)bX1sO1PP@gYw-S4?v(2b(6JkY?JiJh;;*ZaF=zli&reIWOLxVyHPL zh)v3YE{c07+GiXVI;WQzUO)Oyn{wyNWKMG|m$@SQ&F=ApJsYw_`T+Q$BL16o^FFfe zI_eO0Y)+4_2rg9V<7K_De@RgO`ZZ83Q;w!Uel9n+?uBhX;?IURSYycPG(2qTKqzT8 zwL}LEk#o&OpZS}eC%lt=9;OaX=e z*M5|J7g{L|Rx>Ts)-4w#e)dFO1YCVTPwC7K@M~)i>c|9Ba*XnSP&ppW7(_VB<`ETB z>1#Sy^}+Ee2CL3f#|V&LX#0T9r?=oI*MiuqL_QvT%a_Y9?*>yeDZg$v-mSqK`{fcy z(_~QBvh%~{Nup2uVcQPZ$}XMy)y+`ATu=+&O~obEtx^!N%5GYx?K-sPVB3-h*WZ0H ztRGZx1|Q#m<=x$`Id7+%%u6zmV@b?wE^IVZL4wf(ONEkiLxD2|6x?3+Y$4xVFog`B+^|p!9%3sBPLwB z{av@g)R;IERdR+^lb&M2)m$KU=u4|Gi1M``V7fvexLBDz<6naCuLo~*yXQ`BjUQoJ9fj# zbGRfk+RXJ*!VOb9En})f3v2UCVfdcwB|BQCI@hrj*kwZ)GqMpI=MpXdc>0jkvHV|6 zZRO8|_&-hE`ds3`>y@wyRCU;euA&4p&}u^x@t<*1BflNzY|12l8KDX$B}|8ev_m%H z3Ko^h|2F;X!BKvJvvWhnVkKF#SG*i-CFnXw+Yg?tA7$S>Jo{ulYaPx6enEF|Bn-kr zER6m8%lYjswj{0C!f8Rt?RB5#NZ<3lC;zx>lY^GmbUV!Ft%9Xd-Gp${jvCZI>(z5G zmpXaG8UjB6R{v;dCc-(E{)u`+igVySYJzG?lj|ZaoX~;8kl78^V(0p2`Bjb>s!$J<%bsH&mfMm3CyK!T=i%cTGk6*Y9U|(QESH916U5+aMc;HSlV~ZKwlQ zQH(wbudz?mS|X#7@SV3inVR-l&{yg-t_bn#SkxQEO#UAcXKj_coYj`Z;94f=yC4pk z2~lzt<|o8qJGkRmgI%I=CcRelqOmwM!jAqx_ZcqHa*N>|t$d@p1X%jAoVP+jR+EFG z6A&MCkrl2Gd*l>7*CvpJ23B5?-uxSZcb0m9@WJ8pRp@egwx zv@;;QB~`1aI1Q9oKpF%VfSx=F689jYdDm4ca$$q$Z6|4&{tQ6c_CW_65s*=j9|+(M 
[GIT binary patch: base85-encoded binary payload omitted (not human-readable)]
zaz>Lo%OHP4&Ib;QKFbXV*u)*fF7HGe>U;^5m5*olYAHsdP;lmUZg~}EAgxoNl6~7v zi!!vb_-}`Ex?8I6yZ+pI+lnr4&vrHAZ`80fb}j)rwU*tw=Fjr%9E98DJ*oHN>ENWM;@Si=ZXi9 zn7u^sN=S2rnZMtuY@smQo5{^@tlWGLS!^>(+QZQ#gvDP_Bv+nGOD-h~jAI2U@YWUKs zM$X2I*Vf?K*g}sBfLMAga`EY-(jE2f`$!{Q*RI%Lhs|MO4Tk&%>91RtRs}i{x2|gl z77kPkOB_{EmHGMVV(`S@&+oEjLC-mxVVtPoXel|^yl+DWbwH&Hn2Wxo-2@s!T9|kF zB^O+)>#gvSFIzf*%vCf+ZFcK$yfzl5(dP`n>!m(Dqga90Mw-qZ%F2^IC;8(i8FxFr zEHtb>&4H{A!d=QA^OWT}GVSY1FpujJu7E6-=D@)~IXcK1kAXPjVb+>Woi#+%KzJ?u z`NLpdbBI08um9rIHb{n>w-h!D5w1W@obIo>fn4 ziUu$yFftf>rtrqaJn5cV^UD*=89EDjltoq6XS1QovW@}%Pk>hy%s%-~HG?AZ{;oTR zLerC`(rN~G6=s7TH&BO!sNe)AH+IvsbC+$A0=Or8Li(0|)7T3; z$Yl^Yec|R@T*@4iTi?y%*4c4dkRiYn0B2Q1RGr9_@{tTi8v>(fsSoy)5gc-SeAi=Z z=^?k3)PKiG<5q{s4o`nKZaBAr;kA5NV9fPGPglTV5@mOQr|j<{55fkox5+34&aI$B z5)D8sqE8znHk)_3K-fQdg!BvH><-}!fB^FU?ovyyO+Wu~nB{Fk5#wMlMaB}Zuj-;p zn-KXunKy&GY`DX!2K!Tsl$Ejw&M*-L!`NsU!|dH-_eIzYPA8Nz_6{RchC;X^yj+F8ig(Yt1Ut$RE zpf|B=fIZPz(5Dy}Bk@pdA2tOmDNDXair4aQjfi0B2hm*4A{|01czl1xR6?nBbpIdg z!=FuO@=y!Vv%uW*FNoX4uqgE~!BJd|KDFfs@UVzEh4U(-zS?S)&9@A%PXW(H5X1F) zCOL(7%z7xg(hJb+!xP96c|{&H_Vaa*52KpQu%7!rU$B(fJj`6aoCWK;#`)#Q*avoUIXhxmq_L=O~K0iI&vMcm(Tk3u=?B6@f z#T3#6FCN#Fd+9UR_V_BedtWy|&HAcOd0qBvj^YfX@C@Ud^rcqFgKOw=pQVwNvZb99 zH1z#JXL_}d-lxX)ZNaWw-PgL>q!0TPmov~&-|OHjxhV?W=Ra2$0%kNTzo8t;a$2CBNVK^H@?z4;@Sw!T9=eX32;$VkL?ID69;LsJ(Eu z{*KE5r}=J0(nL@~gBtSQ>#A5?3jJQPG2zM0n5qI97`BKulCGK(sja~h-|lj%v8l;y zgJXkBU~vsbpgHKz%Q$j02C5Q`-tMU{Jj@}hQyd716)&7W9?N$4DZ)PW=Q&2u79j&4 z{j-L_7)RqR{Ne_QT1@(Zig86xwQk=qHqg&{fEmxUR9&+%(4J~!oNL6NWdRf9Wc3;k z6iHK~oy(@tp)Kl`GHK$ryUACM@+c#A#_F++b= zcwxsQhgzL#(T83JKrmL)=h-y^vP9;qAbLa6RhYt^nc5X0>(%PuG^e-K z@yo8wFlp%B+Jf|Vg#W2x(sS>bL+rDjGJ4~^dj4K&kWF#UUfO$6$ytREmxK)wfB;+m z0%DupRnw8Rpx!l}Kmwa{1c$ZF^%lLF&km9!6_XA`nk1&rc1|g^n-|3MSmVps?@Fuu zr;r7s?Oq{KXuM!XCbZYX&DBu)2Rhi8q5hnJoD0^D5fY)M7JL1T0~HEdmc) zjOiu^v3T@{ug!s4i&3?_y3BW-; zOIgt)>kurYv3Xdg}3}Qw&6Xz0G zyUwx!zQX&ebDKhYi4=olXnQxRq{23YXvTS9^oI^sJT$Nne3+qFZ;|jC>ccn?t$YG) zqx#9(fDQhAed@k+ov*cG+Dd5W&J4;ET*#z_2)fbf_>9ori^IQ}qx^xYI9Uj%)Pefn zR-K9-galP6errit46lXh3Wn_ZV65a~_FwXx?5>X8^jpo8EUBU9qv=iRa7%1SRkXMF za)NcRNSO7_Dyja0JUcrVn}Bdd&y~g}GM(X?Vp@k^mMWQ@=PnHv6?P>7G2kvx_iqJ;0T=#6;v@d+0iy6zr(t0o z12SdGYuO0n59LBsFi?(@Dobho4EeL-h{yRMRpM)(=|#Jf_zpxbhTqkqEh1KTv|~Mp z%p@9lhzOlsLfx&(1l&!>D41@*ci%q_QLtcH~Lpq zn=8&kd<)KPp-dwYpb0h*Ce65Ai7}TZ2~%a|YfOGd!o#b@9{02?6-zb0a&JnZmW$zK zC(dCe^#Zm?q=s8$N3C)cIS4~%sQ)NvIvm7RUT>E}%Tj8z;4amxBomrlge;Ib?llAYsm19feVf-ENroJlOsUj{@Ag&p^R^F;D)LF)fMkaPiq8WjOC@^iOoqcgGpidS`N zdb`AIechrqH>3(-ARnu07hiVnSSq{zGIjCgV!*J)tFbT<`8xGCAO9`Za$OZf$j*qe zq4c*;Pa|kxA_6#I@rY6i^d$RK!taK+W7X8iF9KKWjU(c4T3OcVP=k_EM0iT5!(h+x zC)EIq1Van@PxS}Sy6#>GD%no}pxu`B^KnS8spvB*{&*x#4>Mk)wxr!#&`FkV;A0=4 zU=DwEJU0>WNJOQN88sJTCUO)lQzR@uLdU}F*RJnK03>p$1T?i+)UNTfwy~NsK%)Vw zvzLi^?a^L`OLoD&zg|O*C#s4bdqc%c<2znNn3X$0`?ar&XAjf+CY9nJr;1ykjA(Hi zFKKR>r_odey>3Y)pAO@CpaM?hI^RaZn_&=9>huW6<;bsiN1UY-kFdAz4GaKqP>{kS z0&2O_a0R&hYXU4Z8VPcEAtR2sigU&&J7Xu@@_R`EjGm1Nxlj@gv+3m^w5=YoMN$(9zwHN9VLKok&6b_||lo>v(;$&3oWpRwivJWnQ2#@y3R&}8$+Yo^4P+Q(8moBGL2xZ+D@W|K=<3`@_%J7O+p8)y zYCRsj82)#p!7g)$erh5Em^FP!$=3cqpF%-@n8oz((Rcogi*F&hC&+-P;_Y$&YPTGy zaiK7Hr4ROkSwbg zk1LnAVkl5`@IX}#$<6Gt9^??l+vQxT11Nd&a%!Zx$B;AJIpBh~$m!z|6c~4fRo+8G zNyHpT!gXeE{jZ6Iz|veP;r#qsrtsq;o31_)D{;w-WS|@RYHk z)}Z~G=1#aP34q`zqFV>DKQKka>_r%{V$^`hV83!9*&f9{s{BC({pQHSx$6y&>2>&0 zjhotpQluCo*LG#nzmH--BLhE?xCwk3YSR$01`lt>%Ov+m`?eRNFqZ{F>kwss#oE6J znjIvpdG)8$61q2}A(^(XQ)3MdsFze$Y zk@*HC-wPNWyXkqyR(k2)iRKZ(xQNwqc(kV~-=pd5FFxCdPGlWnU20%SQQwWb(!jM32?d>PHkm%_bIu-Q#!&{i@J2p}rk3_Kf zk*1B!3d2%xd)2_x*6L2x?l)S%z~q 
zkZik+TzZAwf3U?Wm?imtf0Ze~@bDI>FR>Iq4q%FmQd8sz`V}apS{iz!)k2Wz?yieTbq9!P=dRu zF@^4gB#zKlRl*A8Rx@^gf;R7MLqWU{O<|QWhjM`7-9S5R-89v-c!&#=8rVeeY#UO7 zd9QAG1l?ILDr}d!wKDspcJd9!e7g){x=tM)f?KfAlcTRLqW&EX@P`@3O^oMI7&NE} zx>N{50{5XjMIE=h+v4~{bm%T|{xt>E@1(9nOrf-o7@YS8IwFeBKIrS-hLXr4Atp?a zQeK`Wg|h^k2B$F~IfxSioN2g~gDX~Q^hGp4rZyrX`Vt*P_y7KT=h*(X6jWMnrZi8C z3HeL$y@WeXz+D0iVCD5bY^46@E|te4?tI`*-32&?vK2{GM(GwUN4F(2b~F_@X`#!! z@huV~FtZhj{f!okJX)oFH?P>`k?V_flY*{Ms4SLQT^`(x+_AkK6gdO4jF#6My#}Kt z4~btj*Pnq0N_8pl8{ ziLAKf9jYhD$9i4~W0nt{GdxWc+#_Gx^s~%L)lrnNz38q2e-u74aeCEho32#yOd^3b zyy!Q*kifPllOUJt3Gfo3$ebd(znFz}9o?S8>s( z#<4|1(FF>F%RRzZCwWp!nu3w}k9lYZVhaebm$|TwGQvje;~(0bfvyI_Ea5U)YQhp( zki8Ih<#U0%vR(uvjvUm}9Sf)#g@#PKL>{dX)q|o4 zpArDTPJv&xwvkvHyE9cAN5vP)I|(h^@1mfI10}e4nL+q z8f#e8d~}H8+mHJ?SaMxicL$-nEh36#T-BY$$7Ytf=AoS9t(~S&G)G;{M{U?QC|lz^ zqC8Fsyz)O-U7X=9MQ$tdQi_D0YiSFzM3ztw;tFpE!Bl9OPlLStxmr>qt5_$aJtvo& zphq0S!TU#S5nRy2IQG0e_HdKyb$m{9I^r9LYrVVg+Nr>wgR8vNsfY_Q*8kr|gD03} z{eM$D{@qgiS?B?b!!Vnq;>O8oW{c@tFjzubo$F9|UO? zbni2~WH%n{szBWX5Y%{uF9dC6fiUN`Vx}YMNb`AWgAcx1(E{&nUr7xwNHT)w42a6O zCrV{qXb3ArGZa9=q;rIVbPa37c|)e{&lc`O%4cLr^$U?+h85&X{D<_zaJ(ilT;@iO z9Zx73Cuu`V(86IWp&?KwWMIrvl}4TN4ljN%SazS}I=ML;E> z!c%mI1aL60;Lwg{g3bnE;zL(-?RM2x5Fwc^MLe1*6IhK#&*CtY3ER#nl*V>L(Wnp7e+Zt)liL&uJYqtZa+V|;y`{=5J6v8viZDQ$I^|?A+Bh=N3-1 z^W@lqQC|`%;tXoM zA~^AUF0Rt&HjSDPsNl^ENKW2}mL5r| z$=dCte5{QFd)lBt3aUy;w!39&h`bFc6AlSSv$`}YL(?-Rs!&@WouV&)=v`YYXyCGM zOp}Z@Y;)x3QoAQhZ2S4jUb0qS2fO>4_Z1jM0z%_&Z>+iEj`{#lI4Jr^#iZ>^tmc^D zjwa5#kXVQSUmuanfdzevq9-SZT?j~39L~5vlAl!|C5UBm+3Ey#4uzeWisVzPuJcq+ ze6$6d!ImP`hb|z|OQ3ZqOIam;D-mmkPn6wfsQgjP=z$0n$F3DD=pD>{Ws&38(t9vJ zRWo*Q$t2R3U-ZBtrqIOr1_0&wrYa3t=p@c39sXT1)G?T=+b7G1sxsROq~sX&V9sz~ zn@12${7khq#|0pF%8D6%zb_K`=O-jJsuM9dcxA+ZpofLtT90PHoDl&mzE? z)lp+P@!3^G`LGR1eBNNt0di@#Ho_BsHN0T|e^G2PJ^t!gHPcYgUyz4t_m~sYDhM*&n>l(it?1an!>wIjvJ6pPe zqCp;xl&NJ`p<#Cz&j7<$dq5Xh&R8o05O-c7bgWm>7D>=KWa9pWHuw4-5}}gMw`tF& za7Rt*h8@A(TmfZ%Sf<9rs$mW~JX;QSkqZg=q;6u;S4*CCQ!?$3bkVA-;t4M2qy}`* za217~8Fb>ZP~IAH+k?*d7xQFfJ%Q5E$ZP~Y)-}$z044;I z*q4``OB8%_$2G(^mp7hW&vFy)L#~p~6;e=L#UMXsJ7D++MAB{&=Ozux6RZs)K_7>0 zeCDzblvUQ(5rT&?EGT4CLD6shEEe4HC%D%~V0TWC-o7?cY!R=Z=KQgGoNP$1J;M){ zSQbOtHL>ODe6U3BMlxK{$0EyLQmfT&=W#Vdd==t5)dKg02L=lOrl_-$+3_a0o*$k; zfqXaH$(R&eokhX5QW4a1DgJ2UAJOy+X2t%OX!w7Vdu9_F2@?}Q;M&g(?-~U~sOjVu zQVZSY_-^1j#fL#FO=&W8fnqR;KWa3TL{5xl^O`(YE9mTIZ0(k4PLNQ2VV``s=|S1dOX`7Y196ja8b5J0+wFQ9}ceS5Dwi{JVwsmc&xQ`P=fhyl46a6dY^$y~f5Su7@Oy!PIX3_NvMg)Tsmo%o>;e2*y&TrOJ5&A6YzumZ3# zXfu=NClQZ^NabD$<+Pc@gsf_IVl*ZA-|Ye!sI*7nIqc>GZk?Q*ZV=)mtds1> zgu4mu{xIT(7(qxcZd0xO(avq|<~$;UqK161lrS)U@Z9uy@?I$;!xXkYy6E-{_&#y73l6gGS=yB*lpKwc#$iz*5mgS<0gdi6^dNME z_{YAZkWG(l50`!`r7im8UW{mhOTwL;o$ZVb;M{mLbA-uQ^p)Muwh`_^6P%Uu_A9BD zOTqPCe!kD^L9bQ7?H2u#&qI6_cI5@zUzx-0I@`RQRl0k z6f1uuwy!vl)f$o#=$}*s)we^DONMNh4Y6|kSMF&5I#DI@iSfCwk;Su$@z!E34j?JkY&ZP#hQ-14kTHZc3lca!+0=(y&` ziDZYXFy^Pv$ojrHvhvJ1d>AkEspL7czC9>@^$&HDch@nU=tS7O$ud``%fJ&D)rHJl zO@l8ZB0Jr{z6tm~2}E2I{51|}yPHN7=F_P)3w`>I9)SN30~{f(M!OxaSc$i*EMliG z=$|vH#TaY3bTN3kGfwA%Hsfv1t2m7S?_|7v%?Qr(PcRkAli}kW$Xy>*Idrim4pg!O zGhx88Mx44Jo>D@%QEEsT;o@2C;rWU!#6Mi6R2%E{bGE{5(=?8n91{mNdx!#R3b#yU zKh<#UA^}|}Cq-RPiWFe$KlOdjzHqzjwfA&B5O%l*Kao&-f=wI^koD=~(`xp5{*(<@ zRRdg+)UHC>+ITO2Aqq=HMr_z(Bu3Ch^bkhe<(fU>^o{uww&FooKMf|*+8k*Rge3%| zSlZHH>Vd&0V$?G$9dI5l#L2Q5lV@Q>{9noVV+i`+7$a!ud2 zWBogf_$_UE)=U;ZdqsH2H$%#PR`6lkI>2;KgM(z#hOX+tDMI1=YyDCkHp|+$R^I{J zu-n4|`vou^AvT`qETNqUBvT(9r-P3PGbHyIY$chAq%JF<>m8trR!Ikyuy(&A%W5MY zom?CC_-GWD0ARqtBeMDXWroZL{uSqo9YrLboTHL-VP2`m9G3zg3nB0lGb 
z&c@KYhEJ^+Ze*3e(*nDuAY>1{TAGDj%HjQmhm=D*fnQVSU#|d23z%19Volv_!|N~XVe=x$LIsxeT)p6ijNcilX?WJh1wkWL*cpr1PvlR^$n`b_N6 zgo(OUJxa%?T`pFrizlp5KkUqlIs}8V?XFwiE0hpZ-86GgHMlHB0P`{H0@XYEKxJ#v z>9;mQd}wd8HKs*fn%$0?6W{aQj%1dE`zWKyG33jC<@Mc7Gfm{XCgTE1qx8pqN*N~1 zj6H_~0ZL5jYpRx@%yBYnRG!M+A)z38=$EPqFLL2@?TI_*^+aK@ZUsj>{hL}z+VFB) z+!p*s$^3_iC5f;UME#vd7K=u~>bm?GHw|L+JFM2hpaJdB<>Qtq`@ycXT4*~T#4f~AM14fr{o{!5ejD2|CjT4{j=0q;Wrr-?jqt5 zS$Di_jKM~LmcxN4HkSoEnL2ejKMPr*E*%fv+x4WCy$lrs{}wi7KY8IF{|kj4^P=7 zE*Ns`Hacps0{WKUN>K)&Z+buh>M(QMzVB?7mZ752GgJT?S%)Q74>~gr!4~~>bm}km zWMe2(C|5y$AWyKQvv5aOHJZq0CB#z=YZVXXHK;*{X66|a5JTefoB9!&I|#g=f<>@EC+}nd!bY#3*39z1^B14*?-g#IY0hw~C_Jj*-H#pzx$#2vVOod?>L!(-V<%cK2ktVnJRD}$7H?4)2aP`5fabanTgE3oRdwTX zn0DC6dB}x4!b*l|Oy&??;pStZCa=`f9ib#PV z7#PfUX{_m{$z{!Sd%cRZA+SMqFW*S~ls>X+w_W}Yz&3~W77~&~6LqQWGCrL@r?fvB zD85YZ*HqaU8S_6Z4yrAy1-(bxi^q!*d`|^0w*r~bFK#5GNE2onEGCSODu6@t*|~yA zrEa$6W}IDi>tR~O_qRIH0#N#%%6pHJ?2PjBG9uXFy^*iRL1XygJ1Z>bUx3|yvgXfi zc>~z$ZW&I$Bw7_lWy(X^#RlBZgy~5Bau8a4oFN?QYB)E@HAQLGnuLFjQXWxG>rakq zo!}r*?3si|;B+l_Yp9{HiX%=sTWovwELeO7!(Ppm!uNdg2We1Ry;#}0i?>EaYfK6C~-xyB*1w=C)OeT4hkx*bHuSIkO z>z>aDKw7mvC-~JRJZ*V%76-=c&o%{QY%?4e|LVQCh)5boyz$BadkiUG{|i$ow!^jO zg!yw!92xpqH8kt%S-{40uj2_UREp*hzurVzg#Ka*rzk!lTb)bcGJQFD%lT?^KbQpg z$iI~X@8Z`-zc``iD>V=%IQ8Sl{iaQMOevP~9R8cN86hSpd4A?h*u(O5s|n&C?@2!{ zj955;?r3VbJ7w5B5_x>>yHn1doc3)HVOMjHmhN8*r&3s0n|!699=F1B*;1^Xsts+E zHX8t^TBaRUoEo3OG`1?1ci?t##;g(Xuez+d#92YSO_pxs&^kk4-(yzbs0q$bBBTx^ z9kF&s3(!q@98dBBANH6Ta>WA2VX_tYuhIRg&%RupjMqTR%LJrXmB+9QzGLjbLH zOrZtrg%HPFT;&C^KYzK-5O@e0Rl&X+8}qmQTFfZ8&p#-A&AAy6b8g+1l=c2|CP{kX zkxqq1ObdfZje2=WV>vF2TrhaY6;VMa?LrOk)YBFq%?w`gbifz#iR>?+yiR#}qqe<= zJPVp6%t0Q$v;FezJh;^tL~4a8iX6V#}n9RMa(gld!KUOh@I6yyoQvoNU2)! zDOtkw954+~FElyPB^GXSXV_>yh3-KPoky9^vOEHMm}Q7)owYvMf89t;5^htpe?^gq zMPKFl4IE#eR*U7o@YsDhWbo?9M-07E&k~`>)E=KjV7+ax>7;1(OT=!NHq0$m(Q|AB zEf(0DHBiX-te=9n8o(V0Ayw_g8D_7ugQWHMLp_7TZK@9D+=%HC#0RQhs^x7ej=xo@ zg6H#hz`1CMCl{h{9ByrKOnQ8)Qjn<#sRNPjV;H2vY|ACr6~;rJm)Z+K_G!E+ z;gLk~Ja_z|U@t8C-Glwy$+y7w;?`JXc9?NxCo20=iOsFUrl~05%4?L z*~{eP^MwpLnirxOrXlkjT+1daHGw-)#F}kxxMDxdH1b*%$KFg~({|3<{ULMd?q6Y5 z>mBocS0pV=UbAB1bI+*!!!~H&g&C$_$Bvt1e2gqlb*A=BvoG=Xi9Br99mFIM zUKFSm!448B%7hHxTpt=#8{uY%nP@Xz+}uy}`aQ%8Nei$Cf#8$$LSL1QwYv^u8E)uc z@iLa)2m^LobD~WIk1ctlPzX>%Ng=FmR%*=0*$mPt$>=?#*%3B2!$EkGAI7N*8zsA+ z8Gu{S44O2u)lDv!8tNKf*?bk6dXm>tW}aEO{t3p zG`dheLSC$nS&ha{FygujhzH2yG+NWWpeyP7q{l%jL@R)C>M$W-__kIYa`HUnEP3V# zv(Akp&7{q0NCMKSJUGrNE95@S@1~y*!K%9m0{POF% zP3EOGNs8Jrf41Cpq$RC^$x6P(d$n&AcF#mY-X)=r3PwfL=_1N;nZJfD8(Uj|X6A%` zpRloTxOY%o9FMNqLaD_c3IobwM&5!7)1jL($<5ug3B(oJ7XSv-8Y)-#UXEs&y_vmr z3p2_D;*W%xB#(sCY&~5iy)p2yY9=2QuM&#FT=V0ojaiZy{jFVVkJ~l7?PTn;_28sw z(E*ir5`&GZK^I&`(HJ9C?q&)7$<`ymzU3){*aXUrM6;?BI5wr;Yvz~}v%9>R$Txa> z)ipgWG&0mW^k`X3C$j@dufM!b7BEBmt-Vr`f2{^W({Uri`kHcqsgJ)giXoh7>M#=*_-3Gh{8-z)?a>Gs_D+1E6wFguvVY=q9?!&@allWbNhMPpNA@T^y%Q`nWQwIS!WgghfnQi30~cRRBa|6cmnuD9}@{-x&ODJmMxg|_`m)FbAp#<9BP>)ZTB?lZq^iFJKZw#&}yy7 z8`RR4wN@C9TJVeqlsQXB#N}R0Py9(Lkk^?C!iEV5FjbNI!`Psv0dVkSZ{mdlybR_-yNzY^X|nkn!=lZlO1@}> zkd*nUks>9nVX0O90ii^1+IwBlMXMJpv8q>nS%Bj?45Ati@J7=RNKvI9B;a1mRK-a0x*N`EY^}>W40SqCVRJEM<620&mr&UJqw?DvZa2hyq z#t!Md{%|Ku0cv!SP*^jr3jbIW_!N!T^EnCmnU~S!3pEcTSV>ZMa>aGx7BnDyA(%C; zJN;x!P&n!@77`#CCh0`LD2nNPH*q`f7A7~6ZA$`V-o#Y)d#!Ws!WSS2efu8Zpqf-DJ@PwgAF6WGPa*(<*Jg~nn|H?o}}YNHLQGDQku}(@J#mS2&f^2PE}o;Is23qeEGjOYN zo{&bK*HmLE2&{8EDB#xWl`u;%dDJQ0U#OJJ+~eZ6OtF|PpjtnxGG(lUZj69!yy={( zD^ZmhDkX?*cH7Y$;Y@7KPp^3aOOVL=&qflmF40XvAeKwLQ)4%p&o1=vBMyVc_=*uEhrDGt z)jTYC*+;$m$Wxs6#?c2?^XK&=k*Ql2Cn>iJO4(fs&B?B4#<{_W?m)M;hpu*vMn{pa 
zOI*GG<|P2EI2nSR}S*eqPY(~(>Ya@P&t(oHW(#2%&L!+@s@^qfe^pB*t=lt0bPcwO zflW(gErW4fH7zny!t;BH$57Ja(X=O^vnsJkrQV2h5CH&y{+YW_|MxRp%Ih=+`iz+O z*;`AUf~G}9^QDcYhRcFK?1KDcY-HvJ@J@8r6!oG?UJRENv$JR;sSw_W{#l`t3O?ci zgNsuU1JqDHW<<8KJ?NKyMtm4UbXN8&RO7!4Bo{B?L+{MTX-**8@F)(vzO4dKR`%@^ z;ZL!>d@kYw5r={qp5zShd6@NYylmQnKT;f-GRy6eD)0^FLjgpcV+Se;OjgWU;y;-F znc`Dw2UK2CkR2appihQMGp2a?D-@fp0ASo~(JdJ6K!6Qxxy5Wn{N*E1>d6Uph6QwD zh8pG4_=R6Xb^4NTtxCi;sw63t7J#85q+d>mX=w#tsEs)U&Y#r5Qy+TmLhO9Dw5i8J*H7-T zmL3QWzj;YauvNMiq7oAtcY@Jj36|Vu9bBZfwSZQoTl*)S-YvfdH{`H}MikEp&)Cw( zwA?N3(!o*}gu%H(=LPO*(vUUtA2xqC7%?dnno#F4sF&?w=cs~$1bMAh9uED>TQMyh_)R? zT;L|PQIK@?@e0rk`bj0}K&L)y!sRPsE=F=Zi;CbxN)F1=x=Dtmv#xersEM>Z^RRea zYIh`m96DiR4UX=uA1&k4MtXFQMbRAYBxU(9I}aOGl!UDkzQ@YC-BxdWjGmWk=+mQp zF(CG1d!CRE_%fYTUoy97)t-38F9lMdC6ovSU48~W@YjKa&I*w>+>^}Pj4{YwwH$l@ z#~-gDzX0P00TV&-5dr5Hz?}fim|Qdx7S2X2_E_7-oB`_o*psLU``c^U&*P2|?KCjg zQ%fya^Lbn7T1Een$|YDE3>xfTQ`!KS&tm1@1@(a&1b?>AKea5|n*|@Fxe zv6vW;RS^RG*h@$}aXeUbE4>}eJ)2>=uy(51N&k2E^f(a1Q`f7*elB$313rozRSnDj_av7sOe-mpp{yPtx>}VzIANq;sgn*r1cAAzZcDHHP(#})%0y~+ zz~#B^oo0uZSG?LZ7@N#)=WXD(`cmyjVgqGba{5#`AiQbwi$Ha=Kt;u}ry=l0lY0zV zm(QgE3-lDzuQF%eC!P3sb32SrwVTCgfc9hoDY(XUiDQ4FIcIFDI2cNyy&#O!;--ao z6_02bAld-?g(U8nP1X>WxX8R8U0sk(c zb%IWCNVcNYS2uPLk&1irS%(h2Vr-0X6f0)R1#dL`LdN^tB@QxGD#1EgGW|A0WD^-h zaaM`d5IfzW8SwLcMx`6&yJOn*(iZz<7w|zQ5jn%?w=n>kMK9yDH!FRfRf;<_Z?;ME zePAFVwgJZ)u2Z}j18Xj!0TWFD|E^QW+42?~c0h7mcXE^mOmlo&mpe?%y(Mx#X!Z9W zgVY2^kfJEAo(`F+;0kAeBhT}y#OTbtTxOeC&(DuQKGuknd2sdSv&}8RN`}r z4|Jn$8{|)4kfR=eom)v~7>d2G!{#?72p-ScbiKpk&`b#`FetxVvT>}o$g*Sat z_^b*1GQNU-Ti{=k4YIgl>>C!BnHyyp?+Q~tYn3~*PzNq5?Cbw09s#kU`pm7zGl5uW zO4mimgzig&1Rw%K5u_j=xD3tHj-52C$5&Nv(JA}l{#ss1)p8(k1>I)xlh5x#6Pe7b z5-hOvOAjvLv{G`889mrOkjg?&?NW43Ar?G*bIjM+WQMEA)g1TS7BP?#w;>VLI-Xaz z9!$ni_9Ot=q;*1r(2xFAaBoQMx5)MwZ1SfQg7kaW-8G1IO;Cx;oLe8uYk$E5&Lhm^ z1(cMoCb!Al4j$h=Os1xK+50V$)CCQ$P-n#+%Ns!X`J7{-@z0oQ zqh{^rSa?wem7-}DRl7OQ1*eS9^ z+gbxHdIBedAvHigE@2r$S$E>Q=2<7j6dqkP5_T2VYHQiM!9G zzoWsH&KK`v@WkBKE%wscU95(QJv9=%0AX6r3>NL>>)O{ZTWdpdz4jfQlX;LYh!w;B zeDRIrkNu}6*#V5Hs=HwSYU*;-*-qgoQ=?&B-`xAChzlAe5|Cm1#K8ocVD1DiMGdMC zxgUqALw|iTB{8669QOLz+~YSB;yXY$y+puqm6cX*R*Z?w&rQg#y!%W=J8 z6wO>9s1Qv1=CMFdBUQ~2#6fUOFjG9FiiQ_JA@v>KeEJ46bY#tv_#`H~dK|*&Y58*T zmSvC(6`*1-V}>fH*fR!3>%p%ftp*>bZv8;wY7+N(WeVt!ZN=l!PyPcz5ZYkq|X zUi4!Hv;TDd$&;$;S(w93izTx?M(Sb-cJ*d64G2qd411R#_>XFr?{4LvnV__OX9kk; zM>>YC{plos9!2*L%dUQ3sHmHG76WVo{D);qK;hp6t_2x;6lKl)0k?ZTsRqZHzkB!0 z(0Al-4*dHL@8RT?|F4~M0gkFV1MoR}@7;t1h|r{kl+=Jh#Wpb+LOXywgMfh4f}lou zY;qS!lO-Y91e;nd%Fv=B#@1q0$R<9hB9FER1xvw@A`)II3Q=oAkrZWkv=BuYOTYi_ zrd?uUrXvheb7v0U`OouzoqN~2Ys`jSPwskPVa&SH0iEnK{^;hnGD^#{j>j*K zcz@pH$ENT6OU>D+aVfd$=bH7O8iz`2_svlL%xM=@xU(zcOs`$__xdfVz4wlJ;qMO#hdLddiAi4DUqr2&H?PPaT8b59T{EO}_n|&J$3tJD z&u(gaKVCm)stMK3%;Xt%USVhrRgzYe#f)27TdV^%a;TdPouDSTrP>IsBi`};ETHK|IBec9_%9u&KN#`2G?nm+x$V! 
zwB;@OX^^kX`<`nn`47c_3T@`@m-*ze`c& za1yM6aj*=EJ^|*%L*^Sva1Q(l{4=}`{*HIJ)i8hz6nnXf;lpqjWW=efdj(h8(14;CNUChlO|p ze4h2*hv?z#y9r8QUJ1$X!9?WOAhxO3q4;$_%z)Tq@QhS1!j_(c;V6Z>>;z9vY3U67fpvymCAU5 zM8C*S;hjoFqoeK$*+bt)!LPDT)Nr_ug5K&B^5IaQ#NjC4O%N-+oYIMsqEyUOIF{0Z zer)d8i+Xp;*O0L>>SxqbDIJm5u)hWInUZf|uIK&kX-FItI;dOG7yBhY6gI1S=@);X zw|X!rN01W|54DhTXUHxu^&J%Kk038}B6a2}a?L#-en64+EAZ1|`iYU8A+;G+7r8!+^QS{a{}x-;?kyoOuAgf{qgFAJV>$`m5~O2YDO)qF*`k zO5|VA_hX1pO?;<|VP|*^?Lzc3ABFhM$x-&PWSv5YIg%?TF*EN+CJy?W)Y((V!rwBW z0VYB6$Bd+%eO%ZgF*Rnv{ZP*Gd+-x@5Mr-$Pnin;hCC7CPvdsTI9k8O)+2u#egv-!+96bbMV6Qn z7h^!Eo!r#;*tikm7jqKh{sq&JFNaHDI%I!c0I}6M+Y{ik_$vp}XQ7L(A|88T07_m+ zT(DiEvwIkvgG`&8K`R?x2mc8B(@)On@yNtTUkghhezEY8#<%WUAU?BX-4yseNX#s; zFA4UBvFKM0&5)e~;cu{qb7tO0do(2XslTYraLo)Xn{VMA{NI6A2y2%+e$5E)8S1Z+d5PFfv zCi!XZrGE$L(@so{?bHjPIL6)<{ z9P1W13#P*&I2F=no`#II{1APe_Pc^MSlqjubHv_m_Kd7jsvR+tGuZAX$X-$SL*f{n z2j8Z!UxyRe&HY34@6bSd4O2iIa{)Ud!GhlUHgzr4}7z9&0$y zmwNh_A@`u*ljsppY@y%%2)=xp`cssjA|Hj76x!X_Kyuhz0I}QTyvg(0xi{CrtB~)8 z--pX#1^g59h^zG?b#$@tpR8-HfhG7I-Hh23VkP%hgR^GcMjz+Fz+Mwu%)YdVuIOoP zhQ!(G36r7N_)o@`L-uhoU$2IHC>g=ADz?a', '']) + sep = eos + + instance = {} + instance["lm_labels"] = reply + [eos] + caption = list(chain(*caption)) + + if not drop_caption: + # sequence = [[bos] + list(chain(*caption))] + history + [reply + ([eos] if with_eos else [])] + + # NOTE It is important not to include the reply in the input of the encoder -- > the decoder will just + # learn to copy it --> low train/val loss but no learning is happening + sequence = [[bos] + caption + [eos]] + [[sep] + s for s in history] + [[eos]] + else: + sequence = [[bos]] + [[sep] + s for s in history] + [[eos]] + + instance["input_ids"] = list(chain(*sequence)) + return instance + + +class AVSDDataSet(Dataset): + def __init__(self, config, medium, vis_processor, text_processor, split + # tokenizer, features=None, drop_rate=0.0, train=True + ): + self.config = config + self.medium = medium + self.vis_processor = vis_processor + self.text_processor = text_processor + self.split = split + self.batch_size = config['batch_size_test_{}'.format(medium)] if split == 'test' else config['batch_size_{}'.format(medium)] + self.root_vis = config['root_raw_vis_{}_{}'.format(medium, split)] + self.dialogs = get_dataset(config, split) + + if split == 'test': + self.dialogs = self.dialogs[config['start_idx_gen']: config['end_idx_gen']] + + num_samples = config['num_samples_{}'.format(self.medium)] + if num_samples > 0: + self.dialogs = self.dialogs[:num_samples] + + def __len__(self): + return len(self.dialogs) + + def load_vid(self, vid_id): + vid_dir_path = os.path.join(self.root_vis, vid_id + '.mp4') + + frames, _, _ = read_frames_decord(vid_dir_path, self.config.num_frames) + frames = [self.vis_processor(f).unsqueeze(0) for f in frames] + + vis = torch.cat(frames, dim=0) + return vis + + def load_vid_old(self, vid_id): + # if vid_id == 'QQM8M': + # print('bla') + vid_dir_path = os.path.join(self.root_vis, vid_id) + frame_paths = [os.path.join(vid_dir_path, f) for f in os.listdir(vid_dir_path)] + frame_paths.sort() + num_avail_frames = len(frame_paths) + delta = int(num_avail_frames / (self.config['num_frames'] - 1)) + ran = list(range(0, num_avail_frames, delta)) + if len(ran) < self.config['num_frames']: + ran.extend([num_avail_frames - 1 for _ in range(self.config['num_frames'] - len(ran))]) + if len(ran) > self.config['num_frames']: + ran = ran[:self.config['num_frames']] + assert len(ran) == self.config['num_frames'], f"vid {vid_id} - loaded {len(ran)}/{len(frame_paths)} frames" 
+ frame_paths = [frame_paths[i] for i in ran] + vis = [Image.open(p).convert('RGB') for p in frame_paths] + vis = [transforms.PILToTensor()(v).unsqueeze(0) for v in vis] + vis = torch.cat(vis, dim=0) + vis = self.trans(vis) + return vis + + def __getitem__(self, index): + dialog = self.dialogs[index] + vid_id = dialog['vid'] + + caption = dialog['caption'] + summary = dialog['summary'] + history = dialog['history'] + answer = dialog['answer'] + + caption = self.text_processor(caption) + summary = self.text_processor(summary) + if self.config.dstc != 10: + caption = caption + ' ' + summary + + history = [self.text_processor(h) for h in history] + answer = self.text_processor(answer, remove_period=True) + + if self.config.embed_from_llm: + if self.config.llm_family in ['llama', 'mistral']: + cls_tok = '' + sep_tok = ' ' + bos_tok = '' + eos_tok = '' + else: + cls_tok = '' + sep_tok = '' + bos_tok = '' + eos_tok = '' + else: + cls_tok = '[CLS]' + sep_tok = '[SEP]' + bos_tok = '[SEP]' + eos_tok = '[SEP]' + + caption = cls_tok + caption + sep_tok + history = sep_tok.join(history) + history = history + sep_tok + + # load the video frames + vis = self.load_vid(vid_id) + + return vis, caption, history, answer, vid_id + + +def load_avsd_dataset(config, vis_processor, text_processor, split): + # data_file = config['anno_avsd_{}'.format(split)] + # dataset_list = get_dataset(config, split, tokenizer_enc_dec) + dataset = AVSDDataSet(config, 'avsd', vis_processor, text_processor, split) + return dataset diff --git a/datasets/champagne_dataset.py b/datasets/champagne_dataset.py new file mode 100644 index 0000000..01d6853 --- /dev/null +++ b/datasets/champagne_dataset.py @@ -0,0 +1,279 @@ +# coding: utf-8 +# author: noctli +import json +import os +import pickle +import logging +from tqdm import tqdm +import numpy as np +import torch +import torch.utils.data +from PIL import Image +from torch.utils.data import Dataset +from itertools import chain +from torchvision import transforms +from .utils import type_transform_helper + + +def tokenize(text, tokenizer, return_tensor=False): + tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text)) + if return_tensor: + return torch.tensor(tokenized_text).long() + return tokenized_text + + +def get_dataset(config, split): + + dialog_pth = config['anno_visdial_{}'.format(split)] + dialog_data = json.load(open(dialog_pth, 'r'))['data'] + all_answers = dialog_data['answers'] + all_questions = dialog_data['questions'] + dialog_list = [] + n_history = config['num_hist_turns'] + vid_set = set() + + pbar = tqdm(dialog_data['dialogs']) + pbar.set_description('[INFO] Loading VisDial - {}'.format(split)) + for dialog in pbar: + caption = dialog['caption'] + questions = [all_questions[d['question']] for d in dialog['dialog']] + answers = [all_answers[d['answer']] for d in dialog['dialog']] + + vid = dialog["image_id"] + vid_set.add(vid) + # if undisclosed_only: + # it = range(len(questions) - 1, len(questions)) + # else: + it = range(len(questions)) + qalist=[] + history = [] + # if undisclosed_only: + # for n in range(len(questions)-1): + # qalist.append(questions[n]) + # qalist.append(answers[n]) + # history=qalist[max(-len(qalist),-n_history*2):] + + for n in it: + # if undisclosed_only: + # assert dialog['dialog'][n]['answer'] == '__UNDISCLOSED__' + question = questions[n] + answer = answers[n] + history.append(question) + if n_history == 0: + item = {'vid': vid, 'history': [question], 'answer': answer, 'caption': caption} + else: + item = {'vid': vid, 
'history': history, 'answer': answer, 'caption': caption} + dialog_list.append(item) + qalist.append(question) + qalist.append(answer) + history=qalist[max(-len(qalist),-n_history*2):] + return dialog_list + + +class Champagne(Dataset): + def __init__(self, config, medium, vis_processor, text_processor, split): + + self.config = config + self.medium = medium + self.vis_processor = vis_processor + self.text_processor = text_processor + self.split = split + self.batch_size = config['batch_size_{}'.format(medium)] + + self.root_vis = config['root_raw_vis_{}_{}'.format(medium, split)] + + # get the mapping between caption and image/video + mapping_path = config.get('mapping_path_{}_{}'.format(medium, split), None) + with open(mapping_path, 'rb') as f: + self.mapping = pickle.load(f) + + ids = list(self.mapping.keys()) + ids.sort() + + # reserve some samples for validation + if split == 'train': + self.ids = ids[config.num_val_samples:] + elif split == 'val': + self.ids = ids[:config.num_val_samples] + + num_samples = config['num_samples_{}'.format(self.medium)] + if num_samples > 0: + self.ids = self.ids[:num_samples] + + def __len__(self): + return len(self.ids) + + + def padding(self, seq, pad_token, max_len=None): + if max_len is None: + max_len = max([i.size(0) for i in seq]) + if len(seq[0].size()) == 1: + result = torch.ones((len(seq), max_len)).long() * pad_token + else: + result = torch.ones((len(seq), max_len, seq[0].size(-1))).float() + for i in range(len(seq)): + result[i, :seq[i].size(0)] = seq[i] + orig_len = [s.size(0) for s in seq] + return result, orig_len + + def __getitem__(self, index): + item = self.mapping[self.ids[index]] + # load the videos + pth = os.path.join(self.root_vis, item['path']) + f_names = os.listdir(pth) + if len(f_names) == 0: + with open('/pfss/mlde/workspaces/mlde_wsp_Rohrbach/users/ma35vahy/V2Dial_new/emergency/item.pkl', 'rb') as f: + item = pickle.load(f) + + # load the videos + pth = os.path.join(self.root_vis, item['path']) + f_names = os.listdir(pth) + f_names.sort() + + if len(f_names) < self.config['num_frames']: + f_names += [f_names[-1]] * (self.config['num_frames'] - len(f_names)) + elif len(f_names) > self.config['num_frames']: + f_names = f_names[:self.config['num_frames']] + + pth = [os.path.join(pth, f_name) for f_name in f_names] + try: + vis = [Image.open(p).convert('RGB') for p in pth] + except: + with open('/pfss/mlde/workspaces/mlde_wsp_Rohrbach/users/ma35vahy/V2Dial_new/emergency/item.pkl', 'rb') as f: + item = pickle.load(f) + + # load the videos + pth = os.path.join(self.root_vis, item['path']) + f_names = os.listdir(pth) + f_names.sort() + + pth = [os.path.join(pth, f_name) for f_name in f_names] + vis = [Image.open(p).convert('RGB') for p in pth] + + vis = [self.vis_processor(v).unsqueeze(0) for v in vis] + vis = torch.cat(vis, dim=0) + + dialog = item['dialog'] + + caption = dialog['caption'] + history = dialog['history'] + answer = dialog['answer'] + + caption = self.text_processor(caption) + history = [self.text_processor(h) for h in history] + answer = self.text_processor(answer, remove_period=True) + + if self.config.embed_from_llm: + if self.config.llm_family in ['llama', 'mistral']: + cls_tok = '' + sep_tok = '' + bos_tok = '' + eos_tok = '' + else: + cls_tok = '' + sep_tok = '' + bos_tok = '' + eos_tok = '' + else: + cls_tok = '[CLS]' + sep_tok = '[SEP]' + bos_tok = '[SEP]' + eos_tok = '[SEP]' + + # preprocess the textual data + caption = cls_tok + caption + sep_tok + history = sep_tok.join(history) + history = history + 
sep_tok + # if self.config.llm_family == 'flan_t5': + # answer = ' ' + self.text_processor(answer) + ' ' + # else: + # answer = self.text_processor(answer) + eos_tok + + return vis, caption, history, answer + + + # def collate_fn(self, batch): + + # BOS, EOS, SEP = self.tokenizer_enc_dec.convert_tokens_to_ids(['', '', '']) + + # vis_list, cap_list, hist_list, ques_list, ans_list, index_list, vid_id_list = [], [], [], [], [], [], [] + # batch_size = len(batch) + # for b in batch: + # vis_list.append(b[0]) + # cap = [BOS] + tokenize(b[1], self.tokenizer_enc_dec) + [EOS] + # cap_list.append(torch.tensor(cap)) + # if len(b[2])!=0: + # hist = [[SEP] + tokenize(s, self.tokenizer_enc_dec) for s in b[2]] + [[EOS]] + # hist_list.append(torch.tensor(list(chain(*hist)))) + # else: + # hist = [SEP] + tokenize(b[3], self.tokenizer_enc_dec) + [EOS] + # hist_list.append(torch.tensor(hist)) + + # ques = tokenize(b[3], self.tokenizer_enc_dec) + [EOS] + # ques_list.append(torch.tensor(ques)) + # ans = tokenize(b[4], self.tokenizer_enc_dec) + [EOS] + # ans_list.append(torch.tensor(ans)) + # index_list.append(b[5]) + # vid_id_list.append(b[6]) + + # # pad and keep track of the original lengths + # cap_input_ids, cap_orig_lens = self.padding(cap_list, self.tokenizer_experts.pad_token_id) + # hist_input_ids, hist_orig_lens = self.padding(hist_list, self.tokenizer_experts.pad_token_id) + # ques_input_ids, ques_orig_lens = self.padding(ques_list, self.tokenizer_experts.pad_token_id) + # ans_input_ids, _ = self.padding(ans_list, -100) + + # cap_attention_mask = cap_input_ids != self.tokenizer_experts.pad_token_id + # hist_attention_mask = hist_input_ids != self.tokenizer_experts.pad_token_id + # ques_attention_mask = ques_input_ids != self.tokenizer_experts.pad_token_id + + # total_orig_lens = [sum(l) for l in zip(cap_orig_lens, hist_orig_lens, ques_orig_lens)] + # max_len = max(total_orig_lens) + + # dummy_input_ids_enc_dec = torch.full((batch_size, max_len), self.tokenizer_experts.pad_token_id) + # enc_dec_attention_mask = torch.zeros_like(dummy_input_ids_enc_dec, dtype=torch.bool) + # for i, l in enumerate(total_orig_lens): + # enc_dec_attention_mask[i][:l] = True + # # add the masking of the visual input + # num_query_tok = self.config['num_temporal_query_tokens_{}'.format(self.config['bert_size'])] + # if self.medium in ['avsd', 'msrvtt', 'webvid', 'champagne']: + # vis_attention_mask = torch.ones((batch_size, 2 * num_query_tok), dtype=torch.bool) # *2 for spatial and temporal queries + # else: + # vis_attention_mask = torch.ones((batch_size, num_query_tok), dtype=torch.bool) # only spatial queries + + # enc_dec_attention_mask = torch.concat((vis_attention_mask, enc_dec_attention_mask), dim=1) + # # Now prepare the data + # vis = torch.stack(vis_list, dim=0) + # cap = { + # 'input_ids': cap_input_ids, + # 'attention_mask': cap_attention_mask, + # 'orig_lens': cap_orig_lens + # } + + # hist = { + # 'input_ids': hist_input_ids, + # 'attention_mask': hist_attention_mask, + # 'orig_lens': hist_orig_lens + # } + + # ques = { + # 'input_ids': ques_input_ids, + # 'attention_mask': ques_attention_mask, + # 'orig_lens': ques_orig_lens + # } + + # ans = { + # 'input_ids': ans_input_ids, + # } + + # enc_dec_input = { + # 'input_ids': dummy_input_ids_enc_dec, + # 'attention_mask': enc_dec_attention_mask, + # } + + # index = torch.tensor(index_list) + # return vis, cap, hist, ques, ans, enc_dec_input, index, vid_id_list + + +def load_champagne_dataset(config, vis_processor, text_processor, split): + dataset = 
Champagne(config, 'champagne', vis_processor, text_processor, split)
+    return dataset
\ No newline at end of file
diff --git a/datasets/dataloader.py b/datasets/dataloader.py
new file mode 100644
index 0000000..0b72870
--- /dev/null
+++ b/datasets/dataloader.py
@@ -0,0 +1,137 @@
+"""
+From https://github.com/klauscc/VindLU/blob/main/dataset/dataloader.py
+"""
+
+import torch
+from torch.utils.data import DataLoader, Dataset, ConcatDataset
+import torch.distributed as dist
+from utils.dist import *
+import random
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class MetaLoader(object):
+    """Wraps multiple dataloaders."""
+    def __init__(self, name2loader):
+        """Iterates over multiple dataloaders and ensures that all processes
+        work on data from the same dataloader. The loader ends when the
+        shortest dataloader raises a StopIteration exception.
+
+        name2loader: Dict, {name: dataloader}
+        """
+        self.name2loader = name2loader
+        self.name2iter = {name: iter(l) for name, l in name2loader.items()}
+        name2index = {name: idx for idx, (name, l) in enumerate(name2loader.items())}
+        index2name = {v: k for k, v in name2index.items()}
+
+        iter_order = []
+        for n, l in name2loader.items():
+            iter_order.extend([name2index[n]] * len(l))
+
+        random.shuffle(iter_order)
+        iter_order = torch.Tensor(iter_order).to(torch.device("cuda")).to(torch.uint8)
+
+        # sync
+        if is_dist_avail_and_initialized():
+            # make sure all processes have the same order so that
+            # each step they will have data from the same loader
+            dist.broadcast(iter_order, src=0)
+        self.iter_order = [index2name[int(e.item())] for e in iter_order.cpu()]
+
+        logger.info(str(self))
+
+    def __str__(self):
+        output = [f"MetaLoader has {len(self.name2loader)} dataloaders, {len(self)} batches in total"]
+        for idx, (name, loader) in enumerate(self.name2loader.items()):
+            output.append(
+                f"dataloader index={idx} name={name}, batch-size={loader.batch_size} length(#batches)={len(loader)} "
+            )
+        return "\n".join(output)
+
+    def __len__(self):
+        return len(self.iter_order)
+
+    def __iter__(self):
+        """Yield (name, batch) pairs following the pre-computed, synchronized order."""
+        for name in self.iter_order:
+            _iter = self.name2iter[name]
+            batch = next(_iter)
+            yield name, batch
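+# A minimal usage sketch (illustrative only; the loader names and the
+# per-dataset step functions are hypothetical, not part of this patch):
+#
+#     meta_loader = MetaLoader({'cc3m': cc3m_loader, 'webvid': webvid_loader})
+#     for name, batch in meta_loader:
+#         loss = step_fns[name](batch)   # dispatch on the originating dataset
+#
+# Because the shuffled `iter_order` is broadcast from rank 0, every process
+# draws from the same dataloader at each step.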
+def load_dataloaders(config, datasets, split, output_dict=False):
+    if isinstance(datasets, dict):
+        datasets = list(datasets.values())
+    shuffles = [True] * len(datasets) if split == 'train' else [False] * len(datasets)
+    if config['distributed'] and split != 'test':
+        num_tasks = get_world_size()
+        global_rank = get_rank()
+        samplers = create_samplers(
+            datasets, shuffles, num_tasks, global_rank
+        )
+    else:
+        samplers = [None] * len(datasets)
+
+    batch_size = [dataset.datasets[0].batch_size if isinstance(dataset, ConcatDataset) else dataset.batch_size for dataset in datasets]
+    collate_fns = []
+    for dataset in datasets:
+        if isinstance(dataset, ConcatDataset):
+            collate_fns.append(getattr(dataset.datasets[0], 'collate_fn', None))
+        else:
+            collate_fns.append(getattr(dataset, 'collate_fn', None))
+
+    loaders = create_loader(
+        datasets,
+        samplers,
+        batch_size=batch_size,
+        num_workers=[config.num_workers] * len(datasets),
+        is_trains=shuffles,
+        collate_fns=collate_fns,
+    )
+    loaders_dict = {}
+    if output_dict:
+        for l in loaders:
+            if isinstance(l.dataset, ConcatDataset):
+                loaders_dict[l.dataset.datasets[0].medium] = l
+            else:
+                loaders_dict[l.dataset.medium] = l
+        return loaders_dict
+    return loaders
+
+
+def create_samplers(datasets, shuffles, num_tasks, global_rank):
+    samplers = []
+    for dataset, shuffle in zip(datasets, shuffles):
+        sampler = torch.utils.data.DistributedSampler(
+            dataset, num_replicas=num_tasks, rank=global_rank, shuffle=shuffle
+        )
+        samplers.append(sampler)
+    return samplers
+
+
+def create_loader(datasets, samplers, batch_size, num_workers, is_trains, collate_fns):
+    loaders = []
+    for dataset, sampler, bs, n_worker, is_train, collate_fn in zip(
+        datasets, samplers, batch_size, num_workers, is_trains, collate_fns
+    ):
+        if is_train:
+            shuffle = sampler is None
+            drop_last = True
+        else:
+            shuffle = False
+            drop_last = True
+        loader = DataLoader(
+            dataset,
+            batch_size=bs,
+            num_workers=n_worker,
+            pin_memory=False,
+            sampler=sampler,
+            shuffle=shuffle,
+            collate_fn=collate_fn,
+            drop_last=drop_last,
+            persistent_workers=True if n_worker > 0 else False,
+        )
+        loaders.append(loader)
+    return loaders
\ No newline at end of file
diff --git a/datasets/nextqa_dataset.py b/datasets/nextqa_dataset.py
new file mode 100644
index 0000000..50d5e0a
--- /dev/null
+++ b/datasets/nextqa_dataset.py
@@ -0,0 +1,86 @@
+import os
+import pandas as pd
+# import h5py
+import json
+import numpy as np
+import torch
+from torch.utils.data import Dataset
+from .video_utils import read_frames_decord
+
+
+def load_file(file_name):
+    annos = None
+    if os.path.splitext(file_name)[-1] == '.csv':
+        return pd.read_csv(file_name)
+    with open(file_name, 'r') as fp:
+        if os.path.splitext(file_name)[1] == '.txt':
+            annos = fp.readlines()
+            annos = [line.rstrip() for line in annos]
+        if os.path.splitext(file_name)[1] == '.json':
+            annos = json.load(fp)
+    return annos
+
+
+class NextQADataset(Dataset):
+    def __init__(self, config, medium, vis_processor, text_processor, split):
+
+        super().__init__()
+        self.config = config
+        self.medium = medium
+        self.vis_processor = vis_processor
+        self.text_processor = text_processor
+        self.split = split
+
+        self.batch_size = config['batch_size_test_{}'.format(medium)] if split == 'test' else config['batch_size_{}'.format(medium)]
+        self.root_vis = config['root_raw_vis_{}_{}'.format(medium, split)]
+        with open(config['vid_mapping_nextqa'], 'r') as f:
+            self.video_mapping = json.load(f)
+
+        self.sample_list = load_file(self.config['anno_nextqa_{}'.format(split)])
+
+        if split == 'test':
+            self.sample_list = self.sample_list[config['start_idx_gen']: config['end_idx_gen']]
+            self.captions = load_file(self.config['next_qa_captions_{}'.format(split)])
+        else:
+            self.captions = None
+
+        num_samples = config['num_samples_{}'.format(self.medium)]
+        if num_samples > 0:
+            self.sample_list = self.sample_list[:num_samples]
+
+    def __len__(self):
+        return len(self.sample_list)
+
+    def load_vid(self, vid_id):
+        vid_dir_path = os.path.join(self.root_vis, self.video_mapping[vid_id] + '.mp4')
+
+        frames, _, _ = read_frames_decord(vid_dir_path, self.config.num_frames)
+        frames = [self.vis_processor(f).unsqueeze(0) for f in frames]
+
+        vis = torch.cat(frames, dim=0)
+        return vis
+
+    def __getitem__(self, idx):
+        if self.split == 'test':
+            idx += self.config['start_idx_gen']
+
+        cur_sample = self.sample_list.loc[idx]
+        video_id, ques, ans, qid = str(cur_sample['video']), str(cur_sample['question']),\
+            str(cur_sample['answer']), str(cur_sample['qid'])
+
+        history = self.text_processor(ques)
+        answer = self.text_processor(ans)
+        if self.split == 'test':
+            caption = self.text_processor(self.captions[video_id])
+        else:
+            caption = self.text_processor('please answer the following question based on the video')
+        vis = self.load_vid(video_id)
+
+        return vis, caption, history, answer, video_id, qid
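+# Note on the start_idx_gen offset in __getitem__ above: the test split is
+# pre-sliced with [start_idx_gen:end_idx_gen], and pandas slicing keeps the
+# original row labels, so .loc needs the absolute index. E.g. (illustrative)
+# with start_idx_gen=1000, dataset index 0 resolves to annotation row 1000.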
+
+def load_nextqa_dataset(config, vis_processor, text_processor, split):
+    dataset = NextQADataset(config, 'nextqa', vis_processor, text_processor, split)
+    return dataset
diff --git a/datasets/pretraining.py b/datasets/pretraining.py
new file mode 100644
index 0000000..13d6876
--- /dev/null
+++ b/datasets/pretraining.py
@@ -0,0 +1,156 @@
+from torch.utils.data import Dataset
+import pickle
+import os
+
+import torch
+from PIL import Image
+import numpy as np
+from torchvision import transforms
+import random
+
+from .utils import pre_text, type_transform_helper, load_anno, open_img
+
+class CapDataset(Dataset):
+    def __init__(self, config, medium, vis_processor, text_processor, split):
+        super(CapDataset, self).__init__()
+        self.config = config
+        self.batch_size = config['batch_size_{}'.format(medium)]
+        self.medium = medium  # "webvid / cc3m / msrvtt"
+        self.vis_processor = vis_processor
+        self.text_processor = text_processor
+        self.split = split  # train / val / test
+
+        self.root_vis = config['root_raw_vis_{}_{}'.format(medium, split)]
+
+        # get the mapping between caption and image/video
+        mapping_path = config.get('mapping_path_{}_{}'.format(medium, split), None)
+        with open(mapping_path, 'rb') as f:
+            self.mapping = pickle.load(f)
+
+        # These are the main ids of the dataset (typically one per image/vid)
+        self.ids = list(self.mapping.keys())
+        num_samples = config['num_samples_{}'.format(self.medium)]
+        if num_samples > 0:
+            self.ids = self.ids[:num_samples]
+
+    def __len__(self):
+        return len(self.ids)
+
+    def __getitem__(self, index):
+        item = self.mapping[self.ids[index]]
+        ############################# Textual features #############################
+        caption = item['caption']
+        caption = self.text_processor(caption)
+        # add [CLS] token
+        caption = '[CLS] ' + caption
+
+        if self.medium == 'cc3m':
+            pth = os.path.join(self.root_vis, item['file'])
+            vis = open_img(pth)
+            vis = self.vis_processor(vis).unsqueeze(0)
+        else:
+            pth = os.path.join(self.root_vis, item['file'])
+            f_names = os.listdir(pth)
+            f_names.sort(key=lambda f_n: int(f_n.split('.')[0]))
+            pth = [os.path.join(pth, f_name) for f_name in f_names]
+            vis = [Image.open(p).convert('RGB') for p in pth]
+            vis = [self.vis_processor(v).unsqueeze(0) for v in vis]
+            vis = torch.cat(vis, dim=0)
+
+        # Get negative vis
+        neg_index = random.randint(0, len(self) - 1)
+        while neg_index == index:
+            neg_index = random.randint(0, len(self) - 1)
+
+        neg_item = self.mapping[self.ids[neg_index]]
+
+        if self.medium == 'cc3m':
+            neg_pth = os.path.join(self.root_vis, neg_item['file'])
+            neg_vis = open_img(neg_pth)
+            neg_vis = self.vis_processor(neg_vis).unsqueeze(0)
+        else:
+            neg_pth = os.path.join(self.root_vis, neg_item['file'])
+            neg_f_names = os.listdir(neg_pth)
+            neg_f_names.sort(key=lambda f_n: int(f_n.split('.')[0]))
+            neg_pth = [os.path.join(neg_pth, neg_f_name) for neg_f_name in neg_f_names]
+            neg_vis = [Image.open(p).convert('RGB') for p in neg_pth]
+            neg_vis = [self.vis_processor(v).unsqueeze(0) for v in neg_vis]
+            neg_vis = torch.cat(neg_vis, dim=0)
+
+        return vis, caption, neg_vis
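+# Sketch of how the (vis, caption, neg_vis) triplet above is typically consumed
+# for a vision-text matching objective (illustrative only; `model` and
+# `vtm_head` are hypothetical names, not part of this patch):
+#
+#     vis, caption, neg_vis = batch
+#     pos_logits = vtm_head(model(vis, caption))      # matched pair  -> label 1
+#     neg_logits = vtm_head(model(neg_vis, caption))  # mismatched    -> label 0
+#     loss = F.binary_cross_entropy_with_logits(
+#         torch.cat([pos_logits, neg_logits]),
+#         torch.cat([torch.ones_like(pos_logits), torch.zeros_like(neg_logits)]))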
+
+
+class VideoTextRetDataset(Dataset):
+    def __init__(self, config, vis_processor, text_processor, medium, split):
+        super(VideoTextRetDataset, self).__init__()
+
+        self.config = config
+        self.batch_size = config['batch_size_{}'.format(medium)]
+        self.medium = medium  # "webvid / cc3m / msrvtt"
+        self.vis_processor = vis_processor
+        self.text_processor = text_processor
+        self.split = split  # train / val / test
+
+        self.root_vis = config['root_raw_vis_{}_{}'.format(medium, split)]
+
+        anno_path = config['annotation_{}_{}'.format(medium, split)]
+        self.raw_anno_list = load_anno(anno_path)
+        self.text = []
+        self.vis = []
+        self.txt2vis = {}
+        self.vis2txt = {}
+        self.build_data()
+        self.anno_list = [dict(vis=v) for v in self.vis]
+
+    def __len__(self):
+        return len(self.anno_list)
+
+    def __getitem__(self, index):
+        pth = self.anno_list[index]['vis']
+        f_names = os.listdir(pth)
+        f_names.sort(key=lambda f_n: int(f_n.split('.')[0]))
+        pth = [os.path.join(pth, f_name) for f_name in f_names]
+        vis = [Image.open(p).convert('RGB') for p in pth]
+        vis = [self.vis_processor(v) for v in vis]
+        # vis = [transforms.PILToTensor()(v).unsqueeze(0) for v in vis]
+        vis = torch.cat(vis, dim=0)
+        # vis = self.trans(vis)
+
+        return vis, index
+
+    def build_data(self):
+        """each image may have multiple ground_truth text, e.g., COCO and Flickr30K"""
+        txt_id = 0
+        for vis_id, ann in enumerate(self.raw_anno_list):
+            self.vis.append(ann["vis"])
+            self.vis2txt[vis_id] = []
+            _captions = ann["caption"] \
+                if isinstance(ann["caption"], list) else [ann["caption"], ]
+            for i, caption in enumerate(_captions):
+                self.text.append(self.text_processor(caption))
+                self.vis2txt[vis_id].append(txt_id)
+                self.txt2vis[txt_id] = vis_id
+                txt_id += 1
+
+
+def load_datasets(config, vis_processor, text_processor, split):
+    if config['stage'] == 'stage_1':
+        if split != 'test':
+            cc3m_dataset = CapDataset(config, 'cc3m', vis_processor, text_processor, split)
+            webvid_dataset = CapDataset(config, 'webvid', vis_processor, text_processor, split)
+            datasets = {
+                'cc3m': cc3m_dataset,
+                'webvid': webvid_dataset
+            }
+        else:  # Test with msrvtt_1k --> video retrieval
+            msrvtt_dataset = VideoTextRetDataset(config, vis_processor, text_processor, 'msrvtt', split)
+            datasets = {
+                'msrvtt': msrvtt_dataset
+            }
+    return datasets
\ No newline at end of file
diff --git a/datasets/utils.py b/datasets/utils.py
new file mode 100644
index 0000000..0ef9df2
--- /dev/null
+++ b/datasets/utils.py
@@ -0,0 +1,83 @@
+import os
+import re
+import json
+from tqdm import trange
+from utils.dist import is_main_process
+from torch.utils.data import Dataset, ConcatDataset
+from PIL import Image
+import numpy as np
+
+def open_img(img_pth):
+    try:
+        img = Image.open(img_pth).convert('RGB')
+        return img
+    except Exception:
+        # fall back to a random RGB image (uint8 so PIL can construct it)
+        img = np.random.randint(0, high=256, size=(224, 224, 3), dtype=np.uint8)
+        img = Image.fromarray(img, 'RGB')
+        return img
+
+
+def pre_text(text, max_l=None):
+    text = re.sub(r"(['!?\"()*#:;~])", '', text.lower())
+    text = text.replace('-', ' ').replace('/', ' ').replace('<person>', 'person')
+
+    text = re.sub(r"\s{2,}", ' ', text)
+    text = text.rstrip('\n').strip(' ')
+
+    if max_l:  # truncate
+        words = text.split(' ')
+        if len(words) > max_l:
+            text = ' '.join(words[:max_l])
+    return text
+
+
+def get_datasets_media(dataloaders):
+    media = {}
+    for dataloader in dataloaders:
+        if isinstance(dataloader.dataset, ConcatDataset):
+            media[dataloader.dataset.datasets[0].medium] = dataloader
+        else:
+            media[dataloader.dataset.medium] = dataloader
+
+    return media
+
+def type_transform_helper(x):
+    return x.float().div(255.0)
+
+def load_anno(ann_file_list):
"""[summary] + + Args: + ann_file_list (List[List[str, str]] or List[str, str]): + the latter will be automatically converted to the former. + Each sublist contains [anno_path, image_root], (or [anno_path, video_root, 'video']) + which specifies the data type, video or image + + Returns: + List(dict): each dict is { + image: str or List[str], # image_path, + caption: str or List[str] # caption text string + } + """ + if isinstance(ann_file_list[0], str): + ann_file_list = [ann_file_list] + + ann = [] + for d in ann_file_list: + data_root = d[1] + fp = d[0] + is_video = len(d) == 3 and d[2] == "video" + cur_ann = json.load(open(fp, "r")) + iterator = trange(len(cur_ann), desc=f"Loading {fp}") \ + if is_main_process() else range(len(cur_ann)) + for idx in iterator: + key = "video" if is_video else "image" + video_id = cur_ann[idx][key][5:].split('.')[0] + # unified to have the same key for data path + # if isinstance(cur_ann[idx][key], str): + cur_ann[idx]["vis"] = os.path.join(data_root, video_id) + # else: # list + # cur_ann[idx]["vis"] = [os.path.join(data_root, e) for e in cur_ann[idx][key]] + ann += cur_ann + return ann \ No newline at end of file diff --git a/datasets/video_utils.py b/datasets/video_utils.py new file mode 100644 index 0000000..8e5be71 --- /dev/null +++ b/datasets/video_utils.py @@ -0,0 +1,97 @@ +""" +Modified from https://github.com/m-bain/frozen-in-time/blob/22a91d78405ec6032fdf521ae1ff5573358e632f/base/base_dataset.py +""" +import random +import decord +from PIL import Image +import numpy as np +import math +decord.bridge.set_bridge("torch") + + +def pts_to_secs(pts: int, time_base: float, start_pts: int) -> float: + """ + Converts a present time with the given time base and start_pts offset to seconds. + + Returns: + time_in_seconds (float): The corresponding time in seconds. + + https://github.com/facebookresearch/pytorchvideo/blob/main/pytorchvideo/data/utils.py#L54-L64 + """ + if pts == math.inf: + return math.inf + + return int(pts - start_pts) * time_base + + +def get_pyav_video_duration(video_reader): + video_stream = video_reader.streams.video[0] + video_duration = pts_to_secs( + video_stream.duration, + video_stream.time_base, + video_stream.start_time + ) + return float(video_duration) + + +def get_frame_indices_by_fps(): + pass + + +def get_frame_indices(num_frames, vlen, sample='rand', fix_start=None, input_fps=1, max_num_frames=-1): + if sample in ["rand", "middle"]: + acc_samples = min(num_frames, vlen) + # split the video into `acc_samples` intervals, and sample from each interval. 
+ intervals = np.linspace(start=0, stop=vlen, num=acc_samples + 1).astype(int) + ranges = [] + for idx, interv in enumerate(intervals[:-1]): + ranges.append((interv, intervals[idx + 1] - 1)) + if sample == 'rand': + try: + frame_indices = [random.choice(range(x[0], x[1])) for x in ranges] + except: + frame_indices = np.random.permutation(vlen)[:acc_samples] + frame_indices.sort() + frame_indices = list(frame_indices) + elif fix_start is not None: + frame_indices = [x[0] + fix_start for x in ranges] + elif sample == 'middle': + frame_indices = [(x[0] + x[1]) // 2 for x in ranges] + else: + raise NotImplementedError + + if len(frame_indices) < num_frames: # padded with last frame + padded_frame_indices = [frame_indices[-1]] * num_frames + padded_frame_indices[:len(frame_indices)] = frame_indices + frame_indices = padded_frame_indices + elif "fps" in sample: # fps0.5, sequentially sample frames at 0.5 fps + output_fps = float(sample[3:]) + duration = float(vlen) / input_fps + delta = 1 / output_fps # gap between frames, this is also the clip length each frame represents + frame_seconds = np.arange(0 + delta / 2, duration + delta / 2, delta) + frame_indices = np.around(frame_seconds * input_fps).astype(int) + frame_indices = [e for e in frame_indices if e < vlen] + if max_num_frames > 0 and len(frame_indices) > max_num_frames: + frame_indices = frame_indices[:max_num_frames] + # frame_indices = np.linspace(0 + delta / 2, duration + delta / 2, endpoint=False, num=max_num_frames) + else: + raise ValueError + return frame_indices + + +def read_frames_decord(video_path, num_frames, sample='rand', fix_start=None, max_num_frames=-1): + video_reader = decord.VideoReader(video_path, num_threads=1) + vlen = len(video_reader) + fps = video_reader.get_avg_fps() + duration = vlen / float(fps) + frame_indices = get_frame_indices( + num_frames, vlen, sample=sample, fix_start=fix_start, + input_fps=fps, max_num_frames=max_num_frames + ) + frames = video_reader.get_batch(frame_indices) # (T, H, W, C), torch.uint8 + frames = frames.permute(0, 3, 1, 2) # (T, C, H, W), torch.uint8 + frames = frames.split(1, dim=0) + + frames = [Image.fromarray(f.squeeze().numpy(), mode='RGB') for f in frames] + # frames = frames.numpy() # convert to numpy + return frames, frame_indices, duration diff --git a/datasets/visdial_dataset.py b/datasets/visdial_dataset.py new file mode 100644 index 0000000..1382270 --- /dev/null +++ b/datasets/visdial_dataset.py @@ -0,0 +1,183 @@ +# coding: utf-8 +# author: noctli +import json +import os +import pickle +import logging +from tqdm import tqdm +import numpy as np +import torch +import torch.utils.data +from PIL import Image +from torch.utils.data import Dataset +from itertools import chain +from torchvision import transforms +from .utils import type_transform_helper +from .utils import open_img + +def tokenize(text, tokenizer, return_tensor=False): + tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text)) + if return_tensor: + return torch.tensor(tokenized_text).long() + return tokenized_text + + +def get_dataset(config, split): + + dialog_pth = config['anno_visdial_{}'.format(split)] + dialog_data = json.load(open(dialog_pth, 'r'))['data'] + + all_answers = dialog_data['answers'] + all_questions = dialog_data['questions'] + dialog_list = [] + n_history = config['num_hist_turns_visdial'] + vid_set = set() + undisclosed_only = False + + pbar = tqdm(dialog_data['dialogs']) + pbar.set_description('[INFO] Loading VisDial - {}'.format(split)) + for dialog in pbar: + caption 
= dialog['caption'] + ' .' + questions = [all_questions[d['question']] + ' ?' for d in dialog['dialog']] + answers = [all_answers[d['answer']] + ' .' for d in dialog['dialog']] + + # answer_opts = [[all_answers[key] for key in d['answer_options']] for d in dialog['dialog']] + # if 'test' in config['anno_visdial_{}'.format(split)]: + # gt_indices = [-1 for _ in range(len(questions))] + # else: + # gt_indices = [d['gt_index'] for d in dialog['dialog']] + + vid = dialog['image_id'] + vid_set.add(vid) + if undisclosed_only: + it = range(len(questions) - 1, len(questions)) + else: + it = range(len(questions)) + + qalist=[] + history = [] + if undisclosed_only: + for n in range(len(questions)-1): + qalist.append(questions[n]) + qalist.append(answers[n]) + history=qalist[max(-len(qalist),-n_history*2):] + + for n in it: + if undisclosed_only: + assert dialog['dialog'][n]['answer'] == '__UNDISCLOSED__' + question = questions[n] + answer = answers[n] + # answer_opt = answer_opts[n] + # gt_index = gt_indices[n] + history.append(question) + # if n_history == 0: + # item = {'vid': vid, 'history': [question], 'answer': answer, 'caption': caption, 'round': n+1, 'answer_opts': answer_opt, 'gt_index': gt_index} + # else: + # item = {'vid': vid, 'history': history, 'answer': answer, 'caption': caption, 'round': n+1, 'answer_opts': answer_opt, 'gt_index': gt_index} + + if n_history == 0: + item = {'vid': vid, 'history': [question], 'answer': answer, 'caption': caption, 'round': n+1} + else: + item = {'vid': vid, 'history': history, 'answer': answer, 'caption': caption, 'round': n+1} + + + + dialog_list.append(item) + qalist.append(question) + qalist.append(answer) + history=qalist[max(-len(qalist),-n_history*2):] + + return dialog_list + + +class VisDial(Dataset): + def __init__(self, config, medium, vis_processor, text_processor, split + # tokenizer, features=None, drop_rate=0.0, train=True + ): + self.config = config + self.medium = medium + self.split = split + self.vis_processor = vis_processor + self.text_processor = text_processor + self.batch_size = config['batch_size_test_{}'.format(medium)] if split == 'test' else config['batch_size_{}'.format(medium)] + self.root_vis = config['root_raw_vis_{}_{}'.format(medium, split)] + + self.dialogs = get_dataset(config, split) + + if split == 'test': + self.dialogs = self.dialogs[config['start_idx_gen']: config['end_idx_gen']] + + num_samples = config['num_samples_{}'.format(self.medium)] + if num_samples > 0: + self.dialogs = self.dialogs[:num_samples] + + def __len__(self): + return len(self.dialogs) + + def load_img(self, vid_id): + file_pth = os.path.join(self.root_vis, f'{vid_id}.jpg') + vis = open_img(file_pth) + vis = self.vis_processor(vis).unsqueeze(0) + return vis + + def __getitem__(self, index): + dialog = self.dialogs[index] + + vid_id = dialog['vid'] + caption = dialog['caption'] + history = dialog['history'] + answer = dialog['answer'] + d_round = dialog['round'] + + caption = self.text_processor(caption) + history = [self.text_processor(h) for h in history] + answer = self.text_processor(answer, remove_period=True) + + + # if self.split == 'test': + # answer_opts = dialog['answer_opts'] + # answer_opts = [self.text_processor(a) for a in answer_opts] + + # gt_index = dialog['gt_index'] + # dialog_round = dialog['round'] + + # dense_key = str(vid_id) + '_' + str(dialog_round) + # gt_relevance = self.dense_annos.get(dense_key, -1) + # # eval_data = (answer_opts, gt_index, gt_relevance) + + + if self.config.embed_from_llm: + if 
self.config.llm_family in ['llama', 'mistral']:
+                cls_tok = ''
+                sep_tok = ' '
+                bos_tok = ''
+                eos_tok = ''
+            else:
+                cls_tok = ''
+                sep_tok = ''
+                bos_tok = ''
+                eos_tok = ''
+        else:
+            cls_tok = '[CLS]'
+            sep_tok = '[SEP]'
+            bos_tok = '[SEP]'
+            eos_tok = '[SEP]'
+
+        caption = cls_tok + caption + sep_tok
+        history = sep_tok.join(history)
+        history = history + sep_tok
+
+        # load the video frames
+        vis = self.load_img(vid_id)
+
+        # if self.split == 'test':
+        #     return vis, caption, history, answer, vid_id, answer_opts, gt_relevance, gt_index
+
+        # else:
+        return vis, caption, history, answer, vid_id, d_round
+
+
+def load_visdial_dataset(config, vis_processor, text_processor, split):
+    dataset = VisDial(config, 'visdial', vis_processor, text_processor, split)
+    return dataset
diff --git a/emergency/item.pkl b/emergency/item.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..9289657287c579486557c0e18bf217ec35bc90be
GIT binary patch
literal 599
[unreadable binary payload omitted; the diffs for eval_visdial.py and the start of eval_visdial_sentence_embeddings.py, including its imports, scores_to_ranks, and SparseGTMetrics.observe, are lost in the source up to SparseGTMetrics.retrieve]
+    def retrieve(self, reset: bool = True):
+        if len(self._rank_list) > 0:
+            # convert to numpy array for easy calculation.
+            __rank_list = torch.tensor(self._rank_list).float()
+            metrics = {
+                "r@1": torch.mean((__rank_list <= 1).float()).item(),
+                "r@5": torch.mean((__rank_list <= 5).float()).item(),
+                "r@10": torch.mean((__rank_list <= 10).float()).item(),
+                "mean": torch.mean(__rank_list).item(),
+                "mrr": torch.mean(__rank_list.reciprocal()).item(),
+            }
+        else:
+            metrics = {}
+
+        if reset:
+            self.reset()
+        return metrics
+
+    def reset(self):
+        self._rank_list = []
+
+
+class NDCG(object):
+    def __init__(self):
+        self._ndcg_numerator = 0.0
+        self._ndcg_denominator = 0.0
+
+    def observe(
+        self, predicted_scores: torch.Tensor, target_relevance: torch.Tensor
+    ):
+        """
+        Observe model output scores and target ground truth relevance and
+        accumulate NDCG metric.
+
+        Parameters
+        ----------
+        predicted_scores: torch.Tensor
+            A tensor of shape (batch_size, num_options), because dense
+            annotations are available for 1 randomly picked round out of 10.
+        target_relevance: torch.Tensor
+            A tensor of the same shape as predicted_scores, indicating ground
+            truth relevance of each answer option for a particular round.
+        """
+        predicted_scores = predicted_scores.detach()
+
+        # shape: (batch_size, 1, num_options)
+        predicted_scores = predicted_scores.unsqueeze(1)
+        predicted_ranks = scores_to_ranks(predicted_scores)
+
+        # shape: (batch_size, num_options)
+        predicted_ranks = predicted_ranks.squeeze(1)
+        batch_size, num_options = predicted_ranks.size()
+
+        k = torch.sum(target_relevance != 0, dim=-1)
+
+        # shape: (batch_size, num_options)
+        _, rankings = torch.sort(predicted_ranks, dim=-1)
+        # Sort relevance in descending order so highest relevance gets top rank.
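+        # A worked illustration of the ratio computed below, with hypothetical
+        # numbers (not from the original code): suppose one round has
+        # target_relevance = [0, 1, 0, 2] over four options, so k = 2, and the
+        # predicted ranks are [2, 1, 4, 3]. Sorting the ranks ascending gives
+        # rankings = [1, 0, 3, 2], whose top-k entries pick relevances [1, 0]:
+        #     dcg      = 1/log2(2) + 0/log2(3)  = 1.0
+        # The ideal ordering puts relevances [2, 1] first:
+        #     best_dcg = 2/log2(2) + 1/log2(3) ~= 2.63
+        # so this round contributes ndcg = 1.0 / 2.63 ~= 0.38.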
+ _, best_rankings = torch.sort( + target_relevance, dim=-1, descending=True + ) + + # shape: (batch_size, ) + batch_ndcg = [] + for batch_index in range(batch_size): + + num_relevant = k[batch_index] + dcg = self._dcg( + rankings[batch_index][:num_relevant], + target_relevance[batch_index], + ) + best_dcg = self._dcg( + best_rankings[batch_index][:num_relevant], + target_relevance[batch_index], + ) + batch_ndcg.append(dcg / best_dcg) + + self._ndcg_denominator += batch_size + self._ndcg_numerator += sum(batch_ndcg) + + def _dcg(self, rankings: torch.Tensor, relevance: torch.Tensor): + sorted_relevance = relevance[rankings].cpu().float() + discounts = torch.log2(torch.arange(len(rankings)).float() + 2) + return torch.sum(sorted_relevance / discounts, dim=-1) + + def retrieve(self, reset: bool = True): + if self._ndcg_denominator > 0: + metrics = { + "ndcg": float(self._ndcg_numerator / self._ndcg_denominator) + } + else: + metrics = {} + + if reset: + self.reset() + return metrics + + def reset(self): + self._ndcg_numerator = 0.0 + self._ndcg_denominator = 0.0 + + +annos_path = '/pfss/mlde/workspaces/mlde_wsp_Rohrbach/data/annotations/visdial_v1.0/visdial_1.0_val.json' +with open(annos_path, 'r') as f: + data = json.load(f)['data'] + +dense_annos_path = '/pfss/mlde/workspaces/mlde_wsp_Rohrbach/data/annotations/visdial_v1.0/visdial_1.0_val_dense_annotations.json' +with open(dense_annos_path, 'r') as f: + dense_data = json.load(f) + +dense_data = {str(d['image_id']) + '_' + str(d['round_id']): d['gt_relevance'] for d in dense_data} + +results_path = '/pfss/mlde/workspaces/mlde_wsp_Rohrbach/users/ma35vahy/V2Dial_new_v2/output/visdial_before_supplementary/zeroshot_visdial_after_avsd_4_frames_3_rounds_ft_fp16_googleflant5large_results_dstc10_beam_depth_4_lenPen_0.3.json' +with open(results_path, 'r') as f: + results = json.load(f) + +all_answers = data['answers'] +all_questions = data['questions'] + + +dialogs = data['dialogs'] + +dialogs_dict = {} + +for dialog in dialogs: + image_id = dialog['image_id'] + for i, turn in enumerate(dialog['dialog']): + answer_opts = [all_answers[a] for a in turn['answer_options']] + dialogs_dict[str(image_id) + '_' + str(i+1)] = { + 'answer_opts': answer_opts, + 'gt_index': turn['gt_index'] + } + # print('bla') + +sparse_metrics = SparseGTMetrics() +ndcg = NDCG() + +# 1. Load a pretrained CrossEncoder model +model = CrossEncoder("cross-encoder/stsb-roberta-large") + +for i, (res_key, res) in enumerate(results.items()): + print('[INFO] {} / {}'.format(i+1, len(results))) + answer_opts = dialogs_dict[res_key]['answer_opts'] + gt_index = torch.tensor(dialogs_dict[res_key]['gt_index']) + gt_answer = answer_opts[gt_index] + sentence_combinations = [[res, opt] for opt in answer_opts] + scores = model.predict(sentence_combinations) + scores = torch.from_numpy(scores).unsqueeze(0).unsqueeze(0) + # scores = torch.tensor([ratio(res, answer_opt) for answer_opt in answer_opts]).unsqueeze(0).unsqueeze(0) + # scores = model.rank(res, answer_opts) + ranked_idx = scores_to_ranks(scores).squeeze().tolist() + new_order = np.argsort(ranked_idx) + # ranked_answers = [answer_opts[idx] for idx in new_order] + best_pick = answer_opts[new_order[0]] + sparse_metrics.observe(scores, gt_index) + if res_key in dense_data: + gt_relevance = torch.tensor(dense_data[res_key]).unsqueeze(0) + ndcg.observe(scores.squeeze(0), gt_relevance) + + # print('bla') +print(sparse_metrics.retrieve()) +print(ndcg.retrieve()) + +# We want to compute the similarity between the query sentence... 
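+# (Illustration with hypothetical numbers, assuming the usual VisDial
+# scores_to_ranks convention that rank 1 marks the highest score: in the loop
+# above, scores = [[[0.2, 0.9, 0.5]]] over three answer options yields ranks
+# [3, 1, 2]; np.argsort([3, 1, 2]) -> [1, 2, 0], so new_order[0] = 1 and
+# best_pick = answer_opts[1], the option the cross-encoder scored as most
+# similar to the generated answer.)
+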
+# query = "A man is eating pasta." + +# # ... and all sentences in the corpus +# corpus = [ +# "A man is eating food.", +# "A man is eating a piece of bread.", +# "The girl is carrying a baby.", +# "A man is riding a horse.", +# "A woman is playing violin.", +# "Two men pushed carts through the woods.", +# "A man is riding a white horse on an enclosed ground.", +# "A monkey is playing drums.", +# "A cheetah is running behind its prey.", +# ] + +# # 2. We rank all sentences in the corpus for the query +# ranks = model.rank(query, corpus) + +# # Print the scores +# print("Query: ", query) +# for rank in ranks: +# print(f"{rank['score']:.2f}\t{corpus[rank['corpus_id']]}") +# """ +# Query: A man is eating pasta. +# 0.67 A man is eating food. +# 0.34 A man is eating a piece of bread. +# 0.08 A man is riding a horse. +# 0.07 A man is riding a white horse on an enclosed ground. +# 0.01 The girl is carrying a baby. +# 0.01 Two men pushed carts through the woods. +# 0.01 A monkey is playing drums. +# 0.01 A woman is playing violin. +# 0.01 A cheetah is running behind its prey. +# """ + +# # 3. Alternatively, you can also manually compute the score between two sentences +# import numpy as np + +# sentence_combinations = [[query, sentence] for sentence in corpus] +# scores = model.predict(sentence_combinations) + +# # Sort the scores in decreasing order to get the corpus indices +# ranked_indices = np.argsort(scores)[::-1] +# print("Scores:", scores) +# print("Indices:", ranked_indices) +# """ +# Scores: [0.6732372, 0.34102544, 0.00542465, 0.07569341, 0.00525378, 0.00536814, 0.06676237, 0.00534825, 0.00516717] +# Indices: [0 1 3 6 2 5 7 4 8] +# """ \ No newline at end of file diff --git a/generate_parallel_avsd.sh b/generate_parallel_avsd.sh new file mode 100755 index 0000000..2c2b0be --- /dev/null +++ b/generate_parallel_avsd.sh @@ -0,0 +1,71 @@ +# export MODEL=$1 +# export TAG=$2 +# export MODE=$3 +# export EVAL_DIR=$4 +# export MEDIUM=$5 +# export DSTC=$6 + +export MODEL='v2dial/stage_3' +export TAG='finetuned_no_experts_avsd' +export MODE='generate' +export EVAL_DIR='/pfss/mlde/workspaces/mlde_wsp_Rohrbach/users/ma35vahy/V2Dial_new_v2/logs/stage_3/v2dial-google_flan-t5-large-finetune_without_experts_avsd' +export DSTC=7 + +# >>> conda initialize >>> +# !! Contents within this block are managed by 'conda init' !! +__conda_setup="$('/opt/anaconda3/bin/conda' 'shell.bash' 'hook' 2> /dev/null)" +if [ $? -eq 0 ]; then + eval "$__conda_setup" +else + if [ -f "/opt/anaconda3/etc/profile.d/conda.sh" ]; then + . 
"/opt/anaconda3/etc/profile.d/conda.sh" + else + export PATH="/opt/anaconda3/bin:$PATH" + fi +fi +unset __conda_setup +# <<< conda initialize <<< + +conda activate v2dial + +if [ $DSTC -eq 10 ]; then + export CUDA_VISIBLE_DEVICES=0; python main_stage_3.py --start_idx_gen 0000 --end_idx_gen 0112 --gen_subset_num 01 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + export CUDA_VISIBLE_DEVICES=1; python main_stage_3.py --start_idx_gen 0112 --end_idx_gen 0224 --gen_subset_num 02 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + export CUDA_VISIBLE_DEVICES=2; python main_stage_3.py --start_idx_gen 0224 --end_idx_gen 0336 --gen_subset_num 03 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + export CUDA_VISIBLE_DEVICES=3; python main_stage_3.py --start_idx_gen 0336 --end_idx_gen 0448 --gen_subset_num 04 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + export CUDA_VISIBLE_DEVICES=4; python main_stage_3.py --start_idx_gen 0448 --end_idx_gen 0560 --gen_subset_num 05 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + export CUDA_VISIBLE_DEVICES=5; python main_stage_3.py --start_idx_gen 0560 --end_idx_gen 0672 --gen_subset_num 06 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + export CUDA_VISIBLE_DEVICES=6; python main_stage_3.py --start_idx_gen 0672 --end_idx_gen 0784 --gen_subset_num 07 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + export CUDA_VISIBLE_DEVICES=7; python main_stage_3.py --start_idx_gen 0784 --end_idx_gen 0896 --gen_subset_num 08 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + export CUDA_VISIBLE_DEVICES=0; python main_stage_3.py --start_idx_gen 0896 --end_idx_gen 1008 --gen_subset_num 09 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + export CUDA_VISIBLE_DEVICES=1; python main_stage_3.py --start_idx_gen 1008 --end_idx_gen 1120 --gen_subset_num 10 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + export CUDA_VISIBLE_DEVICES=2; python main_stage_3.py --start_idx_gen 1120 --end_idx_gen 1232 --gen_subset_num 11 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + export CUDA_VISIBLE_DEVICES=3; python main_stage_3.py --start_idx_gen 1232 --end_idx_gen 1344 --gen_subset_num 12 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + export CUDA_VISIBLE_DEVICES=4; python main_stage_3.py --start_idx_gen 1344 --end_idx_gen 1456 --gen_subset_num 13 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + export CUDA_VISIBLE_DEVICES=5; python main_stage_3.py --start_idx_gen 1456 --end_idx_gen 1568 --gen_subset_num 14 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + export CUDA_VISIBLE_DEVICES=6; python main_stage_3.py --start_idx_gen 1568 --end_idx_gen 1680 --gen_subset_num 15 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + export CUDA_VISIBLE_DEVICES=7; python main_stage_3.py --start_idx_gen 1680 --end_idx_gen 1804 --gen_subset_num 16 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +else + export CUDA_VISIBLE_DEVICES=0; python main_stage_3.py --start_idx_gen 0000 --end_idx_gen 0107 --gen_subset_num 01 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + export CUDA_VISIBLE_DEVICES=1; python main_stage_3.py --start_idx_gen 0107 --end_idx_gen 0214 --gen_subset_num 02 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + export CUDA_VISIBLE_DEVICES=2; python main_stage_3.py --start_idx_gen 0214 --end_idx_gen 0321 
--gen_subset_num 03 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + export CUDA_VISIBLE_DEVICES=3; python main_stage_3.py --start_idx_gen 0321 --end_idx_gen 0428 --gen_subset_num 04 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + export CUDA_VISIBLE_DEVICES=4; python main_stage_3.py --start_idx_gen 0428 --end_idx_gen 0535 --gen_subset_num 05 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + export CUDA_VISIBLE_DEVICES=5; python main_stage_3.py --start_idx_gen 0535 --end_idx_gen 0642 --gen_subset_num 06 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + export CUDA_VISIBLE_DEVICES=6; python main_stage_3.py --start_idx_gen 0642 --end_idx_gen 0749 --gen_subset_num 07 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + export CUDA_VISIBLE_DEVICES=7; python main_stage_3.py --start_idx_gen 0749 --end_idx_gen 0856 --gen_subset_num 08 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + export CUDA_VISIBLE_DEVICES=0; python main_stage_3.py --start_idx_gen 0856 --end_idx_gen 0963 --gen_subset_num 09 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + export CUDA_VISIBLE_DEVICES=1; python main_stage_3.py --start_idx_gen 0963 --end_idx_gen 1070 --gen_subset_num 10 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + export CUDA_VISIBLE_DEVICES=2; python main_stage_3.py --start_idx_gen 1070 --end_idx_gen 1177 --gen_subset_num 11 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + export CUDA_VISIBLE_DEVICES=3; python main_stage_3.py --start_idx_gen 1177 --end_idx_gen 1284 --gen_subset_num 12 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + export CUDA_VISIBLE_DEVICES=4; python main_stage_3.py --start_idx_gen 1284 --end_idx_gen 1391 --gen_subset_num 13 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + export CUDA_VISIBLE_DEVICES=5; python main_stage_3.py --start_idx_gen 1391 --end_idx_gen 1498 --gen_subset_num 14 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + export CUDA_VISIBLE_DEVICES=6; python main_stage_3.py --start_idx_gen 1498 --end_idx_gen 1605 --gen_subset_num 15 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + export CUDA_VISIBLE_DEVICES=7; python main_stage_3.py --start_idx_gen 1605 --end_idx_gen 1710 --gen_subset_num 16 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +fi + +# export CUDA_VISIBLE_DEVICES=6; python main_stage_3.py --start_idx_gen 00 --end_idx_gen 10 --gen_subset_num 15 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +# export CUDA_VISIBLE_DEVICES=7; python main_stage_3.py --start_idx_gen 10 --end_idx_gen 20 --gen_subset_num 16 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + +wait +python merge_pred_avsd.py --dstc $DSTC diff --git a/generate_parallel_nextqa.sh b/generate_parallel_nextqa.sh new file mode 100755 index 0000000..d90e318 --- /dev/null +++ b/generate_parallel_nextqa.sh @@ -0,0 +1,51 @@ +# export MODEL=$1 +# export TAG=$2 +# export MODE=$3 +# export EVAL_DIR=$4 + +export MODEL='v2dial/stage_3' +export TAG='nextqa_with_test_captions' +export MODE='generate' +export EVAL_DIR='/pfss/mlde/workspaces/mlde_wsp_Rohrbach/users/ma35vahy/V2Dial_new_v2/logs/stage_3/v2dial-google_flan-t5-large-from_stage1_only_nextqa_after_avsd_4_frames_3_rounds_ft_fp16' + +# >>> conda initialize >>> +# !! Contents within this block are managed by 'conda init' !! 
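+# The hard-coded --start_idx_gen/--end_idx_gen pairs below shard the test
+# split (indices 0 to 9178 here) into 16 chunks, two per GPU. A sketch of how
+# such boundaries could be derived -- N_EXAMPLES, N_JOBS and STEP are
+# illustrative names, not variables this script uses:
+#   N_EXAMPLES=9178; N_JOBS=16
+#   STEP=$(( (N_EXAMPLES + N_JOBS - 1) / N_JOBS ))   # ceiling division -> 574
+#   for i in $(seq 0 $((N_JOBS - 1))); do
+#       START=$(( i * STEP )); END=$(( (i + 1) * STEP ))
+#       [ "$END" -gt "$N_EXAMPLES" ] && END=$N_EXAMPLES
+#       echo "shard $(( i + 1 )): [$START, $END)"
+#   done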
+__conda_setup="$('/opt/anaconda3/bin/conda' 'shell.bash' 'hook' 2> /dev/null)" +if [ $? -eq 0 ]; then + eval "$__conda_setup" +else + if [ -f "/opt/anaconda3/etc/profile.d/conda.sh" ]; then + . "/opt/anaconda3/etc/profile.d/conda.sh" + else + export PATH="/opt/anaconda3/bin:$PATH" + fi +fi +unset __conda_setup +# <<< conda initialize <<< + +conda activate v2dial + +# export CUDA_VISIBLE_DEVICES=0; python main_stage_3.py --start_idx_gen 0000 --end_idx_gen 10 --gen_subset_num 01 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +# export CUDA_VISIBLE_DEVICES=0; python main_stage_3.py --start_idx_gen 10 --end_idx_gen 20 --gen_subset_num 02 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + + +export CUDA_VISIBLE_DEVICES=0; python main_stage_3.py --start_idx_gen 0000 --end_idx_gen 0573 --gen_subset_num 01 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=0; python main_stage_3.py --start_idx_gen 0573 --end_idx_gen 1146 --gen_subset_num 02 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=1; python main_stage_3.py --start_idx_gen 1146 --end_idx_gen 1719 --gen_subset_num 03 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=1; python main_stage_3.py --start_idx_gen 1719 --end_idx_gen 2292 --gen_subset_num 04 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=2; python main_stage_3.py --start_idx_gen 2292 --end_idx_gen 2865 --gen_subset_num 05 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=2; python main_stage_3.py --start_idx_gen 2865 --end_idx_gen 3438 --gen_subset_num 06 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=3; python main_stage_3.py --start_idx_gen 3438 --end_idx_gen 4011 --gen_subset_num 07 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=3; python main_stage_3.py --start_idx_gen 4011 --end_idx_gen 4584 --gen_subset_num 08 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & +export CUDA_VISIBLE_DEVICES=4; python main_stage_3.py --start_idx_gen 4584 --end_idx_gen 5157 --gen_subset_num 09 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=4; python main_stage_3.py --start_idx_gen 5157 --end_idx_gen 5730 --gen_subset_num 10 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=5; python main_stage_3.py --start_idx_gen 5730 --end_idx_gen 6303 --gen_subset_num 11 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=5; python main_stage_3.py --start_idx_gen 6303 --end_idx_gen 6876 --gen_subset_num 12 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=6; python main_stage_3.py --start_idx_gen 6876 --end_idx_gen 7449 --gen_subset_num 13 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=6; python main_stage_3.py --start_idx_gen 7449 --end_idx_gen 8022 --gen_subset_num 14 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=7; python main_stage_3.py --start_idx_gen 8022 --end_idx_gen 8495 --gen_subset_num 15 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=7; python main_stage_3.py --start_idx_gen 8495 --end_idx_gen 9178 --gen_subset_num 16 --model $MODEL --mode $MODE 
--eval_dir $EVAL_DIR --tag $TAG & \ + +wait + +python merge_pred_nextqa.py \ No newline at end of file diff --git a/generate_parallel_visdial.sh b/generate_parallel_visdial.sh new file mode 100755 index 0000000..e18e070 --- /dev/null +++ b/generate_parallel_visdial.sh @@ -0,0 +1,67 @@ +# export MODEL=$1 +# export TAG=$2 +# export MODE=$3 +# export EVAL_DIR=$4 +# export MEDIUM=$5 +# export DSTC=$6 + +export MODEL='v2dial/stage_3' +export TAG='finetuned_visdial_from_scratch' +export MODE='generate' +export EVAL_DIR='/pfss/mlde/workspaces/mlde_wsp_Rohrbach/users/ma35vahy/V2Dial_new_v2/logs/stage_3/v2dial-google_flan-t5-large-finetuned_visdial_from_scratch/' + +# >>> conda initialize >>> +# !! Contents within this block are managed by 'conda init' !! +__conda_setup="$('/opt/anaconda3/bin/conda' 'shell.bash' 'hook' 2> /dev/null)" +if [ $? -eq 0 ]; then + eval "$__conda_setup" +else + if [ -f "/opt/anaconda3/etc/profile.d/conda.sh" ]; then + . "/opt/anaconda3/etc/profile.d/conda.sh" + else + export PATH="/opt/anaconda3/bin:$PATH" + fi +fi +unset __conda_setup +# <<< conda initialize <<< + +conda activate v2dial +# export CUDA_VISIBLE_DEVICES=0; python main_stage_3.py --start_idx_gen 00000 --end_idx_gen 10 --gen_subset_num 01 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +# export CUDA_VISIBLE_DEVICES=1; python main_stage_3.py --start_idx_gen 00010 --end_idx_gen 20 --gen_subset_num 02 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + + +export CUDA_VISIBLE_DEVICES=0; python main_stage_3.py --start_idx_gen 00000 --end_idx_gen 00645 --gen_subset_num 01 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=1; python main_stage_3.py --start_idx_gen 00645 --end_idx_gen 01290 --gen_subset_num 02 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=2; python main_stage_3.py --start_idx_gen 01290 --end_idx_gen 01935 --gen_subset_num 03 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=3; python main_stage_3.py --start_idx_gen 01935 --end_idx_gen 02580 --gen_subset_num 04 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=4; python main_stage_3.py --start_idx_gen 02580 --end_idx_gen 03225 --gen_subset_num 05 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=5; python main_stage_3.py --start_idx_gen 03225 --end_idx_gen 03870 --gen_subset_num 06 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=6; python main_stage_3.py --start_idx_gen 03870 --end_idx_gen 04515 --gen_subset_num 07 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=7; python main_stage_3.py --start_idx_gen 04515 --end_idx_gen 05160 --gen_subset_num 08 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=0; python main_stage_3.py --start_idx_gen 05160 --end_idx_gen 05805 --gen_subset_num 09 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=1; python main_stage_3.py --start_idx_gen 05805 --end_idx_gen 06450 --gen_subset_num 10 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=2; python main_stage_3.py --start_idx_gen 06450 --end_idx_gen 07095 --gen_subset_num 11 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=3; python main_stage_3.py --start_idx_gen 07095 
--end_idx_gen 07740 --gen_subset_num 12 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=4; python main_stage_3.py --start_idx_gen 07740 --end_idx_gen 08385 --gen_subset_num 13 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=5; python main_stage_3.py --start_idx_gen 08385 --end_idx_gen 09030 --gen_subset_num 14 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=6; python main_stage_3.py --start_idx_gen 09030 --end_idx_gen 09675 --gen_subset_num 15 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=7; python main_stage_3.py --start_idx_gen 09675 --end_idx_gen 10320 --gen_subset_num 16 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=0; python main_stage_3.py --start_idx_gen 10320 --end_idx_gen 10965 --gen_subset_num 17 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=1; python main_stage_3.py --start_idx_gen 10965 --end_idx_gen 11610 --gen_subset_num 18 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=2; python main_stage_3.py --start_idx_gen 11610 --end_idx_gen 12255 --gen_subset_num 19 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=3; python main_stage_3.py --start_idx_gen 12255 --end_idx_gen 12900 --gen_subset_num 20 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=4; python main_stage_3.py --start_idx_gen 12900 --end_idx_gen 13545 --gen_subset_num 21 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=5; python main_stage_3.py --start_idx_gen 13545 --end_idx_gen 14190 --gen_subset_num 22 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=6; python main_stage_3.py --start_idx_gen 14190 --end_idx_gen 14835 --gen_subset_num 23 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=7; python main_stage_3.py --start_idx_gen 14835 --end_idx_gen 15480 --gen_subset_num 24 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=0; python main_stage_3.py --start_idx_gen 15480 --end_idx_gen 16125 --gen_subset_num 25 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=1; python main_stage_3.py --start_idx_gen 16125 --end_idx_gen 16770 --gen_subset_num 26 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=2; python main_stage_3.py --start_idx_gen 16770 --end_idx_gen 17415 --gen_subset_num 27 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=3; python main_stage_3.py --start_idx_gen 17415 --end_idx_gen 18060 --gen_subset_num 28 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=4; python main_stage_3.py --start_idx_gen 18060 --end_idx_gen 18705 --gen_subset_num 29 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=5; python main_stage_3.py --start_idx_gen 18705 --end_idx_gen 19350 --gen_subset_num 30 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export CUDA_VISIBLE_DEVICES=6; python main_stage_3.py --start_idx_gen 19350 --end_idx_gen 19995 --gen_subset_num 31 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ +export 
CUDA_VISIBLE_DEVICES=7; python main_stage_3.py --start_idx_gen 19995 --end_idx_gen 20640 --gen_subset_num 32 --model $MODEL --mode $MODE --eval_dir $EVAL_DIR --tag $TAG & \ + +wait +python eval_visdial.py diff --git a/main_stage_1.py b/main_stage_1.py new file mode 100644 index 0000000..292b236 --- /dev/null +++ b/main_stage_1.py @@ -0,0 +1,177 @@ + +import argparse + + +import torch + +import torch.multiprocessing as mp +import torch.nn as nn +import torch.distributed as dist +# from transformers import BartTokenizer +from torch.utils.data import ConcatDataset + +from utils.init import initialize_from_env +# from datasets.pretraining import load_datasets, VideoTextRetDataset +# from datasets.utils import get_datasets_media +from models.setup import setup_model, setup_data, setup_data_test +from tasks.pre_train import pre_train +# from tasks.ft_avsd import ft_avsd, generate +# from tasks.stage_2_3 import pretrain +# from tasks.stage_2 import train as train_stage_2 + +# torch.autograd.set_detect_anomaly(True) + +parser = argparse.ArgumentParser(description='Main script for v2dial') +parser.add_argument( + '--model', + type=str, + default='v2dial/stage_1', + help='model name to train or test') + +parser.add_argument( + '--mode', + type=str, + default='train', + help='train, generate or debug' + ) + +parser.add_argument( + '--eval_dir', + type=str, + default='/scratch/abdessaied/projects/V2Dial_TU/logs/stage_4/v2dial-flant5_large_bert_experts_4_only_gen_AVSD' +) + +parser.add_argument( + '--wandb_mode', + type=str, + default='online', + choices=['online', 'offline', 'disabled', 'run', 'dryrun'] +) + +parser.add_argument( + '--wandb_project', + type=str, + default='V2Dial' +) + +parser.add_argument( + '--tag', + type=str, + # default='V2dial-bart_large-Experts_from_scratch-gen-modalityLayers_4-without_residuals-AVSD', + # default='Q_base_bart_base_from_modality_experts_c3m_webvid2mToVisdialToAVSD_num_hist3_with_fc_embed', + # default='like_mst_mixer_Q_base_bart_large_from_modality_experts_c3m_webvid2mToavsd_12_frames_without_temp_fp16', + default='without_sep_spatial_temporal_experts', + # default='flant5_large_bert_experts_4_only_gen_AVSD_24epochs', + help="Tag to differentiate the models" +) + +parser.add_argument( + '--medium', + type=str, + default='avsd', + help="Medium of the test dataset" +) + +parser.add_argument( + '--start_idx_gen', + type=int, + default=0, + help="The start index for generation" +) + +parser.add_argument( + '--end_idx_gen', + type=int, + default=10, + help="The end index for generation" +) + +parser.add_argument( + '--gen_subset_num', + type=int, + default=1, + help="The index of the test split for generation" +) + +parser.add_argument('--ssh', action='store_true', + help='whether or not we are executing command via ssh. 
' + 'If set to True, we will not log.info anything to screen and only redirect them to log file') + + +def main(gpu, config, args): + + config['gpu'] = gpu + if config['distributed']: + dist.init_process_group( + backend='nccl', + world_size=config['num_gpus'], + rank=gpu + ) + torch.cuda.set_device(gpu) + + device = torch.device(f'cuda:{gpu}') + if config.use_cpu: + device = torch.device('cpu') + config['device'] = device + # model = V2Dial(config) + + # config['num_training_steps'] = num_step_per_epoch * config['epochs'] + # config['num_warmup_steps'] = num_step_per_epoch * config['warmup_epochs'] + if config['training']: + train_dataloaders, val_dataloaders = setup_data(config) + + ( + model, + model_without_ddp, + optimizer, + scheduler, + scaler, + start_epoch, + global_step, + webvid_step, + cc3m_step, + config + ) = setup_model(config, pretrain=True) + + pre_train( + model, + model_without_ddp, + train_dataloaders, + val_dataloaders, + optimizer, + global_step, + webvid_step, + cc3m_step, + scheduler, + scaler, + start_epoch, + config + ) + + if config['distributed']: + dist.destroy_process_group() + +if __name__ == '__main__': + args = parser.parse_args() + + # initialization + model, stage = args.model.split('/') + config = initialize_from_env(model, args.mode, stage, args.eval_dir, tag=args.tag) + config['wandb_enabled'] = args.wandb_mode == 'online' + config['training'] = args.mode == 'train' + config['generating'] = args.mode == 'generate' + config['debugging'] = args.mode == 'debug' + + config['wandb_mode'] = args.wandb_mode + config['medium'] = args.medium + config['start_idx_gen'] = args.start_idx_gen + config['end_idx_gen'] = args.end_idx_gen + + # config['wandb_project'] + # if config['accelerator'] == 'ddp': + if config['num_gpus'] > 1: + config['distributed'] = True + mp.spawn(main, nprocs=config['num_gpus'], args=(config, args)) + else: + config['distributed'] = False + main(0, config, args) diff --git a/main_stage_2.py b/main_stage_2.py new file mode 100644 index 0000000..6295416 --- /dev/null +++ b/main_stage_2.py @@ -0,0 +1,186 @@ + +import argparse + + +import torch + +import torch.multiprocessing as mp +import torch.nn as nn +import torch.distributed as dist +# from transformers import BartTokenizer +from torch.utils.data import ConcatDataset + +from utils.init import initialize_from_env +# from datasets.pretraining import load_datasets, VideoTextRetDataset +# from datasets.utils import get_datasets_media +from models.setup import setup_model, setup_data, setup_data_test +# from tasks.ft_avsd import ft_avsd, generate +from tasks.stage_2 import train as train_stage_2 + + +parser = argparse.ArgumentParser(description='Main script for v2dial') +parser.add_argument( + '--model', + type=str, + default='v2dial/stage_2', + help='model name to train or test') + +parser.add_argument( + '--mode', + type=str, + default='train', + help='train, generate or debug' + ) + +parser.add_argument( + '--eval_dir', + type=str, + default='/scratch/abdessaied/projects/V2Dial_TU/logs/stage_4/v2dial-flant5_large_bert_experts_4_only_gen_AVSD' +) + +parser.add_argument( + '--wandb_mode', + type=str, + default='online', + choices=['online', 'offline', 'disabled', 'run', 'dryrun'] +) + +parser.add_argument( + '--wandb_project', + type=str, + default='V2Dial' +) + +parser.add_argument( + '--tag', + type=str, + # default='V2dial-bart_large-Experts_from_scratch-gen-modalityLayers_4-without_residuals-AVSD', + # 
default='Q_base_bart_base_from_modality_experts_c3m_webvid2mToVisdialToAVSD_num_hist3_with_fc_embed', + # default='like_mst_mixer_Q_base_bart_large_from_modality_experts_c3m_webvid2mToavsd_12_frames_without_temp_fp16', + default='from_stage_1_only_gen_loss_frozen_llm', + # default='blub', + # default='flant5_large_bert_experts_4_only_gen_AVSD_24epochs', + help="Tag to differentiate the models" +) + +parser.add_argument( + '--medium', + type=str, + default='avsd', + help="Medium of the test dataset" +) + +parser.add_argument( + '--start_idx_gen', + type=int, + default=0, + help="The start index for generation" +) + +parser.add_argument( + '--end_idx_gen', + type=int, + default=10, + help="The end index for generation" +) + +parser.add_argument( + '--gen_subset_num', + type=int, + default=1, + help="The index of the test split for generation" +) + +parser.add_argument('--ssh', action='store_true', + help='whether or not we are executing command via ssh. ' + 'If set to True, we will not log.info anything to screen and only redirect them to log file') + + +def main(gpu, config, args): + + config['gpu'] = gpu + if config['distributed']: + dist.init_process_group( + backend='nccl', + world_size=config['num_gpus'], + rank=gpu + ) + torch.cuda.set_device(gpu) + + device = torch.device(f'cuda:{gpu}') + if config.use_cpu: + device = torch.device('cpu') + config['device'] = device + # model = V2Dial(config) + + # config['num_training_steps'] = num_step_per_epoch * config['epochs'] + # config['num_warmup_steps'] = num_step_per_epoch * config['warmup_epochs'] + if config['training']: + train_dataloaders, val_dataloaders = setup_data(config) + + ( + model, model_without_ddp, optimizer, scheduler, scaler, start_epoch, global_step, config + ) = setup_model(config) + + if config['training']: + train_stage_2( + model, + model_without_ddp, + train_dataloaders, + val_dataloaders, + optimizer, + global_step, + scheduler, + scaler, + start_epoch, + config + ) + + # if config['stage'] == 'stage_3': + # ( + # model, model_without_ddp, optimizer, scheduler, scaler, start_epoch, global_step, config + # ) = setup_model(config) + # if config['training']: + # ft_avsd( + # model, + # model_without_ddp, + # train_dataloaders, + # val_dataloaders, + # optimizer, + # global_step, + # scheduler, + # scaler, + # start_epoch, + # config + # ) + # elif config['generating']: + # test_dataloader = setup_data_test(config, args) + # generate(model, test_dataloader, args.tag, config, gen_subset_num=args.gen_subset_num) + + if config['distributed']: + dist.destroy_process_group() + +if __name__ == '__main__': + args = parser.parse_args() + + # initialization + model, stage = args.model.split('/') + config = initialize_from_env(model, args.mode, stage, args.eval_dir, tag=args.tag) + config['wandb_enabled'] = args.wandb_mode == 'online' + config['training'] = args.mode == 'train' + config['generating'] = args.mode == 'generate' + config['debugging'] = args.mode == 'debug' + + config['wandb_mode'] = args.wandb_mode + config['medium'] = args.medium + config['start_idx_gen'] = args.start_idx_gen + config['end_idx_gen'] = args.end_idx_gen + + # config['wandb_project'] + # if config['accelerator'] == 'ddp': + if config['num_gpus'] > 1: + config['distributed'] = True + mp.spawn(main, nprocs=config['num_gpus'], args=(config, args)) + else: + config['distributed'] = False + main(0, config, args) diff --git a/main_stage_3.py b/main_stage_3.py new file mode 100644 index 0000000..a63bcee --- /dev/null +++ b/main_stage_3.py @@ -0,0 +1,185 @@ + 
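+# Example sharded-generation invocation, mirroring generate_parallel_*.sh
+# (the index range and tag are placeholder values, not script defaults):
+#
+#   python main_stage_3.py --model v2dial/stage_3 --mode generate \
+#       --eval_dir <path/to/log_dir> --tag <run_tag> \
+#       --start_idx_gen 0 --end_idx_gen 645 --gen_subset_num 1
+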
+import argparse + + +import torch + +import torch.multiprocessing as mp +import torch.nn as nn +import torch.distributed as dist +# from transformers import BartTokenizer +from torch.utils.data import ConcatDataset + +from utils.init import initialize_from_env +# from datasets.pretraining import load_datasets, VideoTextRetDataset +# from datasets.utils import get_datasets_media +from models.setup import setup_model, setup_data, setup_data_test +# from tasks.ft_avsd import ft_avsd, generate +from tasks.stage_3 import ft_avsd, generate, generate_nextqa, generate_visdial + +parser = argparse.ArgumentParser(description='Main script for v2dial') +parser.add_argument( + '--model', + type=str, + default='v2dial/stage_3', + help='model name to train or test') + +parser.add_argument( + '--mode', + type=str, + default='generate', + help='train, generate or debug' + ) + +parser.add_argument( + '--eval_dir', + type=str, + default='/pfss/mlde/workspaces/mlde_wsp_Rohrbach/users/ma35vahy/V2Dial_new_v2/logs/stage_3/v2dial-google_flan-t5-large-finetune_without_stc_stm_only_visdial' +) + +parser.add_argument( + '--wandb_mode', + type=str, + default='online', + choices=['online', 'offline', 'disabled', 'run', 'dryrun'] +) + +parser.add_argument( + '--wandb_project', + type=str, + default='V2Dial' +) + +parser.add_argument( + '--tag', + type=str, + default="finetuned_visdial_without_stm_stc", + # default='V2dial-bart_large-Experts_from_scratch-gen-modalityLayers_4-without_residuals-AVSD', + # default='Q_base_bart_base_from_modality_experts_c3m_webvid2mToVisdialToAVSD_num_hist3_with_fc_embed', + # default='like_mst_mixer_Q_base_bart_large_from_modality_experts_c3m_webvid2mToavsd_12_frames_without_temp_fp16', + # default='from_stage1_after_avsd_only_visdial_4_frames_10_rounds_ft', + # default='from_scratch_visdial', + # default='no_moes_div_st_from_scratch_only_avsd_4_frames_3_rounds_ft_fp16', + # default='flant5_large_bert_experts_4_only_gen_AVSD_24epochs', + help="Tag to differentiate the models" +) + +# parser.add_argument( +# '--medium', +# type=str, +# default='avsd', +# help="Medium of the test dataset" +# ) + +parser.add_argument( + '--start_idx_gen', + type=int, + default=0, + help="The start index for generation" +) + +parser.add_argument( + '--end_idx_gen', + type=int, + default=10, + help="The end index for generation" +) + +parser.add_argument( + '--gen_subset_num', + type=int, + default=1, + help="The index of the test split for generation" +) + +parser.add_argument('--ssh', action='store_true', + help='whether or not we are executing command via ssh. 
' + 'If set to True, we will not log.info anything to screen and only redirect them to log file') + + +def main(gpu, config, args): + + config['gpu'] = gpu + if config['distributed']: + dist.init_process_group( + backend='nccl', + world_size=config['num_gpus'], + rank=gpu + ) + torch.cuda.set_device(gpu) + + device = torch.device(f'cuda:{gpu}') + if config.use_cpu: + device = torch.device('cpu') + config['device'] = device + # model = V2Dial(config) + + # config['num_training_steps'] = num_step_per_epoch * config['epochs'] + # config['num_warmup_steps'] = num_step_per_epoch * config['warmup_epochs'] + if config['training']: + train_dataloaders, val_dataloaders = setup_data(config) + + ( + model, model_without_ddp, optimizer, scheduler, scaler, start_epoch, global_step, visdial_step, avsd_step, nextqa_step, config + ) = setup_model(config) + + if config['training']: + ft_avsd( + model, + model_without_ddp, + train_dataloaders, + val_dataloaders, + optimizer, + global_step, + visdial_step, + avsd_step, + nextqa_step, + scheduler, + scaler, + start_epoch, + config + ) + elif config['generating']: + test_dataloader = setup_data_test(config) + if config.media_test == 'avsd': + generate(model, test_dataloader, args.tag, config, gen_subset_num=args.gen_subset_num) + if config.media_test == 'visdial': + generate_visdial(model, test_dataloader, args.tag, config, gen_subset_num=args.gen_subset_num) + elif config.media_test == 'nextqa': + generate_nextqa(model, test_dataloader, args.tag, config, gen_subset_num=args.gen_subset_num) + + if config['distributed']: + dist.destroy_process_group() + + +if __name__ == '__main__': + args = parser.parse_args() + + # initialization + model, stage = args.model.split('/') + config = initialize_from_env(model, args.mode, stage, args.eval_dir, tag=args.tag) + config['wandb_enabled'] = args.wandb_mode == 'online' + config['training'] = args.mode == 'train' + config['generating'] = args.mode == 'generate' + config['debugging'] = args.mode == 'debug' + + config['wandb_mode'] = args.wandb_mode + # config['medium'] = args.medium + config['start_idx_gen'] = args.start_idx_gen + config['end_idx_gen'] = args.end_idx_gen + config['expert_permutation'] = None + # config['expert_permutation'] = { + # 'spatial': 'history', + # 'temporal': 'temporal', + # 'caption': 'caption', + # 'history': 'spatial' + # } + + # config['wandb_project'] + # if config['accelerator'] == 'ddp': + if config['num_gpus'] > 1: + config['distributed'] = True + mp.spawn(main, nprocs=config['num_gpus'], args=(config, args)) + else: + config['distributed'] = False + main(0, config, args) diff --git a/merge_pred_avsd.py b/merge_pred_avsd.py new file mode 100644 index 0000000..c14d432 --- /dev/null +++ b/merge_pred_avsd.py @@ -0,0 +1,61 @@ +import os +import json +import argparse + +parser = argparse.ArgumentParser(description='Main script for MST-MIXER') +parser.add_argument( + '--dstc', + type=int, + default=7, + choices=[7, 8, 10], + help='DSTC challenge identifier') + +args = parser.parse_args() + +assert args.dstc in [7, 8, 10] +if args.dstc == 7: + output_dir = 'output/dstc7' + raw_data_path = '/pfss/mlde/workspaces/mlde_wsp_Rohrbach/data/annotations/AVSD/test_set4DSTC7-AVSD.json' + +elif args.dstc == 8: + output_dir = 'output/dstc8' + raw_data_path = '/pfss/mlde/workspaces/mlde_wsp_Rohrbach/data/annotations/AVSD/test_set4DSTC8-AVSD.json' +else: + output_dir = 'output/dstc10' + raw_data_path = '/pfss/mlde/workspaces/mlde_wsp_Rohrbach/data/annotations/AVSD/test_set4DSTC10-AVSD.json' + +with 
open(raw_data_path, 'r') as f: + raw_dialogs = json.load(f)['dialogs'] + +file_paths = os.listdir(output_dir) +file_paths = list(filter(lambda f: 'part' in f , file_paths)) +name = file_paths[0] +file_paths = list(map(lambda f: os.path.join(output_dir, f), file_paths)) + +dialogs = {} +for pth in file_paths: + with open(pth, 'r') as f: + data = json.load(f) + + for dialog in data['dialogs']: + vid_id = dialog['image_id'] + dialogs[vid_id] = dialog + # dialogs.extend(data['dialogs']) + os.remove(pth) + +# Now, re-establish the original order of the dialogs +res = [] +for dialog in raw_dialogs: + vid_id = dialog['image_id'] + res.append(dialogs[vid_id]) + +res = { + 'dialogs': res +} + +name = "".join(name.split('-')[:-1]) + '.json' +output_path = os.path.join(output_dir, name) +with open(output_path, 'w') as f: + json.dump(res, f, indent=4) + +print('[INFO] Files merged and saved in {}'.format(output_path)) \ No newline at end of file diff --git a/merge_pred_nextqa.py b/merge_pred_nextqa.py new file mode 100644 index 0000000..a4124fa --- /dev/null +++ b/merge_pred_nextqa.py @@ -0,0 +1,34 @@ +import os +import json +import argparse + +parser = argparse.ArgumentParser(description='Main script for MST-MIXER') + +args = parser.parse_args() + +output_dir = 'output/nextqa' + +file_paths = os.listdir(output_dir) +file_paths = list(filter(lambda f: 'part' in f , file_paths)) +name = file_paths[0] +file_paths = list(map(lambda f: os.path.join(output_dir, f), file_paths)) + +results = {} +for pth in file_paths: + with open(pth, 'r') as f: + data = json.load(f) + for video_id in data: + if video_id not in results: + results[video_id] = data[video_id] + else: + for qid in data[video_id]: + if qid not in results[video_id]: + results[video_id][qid] = data[video_id][qid] + os.remove(pth) + +name = "".join(name.split('-')[:-1]) + '.json' +output_path = os.path.join(output_dir, name) +with open(output_path, 'w') as f: + json.dump(results, f, indent=4) + +print('[INFO] Files merged and saved in {}'.format(output_path)) \ No newline at end of file diff --git a/models/__init__.py b/models/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/models/backbones/Qformer.py b/models/backbones/Qformer.py new file mode 100755 index 0000000..e71b123 --- /dev/null +++ b/models/backbones/Qformer.py @@ -0,0 +1,1216 @@ +""" + * Copyright (c) 2023, salesforce.com, inc. + * All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause + * By Junnan Li + * Based on huggingface code base + * https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert +""" + +import math +import os +import warnings +from dataclasses import dataclass +from typing import Optional, Tuple, Dict, Any + +import torch +from torch import Tensor, device, dtype, nn +import torch.utils.checkpoint +from torch import nn +from torch.nn import CrossEntropyLoss +import torch.nn.functional as F + +from transformers.activations import ACT2FN +from transformers.file_utils import ( + ModelOutput, +) +from transformers.modeling_outputs import ( + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPoolingAndCrossAttentions, + CausalLMOutputWithCrossAttentions, + MaskedLMOutput, + MultipleChoiceModelOutput, + NextSentencePredictorOutput, + QuestionAnsweringModelOutput, + SequenceClassifierOutput, + TokenClassifierOutput, +) +from transformers.modeling_utils import ( + PreTrainedModel, + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + prune_linear_layer, +) +from transformers.utils import logging +from transformers.models.bert.configuration_bert import BertConfig + +logger = logging.get_logger(__name__) + + +class BertEmbeddings(nn.Module): + """Construct the embeddings from word and position embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding( + config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id + ) + self.position_embeddings = nn.Embedding( + config.max_position_embeddings, config.hidden_size + ) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.register_buffer( + "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)) + ) + self.position_embedding_type = getattr( + config, "position_embedding_type", "absolute" + ) + + self.config = config + + def forward( + self, + input_ids=None, + position_ids=None, + query_embeds=None, + past_key_values_length=0, + ): + if input_ids is not None: + seq_length = input_ids.size()[1] + else: + seq_length = 0 + + if position_ids is None: + position_ids = self.position_ids[ + :, past_key_values_length : seq_length + past_key_values_length + ].clone() + + if input_ids is not None: + embeddings = self.word_embeddings(input_ids) + if self.position_embedding_type == "absolute": + position_embeddings = self.position_embeddings(position_ids) + embeddings = embeddings + position_embeddings + + if query_embeds is not None: + embeddings = torch.cat((query_embeds, embeddings), dim=1) + else: + embeddings = query_embeds + + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class BertSelfAttention(nn.Module): + def __init__(self, config, is_cross_attention): + super().__init__() + self.config = config + if config.hidden_size % config.num_attention_heads != 0 and not hasattr( + config, "embedding_size" + ): + raise ValueError( + "The hidden size (%d) is not a multiple of the number of attention " + "heads (%d)" % (config.hidden_size, config.num_attention_heads) + ) + + 
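+        # Shape bookkeeping: with, e.g., hidden_size = 768 and
+        # num_attention_heads = 12 (hypothetical values), each head gets
+        # attention_head_size = 768 // 12 = 64 dimensions, and
+        # transpose_for_scores below reshapes (batch, seq_len, 768) into
+        # (batch, 12, seq_len, 64) so all heads attend in parallel.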
self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + if is_cross_attention: + self.key = nn.Linear(config.encoder_width, self.all_head_size) + self.value = nn.Linear(config.encoder_width, self.all_head_size) + else: + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.position_embedding_type = getattr( + config, "position_embedding_type", "absolute" + ) + if ( + self.position_embedding_type == "relative_key" + or self.position_embedding_type == "relative_key_query" + ): + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = nn.Embedding( + 2 * config.max_position_embeddings - 1, self.attention_head_size + ) + self.save_attention = False + + def save_attn_gradients(self, attn_gradients): + self.attn_gradients = attn_gradients + + def get_attn_gradients(self): + return self.attn_gradients + + def save_attention_map(self, attention_map): + self.attention_map = attention_map + + def get_attention_map(self): + return self.attention_map + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + ( + self.num_attention_heads, + self.attention_head_size, + ) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. + is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention: + key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = torch.cat([past_key_value[0], key_layer], dim=2) + value_layer = torch.cat([past_key_value[1], value_layer], dim=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + mixed_query_layer = self.query(hidden_states) + + query_layer = self.transpose_for_scores(mixed_query_layer) + + past_key_value = (key_layer, value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. 
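+        # Shapes at this point: query_layer is (batch, heads, L_q, d_head) and
+        # key_layer is (batch, heads, L_k, d_head), so the matmul below yields
+        # (batch, heads, L_q, L_k) scores. In the cross-attention case, L_k is
+        # the number of encoder tokens (e.g. visual features passed in as
+        # encoder_hidden_states), while L_q covers the query/text positions.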
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + if ( + self.position_embedding_type == "relative_key" + or self.position_embedding_type == "relative_key_query" + ): + seq_length = hidden_states.size()[1] + position_ids_l = torch.arange( + seq_length, dtype=torch.long, device=hidden_states.device + ).view(-1, 1) + position_ids_r = torch.arange( + seq_length, dtype=torch.long, device=hidden_states.device + ).view(1, -1) + distance = position_ids_l - position_ids_r + positional_embedding = self.distance_embedding( + distance + self.max_position_embeddings - 1 + ) + positional_embedding = positional_embedding.to( + dtype=query_layer.dtype + ) # fp16 compatibility + + if self.position_embedding_type == "relative_key": + relative_position_scores = torch.einsum( + "bhld,lrd->bhlr", query_layer, positional_embedding + ) + attention_scores = attention_scores + relative_position_scores + elif self.position_embedding_type == "relative_key_query": + relative_position_scores_query = torch.einsum( + "bhld,lrd->bhlr", query_layer, positional_embedding + ) + relative_position_scores_key = torch.einsum( + "bhrd,lrd->bhlr", key_layer, positional_embedding + ) + attention_scores = ( + attention_scores + + relative_position_scores_query + + relative_position_scores_key + ) + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in BertModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.Softmax(dim=-1)(attention_scores) + + if is_cross_attention and self.save_attention: + self.save_attention_map(attention_probs) + attention_probs.register_hook(self.save_attn_gradients) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. 
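+        # Concretely: with attention_probs_dropout_prob = p, nn.Dropout zeroes
+        # each (query, key) probability independently with chance p during
+        # training and rescales survivors by 1/(1-p), so a dropped entry makes
+        # that query ignore that particular key for this forward pass.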
+ attention_probs_dropped = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs_dropped = attention_probs_dropped * head_mask + + context_layer = torch.matmul(attention_probs_dropped, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(*new_context_layer_shape) + + outputs = ( + (context_layer, attention_probs) if output_attentions else (context_layer,) + ) + + outputs = outputs + (past_key_value,) + return outputs + + +class BertSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertAttention(nn.Module): + def __init__(self, config, is_cross_attention=False): + super().__init__() + self.self = BertSelfAttention(config, is_cross_attention) + self.output = BertSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, + self.self.num_attention_heads, + self.self.attention_head_size, + self.pruned_heads, + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = ( + self.self.attention_head_size * self.self.num_attention_heads + ) + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + self_outputs = self.self( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + attention_output = self.output(self_outputs[0], hidden_states) + + outputs = (attention_output,) + self_outputs[ + 1: + ] # add attentions if we output them + return outputs + + +class BertIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +class BertOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = 
self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertLayer(nn.Module): + def __init__(self, config, layer_num): + super().__init__() + self.config = config + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = BertAttention(config) + self.layer_num = layer_num + if ( + self.config.add_cross_attention + and layer_num % self.config.cross_attention_freq == 0 + ): + self.crossattention = BertAttention( + config, is_cross_attention=self.config.add_cross_attention + ) + self.has_cross_attention = True + else: + self.has_cross_attention = False + self.intermediate = BertIntermediate(config) + self.output = BertOutput(config) + + self.intermediate_query = BertIntermediate(config) + self.output_query = BertOutput(config) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + query_length=0, + ): + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = ( + past_key_value[:2] if past_key_value is not None else None + ) + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) + attention_output = self_attention_outputs[0] + outputs = self_attention_outputs[1:-1] + + present_key_value = self_attention_outputs[-1] + + if query_length > 0: + query_attention_output = attention_output[:, :query_length, :] + + if self.has_cross_attention: + assert ( + encoder_hidden_states is not None + ), "encoder_hidden_states must be given for cross-attention layers" + cross_attention_outputs = self.crossattention( + query_attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + output_attentions=output_attentions, + ) + query_attention_output = cross_attention_outputs[0] + outputs = ( + outputs + cross_attention_outputs[1:-1] + ) # add cross attentions if we output attention weights + + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk_query, + self.chunk_size_feed_forward, + self.seq_len_dim, + query_attention_output, + ) + if attention_output.shape[1] > query_length: + layer_output_text = apply_chunking_to_forward( + self.feed_forward_chunk, + self.chunk_size_feed_forward, + self.seq_len_dim, + attention_output[:, query_length:, :], + ) + layer_output = torch.cat([layer_output, layer_output_text], dim=1) + else: + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, + self.chunk_size_feed_forward, + self.seq_len_dim, + attention_output, + ) + outputs = (layer_output,) + outputs + + outputs = outputs + (present_key_value,) + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + def feed_forward_chunk_query(self, attention_output): + intermediate_output = self.intermediate_query(attention_output) + layer_output = self.output_query(intermediate_output, attention_output) + return layer_output + + +class BertEncoder(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList( + [BertLayer(config, i) for i in range(config.num_hidden_layers)] + 
) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=False, + output_hidden_states=False, + return_dict=True, + query_length=0, + ): + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = ( + () if output_attentions and self.config.add_cross_attention else None + ) + + next_decoder_cache = () if use_cache else None + + for i in range(self.config.num_hidden_layers): + layer_module = self.layer[i] + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[i] if past_key_values is not None else None + + if getattr(self.config, "gradient_checkpointing", False) and self.training: + + if use_cache: + logger.warn( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." + ) + use_cache = False + + def create_custom_forward(module): + def custom_forward(*inputs): + return module( + *inputs, past_key_value, output_attentions, query_length + ) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + query_length, + ) + + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1],) + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + all_cross_attentions = all_cross_attentions + (layer_outputs[2],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +class BertPooler(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states): + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. 
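+        # Position 0 is [CLS] for standard BERT inputs, or the first query token
+        # when query_embeds are prepended (Q-Former usage). Note the pooler is
+        # only created when add_pooling_layer=True (this file defaults it to False).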
+        first_token_tensor = hidden_states[:, 0]
+        pooled_output = self.dense(first_token_tensor)
+        pooled_output = self.activation(pooled_output)
+        return pooled_output
+
+
+class BertPredictionHeadTransform(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+        if isinstance(config.hidden_act, str):
+            self.transform_act_fn = ACT2FN[config.hidden_act]
+        else:
+            self.transform_act_fn = config.hidden_act
+        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+    def forward(self, hidden_states):
+        hidden_states = self.dense(hidden_states)
+        hidden_states = self.transform_act_fn(hidden_states)
+        hidden_states = self.LayerNorm(hidden_states)
+        return hidden_states
+
+
+class BertLMPredictionHead(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+        self.transform = BertPredictionHeadTransform(config)
+
+        # The output weights are the same as the input embeddings, but there is
+        # an output-only bias for each token.
+        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
+
+        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
+        self.decoder.bias = self.bias
+
+    def forward(self, hidden_states):
+        hidden_states = self.transform(hidden_states)
+        hidden_states = self.decoder(hidden_states)
+        return hidden_states
+
+
+class BertOnlyMLMHead(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+        self.predictions = BertLMPredictionHead(config)
+
+    def forward(self, sequence_output):
+        prediction_scores = self.predictions(sequence_output)
+        return prediction_scores
+
+
+class BertPreTrainedModel(PreTrainedModel):
+    """
+    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+    models.
+    """
+
+    config_class = BertConfig
+    base_model_prefix = "bert"
+    _keys_to_ignore_on_load_missing = [r"position_ids"]
+
+    def _init_weights(self, module):
+        """Initialize the weights"""
+        if isinstance(module, (nn.Linear, nn.Embedding)):
+            # Slightly different from the TF version which uses truncated_normal for initialization
+            # cf https://github.com/pytorch/pytorch/pull/5617
+            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+        elif isinstance(module, nn.LayerNorm):
+            module.bias.data.zero_()
+            module.weight.data.fill_(1.0)
+        if isinstance(module, nn.Linear) and module.bias is not None:
+            module.bias.data.zero_()
+
+
+class BertModel(BertPreTrainedModel):
+    """
+    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
+    cross-attention is added between the self-attention layers, following the architecture described in `Attention is
+    all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
+    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as a decoder the model needs to be
+    initialized with the :obj:`is_decoder` argument of the configuration set to :obj:`True`. To be used in a Seq2Seq
+    model, the model needs to be initialized with both the :obj:`is_decoder`
+    argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
+    input to the forward pass.
+ """ + + def __init__(self, config, add_pooling_layer=False): + super().__init__(config) + self.config = config + + self.embeddings = BertEmbeddings(config) + + self.encoder = BertEncoder(config) + + self.pooler = BertPooler(config) if add_pooling_layer else None + + self.init_weights() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + def get_extended_attention_mask( + self, + attention_mask: Tensor, + input_shape: Tuple[int], + device: device, + is_decoder: bool, + has_query: bool = False, + ) -> Tensor: + """ + Makes broadcastable attention and causal masks so that future and masked tokens are ignored. + + Arguments: + attention_mask (:obj:`torch.Tensor`): + Mask with ones indicating tokens to attend to, zeros for tokens to ignore. + input_shape (:obj:`Tuple[int]`): + The shape of the input to the model. + device: (:obj:`torch.device`): + The device of the input to the model. + + Returns: + :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`. + """ + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. + if attention_mask.dim() == 3: + extended_attention_mask = attention_mask[:, None, :, :] + elif attention_mask.dim() == 2: + # Provided a padding mask of dimensions [batch_size, seq_length] + # - if the model is a decoder, apply a causal mask in addition to the padding mask + # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] + if is_decoder: + batch_size, seq_length = input_shape + + seq_ids = torch.arange(seq_length, device=device) + causal_mask = ( + seq_ids[None, None, :].repeat(batch_size, seq_length, 1) + <= seq_ids[None, :, None] + ) + + # add a prefix ones mask to the causal mask + # causal and attention masks must have same type with pytorch version < 1.3 + causal_mask = causal_mask.to(attention_mask.dtype) + + if causal_mask.shape[1] < attention_mask.shape[1]: + prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1] + if has_query: # UniLM style attention mask + causal_mask = torch.cat( + [ + torch.zeros( + (batch_size, prefix_seq_len, seq_length), + device=device, + dtype=causal_mask.dtype, + ), + causal_mask, + ], + axis=1, + ) + causal_mask = torch.cat( + [ + torch.ones( + (batch_size, causal_mask.shape[1], prefix_seq_len), + device=device, + dtype=causal_mask.dtype, + ), + causal_mask, + ], + axis=-1, + ) + extended_attention_mask = ( + causal_mask[:, None, :, :] * attention_mask[:, None, None, :] + ) + else: + extended_attention_mask = attention_mask[:, None, None, :] + else: + raise ValueError( + "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( + input_shape, attention_mask.shape + ) + ) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. 
+ # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. + extended_attention_mask = extended_attention_mask.to( + dtype=self.dtype + ) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + return extended_attention_mask + + def forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + head_mask=None, + query_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + is_decoder=False, + ): + r""" + encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` + (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` + instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. + use_cache (:obj:`bool`, `optional`): + If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up + decoding (see :obj:`past_key_values`). + """ + output_attentions = ( + output_attentions + if output_attentions is not None + else self.config.output_attentions + ) + output_hidden_states = ( + output_hidden_states + if output_hidden_states is not None + else self.config.output_hidden_states + ) + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + + # use_cache = use_cache if use_cache is not None else self.config.use_cache + + if input_ids is None: + assert ( + query_embeds is not None + ), "You have to specify query_embeds when input_ids is None" + + # past_key_values_length + past_key_values_length = ( + past_key_values[0][0].shape[2] - self.config.query_length + if past_key_values is not None + else 0 + ) + + query_length = query_embeds.shape[1] if query_embeds is not None else 0 + + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + query_embeds=query_embeds, + past_key_values_length=past_key_values_length, + ) + + input_shape = embedding_output.size()[:-1] + batch_size, seq_length = input_shape + device = embedding_output.device + + if attention_mask is None: + attention_mask = torch.ones( + ((batch_size, seq_length + past_key_values_length)), device=device + ) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
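+        # Illustrative example: for a decoder input of seq_length 3 with no
+        # padding, the causal mask built by get_extended_attention_mask below is
+        #     [[1, 0, 0],
+        #      [1, 1, 0],
+        #      [1, 1, 1]]
+        # With query_embeds prepended (has_query=True), extra rows/columns are
+        # added UniLM-style: query tokens attend bidirectionally among themselves,
+        # and text tokens attend to all queries plus causally to earlier text.
+        # The result is then converted to additive form (0.0 keep, -10000.0 drop).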
+ if is_decoder: + extended_attention_mask = self.get_extended_attention_mask( + attention_mask, + input_ids.shape, + device, + is_decoder, + has_query=(query_embeds is not None), + ) + else: + extended_attention_mask = self.get_extended_attention_mask( + attention_mask, input_shape, device, is_decoder + ) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if encoder_hidden_states is not None: + if type(encoder_hidden_states) == list: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[ + 0 + ].size() + else: + ( + encoder_batch_size, + encoder_sequence_length, + _, + ) = encoder_hidden_states.size() + encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) + + if type(encoder_attention_mask) == list: + encoder_extended_attention_mask = [ + self.invert_attention_mask(mask) for mask in encoder_attention_mask + ] + elif encoder_attention_mask is None: + encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) + encoder_extended_attention_mask = self.invert_attention_mask( + encoder_attention_mask + ) + else: + encoder_extended_attention_mask = self.invert_attention_mask( + encoder_attention_mask + ) + else: + encoder_extended_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + query_length=query_length, + ) + sequence_output = encoder_outputs[0] + pooled_output = ( + self.pooler(sequence_output) if self.pooler is not None else None + ) + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + past_key_values=encoder_outputs.past_key_values, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) + + +class BertLMHeadModel(BertPreTrainedModel): + + _keys_to_ignore_on_load_unexpected = [r"pooler"] + _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] + + def __init__(self, config): + super().__init__(config) + + self.bert = BertModel(config, add_pooling_layer=False) + self.cls = BertOnlyMLMHead(config) + + self.init_weights() + + def get_output_embeddings(self): + return self.cls.predictions.decoder + + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + def forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + head_mask=None, + query_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + labels=None, + past_key_values=None, + use_cache=True, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + 
return_logits=False,
+        is_decoder=True,
+        reduction="mean",
+    ):
+        r"""
+        encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
+            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+            the model is configured as a decoder.
+        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
+            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
+            - 1 for tokens that are **not masked**,
+            - 0 for tokens that are **masked**.
+        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
+            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
+            ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
+            ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
+        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
+            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
+            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
+        use_cache (:obj:`bool`, `optional`):
+            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
+            decoding (see :obj:`past_key_values`).
+ Returns: + Example:: + >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig + >>> import torch + >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased') + >>> config = BertConfig.from_pretrained("bert-base-cased") + >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config) + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + >>> outputs = model(**inputs) + >>> prediction_logits = outputs.logits + """ + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + if labels is not None: + use_cache = False + if past_key_values is not None: + query_embeds = None + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + head_mask=head_mask, + query_embeds=query_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + is_decoder=is_decoder, + ) + + sequence_output = outputs[0] + if query_embeds is not None: + sequence_output = outputs[0][:, query_embeds.shape[1] :, :] + + prediction_scores = self.cls(sequence_output) + + if return_logits: + return prediction_scores[:, :-1, :].contiguous() + + lm_loss = None + if labels is not None: + # we are doing next-token prediction; shift prediction scores and input ids by one + shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() + labels = labels[:, 1:].contiguous() + loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=0.1) + lm_loss = loss_fct( + shifted_prediction_scores.view(-1, self.config.vocab_size), + labels.view(-1), + ) + if reduction == "none": + lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1) + + if not return_dict: + output = (prediction_scores,) + outputs[2:] + return ((lm_loss,) + output) if lm_loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=lm_loss, + logits=prediction_scores, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + def prepare_inputs_for_generation( + self, input_ids, query_embeds, past=None, attention_mask=None, **model_kwargs + ): + # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = input_ids.new_ones(input_ids.shape) + query_mask = input_ids.new_ones(query_embeds.shape[:-1]) + attention_mask = torch.cat([query_mask, attention_mask], dim=-1) + + # cut decoder_input_ids if past is used + if past is not None: + input_ids = input_ids[:, -1:] + + return { + "input_ids": input_ids, + "query_embeds": query_embeds, + "attention_mask": attention_mask, + "past_key_values": past, + "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None), + "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None), + "is_decoder": True, + } + + def _reorder_cache(self, past, beam_idx): + reordered_past = () + for layer_past in past: + reordered_past += ( + tuple( + past_state.index_select(0, beam_idx) for past_state in layer_past + ), + ) + return reordered_past + + +class BertForMaskedLM(BertPreTrainedModel): + + _keys_to_ignore_on_load_unexpected = [r"pooler"] + _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] + + def __init__(self, 
config):
+        super().__init__(config)
+
+        self.bert = BertModel(config, add_pooling_layer=False)
+        self.cls = BertOnlyMLMHead(config)
+
+        self.init_weights()
+
+    def get_output_embeddings(self):
+        return self.cls.predictions.decoder
+
+    def set_output_embeddings(self, new_embeddings):
+        self.cls.predictions.decoder = new_embeddings
+
+    def forward(
+        self,
+        input_ids=None,
+        attention_mask=None,
+        position_ids=None,
+        head_mask=None,
+        query_embeds=None,
+        encoder_hidden_states=None,
+        encoder_attention_mask=None,
+        labels=None,
+        output_attentions=None,
+        output_hidden_states=None,
+        return_dict=None,
+        return_logits=False,
+        is_decoder=False,
+    ):
+        r"""
+        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
+            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
+            config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
+            (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
+        """
+
+        return_dict = (
+            return_dict if return_dict is not None else self.config.use_return_dict
+        )
+
+        outputs = self.bert(
+            input_ids,
+            attention_mask=attention_mask,
+            position_ids=position_ids,
+            head_mask=head_mask,
+            query_embeds=query_embeds,
+            encoder_hidden_states=encoder_hidden_states,
+            encoder_attention_mask=encoder_attention_mask,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+            is_decoder=is_decoder,
+        )
+
+        # Default to the full sequence; strip the prepended query tokens before
+        # computing MLM logits when query_embeds are present.
+        sequence_output = outputs[0]
+        if query_embeds is not None:
+            sequence_output = outputs[0][:, query_embeds.shape[1] :, :]
+        prediction_scores = self.cls(sequence_output)
+
+        if return_logits:
+            return prediction_scores
+
+        masked_lm_loss = None
+        if labels is not None:
+            loss_fct = CrossEntropyLoss()  # -100 index = padding token
+            masked_lm_loss = loss_fct(
+                prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)
+            )
+
+        if not return_dict:
+            output = (prediction_scores,) + outputs[2:]
+            return (
+                ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+            )
+
+        return MaskedLMOutput(
+            loss=masked_lm_loss,
+            logits=prediction_scores,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
diff --git a/models/backbones/__init__.py b/models/backbones/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/models/backbones/base_model.py b/models/backbones/base_model.py
new file mode 100755
index 0000000..5da161f
--- /dev/null
+++ b/models/backbones/base_model.py
@@ -0,0 +1,247 @@
+"""
+ Copyright (c) 2022, salesforce.com, inc.
+ All rights reserved.
+ SPDX-License-Identifier: BSD-3-Clause
+ For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
+"""
+
+import logging
+import os
+
+import numpy as np
+import torch
+import torch.nn as nn
+from models.common.dist_utils import download_cached_file, is_dist_avail_and_initialized
+from models.common.utils import get_abs_path, is_url
+from omegaconf import OmegaConf
+
+
+class BaseModel(nn.Module):
+    """Base class for models."""
+
+    def __init__(self):
+        super().__init__()
+
+    @property
+    def device(self):
+        return list(self.parameters())[0].device
+
+    def load_checkpoint(self, url_or_filename):
+        """
+        Load from a finetuned checkpoint.
+
+        This should expect no mismatch in the model keys and the checkpoint keys.
+ """ + + if is_url(url_or_filename): + cached_file = download_cached_file( + url_or_filename, check_hash=False, progress=True + ) + checkpoint = torch.load(cached_file, map_location="cpu") + elif os.path.isfile(url_or_filename): + checkpoint = torch.load(url_or_filename, map_location="cpu") + else: + raise RuntimeError("checkpoint url or path is invalid") + + if "model" in checkpoint.keys(): + state_dict = checkpoint["model"] + else: + state_dict = checkpoint + + msg = self.load_state_dict(state_dict, strict=False) + + logging.info("Missing keys {}".format(msg.missing_keys)) + logging.info("load checkpoint from %s" % url_or_filename) + + return msg + + @classmethod + def from_pretrained(cls, model_type): + """ + Build a pretrained model from default configuration file, specified by model_type. + + Args: + - model_type (str): model type, specifying architecture and checkpoints. + + Returns: + - model (nn.Module): pretrained or finetuned model, depending on the configuration. + """ + model_cfg = OmegaConf.load(cls.default_config_path(model_type)).model + model = cls.from_config(model_cfg) + + return model + + @classmethod + def default_config_path(cls, model_type): + assert ( + model_type in cls.PRETRAINED_MODEL_CONFIG_DICT + ), "Unknown model type {}".format(model_type) + return get_abs_path(cls.PRETRAINED_MODEL_CONFIG_DICT[model_type]) + + def load_checkpoint_from_config(self, cfg, **kwargs): + """ + Load checkpoint as specified in the config file. + + If load_finetuned is True, load the finetuned model; otherwise, load the pretrained model. + When loading the pretrained model, each task-specific architecture may define their + own load_from_pretrained() method. + """ + load_finetuned = cfg.get("load_finetuned", True) + if load_finetuned: + finetune_path = cfg.get("finetuned", None) + assert ( + finetune_path is not None + ), "Found load_finetuned is True, but finetune_path is None." + self.load_checkpoint(url_or_filename=finetune_path) + else: + # load pre-trained weights + pretrain_path = cfg.get("pretrained", None) + assert "Found load_finetuned is False, but pretrain_path is None." + self.load_from_pretrained(url_or_filename=pretrain_path, **kwargs) + + def before_evaluation(self, **kwargs): + pass + + def show_n_params(self, return_str=True): + tot = 0 + for p in self.parameters(): + w = 1 + for x in p.shape: + w *= x + tot += w + if return_str: + if tot >= 1e6: + return "{:.1f}M".format(tot / 1e6) + else: + return "{:.1f}K".format(tot / 1e3) + else: + return tot + + +class BaseEncoder(nn.Module): + """ + Base class for primitive encoders, such as ViT, TimeSformer, etc. 
+ """ + + def __init__(self): + super().__init__() + + def forward_features(self, samples, **kwargs): + raise NotImplementedError + + @property + def device(self): + return list(self.parameters())[0].device + + +class SharedQueueMixin: + @torch.no_grad() + def _dequeue_and_enqueue(self, image_feat, text_feat, idxs=None): + # gather keys before updating queue + image_feats = concat_all_gather(image_feat) + text_feats = concat_all_gather(text_feat) + + batch_size = image_feats.shape[0] + + ptr = int(self.queue_ptr) + assert self.queue_size % batch_size == 0 # for simplicity + + # replace the keys at ptr (dequeue and enqueue) + self.image_queue[:, ptr : ptr + batch_size] = image_feats.T + self.text_queue[:, ptr : ptr + batch_size] = text_feats.T + + if idxs is not None: + idxs = concat_all_gather(idxs) + self.idx_queue[:, ptr : ptr + batch_size] = idxs.T + + ptr = (ptr + batch_size) % self.queue_size # move pointer + self.queue_ptr[0] = ptr + + +class MomentumDistilationMixin: + @torch.no_grad() + def copy_params(self): + for model_pair in self.model_pairs: + for param, param_m in zip( + model_pair[0].parameters(), model_pair[1].parameters() + ): + param_m.data.copy_(param.data) # initialize + param_m.requires_grad = False # not update by gradient + + @torch.no_grad() + def _momentum_update(self): + for model_pair in self.model_pairs: + for param, param_m in zip( + model_pair[0].parameters(), model_pair[1].parameters() + ): + param_m.data = param_m.data * self.momentum + param.data * ( + 1.0 - self.momentum + ) + + +class GatherLayer(torch.autograd.Function): + """ + Gather tensors from all workers with support for backward propagation: + This implementation does not cut the gradients as torch.distributed.all_gather does. + """ + + @staticmethod + def forward(ctx, x): + output = [ + torch.zeros_like(x) for _ in range(torch.distributed.get_world_size()) + ] + torch.distributed.all_gather(output, x) + return tuple(output) + + @staticmethod + def backward(ctx, *grads): + all_gradients = torch.stack(grads) + torch.distributed.all_reduce(all_gradients) + return all_gradients[torch.distributed.get_rank()] + + +def all_gather_with_grad(tensors): + """ + Performs all_gather operation on the provided tensors. + Graph remains connected for backward grad computation. + """ + # Queue the gathered tensors + world_size = torch.distributed.get_world_size() + # There is no need for reduction in the single-proc case + if world_size == 1: + return tensors + + # tensor_all = GatherLayer.apply(tensors) + tensor_all = GatherLayer.apply(tensors) + + return torch.cat(tensor_all, dim=0) + + +@torch.no_grad() +def concat_all_gather(tensor): + """ + Performs all_gather operation on the provided tensors. + *** Warning ***: torch.distributed.all_gather has no gradient. 
+ """ + # if use distributed training + if not is_dist_avail_and_initialized(): + return tensor + + tensors_gather = [ + torch.ones_like(tensor) for _ in range(torch.distributed.get_world_size()) + ] + torch.distributed.all_gather(tensors_gather, tensor, async_op=False) + + output = torch.cat(tensors_gather, dim=0) + return output + + +def tile(x, dim, n_tile): + init_dim = x.size(dim) + repeat_idx = [1] * x.dim() + repeat_idx[dim] = n_tile + x = x.repeat(*(repeat_idx)) + order_index = torch.LongTensor( + np.concatenate([init_dim * np.arange(n_tile) + i for i in range(init_dim)]) + ) + return torch.index_select(x, dim, order_index.to(x.device)) diff --git a/models/backbones/beit/__init__.py b/models/backbones/beit/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/models/backbones/beit/builder.py b/models/backbones/beit/builder.py new file mode 100644 index 0000000..bc66e3c --- /dev/null +++ b/models/backbones/beit/builder.py @@ -0,0 +1,107 @@ +import logging +import torch +from models.utils import (interpolate_pos_relative_bias_beit, + load_temp_embed_with_mismatch) + +logger = logging.getLogger(__name__) + + +def interpolate_pos_embed_beit(state_dict, new_model): + """interpolate the positional embeddings. + The spatial pe is relative and temporal pe is absolute. + additional temporal pe is padded with 0. + + Args: + state_dict (dict): The state_dict. + new_model (nn.Module): The created model. + + Returns: dict. The state_dict with updated positional embeddings. + + """ + state_dict = interpolate_pos_relative_bias_beit( + state_dict_old=state_dict, + state_dict_new=new_model.state_dict(), + patch_shape_new=new_model.beit.embeddings.patch_embeddings.patch_shape, + ) + # absolute temporal pos bias + temporal_pe_key = "beit.embeddings.temporal_position_embeddings" + if temporal_pe_key in state_dict: + logger.info(f"interpolate temporal positional embeddings: {temporal_pe_key}") + state_dict[temporal_pe_key] = load_temp_embed_with_mismatch( + temp_embed_old=state_dict[temporal_pe_key], + temp_embed_new=new_model.state_dict()[temporal_pe_key], + ) + return state_dict + +def extract_beit_from_vindlu(vindlu_state_dict): + beit_state_dict = {} + beit_param_names = [k for k in vindlu_state_dict if k.startswith('vision_encoder.') and 'temp_model' not in k] + for param_name in beit_param_names: + new_name = param_name.replace('vision_encoder.', '') + beit_state_dict[new_name] = vindlu_state_dict[param_name] + + return beit_state_dict + +def build_beit(model_config, image_res, checkpoint=False): + """build beit with configuration. + + Args: + config (dict): The configs for beit. + image_res (int): The image resolution. + checkpoint (bool): Whether to enable gradient checkpointing. + + Returns: nn.Module + + """ + from .st_beit import BeitConfig as config_cls + from .st_beit import BeitModel as model_cls + + + vindlu_state_dict = torch.load(model_config['vindlu_path'])['model'] + state_dict = extract_beit_from_vindlu(vindlu_state_dict) + model_config = model_config['beit_config_json'] + + logger.info( + f"Loading vit pre-trained weights from huggingface {model_config['pretrained']}." 
+ ) + # BEiT uses average pooled tokens instead of [CLS] used by other models + aux_kwargs = {"add_pooling_layer": True} + # tmp_model = model_cls.from_pretrained(model_config['beit_pretrained'], **aux_kwargs) + + + # tmp_model = model_cls.from_pretrained(model_config['pretrained'], **aux_kwargs) + # state_dict = tmp_model.state_dict() + + # del tmp_model + + logger.info(f"Init new model with new image size {image_res}, and load weights.") + + # other_cfg = model_config.temporal_modeling + other_cfg = {} + + vit_config = config_cls.from_pretrained( + model_config['pretrained'], image_size=image_res, **other_cfg + ) + + # vit_config.update(model_config) + + model = model_cls(config=vit_config, **aux_kwargs) + + if checkpoint: + model.gradient_checkpointing_enable() + + # interpolate relative pos bias + state_dict = interpolate_pos_relative_bias_beit( + state_dict_old=state_dict, + state_dict_new=model.state_dict(), + patch_shape_new=model.embeddings.patch_embeddings.patch_shape, + ) + + # del prompt_bias_table + for k in list(state_dict.keys()): + if "prompt_bias_table" in k: + del state_dict[k] + + msg = model.load_state_dict(state_dict, strict=False) + logger.info(msg) + return model diff --git a/models/backbones/beit/st_beit.py b/models/backbones/beit/st_beit.py new file mode 100644 index 0000000..ae38511 --- /dev/null +++ b/models/backbones/beit/st_beit.py @@ -0,0 +1,1752 @@ +# coding=utf-8 +# Copyright 2021 Microsoft Research and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" PyTorch BEiT model.""" + +import collections.abc +import math +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import einops +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss +from torch.nn import functional as F +from transformers.activations import ACT2FN +from transformers.configuration_utils import PretrainedConfig +from transformers.modeling_outputs import (BaseModelOutput, + BaseModelOutputWithPooling, + ImageClassifierOutput, + MaskedLMOutput, + SemanticSegmenterOutput) +from transformers.modeling_utils import PreTrainedModel +from transformers.pytorch_utils import (find_pruneable_heads_and_indices, + prune_linear_layer) +from transformers.utils import (add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, logging, + replace_return_docstrings) + +from models.utils import interpolate_temporal_pos_embed + +from ...modules.temporal_modelling import (X_CLIP, STAdapter, TemporalAttention, + TemporalS4, WindowTemporalAttention) + +logger = logging.get_logger(__name__) + +# General docstring +_CONFIG_FOR_DOC = "BeitConfig" +_FEAT_EXTRACTOR_FOR_DOC = "BeitFeatureExtractor" + +# Base docstring +_CHECKPOINT_FOR_DOC = "microsoft/beit-base-patch16-224-pt22k" +_EXPECTED_OUTPUT_SHAPE = [1, 197, 768] + +# Image classification docstring +_IMAGE_CLASS_CHECKPOINT = "microsoft/beit-base-patch16-224" +_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" + +BEIT_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "microsoft/beit-base-patch16-224", + # See all BEiT models at https://huggingface.co/models?filter=beit +] + + +class BeitConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`BeitModel`]. It is used to instantiate an BEiT + model according to the specified arguments, defining the model architecture. Instantiating a configuration with the + defaults will yield a similar configuration to that of the BEiT + [microsoft/beit-base-patch16-224-pt22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k) architecture. + + Args: + vocab_size (`int`, *optional*, defaults to 8092): + Vocabulary size of the BEiT model. Defines the number of different image tokens that can be used during + pre-training. + hidden_size (`int`, *optional*, defaults to 768): + Dimensionality of the encoder layers and the pooler layer. + num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + intermediate_size (`int`, *optional*, defaults to 3072): + Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. + hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"selu"` and `"gelu_new"` are supported. + hidden_dropout_prob (`float`, *optional*, defaults to 0.0): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12): + The epsilon used by the layer normalization layers. + image_size (`int`, *optional*, defaults to 224): + The size (resolution) of each image. + patch_size (`int`, *optional*, defaults to 16): + The size (resolution) of each patch. + num_channels (`int`, *optional*, defaults to 3): + The number of input channels. + use_mask_token (`bool`, *optional*, defaults to `False`): + Whether to use a mask token for masked image modeling. + use_absolute_position_embeddings (`bool`, *optional*, defaults to `False`): + Whether to use BERT-style absolute position embeddings. + use_relative_position_bias (`bool`, *optional*, defaults to `False`): + Whether to use T5-style relative position embeddings in the self-attention layers. + use_shared_relative_position_bias (`bool`, *optional*, defaults to `False`): + Whether to use the same relative position embeddings across all self-attention layers of the Transformer. + layer_scale_init_value (`float`, *optional*, defaults to 0.1): + Scale to use in the self-attention layers. 0.1 for base, 1e-5 for large. Set 0 to disable layer scale. + drop_path_rate (`float`, *optional*, defaults to 0.1): + Stochastic depth rate per sample (when applied in the main path of residual layers). + use_mean_pooling (`bool`, *optional*, defaults to `True`): + Whether to mean pool the final hidden states of the patches instead of using the final hidden state of the + CLS token, before applying the classification head. + out_indices (`List[int]`, *optional*, defaults to `[3, 5, 7, 11]`): + Indices of the feature maps to use for semantic segmentation. + pool_scales (`Tuple[int]`, *optional*, defaults to `[1, 2, 3, 6]`): + Pooling scales used in Pooling Pyramid Module applied on the last feature map. + use_auxiliary_head (`bool`, *optional*, defaults to `True`): + Whether to use an auxiliary head during training. + auxiliary_loss_weight (`float`, *optional*, defaults to 0.4): + Weight of the cross-entropy loss of the auxiliary head. + auxiliary_channels (`int`, *optional*, defaults to 256): + Number of channels to use in the auxiliary head. + auxiliary_num_convs (`int`, *optional*, defaults to 1): + Number of convolutional layers to use in the auxiliary head. + auxiliary_concat_input (`bool`, *optional*, defaults to `False`): + Whether to concatenate the output of the auxiliary head with the input before the classification layer. + semantic_loss_ignore_index (`int`, *optional*, defaults to 255): + The index that is ignored by the loss function of the semantic segmentation model. 
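+        temporal_model_block (`str`, *optional*, defaults to `"none"`):
+            Temporal module inserted into each encoder layer for video input; recognized
+            values in `BeitLayer.__init__` below include `"st_adapter"`, `"timesformer"`,
+            `"s4"`, `"ta_beit"` and `"window_attention"`.
+        temporal_model_position (`str`, *optional*, defaults to `"last"`):
+            Where within the layer the temporal module is applied.
+        temporal_model_init_value (`float`, *optional*, defaults to 0.0):
+            Initial value of the learnable scale on the temporal residual branch.
+        temporal_model_config (`dict`, *optional*, defaults to `{}`):
+            Keyword arguments forwarded to the chosen temporal module.
+        use_temporal_position_embedding (`bool`, *optional*, defaults to `False`):
+            Whether to add learnable absolute temporal position embeddings per frame.
+        add_k_prompts (`int`, *optional*, defaults to 0):
+            Number of extra learnable prompt tokens appended after the patch tokens.
+        num_frames (`int`, *optional*, defaults to 1):
+            Number of frames the temporal position embeddings are created for.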
+ + Example: + + ```python + >>> from transformers import BeitModel, BeitConfig + + >>> # Initializing a BEiT beit-base-patch16-224-pt22k style configuration + >>> configuration = BeitConfig() + + >>> # Initializing a model from the beit-base-patch16-224-pt22k style configuration + >>> model = BeitModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + model_type = "beit" + + def __init__( + self, + vocab_size=8192, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + intermediate_size=3072, + hidden_act="gelu", + hidden_dropout_prob=0.0, + attention_probs_dropout_prob=0.0, + initializer_range=0.02, + layer_norm_eps=1e-12, + is_encoder_decoder=False, + image_size=224, + num_frames=1, + patch_size=16, + num_channels=3, + use_mask_token=False, + use_absolute_position_embeddings=False, + use_relative_position_bias=False, + use_shared_relative_position_bias=False, + layer_scale_init_value=0.1, + drop_path_rate=0.1, + use_mean_pooling=True, + out_indices=[3, 5, 7, 11], + pool_scales=[1, 2, 3, 6], + use_auxiliary_head=True, + auxiliary_loss_weight=0.4, + auxiliary_channels=256, + auxiliary_num_convs=1, + auxiliary_concat_input=False, + semantic_loss_ignore_index=255, + + temporal_model_block="none", + temporal_model_position="last", + temporal_model_init_value=0.0, + temporal_model_config={}, + use_temporal_position_embedding=False, + add_k_prompts=0, + **kwargs, + ): + super().__init__(**kwargs) + + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + + self.image_size = image_size + self.patch_size = patch_size + self.num_channels = num_channels + self.use_mask_token = use_mask_token + self.use_absolute_position_embeddings = use_absolute_position_embeddings + self.use_relative_position_bias = use_relative_position_bias + self.use_shared_relative_position_bias = use_shared_relative_position_bias + self.layer_scale_init_value = layer_scale_init_value + self.drop_path_rate = drop_path_rate + self.use_mean_pooling = use_mean_pooling + # decode head attributes (semantic segmentation) + self.out_indices = out_indices + self.pool_scales = pool_scales + # auxiliary head attributes (semantic segmentation) + self.use_auxiliary_head = use_auxiliary_head + self.auxiliary_loss_weight = auxiliary_loss_weight + self.auxiliary_channels = auxiliary_channels + self.auxiliary_num_convs = auxiliary_num_convs + self.auxiliary_concat_input = auxiliary_concat_input + self.semantic_loss_ignore_index = semantic_loss_ignore_index + + self.temporal_model_block = temporal_model_block + self.temporal_model_config = temporal_model_config + self.temporal_model_position = temporal_model_position + self.temporal_model_init_value = temporal_model_init_value + self.use_temporal_position_embedding = use_temporal_position_embedding + self.add_k_prompts = add_k_prompts + self.num_frames = num_frames + + +@dataclass +class BeitModelOutputWithPooling(BaseModelOutputWithPooling): + """ + Class for outputs of [`BeitModel`]. 
+ + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): + Average of the last layer hidden states of the patch tokens (excluding the *[CLS]* token) if + *config.use_mean_pooling* is set to True. If set to False, then the final hidden state of the *[CLS]* token + will be returned. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + +def drop_path( + input: torch.Tensor, drop_prob: float = 0.0, training: bool = False +) -> torch.Tensor: + """ + Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + + Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, + however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the + layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the + argument. + """ + if drop_prob == 0.0 or not training: + return input + keep_prob = 1 - drop_prob + shape = (input.shape[0],) + (1,) * ( + input.ndim - 1 + ) # work with diff dim tensors, not just 2D ConvNets + random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) + random_tensor.floor_() # binarize + output = input.div(keep_prob) * random_tensor + return output + + +class BeitDropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" + + def __init__(self, drop_prob: Optional[float] = None) -> None: + super().__init__() + self.drop_prob = drop_prob + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return drop_path(x, self.drop_prob, self.training) + + def extra_repr(self) -> str: + return "p={}".format(self.drop_prob) + + +# Based on timm implementation, which can be found here: +# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py +class BeitEmbeddings(nn.Module): + """ + Construct the CLS token, position and patch embeddings. Optionally, also the mask token. 
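+    In this spatio-temporal variant, the module can also hold learnable temporal
+    position embeddings and extra prompt tokens, depending on the config (see
+    `use_temporal_position_embedding` and `add_k_prompts`).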
+ + """ + + def __init__(self, config: BeitConfig) -> None: + super().__init__() + + self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) + if config.use_mask_token: + self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) + else: + self.mask_token = None + self.patch_embeddings = BeitPatchEmbeddings(config) + num_patches = self.patch_embeddings.num_patches + if config.use_absolute_position_embeddings: + self.position_embeddings = nn.Parameter( + torch.zeros(1, num_patches + 1, config.hidden_size) + ) + else: + self.position_embeddings = None + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + if config.use_temporal_position_embedding: + self.temporal_position_embeddings = nn.parameter.Parameter( + torch.zeros(1, config.num_frames, 1, config.hidden_size) + ) + else: + self.temporal_position_embeddings = None + + if config.add_k_prompts > 0: + self.prompt_tokens = nn.parameter.Parameter( + torch.zeros(1, config.add_k_prompts, config.hidden_size) + ) + else: + self.prompt_tokens = None + + def forward( + self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor] = None + ) -> torch.Tensor: + """ + Args: + pixel_values (torch.Tensor): The input image patches. Shape: [B, T, C, H, W]. + + + """ + t = pixel_values.shape[1] + pixel_values = einops.rearrange(pixel_values, "b t c h w -> (b t) c h w") + + embeddings = self.patch_embeddings(pixel_values) + batch_size, seq_len, _ = embeddings.size() # [(b t) l c] + + cls_tokens = self.cls_token.expand(batch_size, -1, -1) + if bool_masked_pos is not None: + mask_tokens = self.mask_token.expand(batch_size, seq_len, -1) + # replace the masked visual tokens by mask_tokens + w = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens) + embeddings = embeddings * (1 - w) + mask_tokens * w + + if self.prompt_tokens is not None: + prompt_tokens = self.prompt_tokens.expand(batch_size, -1, -1) + embeddings = torch.cat((cls_tokens, embeddings, prompt_tokens), dim=1) + else: + embeddings = torch.cat((cls_tokens, embeddings), dim=1) # [B*T, L, C] + if self.position_embeddings is not None: + embeddings = embeddings + self.position_embeddings + + embeddings = einops.rearrange(embeddings, "(b t) l c -> b t l c", t=t) + if self.temporal_position_embeddings is not None: + if t <= self.temporal_position_embeddings.shape[1]: + embeddings = embeddings + self.temporal_position_embeddings[:, :t] + else: + tpe = interpolate_temporal_pos_embed(self.temporal_position_embeddings, t) + embeddings = embeddings + tpe + + embeddings = self.dropout(embeddings) + + return embeddings + + +class BeitPatchEmbeddings(nn.Module): + """ + This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial + `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a + Transformer. 
+ """ + + def __init__(self, config): + super().__init__() + image_size, patch_size = config.image_size, config.patch_size + num_channels, hidden_size = config.num_channels, config.hidden_size + + image_size = ( + image_size + if isinstance(image_size, collections.abc.Iterable) + else (image_size, image_size) + ) + patch_size = ( + patch_size + if isinstance(patch_size, collections.abc.Iterable) + else (patch_size, patch_size) + ) + num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) + patch_shape = (image_size[0] // patch_size[0], image_size[1] // patch_size[1]) + self.image_size = image_size + self.patch_size = patch_size + self.num_channels = num_channels + self.num_patches = num_patches + self.patch_shape = patch_shape + + self.projection = nn.Conv2d( + num_channels, hidden_size, kernel_size=patch_size, stride=patch_size + ) + + def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: + batch_size, num_channels, height, width = pixel_values.shape + if num_channels != self.num_channels: + raise ValueError( + "Make sure that the channel dimension of the pixel values match with the one set in the configuration." + ) + if height != self.image_size[0] or width != self.image_size[1]: + raise ValueError( + f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})." + ) + embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2) + + return embeddings + + +class BeitSelfAttention(nn.Module): + def __init__(self, config: BeitConfig, window_size: Optional[tuple] = None) -> None: + super().__init__() + if config.hidden_size % config.num_attention_heads != 0 and not hasattr( + config, "embedding_size" + ): + raise ValueError( + f"The hidden size {config.hidden_size,} is not a multiple of the number of attention " + f"heads {config.num_attention_heads}." + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=False) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + + if window_size: + self.relative_position_bias = BeitRelativePositionBias( + config, window_size=window_size + ) + else: + self.relative_position_bias = None + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states: torch.Tensor, + head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + relative_position_bias: Optional["BeitRelativePositionBias"] = None, + ) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]: + mixed_query_layer = self.query(hidden_states) + + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + query_layer = self.transpose_for_scores(mixed_query_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + + # Add relative position bias if present. 
+ if self.relative_position_bias is not None: + attention_scores = attention_scores + self.relative_position_bias().unsqueeze(0) + + # Add shared relative position bias if provided. + if relative_position_bias is not None: + attention_scores = attention_scores + relative_position_bias + + # Normalize the attention scores to probabilities. + attention_probs = nn.functional.softmax(attention_scores, dim=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul(attention_probs, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(*new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + return outputs + + +class BeitSelfOutput(nn.Module): + """ + The residual connection is defined in BeitLayer instead of here (as is the case with other models), due to the + layernorm applied before each block. + """ + + def __init__(self, config: BeitConfig) -> None: + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward( + self, hidden_states: torch.Tensor, input_tensor: torch.Tensor, gamma=None + ) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + + return hidden_states + + +class BeitAttention(nn.Module): + def __init__(self, config: BeitConfig, window_size: Optional[tuple] = None) -> None: + super().__init__() + self.attention = BeitSelfAttention(config, window_size=window_size) + self.output = BeitSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, + self.attention.num_attention_heads, + self.attention.attention_head_size, + self.pruned_heads, + ) + + # Prune linear layers + self.attention.query = prune_linear_layer(self.attention.query, index) + self.attention.key = prune_linear_layer(self.attention.key, index) + self.attention.value = prune_linear_layer(self.attention.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) + self.attention.all_head_size = ( + self.attention.attention_head_size * self.attention.num_attention_heads + ) + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states: torch.Tensor, + head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + relative_position_bias: Optional["BeitRelativePositionBias"] = None, + ) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]: + self_outputs = self.attention( + hidden_states, head_mask, output_attentions, relative_position_bias + ) + + attention_output = self.output(self_outputs[0], hidden_states) + + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +class BeitIntermediate(nn.Module): + def __init__(self, config: BeitConfig) -> None: + super().__init__() + self.dense = 
nn.Linear(config.hidden_size, config.intermediate_size)
+        if isinstance(config.hidden_act, str):
+            self.intermediate_act_fn = ACT2FN[config.hidden_act]
+        else:
+            self.intermediate_act_fn = config.hidden_act
+
+    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+        hidden_states = self.dense(hidden_states)
+        hidden_states = self.intermediate_act_fn(hidden_states)
+
+        return hidden_states
+
+
+class BeitOutput(nn.Module):
+    def __init__(self, config: BeitConfig) -> None:
+        super().__init__()
+        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+        self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+        hidden_states = self.dense(hidden_states)
+        hidden_states = self.dropout(hidden_states)
+
+        return hidden_states
+
+
+class TemporalAttentionBeit(nn.Module):
+    """Temporal attention over the frame axis, implemented with BeitAttention."""
+
+    def __init__(self, config: BeitConfig):
+        super().__init__()
+
+        self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+        self.attention = BeitAttention(config, window_size=None)
+        self.scale = nn.Parameter(
+            config.temporal_model_init_value * torch.ones((config.hidden_size)),
+            requires_grad=True,
+        )
+        self.drop_path = BeitDropPath(config.drop_path_rate)
+
+    def forward(self, hidden_states: torch.Tensor):
+        """
+        Args:
+            hidden_states (torch.Tensor): The input. Shape: [b,t,l,c]
+
+        Returns:
+            torch.Tensor: The output. Shape: [b,t,l,c]
+        """
+        b = hidden_states.shape[0]
+        output = einops.rearrange(hidden_states, "b t l c -> (b l) t c")
+        output = self.layernorm_before(output)
+        output = self.attention(output)
+        output = einops.rearrange(output[0], "(b l) t c -> b t l c", b=b)
+        return hidden_states + self.drop_path(output) * self.scale
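+
+
+# NOTE: illustrative sketch of the temporal attention above (doctest-style, not
+# executed; assumes a hypothetical BeitConfig with hidden_size=768 and inputs of
+# 4 frames with 197 tokens each):
+#
+#   >>> temp_attn = TemporalAttentionBeit(config)
+#   >>> x = torch.randn(2, 4, 197, 768)   # [b, t, l, c]
+#   >>> temp_attn(x).shape                # each spatial location attends across time
+#   torch.Size([2, 4, 197, 768])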
+
+
+class BeitLayer(nn.Module):
+    """This corresponds to the Block class in the timm implementation."""
+
+    def __init__(
+        self,
+        config: BeitConfig,
+        window_size: Optional[tuple] = None,
+        drop_path_rate: float = 0.0,
+    ) -> None:
+        super().__init__()
+        self.config = config
+        self.chunk_size_feed_forward = config.chunk_size_feed_forward
+        self.seq_len_dim = 1
+        self.attention = BeitAttention(config, window_size=window_size)
+        self.intermediate = BeitIntermediate(config)
+        self.output = BeitOutput(config)
+        self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+        self.drop_path = (
+            BeitDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
+        )
+        self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+        self.temporal_model_position = config.temporal_model_position
+
+        init_values = config.layer_scale_init_value
+        if init_values > 0:
+            self.lambda_1 = nn.Parameter(
+                init_values * torch.ones((config.hidden_size)), requires_grad=True
+            )
+            self.lambda_2 = nn.Parameter(
+                init_values * torch.ones((config.hidden_size)), requires_grad=True
+            )
+        else:
+            self.lambda_1, self.lambda_2 = None, None
+
+        if config.temporal_model_block == "st_adapter":
+            self.temp_model = STAdapter(**config.temporal_model_config)
+        elif config.temporal_model_block == "timesformer":
+            self.temp_model = TemporalAttention(**config.temporal_model_config)
+        elif config.temporal_model_block == "s4":
+            self.temp_model = TemporalS4(**config.temporal_model_config)
+        elif config.temporal_model_block == "ta_beit":
+            self.temp_model = TemporalAttentionBeit(config)
+        elif config.temporal_model_block == "window_attention":
+            self.temp_model = WindowTemporalAttention(**config.temporal_model_config)
+        elif config.temporal_model_block == "xclip":
+            self.temp_model = X_CLIP(**config.temporal_model_config)
+        elif config.temporal_model_block == "none":
+            self.temp_model = None
+        else:
+            raise ValueError(
+                f"Unsupported temporal model block: {config.temporal_model_block}"
+            )
+
+        self.temporal_model_block = config.temporal_model_block
+
+    def forward(
+        self,
+        hidden_states: torch.Tensor,
+        head_mask: Optional[torch.Tensor] = None,
+        output_attentions: bool = False,
+        relative_position_bias: Optional["BeitRelativePositionBias"] = None,
+    ) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
+
+        b, t, l, c = hidden_states.shape
+
+        if self.temporal_model_block == "xclip":
+            assert (
+                self.temporal_model_position == "first" and self.config.add_k_prompts == 1
+            ), "xclip must be put before the attention and add_k_prompts must be 1."
+
+        if self.temp_model is not None and self.temporal_model_position == "first":
+            hidden_states = self.temp_model(hidden_states)
+
+        hidden_states = einops.rearrange(hidden_states, "b t l c -> (b t) l c")
+
+        self_attention_outputs = self.attention(
+            self.layernorm_before(
+                hidden_states
+            ),  # in BEiT, layernorm is applied before self-attention
+            head_mask,
+            output_attentions=output_attentions,
+            relative_position_bias=relative_position_bias,
+        )
+        attention_output = self_attention_outputs[0]
+
+        # add self attentions if we output attention weights
+        outputs = self_attention_outputs[1:]
+
+        # apply lambda_1 if present
+        if self.lambda_1 is not None:
+            attention_output = self.lambda_1 * attention_output
+
+        # first residual connection
+        hidden_states = self.drop_path(attention_output) + hidden_states
+
+        # in BEiT, layernorm is also applied after self-attention
+        layer_output = self.layernorm_after(hidden_states)
+
+        layer_output = self.intermediate(layer_output)
+        layer_output = self.output(layer_output)
+
+        if self.lambda_2 is not None:
+            layer_output = self.lambda_2 * layer_output
+
+        # second residual connection
+        layer_output = self.drop_path(layer_output) + hidden_states
+
+        layer_output = einops.rearrange(layer_output, "(b t) l c -> b t l c", b=b)
+
+        # apply temporal modeling block
+        if self.temp_model is not None and self.temporal_model_position == "last":
+            layer_output = self.temp_model(layer_output)
+
+        outputs = (layer_output,) + outputs
+
+        return outputs
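+
+
+# NOTE: sketch of how a layer's temporal block is selected (illustrative only;
+# the keys mirror the branches in __init__ above, the values shown are assumptions):
+#
+#   >>> config.temporal_model_block = "ta_beit"     # or "st_adapter", "timesformer",
+#   ...                                             # "s4", "window_attention", "xclip", "none"
+#   >>> config.temporal_model_position = "last"     # run before ("first") or after ("last")
+#   ...                                             # the spatial self-attention
+#   >>> layer = BeitLayer(config, window_size=None, drop_path_rate=0.1)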
+
+
+class BeitRelativePositionBias(nn.Module):
+    def __init__(self, config: BeitConfig, window_size: tuple) -> None:
+        super().__init__()
+        self.window_size = window_size
+        self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
+        self.relative_position_bias_table = nn.Parameter(
+            torch.zeros(self.num_relative_distance, config.num_attention_heads)
+        )  # 2*Wh-1 * 2*Ww-1, nH
+        # the 3 extra entries are cls-to-token, token-to-cls, and cls-to-cls
+
+        # get pair-wise relative position index for each token inside the window
+        coords_h = torch.arange(window_size[0])
+        coords_w = torch.arange(window_size[1])
+        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
+        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
+        relative_coords = (
+            coords_flatten[:, :, None] - coords_flatten[:, None, :]
+        )  # 2, Wh*Ww, Wh*Ww
+        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
+        relative_coords[:, :, 0] += window_size[0] - 1  # shift to start from 0
+        relative_coords[:, :, 1] += window_size[1] - 1
+        relative_coords[:, :, 0] *= 2 * window_size[1] - 1
+        relative_position_index = torch.zeros(
+            size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype
+        )
+        relative_position_index[1:, 1:] = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
+        relative_position_index[0, 0:] = self.num_relative_distance - 3
+        relative_position_index[0:, 0] = self.num_relative_distance - 2
+        relative_position_index[0, 0] = self.num_relative_distance - 1
+
+        self.register_buffer("relative_position_index", relative_position_index)
+
+        # add bias for prompts
+        k = config.add_k_prompts
+        self.k = k
+        if k > 0:
+            self.prompt_bias_table = nn.parameter.Parameter(
+                torch.zeros((2 + k) * k, config.num_attention_heads)
+            )  # k prompt-to-token, k token-to-prompt, k*k prompt-to-prompt
+        else:
+            self.prompt_bias_table = None
+
+    def forward(self) -> torch.Tensor:
+        relative_position_bias = self.relative_position_bias_table[
+            self.relative_position_index.view(-1)
+        ].view(
+            self.window_size[0] * self.window_size[1] + 1,
+            self.window_size[0] * self.window_size[1] + 1,
+            -1,
+        )  # Wh*Ww,Wh*Ww,nH
+
+        k = self.k
+        if k > 0:
+            l = self.window_size[0] * self.window_size[1] + 1
+            bias = torch.zeros(l + k, l + k, relative_position_bias.shape[-1]).to(
+                relative_position_bias.device
+            )
+            bias[:l, :l] = relative_position_bias
+            bias[l:, :l] = self.prompt_bias_table[:k].view(k, 1, -1)  # prompt to token
+            bias[:l, l:] = self.prompt_bias_table[k : 2 * k].view(1, k, -1)  # token to prompt
+            bias[l:, l:] = self.prompt_bias_table[2 * k :].view(k, k, -1)  # prompt to prompt
+        else:
+            bias = relative_position_bias
+
+        return bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
+
+
+class BeitEncoder(nn.Module):
+    def __init__(self, config: BeitConfig, window_size: Optional[tuple] = None) -> None:
+        super().__init__()
+        self.config = config
+        if config.use_shared_relative_position_bias:
+            self.relative_position_bias = BeitRelativePositionBias(
+                config, window_size=window_size
+            )
+        else:
+            self.relative_position_bias = None
+
+        # stochastic depth decay rule
+        dpr = [
+            x.item()
+            for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)
+        ]
+        self.layer = nn.ModuleList(
+            [
+                BeitLayer(
+                    config,
+                    window_size=window_size if config.use_relative_position_bias else None,
+                    drop_path_rate=dpr[i],
+                )
+                for i in range(config.num_hidden_layers)
+            ]
+        )
+        self.gradient_checkpointing = False
+
+    def forward(
+        self,
+        hidden_states: torch.Tensor,
+        head_mask: Optional[torch.Tensor] = None,
+        output_attentions: bool = False,
+        output_hidden_states: bool = False,
+        return_dict: bool = True,
+    ) -> Union[tuple, BaseModelOutput]:
+        all_hidden_states = () if output_hidden_states else None
+        all_self_attentions = () if output_attentions else None
+
+        for i, layer_module in enumerate(self.layer):
+            if output_hidden_states:
+                all_hidden_states = all_hidden_states + (hidden_states,)
+
+            layer_head_mask = head_mask[i] if head_mask is not None else None
+
+            if self.gradient_checkpointing and self.training:
+
+                def create_custom_forward(module):
+                    def custom_forward(*inputs):
+                        return module(*inputs, output_attentions)
+
+                    return custom_forward
+
+                layer_outputs = torch.utils.checkpoint.checkpoint(
+                    create_custom_forward(layer_module),
+                    hidden_states,
+                    layer_head_mask,
+
use_reentrant=False, + ) + else: + relative_position_bias = ( + self.relative_position_bias() + if self.relative_position_bias is not None + else None + ) + layer_outputs = layer_module( + hidden_states, layer_head_mask, output_attentions, relative_position_bias + ) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + + # hidden_states = einops.rearrange(hidden_states, "b t l c -> (b t) l c") + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [hidden_states, all_hidden_states, all_self_attentions] + if v is not None + ) + return BaseModelOutput( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + ) + + +class BeitPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = BeitConfig + base_model_prefix = "beit" + main_input_name = "pixel_values" + supports_gradient_checkpointing = True + + def _init_weights(self, module): + """Initialize the weights""" + if isinstance(module, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, BeitEncoder): + module.gradient_checkpointing = value + + +BEIT_START_DOCSTRING = r""" + This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it + as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and + behavior. + + Parameters: + config ([`BeitConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +BEIT_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using [`BeitFeatureExtractor`]. See + [`BeitFeatureExtractor.__call__`] for details. + + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. 
+ return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare Beit Model transformer outputting raw hidden-states without any specific head on top.", + BEIT_START_DOCSTRING, +) +class BeitModel(BeitPreTrainedModel): + def __init__(self, config: BeitConfig, add_pooling_layer: bool = True) -> None: + super().__init__(config) + self.config = config + + self.embeddings = BeitEmbeddings(config) + self.encoder = BeitEncoder( + config, window_size=self.embeddings.patch_embeddings.patch_shape + ) + + self.layernorm = ( + nn.Identity() + if config.use_mean_pooling + else nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + ) + self.pooler = BeitPooler(config) if add_pooling_layer else None + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embeddings.patch_embeddings + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + processor_class=_FEAT_EXTRACTOR_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=BeitModelOutputWithPooling, + config_class=_CONFIG_FOR_DOC, + modality="vision", + expected_output=_EXPECTED_OUTPUT_SHAPE, + ) + def forward( + self, + pixel_values: Optional[torch.Tensor] = None, + bool_masked_pos: Optional[torch.BoolTensor] = None, + head_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[tuple, BeitModelOutputWithPooling]: + output_attentions = ( + output_attentions + if output_attentions is not None + else self.config.output_attentions + ) + output_hidden_states = ( + output_hidden_states + if output_hidden_states is not None + else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + # pixel_values: [bsz, nframes, c, h, w] + assert pixel_values.ndim == 5, logger.error( + f"input shape to st_beit: {pixel_values.shape}" + ) + + embedding_output = self.embeddings( + pixel_values, bool_masked_pos + ) # [bs, nframes, L, c] + + encoder_outputs = self.encoder( + embedding_output, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = encoder_outputs[0] + sequence_output = self.layernorm(sequence_output) + pooled_output = self.pooler(sequence_output) if self.pooler is not None else None + + # logger.info(f"sequence_output: {sequence_output.shape}. 
pooled_output: {pooled_output.shape}") + + if not return_dict: + head_outputs = ( + (sequence_output, pooled_output) + if pooled_output is not None + else (sequence_output,) + ) + return head_outputs + encoder_outputs[1:] + + return BeitModelOutputWithPooling( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + +class BeitPooler(nn.Module): + def __init__(self, config: BeitConfig) -> None: + super().__init__() + self.num_prompts = config.add_k_prompts + self.layernorm = ( + nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + if config.use_mean_pooling + else None + ) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + """ + Args: + hidden_states (torch.Tensor): Shape: [B,T,L,C] + """ + if self.layernorm is not None: + # Mean pool the final hidden states of the patch tokens + # patch_tokens = hidden_states[:, 1 + self.num_prompts :, :] + if self.num_prompts > 0: + patch_tokens = hidden_states[:, :, 1 : -self.num_prompts, :] + else: + patch_tokens = hidden_states[:, :, 1:, :] + pooled_output = self.layernorm(patch_tokens.mean(2)) + else: + # Pool by simply taking the final hidden state of the [CLS] token + pooled_output = hidden_states[:, :, 0] + + return pooled_output + + +@add_start_docstrings( + """Beit Model transformer with a 'language' modeling head on top. BEiT does masked image modeling by predicting + visual tokens of a Vector-Quantize Variational Autoencoder (VQ-VAE), whereas other vision models like ViT and DeiT + predict RGB pixel values. As a result, this class is incompatible with [`AutoModelForMaskedImageModeling`], so you + will need to use [`BeitForMaskedImageModeling`] directly if you wish to do masked image modeling with BEiT.""", + BEIT_START_DOCSTRING, +) +class BeitForMaskedImageModeling(BeitPreTrainedModel): + def __init__(self, config: BeitConfig) -> None: + super().__init__(config) + + self.num_labels = config.num_labels + self.beit = BeitModel(config, add_pooling_layer=False) + + # Classifier head + self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values: Optional[torch.Tensor] = None, + bool_masked_pos: Optional[torch.BoolTensor] = None, + head_mask: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[tuple, MaskedLMOutput]: + r""" + bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`): + Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). + + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the image classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
+ + Returns: + + Examples: + + ```python + >>> from transformers import BeitFeatureExtractor, BeitForMaskedImageModeling + >>> import torch + >>> from PIL import Image + >>> import requests + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> feature_extractor = BeitFeatureExtractor.from_pretrained("microsoft/beit-base-patch16-224-pt22k") + >>> model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k") + + >>> num_patches = (model.config.image_size // model.config.patch_size) ** 2 + >>> pixel_values = feature_extractor(images=image, return_tensors="pt").pixel_values + >>> # create random boolean mask of shape (batch_size, num_patches) + >>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool() + + >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos) + >>> loss, logits = outputs.loss, outputs.logits + >>> list(logits.shape) + [1, 196, 8192] + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.beit( + pixel_values, + bool_masked_pos=bool_masked_pos, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + sequence_output = self.layernorm(sequence_output) + prediction_scores = self.lm_head(sequence_output[:, 1:]) + + masked_lm_loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() # -100 index = padding token + masked_lm_loss = loss_fct(prediction_scores[bool_masked_pos], labels) + + if not return_dict: + output = (prediction_scores,) + outputs[1:] + return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output + + return MaskedLMOutput( + loss=masked_lm_loss, + logits=prediction_scores, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + Beit Model transformer with an image classification head on top (a linear layer on top of the average of the final + hidden states of the patch tokens) e.g. for ImageNet. + """, + BEIT_START_DOCSTRING, +) +class BeitForImageClassification(BeitPreTrainedModel): + def __init__(self, config: BeitConfig) -> None: + super().__init__(config) + + self.num_labels = config.num_labels + self.beit = BeitModel(config, add_pooling_layer=True) + + # Classifier head + self.classifier = ( + nn.Linear(config.hidden_size, config.num_labels) + if config.num_labels > 0 + else nn.Identity() + ) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + processor_class=_FEAT_EXTRACTOR_FOR_DOC, + checkpoint=_IMAGE_CLASS_CHECKPOINT, + output_type=ImageClassifierOutput, + config_class=_CONFIG_FOR_DOC, + expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, + ) + def forward( + self, + pixel_values: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[tuple, ImageClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the image classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. 
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + outputs = self.beit( + pixel_values, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = outputs.pooler_output if return_dict else outputs[1] + + logits = self.classifier(pooled_output) + + loss = None + if labels is not None: + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and ( + labels.dtype == torch.long or labels.dtype == torch.int + ): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return ImageClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +class BeitConvModule(nn.Module): + """ + A convolutional block that bundles conv/norm/activation layers. This block simplifies the usage of convolution + layers, which are commonly used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU). + + Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation. + """ + + def __init__( + self, + in_channels: int, + out_channels: int, + kernel_size: Union[int, Tuple[int, int]], + padding: Union[int, Tuple[int, int], str] = 0, + bias: bool = False, + dilation: Union[int, Tuple[int, int]] = 1, + ) -> None: + super().__init__() + self.conv = nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + padding=padding, + bias=bias, + dilation=dilation, + ) + self.bn = nn.BatchNorm2d(out_channels) + self.activation = nn.ReLU() + + def forward(self, input: torch.Tensor) -> torch.Tensor: + output = self.conv(input) + output = self.bn(output) + output = self.activation(output) + + return output + + +class BeitPyramidPoolingBlock(nn.Module): + def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None: + super().__init__() + self.layers = [ + nn.AdaptiveAvgPool2d(pool_scale), + BeitConvModule(in_channels, channels, kernel_size=1), + ] + for i, layer in enumerate(self.layers): + self.add_module(str(i), layer) + + def forward(self, input: torch.Tensor) -> torch.Tensor: + hidden_state = input + for layer in self.layers: + hidden_state = layer(hidden_state) + return hidden_state + + +class BeitPyramidPoolingModule(nn.Module): + """ + Pyramid Pooling Module (PPM) used in PSPNet. + + Args: + pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid + Module. + in_channels (int): Input channels. + channels (int): Channels after modules, before conv_seg. + align_corners (bool): align_corners argument of F.interpolate. 
+ + Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation. + """ + + def __init__( + self, + pool_scales: Tuple[int, ...], + in_channels: int, + channels: int, + align_corners: bool, + ) -> None: + super().__init__() + self.pool_scales = pool_scales + self.align_corners = align_corners + self.in_channels = in_channels + self.channels = channels + self.blocks = [] + for i, pool_scale in enumerate(pool_scales): + block = BeitPyramidPoolingBlock( + pool_scale=pool_scale, in_channels=in_channels, channels=channels + ) + self.blocks.append(block) + self.add_module(str(i), block) + + def forward(self, x: torch.Tensor) -> List[torch.Tensor]: + ppm_outs = [] + for ppm in self.blocks: + ppm_out = ppm(x) + upsampled_ppm_out = nn.functional.interpolate( + ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners + ) + ppm_outs.append(upsampled_ppm_out) + return ppm_outs + + +class BeitUperHead(nn.Module): + """ + Unified Perceptual Parsing for Scene Understanding. This head is the implementation of + [UPerNet](https://arxiv.org/abs/1807.10221). + + Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation. + """ + + def __init__(self, config: BeitConfig) -> None: + super().__init__() + + self.pool_scales = config.pool_scales # e.g. (1, 2, 3, 6) + self.in_channels = [config.hidden_size] * 4 # e.g. [768, 768, 768, 768] + self.channels = config.hidden_size + self.align_corners = False + self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1) + + # PSP Module + self.psp_modules = BeitPyramidPoolingModule( + self.pool_scales, + self.in_channels[-1], + self.channels, + align_corners=self.align_corners, + ) + self.bottleneck = BeitConvModule( + self.in_channels[-1] + len(self.pool_scales) * self.channels, + self.channels, + kernel_size=3, + padding=1, + ) + # FPN Module + self.lateral_convs = nn.ModuleList() + self.fpn_convs = nn.ModuleList() + for in_channels in self.in_channels[:-1]: # skip the top layer + l_conv = BeitConvModule(in_channels, self.channels, kernel_size=1) + fpn_conv = BeitConvModule(self.channels, self.channels, kernel_size=3, padding=1) + self.lateral_convs.append(l_conv) + self.fpn_convs.append(fpn_conv) + + self.fpn_bottleneck = BeitConvModule( + len(self.in_channels) * self.channels, + self.channels, + kernel_size=3, + padding=1, + ) + + def psp_forward(self, inputs): + x = inputs[-1] + psp_outs = [x] + psp_outs.extend(self.psp_modules(x)) + psp_outs = torch.cat(psp_outs, dim=1) + output = self.bottleneck(psp_outs) + + return output + + def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor: + # build laterals + laterals = [ + lateral_conv(encoder_hidden_states[i]) + for i, lateral_conv in enumerate(self.lateral_convs) + ] + + laterals.append(self.psp_forward(encoder_hidden_states)) + + # build top-down path + used_backbone_levels = len(laterals) + for i in range(used_backbone_levels - 1, 0, -1): + prev_shape = laterals[i - 1].shape[2:] + laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate( + laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners + ) + + # build outputs + fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)] + # append psp feature + fpn_outs.append(laterals[-1]) + + for i in range(used_backbone_levels - 1, 0, -1): + fpn_outs[i] = nn.functional.interpolate( + fpn_outs[i], + size=fpn_outs[0].shape[2:], + mode="bilinear", + align_corners=self.align_corners, + ) + fpn_outs = 
torch.cat(fpn_outs, dim=1)
+        output = self.fpn_bottleneck(fpn_outs)
+        output = self.classifier(output)
+
+        return output
+
+
+class BeitFCNHead(nn.Module):
+    """
+    Fully Convolutional Networks for Semantic Segmentation. This head is the implementation of
+    [FCN](https://arxiv.org/abs/1411.4038).
+
+    Args:
+        config (BeitConfig): Configuration.
+        in_index (int): Index of the encoder hidden state to use as input. Default: 2.
+        kernel_size (int): The kernel size for convs in the head. Default: 3.
+        dilation (int): The dilation rate for convs in the head. Default: 1.
+
+    Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
+    """
+
+    def __init__(
+        self,
+        config: BeitConfig,
+        in_index: int = 2,
+        kernel_size: int = 3,
+        dilation: Union[int, Tuple[int, int]] = 1,
+    ) -> None:
+        super().__init__()
+        self.in_channels = config.hidden_size
+        self.channels = config.auxiliary_channels
+        self.num_convs = config.auxiliary_num_convs
+        self.concat_input = config.auxiliary_concat_input
+        self.in_index = in_index
+
+        conv_padding = (kernel_size // 2) * dilation
+        convs = []
+        convs.append(
+            BeitConvModule(
+                self.in_channels,
+                self.channels,
+                kernel_size=kernel_size,
+                padding=conv_padding,
+                dilation=dilation,
+            )
+        )
+        for i in range(self.num_convs - 1):
+            convs.append(
+                BeitConvModule(
+                    self.channels,
+                    self.channels,
+                    kernel_size=kernel_size,
+                    padding=conv_padding,
+                    dilation=dilation,
+                )
+            )
+        if self.num_convs == 0:
+            self.convs = nn.Identity()
+        else:
+            self.convs = nn.Sequential(*convs)
+        if self.concat_input:
+            self.conv_cat = BeitConvModule(
+                self.in_channels + self.channels,
+                self.channels,
+                kernel_size=kernel_size,
+                padding=kernel_size // 2,
+            )
+
+        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
+
+    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
+        # just take the relevant feature maps
+        hidden_states = encoder_hidden_states[self.in_index]
+        output = self.convs(hidden_states)
+        if self.concat_input:
+            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
+        output = self.classifier(output)
+        return output
+
+
+@add_start_docstrings(
+    """
+    Beit Model transformer with a semantic segmentation head on top e.g. for ADE20k, CityScapes.
+ """, + BEIT_START_DOCSTRING, +) +class BeitForSemanticSegmentation(BeitPreTrainedModel): + def __init__(self, config: BeitConfig) -> None: + super().__init__(config) + + self.num_labels = config.num_labels + self.beit = BeitModel(config, add_pooling_layer=False) + + # FPNs + self.fpn1 = nn.Sequential( + nn.ConvTranspose2d( + config.hidden_size, config.hidden_size, kernel_size=2, stride=2 + ), + nn.BatchNorm2d(config.hidden_size), + nn.GELU(), + nn.ConvTranspose2d( + config.hidden_size, config.hidden_size, kernel_size=2, stride=2 + ), + ) + self.fpn2 = nn.Sequential( + nn.ConvTranspose2d( + config.hidden_size, config.hidden_size, kernel_size=2, stride=2 + ), + ) + self.fpn3 = nn.Identity() + self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2) + + # Semantic segmentation head(s) + self.decode_head = BeitUperHead(config) + self.auxiliary_head = BeitFCNHead(config) if config.use_auxiliary_head else None + + # Initialize weights and apply final processing + self.post_init() + + def compute_loss(self, logits, auxiliary_logits, labels): + # upsample logits to the images' original size + upsampled_logits = nn.functional.interpolate( + logits, size=labels.shape[-2:], mode="bilinear", align_corners=False + ) + if auxiliary_logits is not None: + upsampled_auxiliary_logits = nn.functional.interpolate( + auxiliary_logits, size=labels.shape[-2:], mode="bilinear", align_corners=False + ) + # compute weighted loss + loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index) + main_loss = loss_fct(upsampled_logits, labels) + auxiliary_loss = loss_fct(upsampled_auxiliary_logits, labels) + loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss + + return loss + + @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING) + @replace_return_docstrings( + output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC + ) + def forward( + self, + pixel_values: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[tuple, SemanticSegmenterOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): + Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy). 
+ + Returns: + + Examples: + + ```python + >>> from transformers import AutoFeatureExtractor, BeitForSemanticSegmentation + >>> from PIL import Image + >>> import requests + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/beit-base-finetuned-ade-640-640") + >>> model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640") + + >>> inputs = feature_extractor(images=image, return_tensors="pt") + >>> outputs = model(**inputs) + >>> # logits are of shape (batch_size, num_labels, height, width) + >>> logits = outputs.logits + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + output_hidden_states = ( + output_hidden_states + if output_hidden_states is not None + else self.config.output_hidden_states + ) + + outputs = self.beit( + pixel_values, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=True, # we need the intermediate hidden states + return_dict=return_dict, + ) + + encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1] + + # only keep certain features, and reshape + # note that we do +1 as the encoder_hidden_states also includes the initial embeddings + features = [ + feature + for idx, feature in enumerate(encoder_hidden_states) + if idx + 1 in self.config.out_indices + ] + batch_size = pixel_values.shape[0] + patch_resolution = self.config.image_size // self.config.patch_size + features = [ + x[:, 1:, :] + .permute(0, 2, 1) + .reshape(batch_size, -1, patch_resolution, patch_resolution) + for x in features + ] + + # apply FPNs + ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4] + for i in range(len(features)): + features[i] = ops[i](features[i]) + + logits = self.decode_head(features) + + auxiliary_logits = None + if self.auxiliary_head is not None: + auxiliary_logits = self.auxiliary_head(features) + + loss = None + if labels is not None: + if self.config.num_labels == 1: + raise ValueError("The number of labels should be greater than one") + else: + loss = self.compute_loss(logits, auxiliary_logits, labels) + + if not return_dict: + if output_hidden_states: + output = (logits,) + outputs[1:] + else: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return SemanticSegmenterOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states if output_hidden_states else None, + attentions=outputs.attentions, + ) diff --git a/models/backbones/bert/__init__.py b/models/backbones/bert/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/models/backbones/bert/builder.py b/models/backbones/bert/builder.py new file mode 100644 index 0000000..18271b0 --- /dev/null +++ b/models/backbones/bert/builder.py @@ -0,0 +1,71 @@ +from .xbert import BertConfig, BertForMaskedLM, BertLMHeadModel, BertModel + + +def build_bert(model_config, pretrain, checkpoint, expert_type, modality_type='text'): + """build text encoder. + + Args: + model_config (dict): model config. + pretrain (bool): Whether to do pretrain or finetuning. + checkpoint (bool): whether to do gradient_checkpointing. 
+
+    Returns:
+        BertForMaskedLM if pretraining, otherwise BertModel.
+
+    """
+    bert_size = model_config['expert_size']
+    bert_config = BertConfig.from_json_file(model_config[f'bert_config_{bert_size}'])
+    # bert_config.encoder_width = model_config.vision_encoder.d_model
+    bert_config.gradient_checkpointing = checkpoint
+    bert_config.num_hidden_layers = model_config[f'num_layers_{expert_type}_expert']
+    if expert_type == 'modality':
+        if modality_type == 'vis':
+            bert_config.cross_attention_freq = 2
+        else:
+            bert_config.cross_attention_freq = -1
+    else:
+        bert_config.cross_attention_freq = 1
+
+    if pretrain:
+        text_encoder, loading_info = BertForMaskedLM.from_pretrained(
+            f'bert-{bert_size}-uncased',
+            config=bert_config,
+            output_loading_info=True,
+        )
+    else:
+        text_encoder, loading_info = BertModel.from_pretrained(
+            f'bert-{bert_size}-uncased',
+            config=bert_config,
+            add_pooling_layer=True,
+            output_loading_info=True,
+        )
+
+    return text_encoder
+
+
+def build_bert_decoder(model_config, checkpoint):
+    """build a text decoder configured like the multimodal encoder.
+
+    Args:
+        model_config (dict): model config.
+        checkpoint (bool): whether to do gradient_checkpointing.
+
+    Returns:
+        BertLMHeadModel: the text decoder.
+
+    """
+    bert_config = BertConfig.from_json_file(model_config.text_encoder.config)
+    bert_config.encoder_width = model_config.vision_encoder.d_model
+    bert_config.gradient_checkpointing = checkpoint
+
+    bert_config.fusion_layer = 0
+    bert_config.num_hidden_layers = (
+        bert_config.num_hidden_layers - model_config.text_encoder.fusion_layer
+    )
+
+    text_decoder, loading_info = BertLMHeadModel.from_pretrained(
+        model_config.text_encoder.pretrained,
+        config=bert_config,
+        output_loading_info=True,
+    )
+
+    return text_decoder
diff --git a/models/backbones/bert/tokenization_bert.py b/models/backbones/bert/tokenization_bert.py
new file mode 100644
index 0000000..66e8d8e
--- /dev/null
+++ b/models/backbones/bert/tokenization_bert.py
@@ -0,0 +1,546 @@
+# coding=utf-8
+# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
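+
+# NOTE: usage sketch for this module (illustrative only; "vocab.txt" is a
+# hypothetical local path, and the output assumes a standard BERT uncased vocab):
+#
+#   >>> tokenizer = BertTokenizer(vocab_file="vocab.txt", do_lower_case=True)
+#   >>> tokenizer.tokenize("unaffable")
+#   ['un', '##aff', '##able']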
+"""Tokenization classes for Bert.""" + + +import collections +import os +import unicodedata +from typing import List, Optional, Tuple + +from transformers.tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace +from transformers.utils import logging + + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} + +PRETRAINED_VOCAB_FILES_MAP = { + "vocab_file": { + "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt", + "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt", + "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt", + "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt", + "bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt", + "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt", + "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt", + "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt", + "bert-large-uncased-whole-word-masking": "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt", + "bert-large-cased-whole-word-masking": "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt", + "bert-large-uncased-whole-word-masking-finetuned-squad": "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt", + "bert-large-cased-whole-word-masking-finetuned-squad": "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt", + "bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt", + "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt", + "bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt", + "TurkuNLP/bert-base-finnish-cased-v1": "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt", + "TurkuNLP/bert-base-finnish-uncased-v1": "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt", + "wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt", + } +} + +PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { + "bert-base-uncased": 512, + "bert-large-uncased": 512, + "bert-base-cased": 512, + "bert-large-cased": 512, + "bert-base-multilingual-uncased": 512, + "bert-base-multilingual-cased": 512, + "bert-base-chinese": 512, + "bert-base-german-cased": 512, + "bert-large-uncased-whole-word-masking": 512, + "bert-large-cased-whole-word-masking": 512, + "bert-large-uncased-whole-word-masking-finetuned-squad": 512, + "bert-large-cased-whole-word-masking-finetuned-squad": 512, + "bert-base-cased-finetuned-mrpc": 512, + "bert-base-german-dbmdz-cased": 512, + "bert-base-german-dbmdz-uncased": 512, + "TurkuNLP/bert-base-finnish-cased-v1": 512, + "TurkuNLP/bert-base-finnish-uncased-v1": 512, + "wietsedv/bert-base-dutch-cased": 512, +} + +PRETRAINED_INIT_CONFIGURATION = { + "bert-base-uncased": {"do_lower_case": True}, + "bert-large-uncased": {"do_lower_case": True}, + "bert-base-cased": {"do_lower_case": False}, + "bert-large-cased": {"do_lower_case": False}, + "bert-base-multilingual-uncased": 
{"do_lower_case": True}, + "bert-base-multilingual-cased": {"do_lower_case": False}, + "bert-base-chinese": {"do_lower_case": False}, + "bert-base-german-cased": {"do_lower_case": False}, + "bert-large-uncased-whole-word-masking": {"do_lower_case": True}, + "bert-large-cased-whole-word-masking": {"do_lower_case": False}, + "bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True}, + "bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False}, + "bert-base-cased-finetuned-mrpc": {"do_lower_case": False}, + "bert-base-german-dbmdz-cased": {"do_lower_case": False}, + "bert-base-german-dbmdz-uncased": {"do_lower_case": True}, + "TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False}, + "TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True}, + "wietsedv/bert-base-dutch-cased": {"do_lower_case": False}, +} + + +def load_vocab(vocab_file): + """Loads a vocabulary file into a dictionary.""" + vocab = collections.OrderedDict() + with open(vocab_file, "r", encoding="utf-8") as reader: + tokens = reader.readlines() + for index, token in enumerate(tokens): + token = token.rstrip("\n") + vocab[token] = index + return vocab + + +def whitespace_tokenize(text): + """Runs basic whitespace cleaning and splitting on a piece of text.""" + text = text.strip() + if not text: + return [] + tokens = text.split() + return tokens + + +class BertTokenizer(PreTrainedTokenizer): + r""" + Construct a BERT tokenizer. Based on WordPiece. + This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods. + Users should refer to this superclass for more information regarding those methods. + Args: + vocab_file (:obj:`str`): + File containing the vocabulary. + do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`): + Whether or not to lowercase the input when tokenizing. + do_basic_tokenize (:obj:`bool`, `optional`, defaults to :obj:`True`): + Whether or not to do basic tokenization before WordPiece. + never_split (:obj:`Iterable`, `optional`): + Collection of tokens which will never be split during tokenization. Only has an effect when + :obj:`do_basic_tokenize=True` + unk_token (:obj:`str`, `optional`, defaults to :obj:`"[UNK]"`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + sep_token (:obj:`str`, `optional`, defaults to :obj:`"[SEP]"`): + The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for + sequence classification or for a text and a question for question answering. It is also used as the last + token of a sequence built with special tokens. + pad_token (:obj:`str`, `optional`, defaults to :obj:`"[PAD]"`): + The token used for padding, for example when batching sequences of different lengths. + cls_token (:obj:`str`, `optional`, defaults to :obj:`"[CLS]"`): + The classifier token which is used when doing sequence classification (classification of the whole sequence + instead of per-token classification). It is the first token of the sequence when built with special tokens. + mask_token (:obj:`str`, `optional`, defaults to :obj:`"[MASK]"`): + The token used for masking values. This is the token used when training this model with masked language + modeling. This is the token which the model will try to predict. + tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`): + Whether or not to tokenize Chinese characters. 
+            This should likely be deactivated for Japanese (see this `issue
+            <https://github.com/huggingface/transformers/issues/328>`__).
+        strip_accents: (:obj:`bool`, `optional`):
+            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+            value for :obj:`lowercase` (as in the original BERT).
+    """
+
+    vocab_files_names = VOCAB_FILES_NAMES
+    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
+    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+
+    def __init__(
+        self,
+        vocab_file,
+        do_lower_case=True,
+        do_basic_tokenize=True,
+        never_split=None,
+        unk_token="[UNK]",
+        sep_token="[SEP]",
+        pad_token="[PAD]",
+        cls_token="[CLS]",
+        mask_token="[MASK]",
+        tokenize_chinese_chars=True,
+        strip_accents=None,
+        **kwargs
+    ):
+        super().__init__(
+            do_lower_case=do_lower_case,
+            do_basic_tokenize=do_basic_tokenize,
+            never_split=never_split,
+            unk_token=unk_token,
+            sep_token=sep_token,
+            pad_token=pad_token,
+            cls_token=cls_token,
+            mask_token=mask_token,
+            tokenize_chinese_chars=tokenize_chinese_chars,
+            strip_accents=strip_accents,
+            **kwargs,
+        )
+
+        if not os.path.isfile(vocab_file):
+            raise ValueError(
+                "Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
+                "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file)
+            )
+        self.vocab = load_vocab(vocab_file)
+        self.ids_to_tokens = collections.OrderedDict(
+            [(ids, tok) for tok, ids in self.vocab.items()])
+        self.do_basic_tokenize = do_basic_tokenize
+        if do_basic_tokenize:
+            self.basic_tokenizer = BasicTokenizer(
+                do_lower_case=do_lower_case,
+                never_split=never_split,
+                tokenize_chinese_chars=tokenize_chinese_chars,
+                strip_accents=strip_accents,
+            )
+        self.wordpiece_tokenizer = WordpieceTokenizer(
+            vocab=self.vocab, unk_token=self.unk_token)
+
+    @property
+    def do_lower_case(self):
+        return self.basic_tokenizer.do_lower_case
+
+    @property
+    def vocab_size(self):
+        return len(self.vocab)
+
+    def get_vocab(self):
+        return dict(self.vocab, **self.added_tokens_encoder)
+
+    def _tokenize(self, text):
+        split_tokens = []
+        if self.do_basic_tokenize:
+            for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
+
+                # If the token is part of the never_split set
+                if token in self.basic_tokenizer.never_split:
+                    split_tokens.append(token)
+                else:
+                    split_tokens += self.wordpiece_tokenizer.tokenize(token)
+        else:
+            split_tokens = self.wordpiece_tokenizer.tokenize(text)
+        return split_tokens
+
+    def _convert_token_to_id(self, token):
+        """Converts a token (str) to an id using the vocab."""
+        return self.vocab.get(token, self.vocab.get(self.unk_token))
+
+    def _convert_id_to_token(self, index):
+        """Converts an index (integer) to a token (str) using the vocab."""
+        return self.ids_to_tokens.get(index, self.unk_token)
+
+    def convert_tokens_to_string(self, tokens):
+        """Converts a sequence of tokens (string) into a single string."""
+        out_string = " ".join(tokens).replace(" ##", "").strip()
+        return out_string
+
+    def build_inputs_with_special_tokens(
+        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+    ) -> List[int]:
+        """
+        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
+        adding special tokens.
A BERT sequence has the following format: + - single sequence: ``[CLS] X `` + - pair of sequences: ``[CLS] A [SEP] B [SEP]`` + Args: + token_ids_0 (:obj:`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (:obj:`List[int]`, `optional`): + Optional second list of IDs for sequence pairs. + Returns: + :obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens. + """ + if token_ids_1 is None: + return [self.cls_token_id] + token_ids_0 + cls = [self.cls_token_id] + sep = [self.sep_token_id] + return cls + token_ids_0 + sep + token_ids_1 + sep + + def get_special_tokens_mask( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False + ) -> List[int]: + """ + Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer ``prepare_for_model`` method. + Args: + token_ids_0 (:obj:`List[int]`): + List of IDs. + token_ids_1 (:obj:`List[int]`, `optional`): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): + Whether or not the token list is already formatted with special tokens for the model. + Returns: + :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + + if already_has_special_tokens: + if token_ids_1 is not None: + raise ValueError( + "You should not supply a second sequence if the provided sequence of " + "ids is already formatted with special tokens for the model." + ) + return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0)) + + if token_ids_1 is not None: + return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] + return [1] + ([0] * len(token_ids_0)) + [1] + + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence + pair mask has the following format: + :: + 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 + | first sequence | second sequence | + If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s). + Args: + token_ids_0 (:obj:`List[int]`): + List of IDs. + token_ids_1 (:obj:`List[int]`, `optional`): + Optional second list of IDs for sequence pairs. + Returns: + :obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given + sequence(s). + """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + if token_ids_1 is None: + return len(cls + token_ids_0 + sep) * [0] + return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + index = 0 + if os.path.isdir(save_directory): + vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + + VOCAB_FILES_NAMES["vocab_file"] + ) + else: + vocab_file = (filename_prefix + + "-" if filename_prefix else "") + save_directory + with open(vocab_file, "w", encoding="utf-8") as writer: + for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): + if index != token_index: + logger.warning( + "Saving vocabulary to {}: vocabulary indices are not consecutive." 
+ " Please check that the vocabulary is not corrupted!".format( + vocab_file) + ) + index = token_index + writer.write(token + "\n") + index += 1 + return (vocab_file,) + + +class BasicTokenizer(object): + """ + Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). + Args: + do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`): + Whether or not to lowercase the input when tokenizing. + never_split (:obj:`Iterable`, `optional`): + Collection of tokens which will never be split during tokenization. Only has an effect when + :obj:`do_basic_tokenize=True` + tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`): + Whether or not to tokenize Chinese characters. + This should likely be deactivated for Japanese (see this `issue + `__). + strip_accents: (:obj:`bool`, `optional`): + Whether or not to strip all accents. If this option is not specified, then it will be determined by the + value for :obj:`lowercase` (as in the original BERT). + """ + + def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None): + if never_split is None: + never_split = [] + self.do_lower_case = do_lower_case + self.never_split = set(never_split) + self.tokenize_chinese_chars = tokenize_chinese_chars + self.strip_accents = strip_accents + + def tokenize(self, text, never_split=None): + """ + Basic Tokenization of a piece of text. Split on "white spaces" only, for sub-word tokenization, see + WordPieceTokenizer. + Args: + **never_split**: (`optional`) list of str + Kept for backward compatibility purposes. Now implemented directly at the base class level (see + :func:`PreTrainedTokenizer.tokenize`) List of token not to split. + """ + # union() returns a new set by concatenating the two sets. + never_split = self.never_split.union( + set(never_split)) if never_split else self.never_split + text = self._clean_text(text) + + # This was added on November 1st, 2018 for the multilingual and Chinese + # models. This is also applied to the English models now, but it doesn't + # matter since the English models were not trained on any Chinese data + # and generally don't have any Chinese data in them (there are Chinese + # characters in the vocabulary because Wikipedia does have some Chinese + # words in the English Wikipedia.). 
+ if self.tokenize_chinese_chars: + text = self._tokenize_chinese_chars(text) + orig_tokens = whitespace_tokenize(text) + split_tokens = [] + for token in orig_tokens: + if token not in never_split: + if self.do_lower_case: + token = token.lower() + if self.strip_accents is not False: + token = self._run_strip_accents(token) + elif self.strip_accents: + token = self._run_strip_accents(token) + split_tokens.extend(self._run_split_on_punc(token, never_split)) + + output_tokens = whitespace_tokenize(" ".join(split_tokens)) + return output_tokens + + def _run_strip_accents(self, text): + """Strips accents from a piece of text.""" + text = unicodedata.normalize("NFD", text) + output = [] + for char in text: + cat = unicodedata.category(char) + if cat == "Mn": + continue + output.append(char) + return "".join(output) + + def _run_split_on_punc(self, text, never_split=None): + """Splits punctuation on a piece of text.""" + if never_split is not None and text in never_split: + return [text] + chars = list(text) + i = 0 + start_new_word = True + output = [] + while i < len(chars): + char = chars[i] + if _is_punctuation(char): + output.append([char]) + start_new_word = True + else: + if start_new_word: + output.append([]) + start_new_word = False + output[-1].append(char) + i += 1 + + return ["".join(x) for x in output] + + def _tokenize_chinese_chars(self, text): + """Adds whitespace around any CJK character.""" + output = [] + for char in text: + cp = ord(char) + if self._is_chinese_char(cp): + output.append(" ") + output.append(char) + output.append(" ") + else: + output.append(char) + return "".join(output) + + def _is_chinese_char(self, cp): + """Checks whether CP is the codepoint of a CJK character.""" + # This defines a "chinese character" as anything in the CJK Unicode block: + # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) + # + # Note that the CJK Unicode block is NOT all Japanese and Korean characters, + # despite its name. The modern Korean Hangul alphabet is a different block, + # as is Japanese Hiragana and Katakana. Those alphabets are used to write + # space-separated words, so they are not treated specially and handled + # like the all of the other languages. + if ( + (cp >= 0x4E00 and cp <= 0x9FFF) + or (cp >= 0x3400 and cp <= 0x4DBF) # + or (cp >= 0x20000 and cp <= 0x2A6DF) # + or (cp >= 0x2A700 and cp <= 0x2B73F) # + or (cp >= 0x2B740 and cp <= 0x2B81F) # + or (cp >= 0x2B820 and cp <= 0x2CEAF) # + or (cp >= 0xF900 and cp <= 0xFAFF) + or (cp >= 0x2F800 and cp <= 0x2FA1F) # + ): # + return True + + return False + + def _clean_text(self, text): + """Performs invalid character removal and whitespace cleanup on text.""" + output = [] + for char in text: + cp = ord(char) + if cp == 0 or cp == 0xFFFD or _is_control(char): + continue + if _is_whitespace(char): + output.append(" ") + else: + output.append(char) + return "".join(output) + + +class WordpieceTokenizer(object): + """Runs WordPiece tokenization.""" + + def __init__(self, vocab, unk_token, max_input_chars_per_word=100): + self.vocab = vocab + self.unk_token = unk_token + self.max_input_chars_per_word = max_input_chars_per_word + + def tokenize(self, text): + """ + Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform + tokenization using the given vocabulary. + For example, :obj:`input = "unaffable"` wil return as output :obj:`["un", "##aff", "##able"]`. + Args: + text: A single token or whitespace separated tokens. 
This should have + already been passed through `BasicTokenizer`. + Returns: + A list of wordpiece tokens. + """ + + output_tokens = [] + for token in whitespace_tokenize(text): + chars = list(token) + if len(chars) > self.max_input_chars_per_word: + output_tokens.append(self.unk_token) + continue + + is_bad = False + start = 0 + sub_tokens = [] + while start < len(chars): + end = len(chars) + cur_substr = None + while start < end: + substr = "".join(chars[start:end]) + if start > 0: + substr = "##" + substr + if substr in self.vocab: + cur_substr = substr + break + end -= 1 + if cur_substr is None: + is_bad = True + break + sub_tokens.append(cur_substr) + start = end + + if is_bad: + output_tokens.append(self.unk_token) + else: + output_tokens.extend(sub_tokens) + return output_tokens diff --git a/models/backbones/bert/xbert.py b/models/backbones/bert/xbert.py new file mode 100644 index 0000000..494eff2 --- /dev/null +++ b/models/backbones/bert/xbert.py @@ -0,0 +1,2160 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""PyTorch BERT model. """ + +import math +import os +import warnings +from dataclasses import dataclass +from typing import Optional, Tuple + +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +import transformers +from torch import Tensor, device, dtype, nn +from torch.nn import CrossEntropyLoss, MSELoss +from transformers.activations import ACT2FN +# from transformers.models.bert.configuration_bert import BertConfig +from transformers.configuration_utils import PretrainedConfig +from transformers.file_utils import (ModelOutput, add_start_docstrings, + add_start_docstrings_to_model_forward, + replace_return_docstrings) +from transformers.modeling_outputs import ( + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPoolingAndCrossAttentions, + CausalLMOutputWithCrossAttentions, MaskedLMOutput, + MultipleChoiceModelOutput, NextSentencePredictorOutput, + QuestionAnsweringModelOutput, SequenceClassifierOutput, + TokenClassifierOutput) +from transformers.modeling_utils import (PreTrainedModel, + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + prune_linear_layer) +from transformers.utils import logging + +transformers.logging.set_verbosity_error() + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "BertConfig" +_TOKENIZER_FOR_DOC = "BertTokenizer" + +BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "bert-base-uncased", + "bert-large-uncased", + "bert-base-cased", + "bert-large-cased", + "bert-base-multilingual-uncased", + "bert-base-multilingual-cased", + "bert-base-chinese", + "bert-base-german-cased", + "bert-large-uncased-whole-word-masking", + "bert-large-cased-whole-word-masking", + "bert-large-uncased-whole-word-masking-finetuned-squad", + "bert-large-cased-whole-word-masking-finetuned-squad", + "bert-base-cased-finetuned-mrpc", + 
"bert-base-german-dbmdz-cased", + "bert-base-german-dbmdz-uncased", + "cl-tohoku/bert-base-japanese", + "cl-tohoku/bert-base-japanese-whole-word-masking", + "cl-tohoku/bert-base-japanese-char", + "cl-tohoku/bert-base-japanese-char-whole-word-masking", + "TurkuNLP/bert-base-finnish-cased-v1", + "TurkuNLP/bert-base-finnish-uncased-v1", + "wietsedv/bert-base-dutch-cased", + # See all BERT models at https://huggingface.co/models?filter=bert +] + + +class BertConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`BertModel`] or a [`TFBertModel`]. It is used to + instantiate a BERT model according to the specified arguments, defining the model architecture. Instantiating a + configuration with the defaults will yield a similar configuration to that of the BERT + [bert-base-uncased](https://huggingface.co/bert-base-uncased) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 30522): + Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`BertModel`] or [`TFBertModel`]. + hidden_size (`int`, *optional*, defaults to 768): + Dimensionality of the encoder layers and the pooler layer. + num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + intermediate_size (`int`, *optional*, defaults to 3072): + Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. + hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"silu"` and `"gelu_new"` are supported. + hidden_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout ratio for the attention probabilities. + max_position_embeddings (`int`, *optional*, defaults to 512): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + type_vocab_size (`int`, *optional*, defaults to 2): + The vocabulary size of the `token_type_ids` passed when calling [`BertModel`] or [`TFBertModel`]. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-12): + The epsilon used by the layer normalization layers. + position_embedding_type (`str`, *optional*, defaults to `"absolute"`): + Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For + positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to + [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155). 
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models + with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658). + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if `config.is_decoder=True`. + classifier_dropout (`float`, *optional*): + The dropout ratio for the classification head. + + Examples: + + ```python + >>> from transformers import BertModel, BertConfig + + >>> # Initializing a BERT bert-base-uncased style configuration + >>> configuration = BertConfig() + + >>> # Initializing a model from the bert-base-uncased style configuration + >>> model = BertModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + model_type = "bert" + + def __init__( + self, + vocab_size=30522, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + intermediate_size=3072, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=2, + initializer_range=0.02, + layer_norm_eps=1e-12, + pad_token_id=0, + position_embedding_type="absolute", + use_cache=True, + classifier_dropout=None, + cross_module="ca", + **kwargs, + ): + super().__init__(pad_token_id=pad_token_id, **kwargs) + + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.hidden_act = hidden_act + self.intermediate_size = intermediate_size + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + self.position_embedding_type = position_embedding_type + self.use_cache = use_cache + self.classifier_dropout = classifier_dropout + self.cross_module = cross_module + + +def load_tf_weights_in_bert(model, config, tf_checkpoint_path): + """Load tf checkpoints in a pytorch model.""" + try: + import re + + import numpy as np + import tensorflow as tf + except ImportError: + logger.error( + "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " + "https://www.tensorflow.org/install/ for installation instructions." 
+ ) + raise + tf_path = os.path.abspath(tf_checkpoint_path) + logger.info("Converting TensorFlow checkpoint from {}".format(tf_path)) + # Load weights from TF model + init_vars = tf.train.list_variables(tf_path) + names = [] + arrays = [] + for name, shape in init_vars: + logger.info("Loading TF weight {} with shape {}".format(name, shape)) + array = tf.train.load_variable(tf_path, name) + names.append(name) + arrays.append(array) + + for name, array in zip(names, arrays): + name = name.split("/") + # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v + # which are not required for using pretrained model + if any( + n + in [ + "adam_v", + "adam_m", + "AdamWeightDecayOptimizer", + "AdamWeightDecayOptimizer_1", + "global_step", + ] + for n in name + ): + logger.info("Skipping {}".format("/".join(name))) + continue + pointer = model + for m_name in name: + if re.fullmatch(r"[A-Za-z]+_\d+", m_name): + scope_names = re.split(r"_(\d+)", m_name) + else: + scope_names = [m_name] + if scope_names[0] == "kernel" or scope_names[0] == "gamma": + pointer = getattr(pointer, "weight") + elif scope_names[0] == "output_bias" or scope_names[0] == "beta": + pointer = getattr(pointer, "bias") + elif scope_names[0] == "output_weights": + pointer = getattr(pointer, "weight") + elif scope_names[0] == "squad": + pointer = getattr(pointer, "classifier") + else: + try: + pointer = getattr(pointer, scope_names[0]) + except AttributeError: + logger.info("Skipping {}".format("/".join(name))) + continue + if len(scope_names) >= 2: + num = int(scope_names[1]) + pointer = pointer[num] + if m_name[-11:] == "_embeddings": + pointer = getattr(pointer, "weight") + elif m_name == "kernel": + array = np.transpose(array) + try: + assert ( + pointer.shape == array.shape + ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" + except AssertionError as e: + e.args += (pointer.shape, array.shape) + raise + logger.info("Initialize PyTorch weight {}".format(name)) + pointer.data = torch.from_numpy(array) + return model + + +class BertEmbeddings(nn.Module): + """Construct the embeddings from word, position and token_type embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding( + config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id + ) + self.position_embeddings = nn.Embedding( + config.max_position_embeddings, config.hidden_size + ) + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.register_buffer( + "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)) + ) + self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") + + self.config = config + + def forward( + self, + input_ids=None, + token_type_ids=None, + position_ids=None, + inputs_embeds=None, + past_key_values_length=0, + ): + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + + if position_ids is None: + position_ids = self.position_ids[ + :, past_key_values_length : seq_length + past_key_values_length + ] 
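+        # Sketch with hypothetical sizes: with past_key_values_length == 3 and
+        # seq_length == 2, the slice above yields positions [[3, 4]], so cached
+        # decoding continues the position sequence instead of restarting at 0.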
+ + if token_type_ids is None: + token_type_ids = torch.zeros( + input_shape, dtype=torch.long, device=self.position_ids.device + ) + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = inputs_embeds + token_type_embeddings + if self.position_embedding_type == "absolute": + position_embeddings = self.position_embeddings(position_ids) + embeddings += position_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class BertSelfAttention(nn.Module): + def __init__(self, config, is_cross_attention): + super().__init__() + self.config = config + if config.hidden_size % config.num_attention_heads != 0 and not hasattr( + config, "embedding_size" + ): + raise ValueError( + "The hidden size (%d) is not a multiple of the number of attention " + "heads (%d)" % (config.hidden_size, config.num_attention_heads) + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + if is_cross_attention: + self.key = nn.Linear(config.encoder_width, self.all_head_size) + self.value = nn.Linear(config.encoder_width, self.all_head_size) + else: + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") + if ( + self.position_embedding_type == "relative_key" + or self.position_embedding_type == "relative_key_query" + ): + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = nn.Embedding( + 2 * config.max_position_embeddings - 1, self.attention_head_size + ) + self.save_attention = False + + def save_attn_gradients(self, attn_gradients): + self.attn_gradients = attn_gradients + + def get_attn_gradients(self): + return self.attn_gradients + + def save_attention_map(self, attention_map): + self.attention_map = attention_map + + def get_attention_map(self): + return self.attention_map + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + mixed_query_layer = self.query(hidden_states) + + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. 
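+        # Sketch of the three cases below: cross-attention takes K/V from
+        # encoder_hidden_states, cached decoding concatenates past_key_value
+        # with K/V computed from the new hidden_states, and plain
+        # self-attention derives all of Q/K/V from hidden_states; in each case
+        # transpose_for_scores reshapes to (batch, num_heads, seq_len,
+        # head_size) so the per-head dot products can be batched with matmul.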
+ is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention: + key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = torch.cat([past_key_value[0], key_layer], dim=2) + value_layer = torch.cat([past_key_value[1], value_layer], dim=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + query_layer = self.transpose_for_scores(mixed_query_layer) + + past_key_value = (key_layer, value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + if ( + self.position_embedding_type == "relative_key" + or self.position_embedding_type == "relative_key_query" + ): + seq_length = hidden_states.size()[1] + position_ids_l = torch.arange( + seq_length, dtype=torch.long, device=hidden_states.device + ).view(-1, 1) + position_ids_r = torch.arange( + seq_length, dtype=torch.long, device=hidden_states.device + ).view(1, -1) + distance = position_ids_l - position_ids_r + positional_embedding = self.distance_embedding( + distance + self.max_position_embeddings - 1 + ) + positional_embedding = positional_embedding.to( + dtype=query_layer.dtype + ) # fp16 compatibility + + if self.position_embedding_type == "relative_key": + relative_position_scores = torch.einsum( + "bhld,lrd->bhlr", query_layer, positional_embedding + ) + attention_scores = attention_scores + relative_position_scores + elif self.position_embedding_type == "relative_key_query": + relative_position_scores_query = torch.einsum( + "bhld,lrd->bhlr", query_layer, positional_embedding + ) + relative_position_scores_key = torch.einsum( + "bhrd,lrd->bhlr", key_layer, positional_embedding + ) + attention_scores = ( + attention_scores + + relative_position_scores_query + + relative_position_scores_key + ) + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in BertModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.Softmax(dim=-1)(attention_scores) + + if is_cross_attention and self.save_attention: + self.save_attention_map(attention_probs) + attention_probs.register_hook(self.save_attn_gradients) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. 
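+        # In shape terms attention_probs is (batch, num_heads, query_len,
+        # key_len), so the dropout below zeroes individual query-to-key
+        # attention weights rather than hidden units.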
+ attention_probs_dropped = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs_dropped = attention_probs_dropped * head_mask + + context_layer = torch.matmul(attention_probs_dropped, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(*new_context_layer_shape) + + # added `attention_scores` to return tuple + outputs = ( + (context_layer, attention_probs, attention_scores) + if output_attentions + else (context_layer,) + ) + + outputs = outputs + (past_key_value,) + return outputs + + +class BertSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertAttention(nn.Module): + def __init__(self, config, is_cross_attention=False): + super().__init__() + + self.self = BertSelfAttention(config, is_cross_attention) + + self.output = BertSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, + self.self.num_attention_heads, + self.self.attention_head_size, + self.pruned_heads, + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + self_outputs = self.self( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + attention_output = self.output(self_outputs[0], hidden_states) + # add attentions if we output them + outputs = (attention_output,) + self_outputs[1:] + return outputs # (context_layer, attention_probs, attention_scores, past_key_value,) + + +class BertIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +class BertOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = 
nn.Dropout(config.hidden_dropout_prob)
+
+    def forward(self, hidden_states, input_tensor):
+        hidden_states = self.dense(hidden_states)
+        hidden_states = self.dropout(hidden_states)
+        hidden_states = self.LayerNorm(hidden_states + input_tensor)
+        return hidden_states
+
+
+class BertLayer(nn.Module):
+    def __init__(self, config, layer_num):
+        super().__init__()
+        self.config = config
+        self.chunk_size_feed_forward = config.chunk_size_feed_forward
+        self.seq_len_dim = 1
+        self.attention = BertAttention(config)
+        # keep the layer index around: forward() below uses it to pick the
+        # matching encoder state when encoder_hidden_states is a list
+        self.layer_num = layer_num
+
+        self.has_cross_attention = config.cross_attention_freq > 0 and layer_num % config.cross_attention_freq == 0
+
+        if self.has_cross_attention:
+            self.crossattention = BertAttention(config, is_cross_attention=True)
+        self.intermediate = BertIntermediate(config)
+        self.output = BertOutput(config)
+
+    def forward(
+        self,
+        hidden_states,
+        attention_mask=None,
+        head_mask=None,
+        encoder_hidden_states=None,
+        encoder_attention_mask=None,
+        past_key_value=None,
+        output_attentions=False,
+    ):
+        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+        self_attention_outputs = self.attention(
+            hidden_states,
+            attention_mask,
+            head_mask,
+            output_attentions=output_attentions,
+            past_key_value=self_attn_past_key_value,
+        )  # (context_layer, attention_probs, attention_scores, past_key_value,)
+        attention_output = self_attention_outputs[0]
+
+        outputs = self_attention_outputs[1:-1]
+        present_key_value = self_attention_outputs[-1]
+
+        if self.has_cross_attention:
+            assert (
+                encoder_hidden_states is not None
+            ), "encoder_hidden_states must be given for cross-attention layers"
+
+            if type(encoder_hidden_states) == list:
+                cross_attention_outputs = self.crossattention(
+                    attention_output,
+                    attention_mask,
+                    head_mask,
+                    encoder_hidden_states[
+                        (self.layer_num - self.config.fusion_layer)
+                        % len(encoder_hidden_states)
+                    ],
+                    encoder_attention_mask[
+                        (self.layer_num - self.config.fusion_layer)
+                        % len(encoder_hidden_states)
+                    ],
+                    output_attentions=output_attentions,
+                )
+                attention_output = cross_attention_outputs[0]
+                outputs = outputs + cross_attention_outputs[1:-1]
+
+            else:
+                cross_attention_outputs = self.crossattention(
+                    attention_output,
+                    attention_mask,
+                    head_mask,
+                    encoder_hidden_states,
+                    encoder_attention_mask,
+                    output_attentions=output_attentions,
+                )  # (context_layer, attention_probs, attention_scores, past_key_value,)
+                attention_output = cross_attention_outputs[0]
+                # add cross attentions if we output attention weights
+                outputs = outputs + cross_attention_outputs[1:-1]
+        layer_output = apply_chunking_to_forward(
+            self.feed_forward_chunk,
+            self.chunk_size_feed_forward,
+            self.seq_len_dim,
+            attention_output,
+        )
+        outputs = (layer_output,) + outputs
+
+        outputs = outputs + (present_key_value,)
+
+        return outputs
+
+    def feed_forward_chunk(self, attention_output):
+        intermediate_output = self.intermediate(attention_output)
+        layer_output = self.output(intermediate_output, attention_output)
+        return layer_output
+
+
+class BertEncoder(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+        self.config = config
+        self.layer = nn.ModuleList(
+            [BertLayer(config, i) for i in range(config.num_hidden_layers)]
+        )
+        logger.info(f"build bert with cross_module: {config.cross_module}")
+
+    def forward(
+        self,
+        hidden_states,
+        attention_mask=None,
+        head_mask=None,
+        encoder_hidden_states=None,
+        encoder_attention_mask=None,
+        past_key_values=None,
+
use_cache=None, + output_attentions=False, + output_hidden_states=False, + return_dict=True, + # mode="multi_modal", + normalize_attention=True, + ): + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + # all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None + all_cross_attentions = () if output_attentions else None + + next_decoder_cache = () if use_cache else None + + # if ( + # mode == "text" or mode == "temporal" + # ): # temporal is added and used for temporal att module. + # start_layer = 0 + # output_layer = self.config.fusion_layer + + # elif mode == "fusion": + # start_layer = self.config.fusion_layer + # output_layer = self.config.num_hidden_layers + + # elif mode == "multi_modal": + # start_layer = 0 + # output_layer = self.config.num_hidden_layers + + for i in range(len(self.layer)): + layer_module = self.layer[i] + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[i] if past_key_values is not None else None + + if getattr(self.config, "gradient_checkpointing", False) and self.training: + + if use_cache: + logger.warn( + "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting " + "`use_cache=False`..." + ) + use_cache = False + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, past_key_value, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + use_reentrant=False, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) # (context_layer, attention_probs, attention_scores, past_key_value,) + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1],) + if output_attentions: + # whether to output normalized attention, + # note for unnormalized attention, there is a mask added + offset = int(normalize_attention) + # all_self_attentions = all_self_attentions + (layer_outputs[1], ) + all_self_attentions = all_self_attentions + (layer_outputs[2 - offset],) + if hasattr(layer_module, "crossattention"): + # all_cross_attentions = all_cross_attentions + (layer_outputs[3], ) + all_cross_attentions = all_cross_attentions + (layer_outputs[4 - offset],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +class BertPooler(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states): + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. 
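+        # e.g. hidden_states of shape (batch, seq_len, hidden) such as
+        # (8, 512, 768) is reduced to an (8, 768) pooled vector built from the
+        # first ([CLS]) token.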
+ first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +class BertPredictionHeadTransform(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + if isinstance(config.hidden_act, str): + self.transform_act_fn = ACT2FN[config.hidden_act] + else: + self.transform_act_fn = config.hidden_act + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + return hidden_states + + +class BertLMPredictionHead(nn.Module): + def __init__(self, config): + super().__init__() + self.transform = BertPredictionHeadTransform(config) + + # The output weights are the same as the input embeddings, but there is + # an output-only bias for each token. + self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + self.bias = nn.Parameter(torch.zeros(config.vocab_size)) + + # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` + self.decoder.bias = self.bias + + def forward(self, hidden_states): + hidden_states = self.transform(hidden_states) + hidden_states = self.decoder(hidden_states) + return hidden_states + + +class BertOnlyMLMHead(nn.Module): + def __init__(self, config): + super().__init__() + self.predictions = BertLMPredictionHead(config) + + def forward(self, sequence_output): + prediction_scores = self.predictions(sequence_output) + return prediction_scores + + +class BertOnlyNSPHead(nn.Module): + def __init__(self, config): + super().__init__() + self.seq_relationship = nn.Linear(config.hidden_size, 2) + + def forward(self, pooled_output): + seq_relationship_score = self.seq_relationship(pooled_output) + return seq_relationship_score + + +class BertPreTrainingHeads(nn.Module): + def __init__(self, config): + super().__init__() + self.predictions = BertLMPredictionHead(config) + self.seq_relationship = nn.Linear(config.hidden_size, 2) + + def forward(self, sequence_output, pooled_output): + prediction_scores = self.predictions(sequence_output) + seq_relationship_score = self.seq_relationship(pooled_output) + return prediction_scores, seq_relationship_score + + +class BertPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = BertConfig + load_tf_weights = load_tf_weights_in_bert + base_model_prefix = "bert" + _keys_to_ignore_on_load_missing = [r"position_ids"] + + def _init_weights(self, module): + """Initialize the weights""" + if isinstance(module, (nn.Linear, nn.Embedding)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + if isinstance(module, nn.Linear) and module.bias is not None: + module.bias.data.zero_() + + +@dataclass +class BertForPreTrainingOutput(ModelOutput): + """ + Output type of :class:`~transformers.BertForPreTraining`. 
+ Args: + loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`): + Total loss as the sum of the masked language modeling loss and the next sequence prediction + (classification) loss. + prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + seq_relationship_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`): + Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation + before SoftMax). + hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): + Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) + of shape :obj:`(batch_size, sequence_length, hidden_size)`. + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): + Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, + sequence_length, sequence_length)`. + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + prediction_logits: torch.FloatTensor = None + seq_relationship_logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +BERT_START_DOCSTRING = r""" + This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic + methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, + pruning heads etc.) + This model is also a PyTorch `torch.nn.Module `__ + subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to + general usage and behavior. + Parameters: + config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model + weights. +""" + +BERT_INPUTS_DOCSTRING = r""" + Args: + input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`): + Indices of input sequence tokens in the vocabulary. + Indices can be obtained using :class:`~transformers.BertTokenizer`. See + :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for + details. + `What are input IDs? <../glossary.html#input-ids>`__ + attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`): + Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + `What are attention masks? <../glossary.html#attention-mask>`__ + token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): + Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in ``[0,
+            1]``:
+            - 0 corresponds to a `sentence A` token,
+            - 1 corresponds to a `sentence B` token.
+            `What are token type IDs? <../glossary.html#token-type-ids>`_
+        position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
+            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
+            config.max_position_embeddings - 1]``.
+            `What are position IDs? <../glossary.html#position-ids>`_
+        head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
+            Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
+            - 1 indicates the head is **not masked**,
+            - 0 indicates the head is **masked**.
+        inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
+            Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
+            This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
+            vectors than the model's internal embedding lookup matrix.
+        output_attentions (:obj:`bool`, `optional`):
+            Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
+            tensors for more detail.
+        output_hidden_states (:obj:`bool`, `optional`):
+            Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
+            more detail.
+        return_dict (:obj:`bool`, `optional`):
+            Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+    "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
+    BERT_START_DOCSTRING,
+)
+class BertModel(BertPreTrainedModel):
+    """
+    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
+    cross-attention is added between the self-attention layers, following the architecture described in `Attention is
+    all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
+    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
+    To behave as a decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
+    set to :obj:`True`. To be used in a Seq2Seq model, it needs to be initialized with both the :obj:`is_decoder`
+    argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
+    input to the forward pass.
+    """
+
+    def __init__(self, config, add_pooling_layer=True):
+        super().__init__(config)
+        self.config = config
+
+        self.embeddings = BertEmbeddings(config)
+
+        self.encoder = BertEncoder(config)
+
+        self.pooler = BertPooler(config) if add_pooling_layer else None
+
+        self.init_weights()
+
+    def get_input_embeddings(self):
+        return self.embeddings.word_embeddings
+
+    def set_input_embeddings(self, value):
+        self.embeddings.word_embeddings = value
+
+    def _prune_heads(self, heads_to_prune):
+        """
+        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+        class PreTrainedModel
+        """
+        for layer, heads in heads_to_prune.items():
+            self.encoder.layer[layer].attention.prune_heads(heads)
+
+    def get_extended_attention_mask(
+        self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool
+    ) -> Tensor:
+        """
+        Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
+
+        Arguments:
+            attention_mask (:obj:`torch.Tensor`):
+                Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
+ input_shape (:obj:`Tuple[int]`): + The shape of the input to the model. + device: (:obj:`torch.device`): + The device of the input to the model. + + Returns: + :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`. + """ + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. + if attention_mask.dim() == 3: + extended_attention_mask = attention_mask[:, None, :, :] + elif attention_mask.dim() == 2: + # Provided a padding mask of dimensions [batch_size, seq_length] + # - if the model is a decoder, apply a causal mask in addition to the padding mask + # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] + if is_decoder: + batch_size, seq_length = input_shape + seq_ids = torch.arange(seq_length, device=device) + causal_mask = ( + seq_ids[None, None, :].repeat(batch_size, seq_length, 1) + <= seq_ids[None, :, None] + ) + # in case past_key_values are used we need to add a prefix ones mask to the causal mask + # causal and attention masks must have same type with pytorch version < 1.3 + causal_mask = causal_mask.to(attention_mask.dtype) + + if causal_mask.shape[1] < attention_mask.shape[1]: + prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1] + causal_mask = torch.cat( + [ + torch.ones( + (batch_size, seq_length, prefix_seq_len), + device=device, + dtype=causal_mask.dtype, + ), + causal_mask, + ], + axis=-1, + ) + + extended_attention_mask = ( + causal_mask[:, None, :, :] * attention_mask[:, None, None, :] + ) + else: + extended_attention_mask = attention_mask[:, None, None, :] + else: + raise ValueError( + "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( + input_shape, attention_mask.shape + ) + ) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. + extended_attention_mask = extended_attention_mask.to( + dtype=self.dtype + ) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + return extended_attention_mask + + def forward( + self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + is_decoder=False, + # mode="multi_modal", + normalize_attention=True, + ): + r""" + encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. 
+ past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` + (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` + instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. + use_cache (:obj:`bool`, `optional`): + If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up + decoding (see :obj:`past_key_values`). + """ + output_attentions = ( + output_attentions + if output_attentions is not None + else self.config.output_attentions + ) + output_hidden_states = ( + output_hidden_states + if output_hidden_states is not None + else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if is_decoder: + use_cache = use_cache if use_cache is not None else self.config.use_cache + else: + use_cache = False + + if input_ids is not None and inputs_embeds is not None: + raise ValueError( + "You cannot specify both input_ids and inputs_embeds at the same time" + ) + elif input_ids is not None: + input_shape = input_ids.size() + batch_size, seq_length = input_shape + device = input_ids.device + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + batch_size, seq_length = input_shape + device = inputs_embeds.device + elif encoder_embeds is not None: + input_shape = encoder_embeds.size()[:-1] + batch_size, seq_length = input_shape + device = encoder_embeds.device + else: + raise ValueError( + "You have to specify either input_ids or inputs_embeds or encoder_embeds" + ) + + # past_key_values_length + past_key_values_length = ( + past_key_values[0][0].shape[2] if past_key_values is not None else 0 + ) + + if attention_mask is None: + attention_mask = torch.ones( + ((batch_size, seq_length + past_key_values_length)), device=device + ) + if token_type_ids is None: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
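+        # Sketch for the plain 2-D encoder case: a padding mask [[1, 1, 0]]
+        # comes back as additive scores [[[[0., 0., -10000.]]]] with shape
+        # (batch, 1, 1, seq_len); added to the raw attention logits, the
+        # padded key receives ~zero probability after the softmax.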
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask( + attention_mask, input_shape, device, is_decoder + ) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if encoder_hidden_states is not None: + if type(encoder_hidden_states) == list: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[ + 0 + ].size() + else: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() + encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) + + if type(encoder_attention_mask) == list: + encoder_extended_attention_mask = [ + self.invert_attention_mask(mask) for mask in encoder_attention_mask + ] + elif encoder_attention_mask is None: + encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) + encoder_extended_attention_mask = self.invert_attention_mask( + encoder_attention_mask + ) + else: + encoder_extended_attention_mask = self.invert_attention_mask( + encoder_attention_mask + ) + else: + encoder_extended_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + if encoder_embeds is None: + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + token_type_ids=token_type_ids, + inputs_embeds=inputs_embeds, + past_key_values_length=past_key_values_length, + ) + else: + embedding_output = encoder_embeds + + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + # mode=mode, + normalize_attention=normalize_attention, + ) + sequence_output = encoder_outputs[0] + pooled_output = self.pooler(sequence_output) if self.pooler is not None else None + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + past_key_values=encoder_outputs.past_key_values, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) + + +@add_start_docstrings( + """ + Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next + sentence prediction (classification)` head. 
+ """, + BERT_START_DOCSTRING, +) +class BertForPreTraining(BertPreTrainedModel): + def __init__(self, config): + super().__init__(config) + + self.bert = BertModel(config) + self.cls = BertPreTrainingHeads(config) + + self.init_weights() + + def get_output_embeddings(self): + return self.cls.predictions.decoder + + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + @add_start_docstrings_to_model_forward( + BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") + ) + @replace_return_docstrings( + output_type=BertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC + ) + def forward( + self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + labels=None, + next_sentence_label=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + labels (:obj:`torch.LongTensor` of shape ``(batch_size, sequence_length)``, `optional`): + Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ..., + config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored + (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` + next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`): + Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair + (see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``: + - 0 indicates sequence B is a continuation of sequence A, + - 1 indicates sequence B is a random sequence. + kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`): + Used to hide legacy arguments that have been deprecated. 
+ Returns: + Example:: + >>> from transformers import BertTokenizer, BertForPreTraining + >>> import torch + >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') + >>> model = BertForPreTraining.from_pretrained('bert-base-uncased') + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + >>> outputs = model(**inputs) + >>> prediction_logits = outputs.prediction_logits + >>> seq_relationship_logits = outputs.seq_relationship_logits + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output, pooled_output = outputs[:2] + prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) + + total_loss = None + if labels is not None and next_sentence_label is not None: + loss_fct = CrossEntropyLoss() + masked_lm_loss = loss_fct( + prediction_scores.view(-1, self.config.vocab_size), labels.view(-1) + ) + next_sentence_loss = loss_fct( + seq_relationship_score.view(-1, 2), next_sentence_label.view(-1) + ) + total_loss = masked_lm_loss + next_sentence_loss + + if not return_dict: + output = (prediction_scores, seq_relationship_score) + outputs[2:] + return ((total_loss,) + output) if total_loss is not None else output + + return BertForPreTrainingOutput( + loss=total_loss, + prediction_logits=prediction_scores, + seq_relationship_logits=seq_relationship_score, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """Bert Model with a `language modeling` head on top for CLM fine-tuning. """, + BERT_START_DOCSTRING, +) +class BertLMHeadModel(BertPreTrainedModel): + + _keys_to_ignore_on_load_unexpected = [r"pooler"] + _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] + + def __init__(self, config): + super().__init__(config) + + self.bert = BertModel(config, add_pooling_layer=False) + self.cls = BertOnlyMLMHead(config) + + self.init_weights() + + def get_output_embeddings(self): + return self.cls.predictions.decoder + + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + @add_start_docstrings_to_model_forward( + BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") + ) + @replace_return_docstrings( + output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC + ) + def forward( + self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + labels=None, + past_key_values=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + is_decoder=True, + reduction="mean", + mode="multi_modal", + normalize_attention=True, + soft_labels=None, + alpha=0, + return_logits=False, + ): + r""" + encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. 
+        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
+            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
+            - 1 for tokens that are **not masked**,
+            - 0 for tokens that are **masked**.
+        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
+            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
+            ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
+            ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
+        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
+            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
+            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
+        use_cache (:obj:`bool`, `optional`):
+            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
+            decoding (see :obj:`past_key_values`).
+        Returns:
+        Example::
+            >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
+            >>> import torch
+            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
+            >>> config = BertConfig.from_pretrained("bert-base-cased")
+            >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)
+            >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+            >>> outputs = model(**inputs)
+            >>> prediction_logits = outputs.logits
+        """
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+        if labels is not None:
+            use_cache = False
+
+        outputs = self.bert(
+            input_ids,
+            attention_mask=attention_mask,
+            token_type_ids=token_type_ids,
+            position_ids=position_ids,
+            head_mask=head_mask,
+            inputs_embeds=inputs_embeds,
+            encoder_hidden_states=encoder_hidden_states,
+            encoder_attention_mask=encoder_attention_mask,
+            past_key_values=past_key_values,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+            is_decoder=is_decoder,
+            mode=mode,
+            normalize_attention=normalize_attention,
+        )
+
+        sequence_output = outputs[0]
+        prediction_scores = self.cls(sequence_output)
+
+        if return_logits:
+            return prediction_scores[:, :-1, :].contiguous()
+
+        lm_loss = None
+        if labels is not None:
+            # we are doing next-token prediction; shift prediction scores and input ids by one
+            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
+            labels = labels[:, 1:].contiguous()
+            loss_fct = CrossEntropyLoss(reduction=reduction)
+            lm_loss = loss_fct(
+                shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)
+            )
+            # only a per-token loss can be reshaped; a scalar "mean"/"sum" loss cannot
+            if reduction == "none":
+                lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1)
+
+        if soft_labels is not None:
+            # distillation: normalize over the vocabulary dimension, not the sequence dimension
+            loss_distill = -torch.sum(
+                F.log_softmax(shifted_prediction_scores, dim=-1) * soft_labels, dim=-1
+            )
+            loss_distill = (loss_distill *
(labels != -100)).sum(1) + lm_loss = (1 - alpha) * lm_loss + alpha * loss_distill + + if not return_dict: + output = (prediction_scores,) + outputs[2:] + return ((lm_loss,) + output) if lm_loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=lm_loss, + logits=prediction_scores, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + def prepare_inputs_for_generation( + self, input_ids, past=None, attention_mask=None, **model_kwargs + ): + input_shape = input_ids.shape + # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = input_ids.new_ones(input_shape) + + # cut decoder_input_ids if past is used + if past is not None: + input_ids = input_ids[:, -1:] + + return { + "input_ids": input_ids, + "attention_mask": attention_mask, + "past_key_values": past, + "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None), + "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None), + "is_decoder": True, + } + + def _reorder_cache(self, past, beam_idx): + reordered_past = () + for layer_past in past: + reordered_past += ( + tuple(past_state.index_select(0, beam_idx) for past_state in layer_past), + ) + return reordered_past + + +@dataclass +class MaskedLMOutputWithDistill(MaskedLMOutput): + loss_aux: Optional[torch.FloatTensor] = None + loss_distill: Optional[torch.FloatTensor] = None + last_hidden_state: Optional[torch.FloatTensor] = None + + +@add_start_docstrings( + """Bert Model with a `language modeling` head on top. """, BERT_START_DOCSTRING +) +class BertForMaskedLM(BertPreTrainedModel): + + _keys_to_ignore_on_load_unexpected = [r"pooler"] + _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] + + def __init__(self, config): + super().__init__(config) + + self.bert = BertModel(config, add_pooling_layer=False) + self.cls = BertOnlyMLMHead(config) + + self.init_weights() + + def tie_aux_decoder_weights(self, module, aux_modules): + """Tie decoder weights of all `aux_modules` to `module`, (not bias)""" + for m in aux_modules: + m.predictions.decoder.weight = module.predictions.decoder.weight + + def get_output_embeddings(self): + return self.cls.predictions.decoder + + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + def forward( + self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + is_decoder=False, + # mode="multi_modal", + normalize_attention=True, + soft_labels=None, + alpha=0, + return_logits=False, + ): + r""" + labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Labels for computing the masked language modeling loss. 
Indices should be in ``[-100, 0, ...,
+            config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
+            (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
+        """
+
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        outputs = self.bert(
+            input_ids,
+            attention_mask=attention_mask,
+            token_type_ids=token_type_ids,
+            position_ids=position_ids,
+            head_mask=head_mask,
+            inputs_embeds=inputs_embeds,
+            encoder_embeds=encoder_embeds,
+            encoder_hidden_states=encoder_hidden_states,
+            encoder_attention_mask=encoder_attention_mask,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+            is_decoder=is_decoder,
+            # mode=mode,
+            normalize_attention=normalize_attention,
+        )
+
+        sequence_output = outputs[0]
+        prediction_scores = self.cls(sequence_output)
+
+        if return_logits:
+            return prediction_scores
+
+        masked_lm_loss = None
+        masked_lm_loss_aux = 0.0
+        if labels is not None:
+            loss_fct = CrossEntropyLoss()  # -100 index = padding token
+            masked_lm_loss = loss_fct(
+                prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)
+            )
+
+        if soft_labels is not None:
+            # distillation: normalize over the vocabulary dimension, not the sequence dimension
+            loss_distill = -torch.sum(
+                F.log_softmax(prediction_scores, dim=-1) * soft_labels, dim=-1
+            )
+            loss_distill = loss_distill[labels != -100].mean()
+            masked_lm_loss = (1 - alpha) * masked_lm_loss + alpha * loss_distill
+
+        if not return_dict:
+            output = (prediction_scores,) + outputs[2:]
+            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+        # changed from MaskedLMOutput to MaskedLMOutputWithDistill
+        return MaskedLMOutputWithDistill(
+            loss=masked_lm_loss,
+            loss_aux=masked_lm_loss_aux,
+            logits=prediction_scores,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+            last_hidden_state=outputs.last_hidden_state
+        )
+
+    def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
+        input_shape = input_ids.shape
+        effective_batch_size = input_shape[0]
+
+        # add a dummy token
+        assert (
+            self.config.pad_token_id is not None
+        ), "The PAD token should be defined for generation"
+        attention_mask = torch.cat(
+            [attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1
+        )
+        dummy_token = torch.full(
+            (effective_batch_size, 1),
+            self.config.pad_token_id,
+            dtype=torch.long,
+            device=input_ids.device,
+        )
+        input_ids = torch.cat([input_ids, dummy_token], dim=1)
+
+        return {"input_ids": input_ids, "attention_mask": attention_mask}
+
+
+@add_start_docstrings(
+    """Bert Model with a `next sentence prediction (classification)` head on top.
""", + BERT_START_DOCSTRING, +) +class BertForNextSentencePrediction(BertPreTrainedModel): + def __init__(self, config): + super().__init__(config) + + self.bert = BertModel(config) + self.cls = BertOnlyNSPHead(config) + + self.init_weights() + + @add_start_docstrings_to_model_forward( + BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length") + ) + @replace_return_docstrings( + output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC + ) + def forward( + self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + **kwargs, + ): + r""" + labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): + Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair + (see ``input_ids`` docstring). Indices should be in ``[0, 1]``: + - 0 indicates sequence B is a continuation of sequence A, + - 1 indicates sequence B is a random sequence. + Returns: + Example:: + >>> from transformers import BertTokenizer, BertForNextSentencePrediction + >>> import torch + >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') + >>> model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased') + >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." + >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light." + >>> encoding = tokenizer(prompt, next_sentence, return_tensors='pt') + >>> outputs = model(**encoding, labels=torch.LongTensor([1])) + >>> logits = outputs.logits + >>> assert logits[0, 0] < logits[0, 1] # next sentence was random + """ + + if "next_sentence_label" in kwargs: + warnings.warn( + "The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.", + FutureWarning, + ) + labels = kwargs.pop("next_sentence_label") + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = outputs[1] + + seq_relationship_scores = self.cls(pooled_output) + + next_sentence_loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1)) + + if not return_dict: + output = (seq_relationship_scores,) + outputs[2:] + return ( + ((next_sentence_loss,) + output) if next_sentence_loss is not None else output + ) + + return NextSentencePredictorOutput( + loss=next_sentence_loss, + logits=seq_relationship_scores, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled + output) e.g. for GLUE tasks. 
+ """, + BERT_START_DOCSTRING, +) +class BertForSequenceClassification(BertPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + + self.bert = BertModel(config) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + self.init_weights() + + def forward( + self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): + Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ..., + config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss), + If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = outputs[1] + + pooled_output = self.dropout(pooled_output) + logits = self.classifier(pooled_output) + + loss = None + if labels is not None: + if self.num_labels == 1: + # We are doing regression + loss_fct = MSELoss() + loss = loss_fct(logits.view(-1), labels.view(-1)) + else: + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a + softmax) e.g. for RocStories/SWAG tasks. + """, + BERT_START_DOCSTRING, +) +class BertForMultipleChoice(BertPreTrainedModel): + def __init__(self, config): + super().__init__(config) + + self.bert = BertModel(config) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.classifier = nn.Linear(config.hidden_size, 1) + + self.init_weights() + + def forward( + self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): + Labels for computing the multiple choice classification loss. Indices should be in ``[0, ..., + num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. 
(See + :obj:`input_ids` above) + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] + + input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None + attention_mask = ( + attention_mask.view(-1, attention_mask.size(-1)) + if attention_mask is not None + else None + ) + token_type_ids = ( + token_type_ids.view(-1, token_type_ids.size(-1)) + if token_type_ids is not None + else None + ) + position_ids = ( + position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None + ) + inputs_embeds = ( + inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) + if inputs_embeds is not None + else None + ) + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = outputs[1] + + pooled_output = self.dropout(pooled_output) + logits = self.classifier(pooled_output) + reshaped_logits = logits.view(-1, num_choices) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(reshaped_logits, labels) + + if not return_dict: + output = (reshaped_logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return MultipleChoiceModelOutput( + loss=loss, + logits=reshaped_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for + Named-Entity-Recognition (NER) tasks. + """, + BERT_START_DOCSTRING, +) +class BertForTokenClassification(BertPreTrainedModel): + + _keys_to_ignore_on_load_unexpected = [r"pooler"] + + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + + self.bert = BertModel(config, add_pooling_layer=False) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + self.init_weights() + + def forward( + self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - + 1]``. 
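+        Example (illustrative sketch; the checkpoint and the all-zero tag labels are toy assumptions)::
+            >>> import torch
+            >>> from transformers import BertTokenizer
+            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
+            >>> model = BertForTokenClassification.from_pretrained('bert-base-uncased')
+            >>> inputs = tokenizer("Hello world", return_tensors="pt")
+            >>> labels = torch.zeros_like(inputs["input_ids"])  # toy: every token tagged with class 0
+            >>> outputs = model(**inputs, labels=labels)
+            >>> outputs.logits.shape  # (batch_size, sequence_length, num_labels)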
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + sequence_output = self.dropout(sequence_output) + logits = self.classifier(sequence_output) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + # Only keep active parts of the loss + if attention_mask is not None: + active_loss = attention_mask.view(-1) == 1 + active_logits = logits.view(-1, self.num_labels) + active_labels = torch.where( + active_loss, + labels.view(-1), + torch.tensor(loss_fct.ignore_index).type_as(labels), + ) + loss = loss_fct(active_logits, active_labels) + else: + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TokenClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear + layers on top of the hidden-states output to compute `span start logits` and `span end logits`). + """, + BERT_START_DOCSTRING, +) +class BertForQuestionAnswering(BertPreTrainedModel): + + _keys_to_ignore_on_load_unexpected = [r"pooler"] + + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + + self.bert = BertModel(config, add_pooling_layer=False) + self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + + self.init_weights() + + def forward( + self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + start_positions=None, + end_positions=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): + Labels for position (index) of the start of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the + sequence are not taken into account for computing the loss. + end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): + Labels for position (index) of the end of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the + sequence are not taken into account for computing the loss. 
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + logits = self.qa_outputs(sequence_output) + start_logits, end_logits = logits.split(1, dim=-1) + start_logits = start_logits.squeeze(-1) + end_logits = end_logits.squeeze(-1) + + total_loss = None + if start_positions is not None and end_positions is not None: + # If we are on multi-GPU, split add a dimension + if len(start_positions.size()) > 1: + start_positions = start_positions.squeeze(-1) + if len(end_positions.size()) > 1: + end_positions = end_positions.squeeze(-1) + # sometimes the start/end positions are outside our model inputs, we ignore these terms + ignored_index = start_logits.size(1) + start_positions.clamp_(0, ignored_index) + end_positions.clamp_(0, ignored_index) + + loss_fct = CrossEntropyLoss(ignore_index=ignored_index) + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + + if not return_dict: + output = (start_logits, end_logits) + outputs[2:] + return ((total_loss,) + output) if total_loss is not None else output + + return QuestionAnsweringModelOutput( + loss=total_loss, + start_logits=start_logits, + end_logits=end_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) diff --git a/models/backbones/blip2.py b/models/backbones/blip2.py new file mode 100755 index 0000000..33707b6 --- /dev/null +++ b/models/backbones/blip2.py @@ -0,0 +1,268 @@ +""" + Copyright (c) 2023, salesforce.com, inc. + All rights reserved. 
+    SPDX-License-Identifier: BSD-3-Clause
+    For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
+"""
+import contextlib
+import logging
+import os
+import time
+import datetime
+
+import torch
+import torch.nn as nn
+import torch.distributed as dist
+import torch.nn.functional as F
+
+import models.common.dist_utils as dist_utils
+from models.common.dist_utils import download_cached_file
+from models.common.utils import is_url
+from models.common.logger import MetricLogger
+
+from models.backbones.base_model import BaseModel
+from models.backbones.Qformer import BertConfig, BertLMHeadModel
+from models.backbones.eva_vit import create_eva_vit_g
+from transformers import BertTokenizer
+
+
+class Blip2Base(BaseModel):
+    @classmethod
+    def init_tokenizer(cls):
+        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
+        tokenizer.add_special_tokens({"bos_token": "[DEC]"})
+        return tokenizer
+
+    def maybe_autocast(self, dtype=torch.float16):
+        # if on cpu, don't use autocast
+        # if on gpu, use autocast with dtype if provided, otherwise use torch.float16
+        enable_autocast = self.device != torch.device("cpu")
+
+        if enable_autocast:
+            return torch.cuda.amp.autocast(dtype=dtype)
+        else:
+            return contextlib.nullcontext()
+
+    @classmethod
+    def init_Qformer(cls, num_query_token, vision_width, cross_attention_freq=2):
+        encoder_config = BertConfig.from_pretrained("bert-base-uncased")
+        encoder_config.encoder_width = vision_width
+        # insert cross-attention layer every other block
+        encoder_config.add_cross_attention = True
+        encoder_config.cross_attention_freq = cross_attention_freq
+        encoder_config.query_length = num_query_token
+        Qformer = BertLMHeadModel(config=encoder_config)
+        query_tokens = nn.Parameter(
+            torch.zeros(1, num_query_token, encoder_config.hidden_size)
+        )
+        query_tokens.data.normal_(mean=0.0, std=encoder_config.initializer_range)
+        return Qformer, query_tokens
+
+    @classmethod
+    def init_vision_encoder(
+        cls, model_name, img_size, drop_path_rate, use_grad_checkpoint, precision
+    ):
+        assert model_name == "eva_clip_g", "vit model must be eva_clip_g for current version of MiniGPT-4"
+        visual_encoder = create_eva_vit_g(
+            img_size, drop_path_rate, use_grad_checkpoint, precision
+        )
+
+        ln_vision = LayerNorm(visual_encoder.num_features)
+        return visual_encoder, ln_vision
+
+    def load_from_pretrained(self, url_or_filename):
+        if is_url(url_or_filename):
+            cached_file = download_cached_file(
+                url_or_filename, check_hash=False, progress=True
+            )
+            checkpoint = torch.load(cached_file, map_location="cpu")
+        elif os.path.isfile(url_or_filename):
+            checkpoint = torch.load(url_or_filename, map_location="cpu")
+        else:
+            raise RuntimeError("checkpoint url or path is invalid")
+
+        state_dict = checkpoint["model"]
+
+        msg = self.load_state_dict(state_dict, strict=False)
+
+        # logging.info("Missing keys {}".format(msg.missing_keys))
+        logging.info("load checkpoint from %s" % url_or_filename)
+
+        return msg
+
+    def get_optimizer_params(self, weight_decay, lr_scale=1):
+
+        vit_num_layers = self.visual_encoder.get_num_layer()
+        lr_scales = list(lr_scale ** (vit_num_layers + 1 - i) for i in range(vit_num_layers + 2))
+
+        parameter_group_names = {}
+        parameter_group_vars = {}
+
+        for name, param in self.named_parameters():
+            if not param.requires_grad:
+                continue  # frozen weights
+            if len(param.shape) == 1 or name.endswith(".bias"):
+                group_name = "no_decay"
+                this_weight_decay = 0.
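+                # 1-D parameters (e.g. LayerNorm scales) and biases get no weight decay,
+                # following common ViT fine-tuning practice; all remaining parameters are decayed.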
+ else: + group_name = "decay" + this_weight_decay = weight_decay + if 'visual_encoder' in name: + layer_id = self.visual_encoder.get_num_layer(name.replace('visual_encoder.','')) + group_name = "vit_layer_%d_%s" % (layer_id, group_name) + else: + layer_id = None + + if group_name not in parameter_group_names: + if layer_id is not None: + scale = lr_scales[layer_id] + else: + scale = 1 + parameter_group_names[group_name] = { + "weight_decay": this_weight_decay, + "params": [], + "lr_scale": scale + } + parameter_group_vars[group_name] = { + "weight_decay": this_weight_decay, + "params": [], + "lr_scale": scale + } + parameter_group_vars[group_name]["params"].append(param) + parameter_group_names[group_name]["params"].append(name) + # import json + # print("Param groups = %s" % json.dumps(parameter_group_names, indent=2)) + optim_params = list(parameter_group_vars.values()) + return optim_params + + + +def disabled_train(self, mode=True): + """Overwrite model.train with this function to make sure train/eval mode + does not change anymore.""" + return self + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x: torch.Tensor): + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +def compute_sim_matrix(model, data_loader, **kwargs): + k_test = kwargs.pop("k_test") + + metric_logger = MetricLogger(delimiter=" ") + header = "Evaluation:" + + logging.info("Computing features for evaluation...") + start_time = time.time() + + texts = data_loader.dataset.text + num_text = len(texts) + text_bs = 256 + text_ids = [] + text_embeds = [] + text_atts = [] + for i in range(0, num_text, text_bs): + text = texts[i : min(num_text, i + text_bs)] + text_input = model.tokenizer( + text, + padding="max_length", + truncation=True, + max_length=35, + return_tensors="pt", + ).to(model.device) + text_feat = model.forward_text(text_input) + text_embed = F.normalize(model.text_proj(text_feat)) + text_embeds.append(text_embed) + text_ids.append(text_input.input_ids) + text_atts.append(text_input.attention_mask) + + text_embeds = torch.cat(text_embeds, dim=0) + text_ids = torch.cat(text_ids, dim=0) + text_atts = torch.cat(text_atts, dim=0) + + vit_feats = [] + image_embeds = [] + for samples in data_loader: + image = samples["image"] + + image = image.to(model.device) + image_feat, vit_feat = model.forward_image(image) + image_embed = model.vision_proj(image_feat) + image_embed = F.normalize(image_embed, dim=-1) + + vit_feats.append(vit_feat.cpu()) + image_embeds.append(image_embed) + + vit_feats = torch.cat(vit_feats, dim=0) + image_embeds = torch.cat(image_embeds, dim=0) + + sims_matrix = [] + for image_embed in image_embeds: + sim_q2t = image_embed @ text_embeds.t() + sim_i2t, _ = sim_q2t.max(0) + sims_matrix.append(sim_i2t) + sims_matrix = torch.stack(sims_matrix, dim=0) + + score_matrix_i2t = torch.full( + (len(data_loader.dataset.image), len(texts)), -100.0 + ).to(model.device) + + num_tasks = dist_utils.get_world_size() + rank = dist_utils.get_rank() + step = sims_matrix.size(0) // num_tasks + 1 + start = rank * step + end = min(sims_matrix.size(0), start + step) + + for i, sims in enumerate( + metric_logger.log_every(sims_matrix[start:end], 50, header) + ): + topk_sim, topk_idx = sims.topk(k=k_test, dim=0) + image_inputs = vit_feats[start + i].repeat(k_test, 1, 1).to(model.device) + score = model.compute_itm( + image_inputs=image_inputs, + text_ids=text_ids[topk_idx], + text_atts=text_atts[topk_idx], + 
).float() + score_matrix_i2t[start + i, topk_idx] = score + topk_sim + + sims_matrix = sims_matrix.t() + score_matrix_t2i = torch.full( + (len(texts), len(data_loader.dataset.image)), -100.0 + ).to(model.device) + + step = sims_matrix.size(0) // num_tasks + 1 + start = rank * step + end = min(sims_matrix.size(0), start + step) + + for i, sims in enumerate( + metric_logger.log_every(sims_matrix[start:end], 50, header) + ): + topk_sim, topk_idx = sims.topk(k=k_test, dim=0) + image_inputs = vit_feats[topk_idx.cpu()].to(model.device) + score = model.compute_itm( + image_inputs=image_inputs, + text_ids=text_ids[start + i].repeat(k_test, 1), + text_atts=text_atts[start + i].repeat(k_test, 1), + ).float() + score_matrix_t2i[start + i, topk_idx] = score + topk_sim + + if dist_utils.is_dist_avail_and_initialized(): + dist.barrier() + torch.distributed.all_reduce( + score_matrix_i2t, op=torch.distributed.ReduceOp.SUM + ) + torch.distributed.all_reduce( + score_matrix_t2i, op=torch.distributed.ReduceOp.SUM + ) + + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + logging.info("Evaluation time {}".format(total_time_str)) + + return score_matrix_i2t.cpu().numpy(), score_matrix_t2i.cpu().numpy() diff --git a/models/backbones/blip2_outputs.py b/models/backbones/blip2_outputs.py new file mode 100755 index 0000000..e8722b1 --- /dev/null +++ b/models/backbones/blip2_outputs.py @@ -0,0 +1,110 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. + SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +from dataclasses import dataclass +from typing import Optional + +import torch +from transformers.modeling_outputs import ( + ModelOutput, + BaseModelOutputWithPoolingAndCrossAttentions, + CausalLMOutputWithCrossAttentions, +) + + +@dataclass +class BlipSimilarity(ModelOutput): + sim_i2t: torch.FloatTensor = None + sim_t2i: torch.FloatTensor = None + + sim_i2t_m: Optional[torch.FloatTensor] = None + sim_t2i_m: Optional[torch.FloatTensor] = None + + sim_i2t_targets: Optional[torch.FloatTensor] = None + sim_t2i_targets: Optional[torch.FloatTensor] = None + + +@dataclass +class BlipIntermediateOutput(ModelOutput): + """ + Data class for intermediate outputs of BLIP models. + + image_embeds (torch.FloatTensor): Image embeddings, shape (batch_size, num_patches, embed_dim). + text_embeds (torch.FloatTensor): Text embeddings, shape (batch_size, seq_len, embed_dim). + + image_embeds_m (torch.FloatTensor): Image embeddings from momentum visual encoder, shape (batch_size, num_patches, embed_dim). + text_embeds_m (torch.FloatTensor): Text embeddings from momentum text encoder, shape (batch_size, seq_len, embed_dim). + + encoder_output (BaseModelOutputWithPoolingAndCrossAttentions): output from the image-grounded text encoder. + encoder_output_neg (BaseModelOutputWithPoolingAndCrossAttentions): output from the image-grounded text encoder for negative pairs. + + decoder_output (CausalLMOutputWithCrossAttentions): output from the image-grounded text decoder. + decoder_labels (torch.LongTensor): labels for the captioning loss. + + itm_logits (torch.FloatTensor): logits for the image-text matching loss, shape (batch_size * 3, 2). 
+ itm_labels (torch.LongTensor): labels for the image-text matching loss, shape (batch_size * 3,) + + """ + + # uni-modal features + image_embeds: torch.FloatTensor = None + text_embeds: Optional[torch.FloatTensor] = None + + image_embeds_m: Optional[torch.FloatTensor] = None + text_embeds_m: Optional[torch.FloatTensor] = None + + # intermediate outputs of multimodal encoder + encoder_output: Optional[BaseModelOutputWithPoolingAndCrossAttentions] = None + encoder_output_neg: Optional[BaseModelOutputWithPoolingAndCrossAttentions] = None + + itm_logits: Optional[torch.FloatTensor] = None + itm_labels: Optional[torch.LongTensor] = None + + # intermediate outputs of multimodal decoder + decoder_output: Optional[CausalLMOutputWithCrossAttentions] = None + decoder_labels: Optional[torch.LongTensor] = None + + +@dataclass +class BlipOutput(ModelOutput): + # some finetuned models (e.g. BlipVQA) do not compute similarity, thus optional. + sims: Optional[BlipSimilarity] = None + + intermediate_output: BlipIntermediateOutput = None + + loss: Optional[torch.FloatTensor] = None + + loss_itc: Optional[torch.FloatTensor] = None + + loss_itm: Optional[torch.FloatTensor] = None + + loss_lm: Optional[torch.FloatTensor] = None + + +@dataclass +class BlipOutputFeatures(ModelOutput): + """ + Data class of features from BlipFeatureExtractor. + + Args: + image_embeds: (torch.FloatTensor) of shape (batch_size, num_patches+1, embed_dim), optional + image_features: (torch.FloatTensor) of shape (batch_size, num_patches+1, feature_dim), optional + text_embeds: (torch.FloatTensor) of shape (batch_size, sequence_length+1, embed_dim), optional + text_features: (torch.FloatTensor) of shape (batch_size, sequence_length+1, feature_dim), optional + + The first embedding or feature is for the [CLS] token. + + Features are obtained by projecting the corresponding embedding into a normalized low-dimensional space. 
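+
+    A typical downstream use (an illustrative sketch, not mandated by this class) is a dot-product
+    similarity between the projected [CLS] features, e.g.
+    ``sims = image_embeds_proj[:, 0, :] @ text_embeds_proj[:, 0, :].t()``.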
+ """ + + image_embeds: Optional[torch.FloatTensor] = None + image_embeds_proj: Optional[torch.FloatTensor] = None + + text_embeds: Optional[torch.FloatTensor] = None + text_embeds_proj: Optional[torch.FloatTensor] = None + + multimodal_embeds: Optional[torch.FloatTensor] = None diff --git a/models/backbones/clip_vision_encoder.py b/models/backbones/clip_vision_encoder.py new file mode 100644 index 0000000..8518551 --- /dev/null +++ b/models/backbones/clip_vision_encoder.py @@ -0,0 +1,83 @@ +import torch +import torch.nn as nn + +from transformers import CLIPVisionModel, CLIPImageProcessor, CLIPVisionConfig + + +class CLIPVisionEncoder(nn.Module): + def __init__(self, encoder_name="openai/clip-vit-large-patch14", delay_load=False): + super().__init__() + + self.is_loaded = False + + self.vision_encoder_name = encoder_name + # self.select_layer = args.mm_vision_select_layer + # self.select_feature = getattr(args, 'mm_vision_select_feature', 'patch') + self.select_layer = -1 + self.select_feature = "patch" + if not delay_load: + self.load_model() + else: + self.cfg_only = CLIPVisionConfig.from_pretrained(self.vision_encoder_name) + + def load_model(self): + self.image_processor = CLIPImageProcessor.from_pretrained(self.vision_encoder_name) + self.vision_encoder = CLIPVisionModel.from_pretrained(self.vision_encoder_name) + self.vision_encoder.requires_grad_(False) + + self.is_loaded = True + + def feature_select(self, image_forward_outs): + image_features = image_forward_outs.hidden_states[self.select_layer] + if self.select_feature == 'patch': + image_features = image_features[:, :] + elif self.select_feature == 'cls_patch': + image_features = image_features + else: + raise ValueError(f'Unexpected select feature: {self.select_feature}') + return image_features + + @torch.no_grad() + def forward(self, images): + if type(images) is list: + image_features = [] + for image in images: + image_forward_out = self.vision_encoder(image.to(device=self.device, dtype=self.dtype).unsqueeze(0), output_hidden_states=True) + image_feature = self.feature_select(image_forward_out).to(image.dtype) + image_features.append(image_feature) + else: + image_forward_outs = self.vision_encoder(images.to(device=self.device, dtype=self.dtype), output_hidden_states=True) + image_features = self.feature_select(image_forward_outs).to(images.dtype) + # print("image feature shape", image_features.shape) + # print(type(image_forward_outs)) + # print(type(image_forward_outs.shape)) + # image_features = image_forward_outs.to(images.dtype) + + return image_features + + @property + def dummy_feature(self): + return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype) + + @property + def dtype(self): + return self.vision_encoder.dtype + + @property + def device(self): + return self.vision_encoder.device + + @property + def config(self): + if self.is_loaded: + return self.vision_encoder.config + else: + return self.cfg_only + + @property + def hidden_size(self): + return self.config.hidden_size + + @property + def num_patches(self): + return (self.config.image_size // self.config.patch_size) ** 2 \ No newline at end of file diff --git a/models/backbones/encoder_decoder/builder.py b/models/backbones/encoder_decoder/builder.py new file mode 100644 index 0000000..1aff26a --- /dev/null +++ b/models/backbones/encoder_decoder/builder.py @@ -0,0 +1,141 @@ + +import glog as logger +import re +import json + +from peft import LoraConfig, get_peft_model + +from .xflan_t5 import T5Config, T5ForConditionalGeneration +from 
.xbart import BartConfig, BartForConditionalGeneration, BartEncoder, BartForCausalLM
+
+
+def build_encoder_decoder(model_config):
+    """build (encoder-) decoder model for answer generation.
+
+    Args:
+        model_config (dict): model config.
+
+    Returns: the encoder-decoder model (LoRA-wrapped if configured).
+
+    """
+    logger.info('[INFO] Loading Encoder Decoder [Type = {}]'.format(model_config['enc_dec_name']))
+
+    if model_config['enc_dec_family'] == 'flan_t5':
+        config_cls = T5Config
+        model_cls = T5ForConditionalGeneration
+    elif model_config['enc_dec_family'] == 'bart':
+        config_cls = BartConfig
+        if model_config['use_decoder_only']:
+            model_cls = BartForCausalLM
+        else:
+            model_cls = BartForConditionalGeneration
+    else:
+        raise ValueError('{} is not supported'.format(model_config['enc_dec_family']))
+    enc_dec_config = config_cls.from_pretrained(model_config['enc_dec_name'])
+    model_config['enc_dec_dim'] = enc_dec_config.d_model
+    # enc_dec_config.encoder_layers = enc_dec_config.encoder_layers - model_config['num_layers_modality_expert_{}'.format(model_config['enc_dec_family'])]
+    enc_dec = model_cls.from_pretrained(
+        model_config['enc_dec_name'],
+        config=enc_dec_config
+    )
+
+    # first_k = model_config['num_layers_modality_expert_{}'.format(model_config['enc_dec_family'])]
+    # enc_dec.model.encoder.remove_first_k_layers(first_k)
+    # get the last encoder layers
+    # enc_dec.
+
+
+    if model_config['use_lora_enc_dec']:
+        # load the lora config
+        with open(model_config['lora_config'], 'r') as f:
+            lora_config = json.load(f)
+
+        # get the linear layers to perform LoRA on
+        model_modules = str(enc_dec.modules)
+        pattern = r'\((\w+)\): Linear'
+        linear_layer_names = re.findall(pattern, model_modules)
+
+        names = []
+        # collect the names of the Linear layers
+        for name in linear_layer_names:
+            names.append(name)
+        target_modules = list(set(names))
+
+        lora_config['target_modules'] = target_modules
+
+        lora_config = LoraConfig(**lora_config)
+
+        enc_dec = get_peft_model(enc_dec, lora_config)
+
+    return enc_dec
+
+
+def build_encoder(model_config, expert_type, modality=None):
+    """build an expert encoder (modality or grounding expert) on top of a pretrained encoder(-decoder).
+
+    Args:
+        model_config (dict): model config.
+ + Returns: TODO + + """ + log_txt = '[INFO] Loading {} Expert'.format(expert_type) + if modality is not None: + log_txt += ' [Modality = {}]'.format(modality) + log_txt += ' [Type = {}]'.format(model_config['enc_dec_name']) + + logger.info(log_txt) + + if model_config['enc_dec_family'] == 'flan_t5': + config_cls = T5Config + model_cls = T5ForConditionalGeneration + elif model_config['enc_dec_family'] == 'bart': + config_cls = BartConfig + model_cls = BartEncoder + else: + raise ValueError('{} is not supported'.format(model_config['enc_dec_family'])) + + config = config_cls.from_pretrained(model_config['enc_dec_name']) + config.modality_expert_layers = model_config['num_layers_modality_expert_{}'.format(model_config['enc_dec_family'])] + config.grounding_expert_layers = model_config['num_layers_grounding_expert_{}'.format(model_config['enc_dec_family'])] + + model_config['enc_dec_dim'] = config.d_model + + expert = model_cls.from_pretrained( + model_config['enc_dec_name'], + config=config, + expert_type=expert_type, + modality=modality + ) + + if model_config['use_lora_expert']: + # load the lora config + with open(model_config['lora_config'], 'r') as f: + lora_config = json.load(f) + + # get the linear layer to perform LoRA on + model_modules = str(expert.modules) + pattern = r'\((\w+)\): Linear' + linear_layer_names = re.findall(pattern, model_modules) + + names = [] + # Print the names of the Linear layers + for name in linear_layer_names: + names.append(name) + target_modules = list(set(names)) + + lora_config['target_modules'] = target_modules + + lora_config = LoraConfig(**lora_config) + + expert = get_peft_model(expert, lora_config) + + # expert = model_cls( + # config=config, + # expert_type=expert_type, + # modality=modality + # ) + + return expert + + diff --git a/models/backbones/encoder_decoder/builder_orig.py b/models/backbones/encoder_decoder/builder_orig.py new file mode 100644 index 0000000..119ef0e --- /dev/null +++ b/models/backbones/encoder_decoder/builder_orig.py @@ -0,0 +1,65 @@ +from .xflan_t5 import T5Config, T5ForConditionalGeneration +from .xbart_original import BartConfig, BartForConditionalGeneration, BartEncoder + +import glog as logger + + +def build_encoder_decoder(model_config): + """build (encoder-) decoder model for answer generation. + + Args: + model_config (dict): model config. + + Returns: TODO + + """ + logger.info('[INFO] Loading Encoder Decoder: {}'.format(model_config['enc_dec_name'])) + + if model_config['enc_dec_family'] == 'flan_t5': + config_cls = T5Config + model_cls = T5ForConditionalGeneration + elif model_config['enc_dec_family'] == 'bart': + config_cls = BartConfig + model_cls = BartForConditionalGeneration + else: + raise ValueError('{} is not supported'.format(model_config['enc_dec_family'])) + config = config_cls.from_pretrained(model_config['enc_dec_name']) + model_config['enc_dec_dim'] = config.d_model + enc_dec = model_cls.from_pretrained( + model_config['enc_dec_name'], + config=config + ) + + return enc_dec + + +def build_encoder(model_config): + """build (encoder-) decoder model for answer generation. + + Args: + model_config (dict): model config. 
+ + Returns: TODO + + """ + logger.info('[INFO] Loading Expert as Encoder of {}'.format(model_config['enc_dec_name'])) + + if model_config['enc_dec_family'] == 'flan_t5': + config_cls = T5Config + model_cls = T5ForConditionalGeneration + elif model_config['enc_dec_family'] == 'bart': + config_cls = BartConfig + model_cls = BartEncoder + else: + raise ValueError('{} is not supported'.format(model_config['enc_dec_family'])) + + config = config_cls.from_pretrained(model_config['enc_dec_name']) + model_config['enc_dec_dim'] = config.d_model + config.encoder_layers = model_config['num_layers_modality_expert'] + + expert = model_cls.from_pretrained( + model_config['enc_dec_name'], + config=config + ) + + return expert diff --git a/models/backbones/encoder_decoder/outputs.py b/models/backbones/encoder_decoder/outputs.py new file mode 100644 index 0000000..7330ec0 --- /dev/null +++ b/models/backbones/encoder_decoder/outputs.py @@ -0,0 +1,19 @@ +from typing import Optional, Tuple +import torch +from transformers.modeling_outputs import ModelOutput +from dataclasses import dataclass + + +@dataclass +class Seq2SeqV2DialOutput(ModelOutput): + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor]] = None + encoder_last_hidden_state: Optional[torch.FloatTensor] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None + diff --git a/models/backbones/encoder_decoder/xbart.py b/models/backbones/encoder_decoder/xbart.py new file mode 100644 index 0000000..0183a4a --- /dev/null +++ b/models/backbones/encoder_decoder/xbart.py @@ -0,0 +1,2044 @@ +# coding=utf-8 +# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" PyTorch BART model.""" +import copy +import math +import warnings +from typing import List, Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from transformers.activations import ACT2FN +from transformers.modeling_outputs import ( + BaseModelOutput, + BaseModelOutputWithPastAndCrossAttentions, + CausalLMOutputWithCrossAttentions, + Seq2SeqLMOutput, + Seq2SeqModelOutput, + Seq2SeqQuestionAnsweringModelOutput, + Seq2SeqSequenceClassifierOutput, +) +from transformers.modeling_utils import PreTrainedModel +from transformers.utils import ( + add_code_sample_docstrings, + add_end_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from transformers.models.bart.configuration_bart import BartConfig +from .outputs import Seq2SeqV2DialOutput + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "facebook/bart-base" +_CONFIG_FOR_DOC = "BartConfig" + +# Base model docstring +_EXPECTED_OUTPUT_SHAPE = [1, 8, 768] + +# SequenceClassification docstring +_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "valhalla/bart-large-sst2" +_SEQ_CLASS_EXPECTED_LOSS = 0.0 +_SEQ_CLASS_EXPECTED_OUTPUT = "'POSITIVE'" + +# QuestionAsnwering docstring +_CHECKPOINT_FOR_QA = "valhalla/bart-large-finetuned-squadv1" +_QA_EXPECTED_LOSS = 0.59 +_QA_EXPECTED_OUTPUT = "' nice puppet'" + + +BART_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "facebook/bart-large", + # see all BART models at https://huggingface.co/models?filter=bart +] + + +def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): + """ + Shift input ids one token to the right. + """ + shifted_input_ids = input_ids.new_zeros(input_ids.shape) + shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() + shifted_input_ids[:, 0] = decoder_start_token_id + + if pad_token_id is None: + raise ValueError("self.model.config.pad_token_id has to be defined.") + # replace possible -100 values in labels by `pad_token_id` + shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) + + return shifted_input_ids + + +def _make_causal_mask( + input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 +): + """ + Make causal mask used for bi-directional self-attention. + """ + bsz, tgt_len = input_ids_shape + mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) + mask_cond = torch.arange(mask.size(-1), device=device) + mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) + mask = mask.to(dtype) + + if past_key_values_length > 0: + mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) + return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) + + +def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. + """ + bsz, src_len = mask.size() + tgt_len = tgt_len if tgt_len is not None else src_len + + expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) + + inverted_mask = 1.0 - expanded_mask + + return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) + + +class BartLearnedPositionalEmbedding(nn.Embedding): + """ + This module learns positional embeddings up to a fixed maximum size. 
+ """ + + def __init__(self, num_embeddings: int, embedding_dim: int): + # Bart is set up so that if padding_idx is specified then offset the embedding ids by 2 + # and adjust num_embeddings appropriately. Other models don't have this hack + self.offset = 2 + super().__init__(num_embeddings + self.offset, embedding_dim) + + def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0): + """`input_ids' shape is expected to be [bsz x seqlen].""" + + bsz, seq_len = input_ids.shape[:2] + positions = torch.arange( + past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device + ).expand(bsz, -1) + + return super().forward(positions + self.offset) + + +class BartAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__( + self, + embed_dim: int, + num_heads: int, + dropout: float = 0.0, + is_decoder: bool = False, + bias: bool = True, + ): + super().__init__() + self.embed_dim = embed_dim + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + + if (self.head_dim * num_heads) != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" + f" and `num_heads`: {num_heads})." + ) + self.scaling = self.head_dim**-0.5 + self.is_decoder = is_decoder + + self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + key_value_states: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.Tensor] = None, + layer_head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel""" + + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + is_cross_attention = key_value_states is not None + + bsz, tgt_len, _ = hidden_states.size() + + # get query proj + query_states = self.q_proj(hidden_states) * self.scaling + # get key, value proj + # `past_key_value[0].shape[2] == key_value_states.shape[1]` + # is checking that the `sequence_length` of the `past_key_value` is the same as + # the provided `key_value_states` to support prefix tuning + if ( + is_cross_attention + and past_key_value is not None + and past_key_value[0].shape[2] == key_value_states.shape[1] + ): + # reuse k,v, cross_attentions + key_states = past_key_value[0] + value_states = past_key_value[1] + elif is_cross_attention: + # cross_attentions + key_states = self._shape(self.k_proj(key_value_states), -1, bsz) + value_states = self._shape(self.v_proj(key_value_states), -1, bsz) + elif past_key_value is not None: + # reuse k, v, self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + else: + # self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = 
self._shape(self.v_proj(hidden_states), -1, bsz) + + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_states, value_states) + + proj_shape = (bsz * self.num_heads, -1, self.head_dim) + query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) + key_states = key_states.reshape(*proj_shape) + value_states = value_states.reshape(*proj_shape) + + src_len = key_states.size(1) + attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) + + if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): + raise ValueError( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, tgt_len, src_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" + ) + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + if layer_head_mask is not None: + if layer_head_mask.size() != (self.num_heads,): + raise ValueError( + f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" + f" {layer_head_mask.size()}" + ) + attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + if output_attentions: + # this operation is a bit awkward, but it's required to + # make sure that attn_weights keeps its gradient. + # In order to do so, attn_weights have to be reshaped + # twice and have to be reused in the following + attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) + else: + attn_weights_reshaped = None + + attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + + attn_output = torch.bmm(attn_probs, value_states) + + if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) + attn_output = attn_output.transpose(1, 2) + + # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be + # partitioned across GPUs when using tensor-parallelism. 
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) + + attn_output = self.out_proj(attn_output) + + return attn_output, attn_weights_reshaped, past_key_value + + +class BartEncoderLayer(nn.Module): + def __init__(self, config: BartConfig, has_cross_att=False): + super().__init__() + self.embed_dim = config.d_model + self.has_cross_att = has_cross_att + + self.self_attn = BartAttention( + embed_dim=self.embed_dim, + num_heads=config.encoder_attention_heads, + dropout=config.attention_dropout, + ) + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) + + if self.has_cross_att: + self.cross_attn = BartAttention( + embed_dim=self.embed_dim, + num_heads=config.encoder_attention_heads, + dropout=config.attention_dropout, + ) + self.cross_attn_layer_norm = nn.LayerNorm(self.embed_dim) + + self.dropout = config.dropout + self.activation_fn = ACT2FN[config.activation_function] + self.activation_dropout = config.activation_dropout + self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) + self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) + self.final_layer_norm = nn.LayerNorm(self.embed_dim) + + def forward( + self, + hidden_states: torch.FloatTensor, + attention_mask: torch.FloatTensor, + layer_head_mask: torch.FloatTensor, + key_value_states: Optional[torch.FloatTensor] = None, + cross_hidden_states: Optional[torch.FloatTensor] = None, + cross_attention_mask: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size + `(encoder_attention_heads,)`. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. 
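+            key_value_states (`torch.FloatTensor`, *optional*):
+                if provided, keys and values of the self-attention block are computed from these states instead of
+                `hidden_states`.
+            cross_hidden_states (`torch.FloatTensor`, *optional*):
+                hidden states attended to by the cross-attention block, of shape `(batch, src_len, embed_dim)`; only
+                used when the layer was built with `has_cross_att=True`.
+            cross_attention_mask (`torch.FloatTensor`, *optional*):
+                cross-attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by
+                very large negative values.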
+ """ + residual = hidden_states + hidden_states, attn_weights, _ = self.self_attn( + hidden_states=hidden_states, + key_value_states=key_value_states, + attention_mask=attention_mask, + layer_head_mask=layer_head_mask, + output_attentions=output_attentions, + ) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + ######################################## + if self.has_cross_att: + assert cross_hidden_states is not None + assert cross_attention_mask is not None + residual = hidden_states + hidden_states, attn_weights, _ = self.cross_attn( + hidden_states=hidden_states, + key_value_states=cross_hidden_states, + attention_mask=cross_attention_mask, + output_attentions=output_attentions, + ) + + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.cross_attn_layer_norm(hidden_states) + ######################################## + + residual = hidden_states + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) + hidden_states = self.fc2(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.final_layer_norm(hidden_states) + + if hidden_states.dtype == torch.float16 and ( + torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() + ): + clamp_value = torch.finfo(hidden_states.dtype).max - 1000 + hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (attn_weights,) + + return outputs + + +class BartDecoderLayer(nn.Module): + def __init__(self, config: BartConfig): + super().__init__() + self.embed_dim = config.d_model + + self.self_attn = BartAttention( + embed_dim=self.embed_dim, + num_heads=config.decoder_attention_heads, + dropout=config.attention_dropout, + is_decoder=True, + ) + self.dropout = config.dropout + self.activation_fn = ACT2FN[config.activation_function] + self.activation_dropout = config.activation_dropout + + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.encoder_attn = BartAttention( + self.embed_dim, + config.decoder_attention_heads, + dropout=config.attention_dropout, + is_decoder=True, + ) + self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) + self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) + self.final_layer_norm = nn.LayerNorm(self.embed_dim) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + layer_head_mask: Optional[torch.Tensor] = None, + cross_attn_layer_head_mask: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = True, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`): attention mask of size + `(batch, 1, tgt_len, src_len)` 
where padding elements are indicated by very large negative values.
+            encoder_hidden_states (`torch.FloatTensor`):
+                cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
+            encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
+                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
+                `(decoder_attention_heads,)`.
+            cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
+                size `(decoder_attention_heads,)`.
+            past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
+            output_attentions (`bool`, *optional*):
+                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+                returned tensors for more detail.
+        """
+        residual = hidden_states
+
+        # Self Attention
+        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+        # add present self-attn cache to positions 1,2 of present_key_value tuple
+        hidden_states, self_attn_weights, present_key_value = self.self_attn(
+            hidden_states=hidden_states,
+            past_key_value=self_attn_past_key_value,
+            attention_mask=attention_mask,
+            layer_head_mask=layer_head_mask,
+            output_attentions=output_attentions,
+        )
+        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+        hidden_states = residual + hidden_states
+        hidden_states = self.self_attn_layer_norm(hidden_states)
+
+        # Cross-Attention Block
+        cross_attn_present_key_value = None
+        cross_attn_weights = None
+        if encoder_hidden_states is not None:
+            residual = hidden_states
+
+            # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
+            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+            hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
+                hidden_states=hidden_states,
+                key_value_states=encoder_hidden_states,
+                attention_mask=encoder_attention_mask,
+                layer_head_mask=cross_attn_layer_head_mask,
+                past_key_value=cross_attn_past_key_value,
+                output_attentions=output_attentions,
+            )
+            hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+            hidden_states = residual + hidden_states
+            hidden_states = self.encoder_attn_layer_norm(hidden_states)
+
+            # add cross-attn to positions 3,4 of present_key_value tuple
+            present_key_value = present_key_value + cross_attn_present_key_value
+
+        # Fully Connected
+        residual = hidden_states
+        hidden_states = self.activation_fn(self.fc1(hidden_states))
+        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+        hidden_states = self.fc2(hidden_states)
+        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+        hidden_states = residual + hidden_states
+        hidden_states = self.final_layer_norm(hidden_states)
+
+        outputs = (hidden_states,)
+
+        if output_attentions:
+            outputs += (self_attn_weights, cross_attn_weights)
+
+        if use_cache:
+            outputs += (present_key_value,)
+
+        return outputs
+
+
+class BartClassificationHead(nn.Module):
+    """Head for sentence-level classification tasks."""
+
+    def __init__(
+        self,
+        input_dim: int,
+        inner_dim: int,
+        num_classes: int,
+        pooler_dropout: float,
+    ):
+        super().__init__()
+        self.dense = 
nn.Linear(input_dim, inner_dim)
+        self.dropout = nn.Dropout(p=pooler_dropout)
+        self.out_proj = nn.Linear(inner_dim, num_classes)
+
+    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+        hidden_states = self.dropout(hidden_states)
+        hidden_states = self.dense(hidden_states)
+        hidden_states = torch.tanh(hidden_states)
+        hidden_states = self.dropout(hidden_states)
+        hidden_states = self.out_proj(hidden_states)
+        return hidden_states
+
+
+class BartPreTrainedModel(PreTrainedModel):
+    config_class = BartConfig
+    base_model_prefix = "model"
+    supports_gradient_checkpointing = True
+    _keys_to_ignore_on_load_unexpected = ["encoder.version", "decoder.version"]
+    _no_split_modules = [r"BartEncoderLayer", r"BartDecoderLayer"]
+    _skip_keys_device_placement = "past_key_values"
+
+    def _init_weights(self, module):
+        std = self.config.init_std
+        if isinstance(module, nn.Linear):
+            module.weight.data.normal_(mean=0.0, std=std)
+            if module.bias is not None:
+                module.bias.data.zero_()
+        elif isinstance(module, nn.Embedding):
+            module.weight.data.normal_(mean=0.0, std=std)
+            if module.padding_idx is not None:
+                module.weight.data[module.padding_idx].zero_()
+
+    def _set_gradient_checkpointing(self, module, value=False):
+        if isinstance(module, (BartDecoder, BartEncoder)):
+            module.gradient_checkpointing = value
+
+    @property
+    def dummy_inputs(self):
+        pad_token = self.config.pad_token_id
+        input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
+        dummy_inputs = {
+            "attention_mask": input_ids.ne(pad_token),
+            "input_ids": input_ids,
+        }
+        return dummy_inputs
+
+
+class PretrainedBartModel(BartPreTrainedModel):
+    def __init_subclass__(self):
+        warnings.warn(
+            "The class `PretrainedBartModel` has been deprecated, please use `BartPreTrainedModel` instead.",
+            FutureWarning,
+        )
+
+
+class BartPretrainedModel(BartPreTrainedModel):
+    def __init_subclass__(self):
+        warnings.warn(
+            "The class `BartPretrainedModel` has been deprecated, please use `BartPreTrainedModel` instead.",
+            FutureWarning,
+        )
+
+
+BART_START_DOCSTRING = r"""
+    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning
+    heads etc.)
+
+    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+    and behavior.
+
+    Parameters:
+        config ([`BartConfig`]):
+            Model configuration class with all the parameters of the model. Initializing with a config file does not
+            load the weights associated with the model, only the configuration. Check out the
+            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+BART_GENERATION_EXAMPLE = r"""
+    Summarization example:
+
+    ```python
+    >>> from transformers import AutoTokenizer, BartForConditionalGeneration
+
+    >>> model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn")
+    >>> tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-cnn")
+
+    >>> ARTICLE_TO_SUMMARIZE = (
+    ...     "PG&E stated it scheduled the blackouts in response to forecasts for high winds "
+    ...     "amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were "
+    ...     "scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow."
+    ... 
)
+    >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt")
+
+    >>> # Generate Summary
+    >>> summary_ids = model.generate(inputs["input_ids"], num_beams=2, min_length=0, max_length=20)
+    >>> tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+    'PG&E scheduled the blackouts in response to forecasts for high winds amid dry conditions'
+    ```
+
+    Mask filling example:
+
+    ```python
+    >>> from transformers import AutoTokenizer, BartForConditionalGeneration
+
+    >>> tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
+    >>> model = BartForConditionalGeneration.from_pretrained("facebook/bart-base")
+
+    >>> TXT = "My friends are <mask> but they eat too many carbs."
+    >>> input_ids = tokenizer([TXT], return_tensors="pt")["input_ids"]
+    >>> logits = model(input_ids).logits
+
+    >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
+    >>> probs = logits[0, masked_index].softmax(dim=0)
+    >>> values, predictions = probs.topk(5)
+
+    >>> tokenizer.decode(predictions).split()
+    ['not', 'good', 'healthy', 'great', 'very']
+    ```
+"""
+
+BART_INPUTS_DOCSTRING = r"""
+    Args:
+        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+            it.
+
+            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+            [`PreTrainedTokenizer.__call__`] for details.
+
+            [What are input IDs?](../glossary#input-ids)
+        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+            - 1 for tokens that are **not masked**,
+            - 0 for tokens that are **masked**.
+
+            [What are attention masks?](../glossary#attention-mask)
+        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+            Indices of decoder input sequence tokens in the vocabulary.
+
+            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+            [`PreTrainedTokenizer.__call__`] for details.
+
+            [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+            Bart uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
+            is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
+
+            For translation and summarization training, `decoder_input_ids` should be provided. If no
+            `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
+            for denoising pre-training following the paper.
+        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+            be used by default.
+
+            If you want to change padding behavior, you should read [`modeling_bart._prepare_decoder_attention_mask`]
+            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
+            information on the default strategy.
+        head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+            Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+            - 1 indicates the head is **not masked**,
+            - 0 indicates the head is **masked**.
+
+        decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+            Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
+
+            - 1 indicates the head is **not masked**,
+            - 0 indicates the head is **masked**.
+
+        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+            Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
+            `[0, 1]`:
+
+            - 1 indicates the head is **not masked**,
+            - 0 indicates the head is **masked**.
+
+        encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
+            Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`).
+            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden-states
+            at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
+            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+            model's internal embedding lookup matrix.
+        decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
+            Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
+            representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
+            input (see `past_key_values`). This is useful if you want more control over how to convert
+            `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
+
+            If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
+            of `inputs_embeds`.
+        use_cache (`bool`, *optional*):
+            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+            `past_key_values`).
+        output_attentions (`bool`, *optional*):
+            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+            tensors for more detail.
+        output_hidden_states (`bool`, *optional*):
+            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+            more detail.
+        return_dict (`bool`, *optional*):
+            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+""" + + +class BartEncoder(BartPreTrainedModel): + """ + Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a + [`BartEncoderLayer`]. + + Args: + config: BartConfig + embed_tokens (nn.Embedding): output embedding + """ + + def __init__(self, config: BartConfig, embed_tokens: Optional[nn.Embedding] = None, expert_type: Optional[str] = None, modality: Optional[str] = None): + super().__init__(config) + + self.dropout = config.dropout + self.layerdrop = config.encoder_layerdrop + self.expert_type = expert_type + self.modality = modality + + embed_dim = config.d_model + self.padding_idx = config.pad_token_id + self.max_source_positions = config.max_position_embeddings + self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 + + self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx) + + if embed_tokens is not None: + self.embed_tokens.weight = embed_tokens.weight + + self.embed_positions = BartLearnedPositionalEmbedding( + config.max_position_embeddings, + embed_dim, + ) + + + # self.modality_expert_layers = None + # if hasattr(config, 'modality_expert_layers'): + # encoder_layers = config.modality_expert_layers + + if self.expert_type is None: + encoder_layers = config.encoder_layers + self.cross_att_every = encoder_layers + 1 # No cross attention + + elif self.expert_type == 'modality': + encoder_layers = config.modality_expert_layers + if self.modality in ['spatial', 'temporal']: + self.cross_att_every = 2 # Cross attention every two layers + else: + self.cross_att_every = encoder_layers + 1 # No cross attention + + + elif self.expert_type == 'grounding': + encoder_layers = config.grounding_expert_layers + self.cross_att_every = 1 # Cross attention at every layer + + layers = [] + for i in range(encoder_layers): + has_cross_att = i % self.cross_att_every == 0 + if self.cross_att_every > encoder_layers: + has_cross_att = False + layers.append(BartEncoderLayer(config, has_cross_att=has_cross_att)) + + self.layers = nn.ModuleList(layers) + # self.layers = nn.ModuleList([BartEncoderLayer(config) for _ in range(config.encoder_layers)]) + + self.layernorm_embedding = nn.LayerNorm(embed_dim) + + # self.grounding_expert_layers = range(config.modality_expert_layers, config.encoder_layers) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def remove_first_k_layers(self, first_k): + assert first_k < len(self.layers) and first_k > 0 + self.layers = self.layers[first_k:] + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + cross_embeds: Optional[torch.FloatTensor] = None, + cross_attention_mask: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + # expert_type: Optional[str] = None, + ) -> Union[Tuple, BaseModelOutput]: + r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you + provide it. + + Indices can be obtained using [`AutoTokenizer`]. 
See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input = input_ids + input_ids = input_ids.view(-1, input_ids.shape[-1]) + elif inputs_embeds is not None: + input = inputs_embeds[:, :, -1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale + + + # expand attention_mask + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype) + + if cross_attention_mask is not None: + cross_attention_mask = _expand_mask(cross_attention_mask, inputs_embeds.dtype, tgt_len=inputs_embeds.size(1)) + + embed_pos = self.embed_positions(input) + embed_pos = embed_pos.to(inputs_embeds.device) + + hidden_states = inputs_embeds + embed_pos + hidden_states = self.layernorm_embedding(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + + encoder_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + + # check if head_mask has a correct number of layers specified if desired + if head_mask is not None: + if head_mask.size()[0] != (len(self.layers)): + raise ValueError( + f"The head_mask should be specified for {len(self.layers)} layers, but it is for" + f" {head_mask.size()[0]}." 
+                )
+        for idx, encoder_layer in enumerate(self.layers):
+            if output_hidden_states:
+                encoder_states = encoder_states + (hidden_states,)
+            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+            to_drop = False
+            if self.training:
+                dropout_probability = torch.rand([])
+                if dropout_probability < self.layerdrop:  # skip the layer
+                    to_drop = True
+
+            if to_drop:
+                layer_outputs = (None, None)
+            else:
+                if self.gradient_checkpointing and self.training:
+
+                    def create_custom_forward(module):
+                        def custom_forward(*inputs):
+                            return module(*inputs, output_attentions=output_attentions)
+
+                        return custom_forward
+
+                    # NOTE: the optional tensors are passed positionally in the order of
+                    # `BartEncoderLayer.forward` (`key_value_states` comes before the cross-attention
+                    # arguments); `torch.utils.checkpoint.checkpoint` does not forward keyword
+                    # arguments to `custom_forward`.
+                    layer_outputs = torch.utils.checkpoint.checkpoint(
+                        create_custom_forward(encoder_layer),
+                        hidden_states,
+                        attention_mask,
+                        (head_mask[idx] if head_mask is not None else None),
+                        None,  # key_value_states
+                        cross_embeds,
+                        cross_attention_mask,
+                    )
+                else:
+                    layer_outputs = encoder_layer(
+                        hidden_states,
+                        attention_mask,
+                        layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+                        cross_hidden_states=cross_embeds,
+                        cross_attention_mask=cross_attention_mask,
+                        output_attentions=output_attentions,
+                    )
+
+                hidden_states = layer_outputs[0]
+
+            if output_attentions:
+                all_attentions = all_attentions + (layer_outputs[1],)
+
+        if output_hidden_states:
+            encoder_states = encoder_states + (hidden_states,)
+
+        if not return_dict:
+            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+        return BaseModelOutput(
+            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+        )
+
+
+class BartDecoder(BartPreTrainedModel):
+    """
+    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`BartDecoderLayer`]
+
+    Args:
+        config: BartConfig
+        embed_tokens (nn.Embedding): output embedding
+    """
+
+    def __init__(self, config: BartConfig, embed_tokens: Optional[nn.Embedding] = None):
+        super().__init__(config)
+        self.dropout = config.dropout
+        self.layerdrop = config.decoder_layerdrop
+        self.padding_idx = config.pad_token_id
+        self.max_target_positions = config.max_position_embeddings
+        self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
+
+        self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
+
+        if embed_tokens is not None:
+            self.embed_tokens.weight = embed_tokens.weight
+
+        self.embed_positions = BartLearnedPositionalEmbedding(
+            config.max_position_embeddings,
+            config.d_model,
+        )
+        self.layers = nn.ModuleList([BartDecoderLayer(config) for _ in range(config.decoder_layers)])
+        self.layernorm_embedding = nn.LayerNorm(config.d_model)
+
+        self.gradient_checkpointing = False
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def get_input_embeddings(self):
+        return self.embed_tokens
+
+    def set_input_embeddings(self, value):
+        self.embed_tokens = value
+
+    def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
+        # create causal mask
+        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+        combined_attention_mask = None
+        if input_shape[-1] > 1:
+            combined_attention_mask = _make_causal_mask(
+                input_shape,
+                inputs_embeds.dtype,
+                device=inputs_embeds.device,
+                past_key_values_length=past_key_values_length,
+            )
+
+        if attention_mask is not None:
+            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+            expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
+                inputs_embeds.device
+            )
+            
combined_attention_mask = ( + expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask + ) + + return combined_attention_mask + + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: + r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you + provide it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention + of the decoder. + encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): + Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values + selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing + cross-attention on hidden heads. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of + shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of + shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the + cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. 
+
+                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+                all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+                This is useful if you want more control over how to convert `input_ids` indices into associated
+                vectors than the model's internal embedding lookup matrix.
+            output_attentions (`bool`, *optional*):
+                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+                returned tensors for more detail.
+            output_hidden_states (`bool`, *optional*):
+                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+                for more detail.
+            return_dict (`bool`, *optional*):
+                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+        """
+        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+        output_hidden_states = (
+            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+        )
+        use_cache = use_cache if use_cache is not None else self.config.use_cache
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        # retrieve input_ids and inputs_embeds
+        if input_ids is not None and inputs_embeds is not None:
+            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
+        elif input_ids is not None:
+            input = input_ids
+            input_shape = input.shape
+            input_ids = input_ids.view(-1, input_shape[-1])
+        elif inputs_embeds is not None:
+            input_shape = inputs_embeds.size()[:-1]
+            input = inputs_embeds[:, :, -1]
+        else:
+            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
+
+        # past_key_values_length
+        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
+        if inputs_embeds is None:
+            inputs_embeds = self.embed_tokens(input) * self.embed_scale
+
+        attention_mask = self._prepare_decoder_attention_mask(
+            attention_mask, input_shape, inputs_embeds, past_key_values_length
+        )
+
+        # expand encoder attention mask
+        if encoder_hidden_states is not None and encoder_attention_mask is not None:
+            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+            encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
+
+        # embed positions
+        positions = self.embed_positions(input, past_key_values_length)
+        positions = positions.to(inputs_embeds.device)
+
+        hidden_states = inputs_embeds + positions
+        hidden_states = self.layernorm_embedding(hidden_states)
+
+        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+        if self.gradient_checkpointing and self.training:
+            if use_cache:
+                logger.warning_once(
+                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+                )
+                use_cache = False
+
+        # decoder layers
+        all_hidden_states = () if output_hidden_states else None
+        all_self_attns = () if output_attentions else None
+        all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
+        next_decoder_cache = () if use_cache else None
+
+        # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
+        for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
+            if attn_mask is not None:
+                if attn_mask.size()[0] != (len(self.layers)):
+                    raise ValueError(
+                        f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
+                        f" {attn_mask.size()[0]}."
+                    )
+
+        for idx, decoder_layer in enumerate(self.layers):
+            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+            if output_hidden_states:
+                all_hidden_states += (hidden_states,)
+            if self.training:
+                dropout_probability = torch.rand([])
+                if dropout_probability < self.layerdrop:
+                    continue
+
+            past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+            if self.gradient_checkpointing and self.training:
+
+                def create_custom_forward(module):
+                    def custom_forward(*inputs):
+                        # None for past_key_value
+                        return module(*inputs, output_attentions, use_cache)
+
+                    return custom_forward
+
+                layer_outputs = torch.utils.checkpoint.checkpoint(
+                    create_custom_forward(decoder_layer),
+                    hidden_states,
+                    attention_mask,
+                    encoder_hidden_states,
+                    encoder_attention_mask,
+                    head_mask[idx] if head_mask is not None else None,
+                    cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
+                    None,
+                )
+            else:
+                layer_outputs = decoder_layer(
+                    hidden_states,
+                    attention_mask=attention_mask,
+                    encoder_hidden_states=encoder_hidden_states,
+                    encoder_attention_mask=encoder_attention_mask,
+                    layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+                    cross_attn_layer_head_mask=(
+                        cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
+                    ),
+                    past_key_value=past_key_value,
+                    output_attentions=output_attentions,
+                    use_cache=use_cache,
+                )
+            hidden_states = layer_outputs[0]
+
+            if use_cache:
+                next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
+
+            if output_attentions:
+                all_self_attns += (layer_outputs[1],)
+
+                if encoder_hidden_states is not None:
+                    all_cross_attentions += (layer_outputs[2],)
+
+        # add hidden states from the last decoder layer
+        if output_hidden_states:
+            all_hidden_states += (hidden_states,)
+
+        next_cache = next_decoder_cache if use_cache else None
+        if not return_dict:
+            return tuple(
+                v
+                for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
+                if v is not None
+            )
+        return BaseModelOutputWithPastAndCrossAttentions(
+            last_hidden_state=hidden_states,
+            past_key_values=next_cache,
+            hidden_states=all_hidden_states,
+            attentions=all_self_attns,
+            cross_attentions=all_cross_attentions,
+        )
+
+
+@add_start_docstrings(
+    "The bare BART Model outputting raw hidden-states without any specific head on top.",
+    BART_START_DOCSTRING,
+)
+class BartModel(BartPreTrainedModel):
+    _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
+
+    def __init__(self, config: BartConfig):
+        super().__init__(config)
+
+        padding_idx, vocab_size = config.pad_token_id, config.vocab_size
+        self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
+
+        self.encoder = BartEncoder(config, self.shared)
+        self.decoder = 
BartDecoder(config, self.shared) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.shared + + def set_input_embeddings(self, value): + self.shared = value + self.encoder.embed_tokens = self.shared + self.decoder.embed_tokens = self.shared + + def get_encoder(self): + return self.encoder + + def get_decoder(self): + return self.decoder + + @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=Seq2SeqModelOutput, + config_class=_CONFIG_FOR_DOC, + expected_output=_EXPECTED_OUTPUT_SHAPE, + ) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + decoder_input_ids: Optional[torch.LongTensor] = None, + decoder_attention_mask: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.Tensor] = None, + decoder_head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + encoder_outputs: Optional[List[torch.FloatTensor]] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + decoder_inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, Seq2SeqModelOutput]: + # different to other models, Bart automatically creates decoder_input_ids from + # input_ids if no decoder_input_ids are provided + if decoder_input_ids is None and decoder_inputs_embeds is None: + if input_ids is None: + raise ValueError( + "If no `decoder_input_ids` or `decoder_inputs_embeds` are " + "passed, `input_ids` cannot be `None`. Please pass either " + "`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`." 
+ ) + + decoder_input_ids = shift_tokens_right( + input_ids, self.config.pad_token_id, self.config.decoder_start_token_id + ) + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if encoder_outputs is None: + encoder_outputs = self.encoder( + input_ids=input_ids, + attention_mask=attention_mask, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True + elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): + encoder_outputs = BaseModelOutput( + last_hidden_state=encoder_outputs[0], + hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, + attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, + ) + + # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) + decoder_outputs = self.decoder( + input_ids=decoder_input_ids, + attention_mask=decoder_attention_mask, + encoder_hidden_states=encoder_outputs[0], + encoder_attention_mask=attention_mask, + head_mask=decoder_head_mask, + cross_attn_head_mask=cross_attn_head_mask, + past_key_values=past_key_values, + inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + if not return_dict: + return decoder_outputs + encoder_outputs + + return Seq2SeqV2DialOutput( + last_hidden_state=decoder_outputs.last_hidden_state, + past_key_values=decoder_outputs.past_key_values, + decoder_hidden_states=decoder_outputs.hidden_states, + decoder_attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + encoder_last_hidden_state=encoder_outputs.last_hidden_state, + encoder_hidden_states=encoder_outputs.hidden_states, + encoder_attentions=encoder_outputs.attentions, + encoder_outputs=encoder_outputs + ) + + +@add_start_docstrings( + "The BART Model with a language modeling head. 
Can be used for summarization.", BART_START_DOCSTRING +) +class BartForConditionalGeneration(BartPreTrainedModel): + base_model_prefix = "model" + _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"] + _keys_to_ignore_on_load_missing = ["final_logits_bias"] + + def __init__(self, config: BartConfig): + super().__init__(config) + self.model = BartModel(config) + self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) + self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_encoder(self): + return self.model.get_encoder() + + def get_decoder(self): + return self.model.get_decoder() + + def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None) -> nn.Embedding: + new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of) + self._resize_final_logits_bias(new_embeddings.weight.shape[0]) + return new_embeddings + + def _resize_final_logits_bias(self, new_num_tokens: int) -> None: + old_num_tokens = self.final_logits_bias.shape[-1] + if new_num_tokens <= old_num_tokens: + new_bias = self.final_logits_bias[:, :new_num_tokens] + else: + extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device) + new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1) + self.register_buffer("final_logits_bias", new_bias) + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) + @add_end_docstrings(BART_GENERATION_EXAMPLE) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + decoder_input_ids: Optional[torch.LongTensor] = None, + decoder_attention_mask: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.Tensor] = None, + decoder_head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + encoder_outputs: Optional[List[torch.FloatTensor]] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + decoder_inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, Seq2SeqLMOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
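+
+            When `labels` is provided and neither `decoder_input_ids` nor `decoder_inputs_embeds` is passed, the
+            decoder inputs are created automatically by shifting the labels to the right (see `shift_tokens_right`).
+            A minimal sketch (checkpoint and texts illustrative only):
+
+            ```python
+            >>> from transformers import AutoTokenizer, BartForConditionalGeneration
+
+            >>> tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
+            >>> model = BartForConditionalGeneration.from_pretrained("facebook/bart-base")
+
+            >>> inputs = tokenizer(["UN chief says there is no plan B"], return_tensors="pt")
+            >>> labels = tokenizer(["UN chief says there is no plan B"], return_tensors="pt")["input_ids"]
+            >>> loss = model(input_ids=inputs["input_ids"], labels=labels).loss
+            ```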
+ + Returns: + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if labels is not None: + if use_cache: + logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") + use_cache = False + if decoder_input_ids is None and decoder_inputs_embeds is None: + decoder_input_ids = shift_tokens_right( + labels, self.config.pad_token_id, self.config.decoder_start_token_id + ) + + outputs = self.model( + input_ids, + attention_mask=attention_mask, + decoder_input_ids=decoder_input_ids, + encoder_outputs=encoder_outputs, + decoder_attention_mask=decoder_attention_mask, + head_mask=head_mask, + decoder_head_mask=decoder_head_mask, + cross_attn_head_mask=cross_attn_head_mask, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + decoder_inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + lm_logits = self.lm_head(outputs[0]) + lm_logits = lm_logits + self.final_logits_bias.to(lm_logits.device) + + masked_lm_loss = None + if labels is not None: + labels = labels.to(lm_logits.device) + loss_fct = CrossEntropyLoss() + masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + output = (lm_logits,) + outputs[1:] + return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output + + return Seq2SeqV2DialOutput( + loss=masked_lm_loss, + logits=lm_logits, + past_key_values=outputs.past_key_values, + decoder_hidden_states=outputs.decoder_hidden_states, + decoder_attentions=outputs.decoder_attentions, + cross_attentions=outputs.cross_attentions, + encoder_last_hidden_state=outputs.encoder_last_hidden_state, + encoder_hidden_states=outputs.encoder_hidden_states, + encoder_attentions=outputs.encoder_attentions, + encoder_outputs=outputs.encoder_outputs + ) + + def prepare_inputs_for_generation( + self, + decoder_input_ids, + past_key_values=None, + attention_mask=None, + decoder_attention_mask=None, + head_mask=None, + decoder_head_mask=None, + cross_attn_head_mask=None, + use_cache=None, + encoder_outputs=None, + **kwargs, + ): + # cut decoder_input_ids if past_key_values is used + if past_key_values is not None: + decoder_input_ids = decoder_input_ids[:, -1:] + + return { + "input_ids": None, # encoder_outputs is defined. input_ids not needed + "encoder_outputs": encoder_outputs, + "past_key_values": past_key_values, + "decoder_input_ids": decoder_input_ids, + "attention_mask": attention_mask, + "decoder_attention_mask": decoder_attention_mask, + "head_mask": head_mask, + "decoder_head_mask": decoder_head_mask, + "cross_attn_head_mask": cross_attn_head_mask, + "use_cache": use_cache, # change this to avoid caching (presumably for debugging) + } + + def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): + return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) + + @staticmethod + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + # cached cross_attention states don't have to be reordered -> they are always the same + reordered_past += ( + tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:], + ) + return reordered_past + + +@add_start_docstrings( + """ + Bart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE + tasks. 
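+
+    The decoder hidden state of the final `eos` token of each sequence is used as the pooled representation that is
+    fed to the classification head.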
+ """, + BART_START_DOCSTRING, +) +class BartForSequenceClassification(BartPreTrainedModel): + _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] + + def __init__(self, config: BartConfig, **kwargs): + super().__init__(config, **kwargs) + self.model = BartModel(config) + self.classification_head = BartClassificationHead( + config.d_model, + config.d_model, + config.num_labels, + config.classifier_dropout, + ) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION, + output_type=Seq2SeqSequenceClassifierOutput, + config_class=_CONFIG_FOR_DOC, + expected_output=_SEQ_CLASS_EXPECTED_OUTPUT, + expected_loss=_SEQ_CLASS_EXPECTED_LOSS, + ) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + decoder_input_ids: Optional[torch.LongTensor] = None, + decoder_attention_mask: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.Tensor] = None, + decoder_head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + encoder_outputs: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + decoder_inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, Seq2SeqSequenceClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + if labels is not None: + use_cache = False + + if input_ids is None and inputs_embeds is not None: + raise NotImplementedError( + f"Passing input embeddings is currently not supported for {self.__class__.__name__}" + ) + + outputs = self.model( + input_ids, + attention_mask=attention_mask, + decoder_input_ids=decoder_input_ids, + decoder_attention_mask=decoder_attention_mask, + head_mask=head_mask, + decoder_head_mask=decoder_head_mask, + cross_attn_head_mask=cross_attn_head_mask, + encoder_outputs=encoder_outputs, + inputs_embeds=inputs_embeds, + decoder_inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = outputs[0] # last hidden state + + eos_mask = input_ids.eq(self.config.eos_token_id).to(hidden_states.device) + + if len(torch.unique_consecutive(eos_mask.sum(1))) > 1: + raise ValueError("All examples must have the same number of tokens.") + sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[ + :, -1, : + ] + logits = self.classification_head(sentence_representation) + + loss = None + if labels is not None: + labels = labels.to(logits.device) + if self.config.problem_type is None: + if self.config.num_labels == 1: + self.config.problem_type = "regression" + elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.config.num_labels == 1: + loss = loss_fct(logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + if not return_dict: + output = (logits,) + outputs[1:] + return ((loss,) + output) if loss is not None else output + + return Seq2SeqSequenceClassifierOutput( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + decoder_hidden_states=outputs.decoder_hidden_states, + decoder_attentions=outputs.decoder_attentions, + cross_attentions=outputs.cross_attentions, + encoder_last_hidden_state=outputs.encoder_last_hidden_state, + encoder_hidden_states=outputs.encoder_hidden_states, + encoder_attentions=outputs.encoder_attentions, + ) + + +@add_start_docstrings( + """ + BART Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear + layer on top of the hidden-states output to compute `span start logits` and `span end logits`). 
+ """, + BART_START_DOCSTRING, +) +class BartForQuestionAnswering(BartPreTrainedModel): + _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] + + def __init__(self, config): + super().__init__(config) + + config.num_labels = 2 + self.num_labels = config.num_labels + + self.model = BartModel(config) + self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_QA, + output_type=Seq2SeqQuestionAnsweringModelOutput, + config_class=_CONFIG_FOR_DOC, + expected_loss=_QA_EXPECTED_LOSS, + expected_output=_QA_EXPECTED_OUTPUT, + ) + def forward( + self, + input_ids: torch.Tensor = None, + attention_mask: Optional[torch.Tensor] = None, + decoder_input_ids: Optional[torch.LongTensor] = None, + decoder_attention_mask: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.Tensor] = None, + decoder_head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + encoder_outputs: Optional[List[torch.FloatTensor]] = None, + start_positions: Optional[torch.LongTensor] = None, + end_positions: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + decoder_inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, Seq2SeqQuestionAnsweringModelOutput]: + r""" + start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the start of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence + are not taken into account for computing the loss. + end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the end of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence + are not taken into account for computing the loss. 
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + if start_positions is not None and end_positions is not None: + use_cache = False + + outputs = self.model( + input_ids, + attention_mask=attention_mask, + decoder_input_ids=decoder_input_ids, + decoder_attention_mask=decoder_attention_mask, + head_mask=head_mask, + decoder_head_mask=decoder_head_mask, + cross_attn_head_mask=cross_attn_head_mask, + encoder_outputs=encoder_outputs, + inputs_embeds=inputs_embeds, + decoder_inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + logits = self.qa_outputs(sequence_output) + start_logits, end_logits = logits.split(1, dim=-1) + start_logits = start_logits.squeeze(-1).contiguous() + end_logits = end_logits.squeeze(-1).contiguous() + + total_loss = None + if start_positions is not None and end_positions is not None: + # If we are on multi-GPU, split add a dimension + if len(start_positions.size()) > 1: + start_positions = start_positions.squeeze(-1) + if len(end_positions.size()) > 1: + end_positions = end_positions.squeeze(-1) + # sometimes the start/end positions are outside our model inputs, we ignore these terms + ignored_index = start_logits.size(1) + start_positions = start_positions.clamp(0, ignored_index) + end_positions = end_positions.clamp(0, ignored_index) + + loss_fct = CrossEntropyLoss(ignore_index=ignored_index) + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + + if not return_dict: + output = ( + start_logits, + end_logits, + ) + outputs[1:] + return ((total_loss,) + output) if total_loss is not None else output + + return Seq2SeqQuestionAnsweringModelOutput( + loss=total_loss, + start_logits=start_logits, + end_logits=end_logits, + past_key_values=outputs.past_key_values, + decoder_hidden_states=outputs.decoder_hidden_states, + decoder_attentions=outputs.decoder_attentions, + cross_attentions=outputs.cross_attentions, + encoder_last_hidden_state=outputs.encoder_last_hidden_state, + encoder_hidden_states=outputs.encoder_hidden_states, + encoder_attentions=outputs.encoder_attentions, + ) + + +class BartDecoderWrapper(BartPreTrainedModel): + """ + This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is + used in combination with the [`EncoderDecoderModel`] framework. + """ + + def __init__(self, config): + super().__init__(config) + self.decoder = BartDecoder(config) + + def forward(self, *args, **kwargs): + return self.decoder(*args, **kwargs) + + +@add_start_docstrings( + """ + BART decoder with with a language modeling head on top (linear layer with weights tied to the input embeddings). 
+ """, + BART_START_DOCSTRING, +) +class BartForCausalLM(BartPreTrainedModel): + _tied_weights_keys = ["lm_head.weight"] + + def __init__(self, config): + config = copy.deepcopy(config) + config.is_decoder = True + config.is_encoder_decoder = False + super().__init__(config) + self.model = BartDecoderWrapper(config) + + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.decoder.embed_tokens + + def set_input_embeddings(self, value): + self.model.decoder.embed_tokens = value + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def set_decoder(self, decoder): + self.model.decoder = decoder + + def get_decoder(self): + return self.model.decoder + + @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: + r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you + provide it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention + if the model is configured as a decoder. + encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used + in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the cross-attention modules. 
Mask values selected in `[0, 1]`:
+
+                - 1 indicates the head is **not masked**,
+                - 0 indicates the head is **masked**.
+
+            past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+                Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
+                shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional
+                tensors are only required when the model is used as a decoder in a Sequence to Sequence model.
+
+                Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
+                cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+                all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+            use_cache (`bool`, *optional*):
+                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+                (see `past_key_values`).
+            output_attentions (`bool`, *optional*):
+                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+                returned tensors for more detail.
+            output_hidden_states (`bool`, *optional*):
+                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+                for more detail.
+            return_dict (`bool`, *optional*):
+                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+
+        Returns:
+
+        Example:
+
+        ```python
+        >>> from transformers import AutoTokenizer, BartForCausalLM
+
+        >>> tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
+        >>> model = BartForCausalLM.from_pretrained("facebook/bart-base", add_cross_attention=False)
+        >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + >>> outputs = model(**inputs) + + >>> logits = outputs.logits + >>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size] + >>> list(logits.shape) == expected_shape + True + ```""" + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if labels is not None: + if use_cache: + logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") + use_cache = False + if input_ids is None and inputs_embeds is None: + input_ids = shift_tokens_right( + labels, self.config.pad_token_id, self.config.decoder_start_token_id + ) + + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model.decoder( + input_ids=input_ids, + attention_mask=attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + head_mask=head_mask, + cross_attn_head_mask=cross_attn_head_mask, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + logits = self.lm_head(outputs[0]) + + loss = None + if labels is not None: + labels = labels.to(logits.device) + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + def prepare_inputs_for_generation( + self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs + ): + # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = input_ids.new_ones(input_ids.shape) + + if past_key_values: + input_ids = input_ids[:, -1:] + # first step, decoder_cached_states are empty + return { + "input_ids": input_ids, # encoder_outputs is defined. input_ids not needed + "attention_mask": attention_mask, + "past_key_values": past_key_values, + "use_cache": use_cache, + } + + @staticmethod + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) + return reordered_past diff --git a/models/backbones/encoder_decoder/xbart_original.py b/models/backbones/encoder_decoder/xbart_original.py new file mode 100644 index 0000000..b6de4ff --- /dev/null +++ b/models/backbones/encoder_decoder/xbart_original.py @@ -0,0 +1,1954 @@ +# coding=utf-8 +# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch BART model."""
+import copy
+import math
+import warnings
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from transformers.activations import ACT2FN
+from transformers.modeling_outputs import (
+    BaseModelOutput,
+    BaseModelOutputWithPastAndCrossAttentions,
+    CausalLMOutputWithCrossAttentions,
+    Seq2SeqLMOutput,
+    Seq2SeqModelOutput,
+    Seq2SeqQuestionAnsweringModelOutput,
+    Seq2SeqSequenceClassifierOutput,
+)
+from transformers.modeling_utils import PreTrainedModel
+from transformers.utils import (
+    add_code_sample_docstrings,
+    add_end_docstrings,
+    add_start_docstrings,
+    add_start_docstrings_to_model_forward,
+    logging,
+    replace_return_docstrings,
+)
+from transformers.models.bart.configuration_bart import BartConfig
+from .outputs import Seq2SeqV2DialOutput
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "facebook/bart-base"
+_CONFIG_FOR_DOC = "BartConfig"
+
+# Base model docstring
+_EXPECTED_OUTPUT_SHAPE = [1, 8, 768]
+
+# SequenceClassification docstring
+_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "valhalla/bart-large-sst2"
+_SEQ_CLASS_EXPECTED_LOSS = 0.0
+_SEQ_CLASS_EXPECTED_OUTPUT = "'POSITIVE'"
+
+# QuestionAnswering docstring
+_CHECKPOINT_FOR_QA = "valhalla/bart-large-finetuned-squadv1"
+_QA_EXPECTED_LOSS = 0.59
+_QA_EXPECTED_OUTPUT = "' nice puppet'"
+
+
+BART_PRETRAINED_MODEL_ARCHIVE_LIST = [
+    "facebook/bart-large",
+    # see all BART models at https://huggingface.co/models?filter=bart
+]
+
+
+def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
+    """
+    Shift input ids one token to the right.
+    """
+    shifted_input_ids = input_ids.new_zeros(input_ids.shape)
+    shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
+    shifted_input_ids[:, 0] = decoder_start_token_id
+
+    if pad_token_id is None:
+        raise ValueError("self.model.config.pad_token_id has to be defined.")
+    # replace possible -100 values in labels by `pad_token_id`
+    shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
+
+    return shifted_input_ids
+
+
+def _make_causal_mask(
+    input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
+):
+    """
+    Make causal mask used for uni-directional self-attention.
+    """
+    bsz, tgt_len = input_ids_shape
+    mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
+    mask_cond = torch.arange(mask.size(-1), device=device)
+    mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
+    mask = mask.to(dtype)
+
+    if past_key_values_length > 0:
+        mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
+    return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
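
Both mask helpers return additive float masks: positions that may be attended to hold 0 and blocked positions hold the dtype minimum, so the mask can simply be added to the attention logits before the softmax. A quick REPL-style check of `_make_causal_mask` as defined above (toy shape, CPU only):

```python
import torch

# 1 batch row, 4 target positions, no cached past.
mask = _make_causal_mask(torch.Size([1, 4]), torch.float32, device=torch.device("cpu"))
print(mask.shape)  # torch.Size([1, 1, 4, 4])

# Zero entries mark the positions each query may attend to (lower triangle).
print((mask == 0).int()[0, 0])
# tensor([[1, 0, 0, 0],
#         [1, 1, 0, 0],
#         [1, 1, 1, 0],
#         [1, 1, 1, 1]], dtype=torch.int32)
```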
+ """ + bsz, src_len = mask.size() + tgt_len = tgt_len if tgt_len is not None else src_len + + expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) + + inverted_mask = 1.0 - expanded_mask + + return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) + + +class BartLearnedPositionalEmbedding(nn.Embedding): + """ + This module learns positional embeddings up to a fixed maximum size. + """ + + def __init__(self, num_embeddings: int, embedding_dim: int): + # Bart is set up so that if padding_idx is specified then offset the embedding ids by 2 + # and adjust num_embeddings appropriately. Other models don't have this hack + self.offset = 2 + super().__init__(num_embeddings + self.offset, embedding_dim) + + def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0): + """`input_ids' shape is expected to be [bsz x seqlen].""" + + bsz, seq_len = input_ids.shape[:2] + positions = torch.arange( + past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device + ).expand(bsz, -1) + + return super().forward(positions + self.offset) + + +class BartAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__( + self, + embed_dim: int, + num_heads: int, + dropout: float = 0.0, + is_decoder: bool = False, + bias: bool = True, + ): + super().__init__() + self.embed_dim = embed_dim + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + + if (self.head_dim * num_heads) != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" + f" and `num_heads`: {num_heads})." + ) + self.scaling = self.head_dim**-0.5 + self.is_decoder = is_decoder + + self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + key_value_states: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.Tensor] = None, + layer_head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel""" + + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + is_cross_attention = key_value_states is not None + + bsz, tgt_len, _ = hidden_states.size() + + # get query proj + query_states = self.q_proj(hidden_states) * self.scaling + # get key, value proj + # `past_key_value[0].shape[2] == key_value_states.shape[1]` + # is checking that the `sequence_length` of the `past_key_value` is the same as + # the provided `key_value_states` to support prefix tuning + if ( + is_cross_attention + and past_key_value is not None + and past_key_value[0].shape[2] == key_value_states.shape[1] + ): + # reuse k,v, cross_attentions + key_states = past_key_value[0] + value_states = past_key_value[1] + elif is_cross_attention: + # cross_attentions + key_states = self._shape(self.k_proj(key_value_states), -1, bsz) + value_states = self._shape(self.v_proj(key_value_states), -1, bsz) + 
elif past_key_value is not None: + # reuse k, v, self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + else: + # self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_states, value_states) + + proj_shape = (bsz * self.num_heads, -1, self.head_dim) + query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) + key_states = key_states.reshape(*proj_shape) + value_states = value_states.reshape(*proj_shape) + + src_len = key_states.size(1) + attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) + + if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): + raise ValueError( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, tgt_len, src_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" + ) + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + if layer_head_mask is not None: + if layer_head_mask.size() != (self.num_heads,): + raise ValueError( + f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" + f" {layer_head_mask.size()}" + ) + attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + if output_attentions: + # this operation is a bit awkward, but it's required to + # make sure that attn_weights keeps its gradient. 
+ # In order to do so, attn_weights have to be reshaped + # twice and have to be reused in the following + attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) + else: + attn_weights_reshaped = None + + attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + + attn_output = torch.bmm(attn_probs, value_states) + + if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) + attn_output = attn_output.transpose(1, 2) + + # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be + # partitioned across GPUs when using tensor-parallelism. + attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) + + attn_output = self.out_proj(attn_output) + + return attn_output, attn_weights_reshaped, past_key_value + + +class BartEncoderLayer(nn.Module): + def __init__(self, config: BartConfig): + super().__init__() + self.embed_dim = config.d_model + self.self_attn = BartAttention( + embed_dim=self.embed_dim, + num_heads=config.encoder_attention_heads, + dropout=config.attention_dropout, + ) + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.dropout = config.dropout + self.activation_fn = ACT2FN[config.activation_function] + self.activation_dropout = config.activation_dropout + self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) + self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) + self.final_layer_norm = nn.LayerNorm(self.embed_dim) + + def forward( + self, + hidden_states: torch.FloatTensor, + attention_mask: torch.FloatTensor, + layer_head_mask: torch.FloatTensor, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size + `(encoder_attention_heads,)`. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. 
+ """ + residual = hidden_states + hidden_states, attn_weights, _ = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + layer_head_mask=layer_head_mask, + output_attentions=output_attentions, + ) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + residual = hidden_states + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) + hidden_states = self.fc2(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.final_layer_norm(hidden_states) + + if hidden_states.dtype == torch.float16 and ( + torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() + ): + clamp_value = torch.finfo(hidden_states.dtype).max - 1000 + hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (attn_weights,) + + return outputs + + +class BartDecoderLayer(nn.Module): + def __init__(self, config: BartConfig): + super().__init__() + self.embed_dim = config.d_model + + self.self_attn = BartAttention( + embed_dim=self.embed_dim, + num_heads=config.decoder_attention_heads, + dropout=config.attention_dropout, + is_decoder=True, + ) + self.dropout = config.dropout + self.activation_fn = ACT2FN[config.activation_function] + self.activation_dropout = config.activation_dropout + + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.encoder_attn = BartAttention( + self.embed_dim, + config.decoder_attention_heads, + dropout=config.attention_dropout, + is_decoder=True, + ) + self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) + self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) + self.final_layer_norm = nn.LayerNorm(self.embed_dim) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + layer_head_mask: Optional[torch.Tensor] = None, + cross_attn_layer_head_mask: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = True, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + encoder_hidden_states (`torch.FloatTensor`): + cross attention input to the layer of shape `(batch, seq_len, embed_dim)` + encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size + `(encoder_attention_heads,)`. + cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of + size `(decoder_attention_heads,)`. 
+ past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + """ + residual = hidden_states + + # Self Attention + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + # add present self-attn cache to positions 1,2 of present_key_value tuple + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + past_key_value=self_attn_past_key_value, + attention_mask=attention_mask, + layer_head_mask=layer_head_mask, + output_attentions=output_attentions, + ) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + # Cross-Attention Block + cross_attn_present_key_value = None + cross_attn_weights = None + if encoder_hidden_states is not None: + residual = hidden_states + + # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None + hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( + hidden_states=hidden_states, + key_value_states=encoder_hidden_states, + attention_mask=encoder_attention_mask, + layer_head_mask=cross_attn_layer_head_mask, + past_key_value=cross_attn_past_key_value, + output_attentions=output_attentions, + ) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.encoder_attn_layer_norm(hidden_states) + + # add cross-attn to positions 3,4 of present_key_value tuple + present_key_value = present_key_value + cross_attn_present_key_value + + # Fully Connected + residual = hidden_states + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) + hidden_states = self.fc2(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.final_layer_norm(hidden_states) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights, cross_attn_weights) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +class BartClassificationHead(nn.Module): + """Head for sentence-level classification tasks.""" + + def __init__( + self, + input_dim: int, + inner_dim: int, + num_classes: int, + pooler_dropout: float, + ): + super().__init__() + self.dense = nn.Linear(input_dim, inner_dim) + self.dropout = nn.Dropout(p=pooler_dropout) + self.out_proj = nn.Linear(inner_dim, num_classes) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dropout(hidden_states) + hidden_states = self.dense(hidden_states) + hidden_states = torch.tanh(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.out_proj(hidden_states) + return hidden_states + + +class BartPreTrainedModel(PreTrainedModel): + config_class = BartConfig + base_model_prefix = "model" + supports_gradient_checkpointing = True + _keys_to_ignore_on_load_unexpected = 
["encoder.version", "decoder.version"] + _no_split_modules = [r"BartEncoderLayer", r"BartDecoderLayer"] + _skip_keys_device_placement = "past_key_values" + + def _init_weights(self, module): + std = self.config.init_std + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, (BartDecoder, BartEncoder)): + module.gradient_checkpointing = value + + @property + def dummy_inputs(self): + pad_token = self.config.pad_token_id + input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device) + dummy_inputs = { + "attention_mask": input_ids.ne(pad_token), + "input_ids": input_ids, + } + return dummy_inputs + + +class PretrainedBartModel(BartPreTrainedModel): + def __init_subclass__(self): + warnings.warn( + "The class `PretrainedBartModel` has been depreciated, please use `BartPreTrainedModel` instead.", + FutureWarning, + ) + + +class BartPretrainedModel(BartPreTrainedModel): + def __init_subclass__(self): + warnings.warn( + "The class `PretrainedBartModel` has been depreciated, please use `BartPreTrainedModel` instead.", + FutureWarning, + ) + + +BART_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`BartConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +BART_GENERATION_EXAMPLE = r""" + Summarization example: + + ```python + >>> from transformers import AutoTokenizer, BartForConditionalGeneration + + >>> model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn") + >>> tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-cnn") + + >>> ARTICLE_TO_SUMMARIZE = ( + ... "PG&E stated it scheduled the blackouts in response to forecasts for high winds " + ... "amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were " + ... "scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow." + ... 
+
+BART_GENERATION_EXAMPLE = r"""
+    Summarization example:
+
+    ```python
+    >>> from transformers import AutoTokenizer, BartForConditionalGeneration
+
+    >>> model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn")
+    >>> tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-cnn")
+
+    >>> ARTICLE_TO_SUMMARIZE = (
+    ...     "PG&E stated it scheduled the blackouts in response to forecasts for high winds "
+    ...     "amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were "
+    ...     "scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow."
+    ... )
+    >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt")
+
+    >>> # Generate Summary
+    >>> summary_ids = model.generate(inputs["input_ids"], num_beams=2, min_length=0, max_length=20)
+    >>> tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+    'PG&E scheduled the blackouts in response to forecasts for high winds amid dry conditions'
+    ```
+
+    Mask filling example:
+
+    ```python
+    >>> from transformers import AutoTokenizer, BartForConditionalGeneration
+
+    >>> tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
+    >>> model = BartForConditionalGeneration.from_pretrained("facebook/bart-base")
+
+    >>> TXT = "My friends are <mask> but they eat too many carbs."
+    >>> input_ids = tokenizer([TXT], return_tensors="pt")["input_ids"]
+    >>> logits = model(input_ids).logits
+
+    >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
+    >>> probs = logits[0, masked_index].softmax(dim=0)
+    >>> values, predictions = probs.topk(5)
+
+    >>> tokenizer.decode(predictions).split()
+    ['not', 'good', 'healthy', 'great', 'very']
+    ```
+"""
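
`BART_INPUTS_DOCSTRING` below notes that missing `decoder_input_ids` are created by shifting `input_ids` to the right; the module-level `shift_tokens_right` defined earlier in this file implements that shift. A tiny worked example with toy ids:

```python
import torch

labels = torch.tensor([[10, 11, -100, -100]])  # -100 marks padding in the labels
shifted = shift_tokens_right(labels, pad_token_id=1, decoder_start_token_id=2)
print(shifted)
# tensor([[ 2, 10, 11,  1]])  -- start token prepended, remaining -100 replaced by pad
```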
+
+BART_INPUTS_DOCSTRING = r"""
+    Args:
+        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+            it.
+
+            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+            [`PreTrainedTokenizer.__call__`] for details.
+
+            [What are input IDs?](../glossary#input-ids)
+        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+            - 1 for tokens that are **not masked**,
+            - 0 for tokens that are **masked**.
+
+            [What are attention masks?](../glossary#attention-mask)
+        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+            Indices of decoder input sequence tokens in the vocabulary.
+
+            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+            [`PreTrainedTokenizer.__call__`] for details.
+
+            [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+            Bart uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
+            is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
+
+            For translation and summarization training, `decoder_input_ids` should be provided. If no
+            `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
+            for denoising pre-training following the paper.
+        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+            be used by default.
+
+            If you want to change padding behavior, you should read [`modeling_bart._prepare_decoder_attention_mask`]
+            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
+            information on the default strategy.
+        head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+            Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+            - 1 indicates the head is **not masked**,
+            - 0 indicates the head is **masked**.
+
+        decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+            Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
+
+            - 1 indicates the head is **not masked**,
+            - 0 indicates the head is **masked**.
+
+        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+            Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
+            1]`:
+
+            - 1 indicates the head is **not masked**,
+            - 0 indicates the head is **masked**.
+
+        encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
+            Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
+            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
+            hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
+            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+            than the model's internal embedding lookup matrix.
+        decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
+            Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
+            representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
+            input (see `past_key_values`). This is useful if you want more control over how to convert
+            `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
+
+            If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
+            of `inputs_embeds`.
+        use_cache (`bool`, *optional*):
+            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+            `past_key_values`).
+        output_attentions (`bool`, *optional*):
+            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+            tensors for more detail.
+        output_hidden_states (`bool`, *optional*):
+            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+            more detail.
+        return_dict (`bool`, *optional*):
+            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+""" + + +class BartEncoder(BartPreTrainedModel): + """ + Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a + [`BartEncoderLayer`]. + + Args: + config: BartConfig + embed_tokens (nn.Embedding): output embedding + """ + + def __init__(self, config: BartConfig, embed_tokens: Optional[nn.Embedding] = None): + super().__init__(config) + + self.dropout = config.dropout + self.layerdrop = config.encoder_layerdrop + + embed_dim = config.d_model + self.padding_idx = config.pad_token_id + self.max_source_positions = config.max_position_embeddings + self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 + + self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx) + + if embed_tokens is not None: + self.embed_tokens.weight = embed_tokens.weight + + self.embed_positions = BartLearnedPositionalEmbedding( + config.max_position_embeddings, + embed_dim, + ) + self.layers = nn.ModuleList([BartEncoderLayer(config) for _ in range(config.encoder_layers)]) + self.layernorm_embedding = nn.LayerNorm(embed_dim) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutput]: + r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you + provide it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
+ """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input = input_ids + input_ids = input_ids.view(-1, input_ids.shape[-1]) + elif inputs_embeds is not None: + input = inputs_embeds[:, :, -1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale + + embed_pos = self.embed_positions(input) + embed_pos = embed_pos.to(inputs_embeds.device) + + hidden_states = inputs_embeds + embed_pos + hidden_states = self.layernorm_embedding(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + + # expand attention_mask + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype) + + encoder_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + + # check if head_mask has a correct number of layers specified if desired + if head_mask is not None: + if head_mask.size()[0] != (len(self.layers)): + raise ValueError( + f"The head_mask should be specified for {len(self.layers)} layers, but it is for" + f" {head_mask.size()[0]}." + ) + + for idx, encoder_layer in enumerate(self.layers): + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + to_drop = False + if self.training: + dropout_probability = torch.rand([]) + if dropout_probability < self.layerdrop: # skip the layer + to_drop = True + + if to_drop: + layer_outputs = (None, None) + else: + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(encoder_layer), + hidden_states, + attention_mask, + (head_mask[idx] if head_mask is not None else None), + ) + else: + layer_outputs = encoder_layer( + hidden_states, + attention_mask, + layer_head_mask=(head_mask[idx] if head_mask is not None else None), + output_attentions=output_attentions, + ) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions + ) + + +class BartDecoder(BartPreTrainedModel): + """ + Transformer decoder consisting of *config.decoder_layers* layers. 
Each layer is a [`BartDecoderLayer`] + + Args: + config: BartConfig + embed_tokens (nn.Embedding): output embedding + """ + + def __init__(self, config: BartConfig, embed_tokens: Optional[nn.Embedding] = None): + super().__init__(config) + self.dropout = config.dropout + self.layerdrop = config.decoder_layerdrop + self.padding_idx = config.pad_token_id + self.max_target_positions = config.max_position_embeddings + self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 + + self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx) + + if embed_tokens is not None: + self.embed_tokens.weight = embed_tokens.weight + + self.embed_positions = BartLearnedPositionalEmbedding( + config.max_position_embeddings, + config.d_model, + ) + self.layers = nn.ModuleList([BartDecoderLayer(config) for _ in range(config.decoder_layers)]) + self.layernorm_embedding = nn.LayerNorm(config.d_model) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + combined_attention_mask = None + if input_shape[-1] > 1: + combined_attention_mask = _make_causal_mask( + input_shape, + inputs_embeds.dtype, + device=inputs_embeds.device, + past_key_values_length=past_key_values_length, + ) + + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( + inputs_embeds.device + ) + combined_attention_mask = ( + expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask + ) + + return combined_attention_mask + + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: + r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you + provide it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. 
+
+            [What are attention masks?](../glossary#attention-mask)
+        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+            of the decoder.
+        encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
+            Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
+            selected in `[0, 1]`:
+
+            - 1 for tokens that are **not masked**,
+            - 0 for tokens that are **masked**.
+
+            [What are attention masks?](../glossary#attention-mask)
+        head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+            Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+            - 1 indicates the head is **not masked**,
+            - 0 indicates the head is **masked**.
+
+        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+            Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
+            cross-attention on hidden heads. Mask values selected in `[0, 1]`:
+
+            - 1 indicates the head is **not masked**,
+            - 0 indicates the head is **masked**.
+
+        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+            shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
+            shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
+            cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+            that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+            all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+            than the model's internal embedding lookup matrix.
+        output_attentions (`bool`, *optional*):
+            Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+            returned tensors for more detail.
+        output_hidden_states (`bool`, *optional*):
+            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+            for more detail.
+        return_dict (`bool`, *optional*):
+            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") + elif input_ids is not None: + input = input_ids + input_shape = input.shape + input_ids = input_ids.view(-1, input_shape[-1]) + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + input = inputs_embeds[:, :, -1] + else: + raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input) * self.embed_scale + + attention_mask = self._prepare_decoder_attention_mask( + attention_mask, input_shape, inputs_embeds, past_key_values_length + ) + + # expand encoder attention mask + if encoder_hidden_states is not None and encoder_attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) + + # embed positions + positions = self.embed_positions(input, past_key_values_length) + positions = positions.to(inputs_embeds.device) + + hidden_states = inputs_embeds + positions + hidden_states = self.layernorm_embedding(hidden_states) + + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." + ) + use_cache = False + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None + next_decoder_cache = () if use_cache else None + + # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired + for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): + if attn_mask is not None: + if attn_mask.size()[0] != (len(self.layers)): + raise ValueError( + f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" + f" {head_mask.size()[0]}." 
+ ) + + for idx, decoder_layer in enumerate(self.layers): + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + if output_hidden_states: + all_hidden_states += (hidden_states,) + if self.training: + dropout_probability = torch.rand([]) + if dropout_probability < self.layerdrop: + continue + + past_key_value = past_key_values[idx] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + # None for past_key_value + return module(*inputs, output_attentions, use_cache) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(decoder_layer), + hidden_states, + attention_mask, + encoder_hidden_states, + encoder_attention_mask, + head_mask[idx] if head_mask is not None else None, + cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, + None, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + layer_head_mask=(head_mask[idx] if head_mask is not None else None), + cross_attn_layer_head_mask=( + cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None + ), + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + if encoder_hidden_states is not None: + all_cross_attentions += (layer_outputs[2],) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = next_decoder_cache if use_cache else None + if not return_dict: + return tuple( + v + for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + cross_attentions=all_cross_attentions, + ) + + +@add_start_docstrings( + "The bare BART Model outputting raw hidden-states without any specific head on top.", + BART_START_DOCSTRING, +) +class BartModel(BartPreTrainedModel): + _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] + + def __init__(self, config: BartConfig): + super().__init__(config) + + padding_idx, vocab_size = config.pad_token_id, config.vocab_size + self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx) + + self.encoder = BartEncoder(config, self.shared) + self.decoder = BartDecoder(config, self.shared) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.shared + + def set_input_embeddings(self, value): + self.shared = value + self.encoder.embed_tokens = self.shared + self.decoder.embed_tokens = self.shared + + def get_encoder(self): + return self.encoder + + def get_decoder(self): + return self.decoder + + @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=Seq2SeqModelOutput, + config_class=_CONFIG_FOR_DOC, + expected_output=_EXPECTED_OUTPUT_SHAPE, + ) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: 
Optional[torch.Tensor] = None, + decoder_input_ids: Optional[torch.LongTensor] = None, + decoder_attention_mask: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.Tensor] = None, + decoder_head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + encoder_outputs: Optional[List[torch.FloatTensor]] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + decoder_inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, Seq2SeqModelOutput]: + # different to other models, Bart automatically creates decoder_input_ids from + # input_ids if no decoder_input_ids are provided + if decoder_input_ids is None and decoder_inputs_embeds is None: + if input_ids is None: + raise ValueError( + "If no `decoder_input_ids` or `decoder_inputs_embeds` are " + "passed, `input_ids` cannot be `None`. Please pass either " + "`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`." + ) + + decoder_input_ids = shift_tokens_right( + input_ids, self.config.pad_token_id, self.config.decoder_start_token_id + ) + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if encoder_outputs is None: + encoder_outputs = self.encoder( + input_ids=input_ids, + attention_mask=attention_mask, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True + elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): + encoder_outputs = BaseModelOutput( + last_hidden_state=encoder_outputs[0], + hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, + attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, + ) + + # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) + decoder_outputs = self.decoder( + input_ids=decoder_input_ids, + attention_mask=decoder_attention_mask, + encoder_hidden_states=encoder_outputs[0], + encoder_attention_mask=attention_mask, + head_mask=decoder_head_mask, + cross_attn_head_mask=cross_attn_head_mask, + past_key_values=past_key_values, + inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + if not return_dict: + return decoder_outputs + encoder_outputs + + return Seq2SeqV2DialOutput( + last_hidden_state=decoder_outputs.last_hidden_state, + past_key_values=decoder_outputs.past_key_values, + decoder_hidden_states=decoder_outputs.hidden_states, + decoder_attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + encoder_last_hidden_state=encoder_outputs.last_hidden_state, + encoder_hidden_states=encoder_outputs.hidden_states, + encoder_attentions=encoder_outputs.attentions, + encoder_outputs=encoder_outputs + ) + + 
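To make the auto-shift behaviour above concrete, here is a minimal usage sketch (not part of the patch itself): when neither `decoder_input_ids` nor `decoder_inputs_embeds` is passed, `BartModel.forward` derives the decoder inputs from `input_ids` via `shift_tokens_right`. The `facebook/bart-base` checkpoint name is an assumption for illustration; any BART-family checkpoint with matching weight names should load.

```python
from transformers import AutoTokenizer

# Hedged, illustrative sketch of driving the BartModel defined above.
tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
model = BartModel.from_pretrained("facebook/bart-base")

inputs = tokenizer("Hello, world!", return_tensors="pt")
# No decoder_input_ids given: forward() shifts input_ids right internally,
# prepending config.decoder_start_token_id.
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # torch.Size([1, seq_len, config.d_model])
```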
+@add_start_docstrings( + "The BART Model with a language modeling head. Can be used for summarization.", BART_START_DOCSTRING +) +class BartForConditionalGeneration(BartPreTrainedModel): + base_model_prefix = "model" + _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"] + _keys_to_ignore_on_load_missing = ["final_logits_bias"] + + def __init__(self, config: BartConfig): + super().__init__(config) + self.model = BartModel(config) + self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) + self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_encoder(self): + return self.model.get_encoder() + + def get_decoder(self): + return self.model.get_decoder() + + def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None) -> nn.Embedding: + new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of) + self._resize_final_logits_bias(new_embeddings.weight.shape[0]) + return new_embeddings + + def _resize_final_logits_bias(self, new_num_tokens: int) -> None: + old_num_tokens = self.final_logits_bias.shape[-1] + if new_num_tokens <= old_num_tokens: + new_bias = self.final_logits_bias[:, :new_num_tokens] + else: + extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device) + new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1) + self.register_buffer("final_logits_bias", new_bias) + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) + @add_end_docstrings(BART_GENERATION_EXAMPLE) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + decoder_input_ids: Optional[torch.LongTensor] = None, + decoder_attention_mask: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.Tensor] = None, + decoder_head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + encoder_outputs: Optional[List[torch.FloatTensor]] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + decoder_inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, Seq2SeqLMOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
+ + Returns: + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if labels is not None: + if use_cache: + logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") + use_cache = False + if decoder_input_ids is None and decoder_inputs_embeds is None: + decoder_input_ids = shift_tokens_right( + labels, self.config.pad_token_id, self.config.decoder_start_token_id + ) + + outputs = self.model( + input_ids, + attention_mask=attention_mask, + decoder_input_ids=decoder_input_ids, + encoder_outputs=encoder_outputs, + decoder_attention_mask=decoder_attention_mask, + head_mask=head_mask, + decoder_head_mask=decoder_head_mask, + cross_attn_head_mask=cross_attn_head_mask, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + decoder_inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + lm_logits = self.lm_head(outputs[0]) + lm_logits = lm_logits + self.final_logits_bias.to(lm_logits.device) + + masked_lm_loss = None + if labels is not None: + labels = labels.to(lm_logits.device) + # loss_fct = CrossEntropyLoss(ignore_index=self.config.pad_token_id) + loss_fct = CrossEntropyLoss() + masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + output = (lm_logits,) + outputs[1:] + return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output + + return Seq2SeqV2DialOutput( + loss=masked_lm_loss, + logits=lm_logits, + past_key_values=outputs.past_key_values, + decoder_hidden_states=outputs.decoder_hidden_states, + decoder_attentions=outputs.decoder_attentions, + cross_attentions=outputs.cross_attentions, + encoder_last_hidden_state=outputs.encoder_last_hidden_state, + encoder_hidden_states=outputs.encoder_hidden_states, + encoder_attentions=outputs.encoder_attentions, + encoder_outputs=outputs.encoder_outputs + ) + + def prepare_inputs_for_generation( + self, + decoder_input_ids, + past_key_values=None, + attention_mask=None, + decoder_attention_mask=None, + head_mask=None, + decoder_head_mask=None, + cross_attn_head_mask=None, + use_cache=None, + encoder_outputs=None, + **kwargs, + ): + # cut decoder_input_ids if past_key_values is used + if past_key_values is not None: + decoder_input_ids = decoder_input_ids[:, -1:] + + return { + "input_ids": None, # encoder_outputs is defined. 
input_ids not needed + "encoder_outputs": encoder_outputs, + "past_key_values": past_key_values, + "decoder_input_ids": decoder_input_ids, + "attention_mask": attention_mask, + "decoder_attention_mask": decoder_attention_mask, + "head_mask": head_mask, + "decoder_head_mask": decoder_head_mask, + "cross_attn_head_mask": cross_attn_head_mask, + "use_cache": use_cache, # change this to avoid caching (presumably for debugging) + } + + def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): + return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) + + @staticmethod + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + # cached cross_attention states don't have to be reordered -> they are always the same + reordered_past += ( + tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:], + ) + return reordered_past + + +@add_start_docstrings( + """ + Bart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE + tasks. + """, + BART_START_DOCSTRING, +) +class BartForSequenceClassification(BartPreTrainedModel): + _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] + + def __init__(self, config: BartConfig, **kwargs): + super().__init__(config, **kwargs) + self.model = BartModel(config) + self.classification_head = BartClassificationHead( + config.d_model, + config.d_model, + config.num_labels, + config.classifier_dropout, + ) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION, + output_type=Seq2SeqSequenceClassifierOutput, + config_class=_CONFIG_FOR_DOC, + expected_output=_SEQ_CLASS_EXPECTED_OUTPUT, + expected_loss=_SEQ_CLASS_EXPECTED_LOSS, + ) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + decoder_input_ids: Optional[torch.LongTensor] = None, + decoder_attention_mask: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.Tensor] = None, + decoder_head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + encoder_outputs: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + decoder_inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, Seq2SeqSequenceClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + if labels is not None: + use_cache = False + + if input_ids is None and inputs_embeds is not None: + raise NotImplementedError( + f"Passing input embeddings is currently not supported for {self.__class__.__name__}" + ) + + outputs = self.model( + input_ids, + attention_mask=attention_mask, + decoder_input_ids=decoder_input_ids, + decoder_attention_mask=decoder_attention_mask, + head_mask=head_mask, + decoder_head_mask=decoder_head_mask, + cross_attn_head_mask=cross_attn_head_mask, + encoder_outputs=encoder_outputs, + inputs_embeds=inputs_embeds, + decoder_inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = outputs[0] # last hidden state + + eos_mask = input_ids.eq(self.config.eos_token_id).to(hidden_states.device) + + if len(torch.unique_consecutive(eos_mask.sum(1))) > 1: + raise ValueError("All examples must have the same number of tokens.") + sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[ + :, -1, : + ] + logits = self.classification_head(sentence_representation) + + loss = None + if labels is not None: + labels = labels.to(logits.device) + if self.config.problem_type is None: + if self.config.num_labels == 1: + self.config.problem_type = "regression" + elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.config.num_labels == 1: + loss = loss_fct(logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + if not return_dict: + output = (logits,) + outputs[1:] + return ((loss,) + output) if loss is not None else output + + return Seq2SeqSequenceClassifierOutput( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + decoder_hidden_states=outputs.decoder_hidden_states, + decoder_attentions=outputs.decoder_attentions, + cross_attentions=outputs.cross_attentions, + encoder_last_hidden_state=outputs.encoder_last_hidden_state, + encoder_hidden_states=outputs.encoder_hidden_states, + encoder_attentions=outputs.encoder_attentions, + ) + + +@add_start_docstrings( + """ + BART Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear + layer on top of the hidden-states output to compute `span start logits` and `span end logits`). 
+ """, + BART_START_DOCSTRING, +) +class BartForQuestionAnswering(BartPreTrainedModel): + _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] + + def __init__(self, config): + super().__init__(config) + + config.num_labels = 2 + self.num_labels = config.num_labels + + self.model = BartModel(config) + self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_QA, + output_type=Seq2SeqQuestionAnsweringModelOutput, + config_class=_CONFIG_FOR_DOC, + expected_loss=_QA_EXPECTED_LOSS, + expected_output=_QA_EXPECTED_OUTPUT, + ) + def forward( + self, + input_ids: torch.Tensor = None, + attention_mask: Optional[torch.Tensor] = None, + decoder_input_ids: Optional[torch.LongTensor] = None, + decoder_attention_mask: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.Tensor] = None, + decoder_head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + encoder_outputs: Optional[List[torch.FloatTensor]] = None, + start_positions: Optional[torch.LongTensor] = None, + end_positions: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + decoder_inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, Seq2SeqQuestionAnsweringModelOutput]: + r""" + start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the start of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence + are not taken into account for computing the loss. + end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the end of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence + are not taken into account for computing the loss. 
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + if start_positions is not None and end_positions is not None: + use_cache = False + + outputs = self.model( + input_ids, + attention_mask=attention_mask, + decoder_input_ids=decoder_input_ids, + decoder_attention_mask=decoder_attention_mask, + head_mask=head_mask, + decoder_head_mask=decoder_head_mask, + cross_attn_head_mask=cross_attn_head_mask, + encoder_outputs=encoder_outputs, + inputs_embeds=inputs_embeds, + decoder_inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + logits = self.qa_outputs(sequence_output) + start_logits, end_logits = logits.split(1, dim=-1) + start_logits = start_logits.squeeze(-1).contiguous() + end_logits = end_logits.squeeze(-1).contiguous() + + total_loss = None + if start_positions is not None and end_positions is not None: + # If we are on multi-GPU, split add a dimension + if len(start_positions.size()) > 1: + start_positions = start_positions.squeeze(-1) + if len(end_positions.size()) > 1: + end_positions = end_positions.squeeze(-1) + # sometimes the start/end positions are outside our model inputs, we ignore these terms + ignored_index = start_logits.size(1) + start_positions = start_positions.clamp(0, ignored_index) + end_positions = end_positions.clamp(0, ignored_index) + + loss_fct = CrossEntropyLoss(ignore_index=ignored_index) + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + + if not return_dict: + output = ( + start_logits, + end_logits, + ) + outputs[1:] + return ((total_loss,) + output) if total_loss is not None else output + + return Seq2SeqQuestionAnsweringModelOutput( + loss=total_loss, + start_logits=start_logits, + end_logits=end_logits, + past_key_values=outputs.past_key_values, + decoder_hidden_states=outputs.decoder_hidden_states, + decoder_attentions=outputs.decoder_attentions, + cross_attentions=outputs.cross_attentions, + encoder_last_hidden_state=outputs.encoder_last_hidden_state, + encoder_hidden_states=outputs.encoder_hidden_states, + encoder_attentions=outputs.encoder_attentions, + ) + + +class BartDecoderWrapper(BartPreTrainedModel): + """ + This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is + used in combination with the [`EncoderDecoderModel`] framework. + """ + + def __init__(self, config): + super().__init__(config) + self.decoder = BartDecoder(config) + + def forward(self, *args, **kwargs): + return self.decoder(*args, **kwargs) + + +@add_start_docstrings( + """ + BART decoder with with a language modeling head on top (linear layer with weights tied to the input embeddings). 
+ """, + BART_START_DOCSTRING, +) +class BartForCausalLM(BartPreTrainedModel): + _tied_weights_keys = ["lm_head.weight"] + + def __init__(self, config): + config = copy.deepcopy(config) + config.is_decoder = True + config.is_encoder_decoder = False + super().__init__(config) + self.model = BartDecoderWrapper(config) + + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.decoder.embed_tokens + + def set_input_embeddings(self, value): + self.model.decoder.embed_tokens = value + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def set_decoder(self, decoder): + self.model.decoder = decoder + + def get_decoder(self): + return self.model.decoder + + @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: + r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you + provide it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention + if the model is configured as a decoder. + encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used + in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the cross-attention modules. 
Mask values selected in `[0, 1]`:
+
+            - 1 indicates the head is **not masked**,
+            - 0 indicates the head is **masked**.
+
+        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+            shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of
+            shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional
+            tensors are only required when the model is used as a decoder in a Sequence to Sequence model.
+
+            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
+            cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+            that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+            all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+        use_cache (`bool`, *optional*):
+            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+            (see `past_key_values`).
+        output_attentions (`bool`, *optional*):
+            Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+            returned tensors for more detail.
+        output_hidden_states (`bool`, *optional*):
+            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+            for more detail.
+        return_dict (`bool`, *optional*):
+            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+
+        Returns:
+
+        Example:
+
+        ```python
+        >>> from transformers import AutoTokenizer, BartForCausalLM
+
+        >>> tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
+        >>> model = BartForCausalLM.from_pretrained("facebook/bart-base", add_cross_attention=False)
+        >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + >>> outputs = model(**inputs) + + >>> logits = outputs.logits + >>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size] + >>> list(logits.shape) == expected_shape + True + ```""" + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model.decoder( + input_ids=input_ids, + attention_mask=attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + head_mask=head_mask, + cross_attn_head_mask=cross_attn_head_mask, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + logits = self.lm_head(outputs[0]) + + loss = None + if labels is not None: + labels = labels.to(logits.device) + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + def prepare_inputs_for_generation( + self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs + ): + # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = input_ids.new_ones(input_ids.shape) + + if past_key_values: + input_ids = input_ids[:, -1:] + # first step, decoder_cached_states are empty + return { + "input_ids": input_ids, # encoder_outputs is defined. input_ids not needed + "attention_mask": attention_mask, + "past_key_values": past_key_values, + "use_cache": use_cache, + } + + @staticmethod + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) + return reordered_past \ No newline at end of file diff --git a/models/backbones/encoder_decoder/xflan_t5.py b/models/backbones/encoder_decoder/xflan_t5.py new file mode 100644 index 0000000..db3ab99 --- /dev/null +++ b/models/backbones/encoder_decoder/xflan_t5.py @@ -0,0 +1,2075 @@ +# coding=utf-8 +# Copyright 2018 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" PyTorch T5 model.""" + + +import copy +import math +import os +import warnings +from typing import Optional, Tuple, Union + +import torch +from torch import nn +from torch.nn import CrossEntropyLoss +from torch.utils.checkpoint import checkpoint + +from transformers.activations import ACT2FN +from transformers.modeling_outputs import ( + BaseModelOutput, + BaseModelOutputWithPastAndCrossAttentions, + Seq2SeqLMOutput, + Seq2SeqModelOutput, +) +from transformers.modeling_utils import PreTrainedModel +from transformers.pytorch_utils import ( + ALL_LAYERNORM_LAYERS, + find_pruneable_heads_and_indices, + prune_linear_layer, +) +from transformers.utils import ( + DUMMY_INPUTS, + DUMMY_MASK, + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_torch_fx_proxy, + logging, + replace_return_docstrings, +) +from transformers.utils.model_parallel_utils import assert_device_map, get_device_map +from transformers.models.t5.configuration_t5 import T5Config +from .outputs import Seq2SeqV2DialOutput + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "T5Config" +_TOKENIZER_FOR_DOC = "T5Tokenizer" +_CHECKPOINT_FOR_DOC = "t5-small" + +#################################################### +# This dict contains ids and associated url +# for the pretrained weights provided with the models +#################################################### +T5_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "t5-small", + "t5-base", + "t5-large", + "t5-3b", + "t5-11b", + # See all T5 models at https://huggingface.co/models?filter=t5 +] + + +#################################################### +# This is a conversion method from TF 1.0 to PyTorch +# More details: https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28 +#################################################### +def load_tf_weights_in_t5(model, config, tf_checkpoint_path): + """Load tf checkpoints in a pytorch model.""" + try: + import re + + import numpy as np + import tensorflow as tf + except ImportError: + logger.error( + "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " + "https://www.tensorflow.org/install/ for installation instructions." 
+ ) + raise + tf_path = os.path.abspath(tf_checkpoint_path) + logger.info(f"Converting TensorFlow checkpoint from {tf_path}") + # Load weights from TF model + init_vars = tf.train.list_variables(tf_path) + names = [] + tf_weights = {} + for name, shape in init_vars: + logger.info(f"Loading TF weight {name} with shape {shape}") + array = tf.train.load_variable(tf_path, name) + names.append(name) + tf_weights[name] = array + + for txt_name in names: + name = txt_name.split("/") + # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v + # which are not required for using pretrained model + if any( + n + in [ + "adam_v", + "adam_m", + "AdamWeightDecayOptimizer", + "AdamWeightDecayOptimizer_1", + "global_step", + ] + for n in name + ): + logger.info(f"Skipping {'/'.join(name)}") + tf_weights.pop(txt_name, None) + continue + if "_slot_" in name[-1]: + logger.info(f"Skipping {'/'.join(name)}") + tf_weights.pop(txt_name, None) + continue + pointer = model + array = tf_weights[txt_name] + + for m_name in name: + if re.fullmatch(r"[A-Za-z]+_\d+", m_name): + scope_names = re.split(r"_(\d+)", m_name) + else: + scope_names = [m_name] + if scope_names[0] in ["kernel", "scale", "embedding"]: + pointer = getattr(pointer, "weight") + elif scope_names[0] == "self_attention": + pointer = getattr(pointer, "layer") + pointer = pointer[0] + elif scope_names[0] == "enc_dec_attention": + pointer = getattr(pointer, "layer") + pointer = pointer[1] + elif scope_names[0] == "dense_relu_dense": + pointer = getattr(pointer, "layer") + pointer = pointer[2] + elif scope_names[0] == "rms_norm": + if hasattr(pointer, "layer_norm"): + pointer = getattr(pointer, "layer_norm") + elif hasattr(pointer, "final_layer_norm"): + pointer = getattr(pointer, "final_layer_norm") + elif scope_names[0] == "scale": + pointer = getattr(pointer, "weight") + elif scope_names[0] == "output_bias" or scope_names[0] == "beta": + pointer = getattr(pointer, "bias") + elif scope_names[0] == "squad": + pointer = getattr(pointer, "classifier") + elif scope_names[0] == "decoder" and name[1] == "logits": + continue + elif scope_names[0] == "logits": + pointer = getattr(pointer, "lm_head") + elif ( + scope_names[0] == "wi" + and len(scope_names) > 1 + and scope_names[1].isdigit() + ): + pointer = getattr(pointer, f"wi_{scope_names[1]}") + continue + else: + try: + pointer = getattr(pointer, scope_names[0]) + except AttributeError: + logger.info(f"Skipping {'/'.join(name)}") + continue + if len(scope_names) >= 2: + num = int(scope_names[1]) + pointer = pointer[num] + if scope_names[0] not in ["kernel", "scale", "embedding"]: + pointer = getattr(pointer, "weight") + if scope_names[0] != "embedding": + logger.info(f"Transposing numpy weight of shape {array.shape} for {name}") + array = np.transpose(array) + try: + assert ( + pointer.shape == array.shape + ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" + except AssertionError as e: + e.args += (pointer.shape, array.shape) + raise + logger.info(f"Initialize PyTorch weight {name}") + pointer.data = torch.from_numpy(array.astype(np.float32)) + tf_weights.pop(txt_name, None) + + logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}.") + return model + + +#################################################### +# PyTorch Models are constructed by sub-classing +# - torch.nn.Module for the layers and +# - PreTrainedModel for the models (it-self a sub-class of nn.Module) +#################################################### 
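Before moving on to the model classes, here is a hedged sketch of how the TF-to-PyTorch converter above is typically invoked; the checkpoint path is a placeholder, and `T5ForConditionalGeneration` refers to the class defined later in this file:

```python
from transformers import T5Config

# Illustrative conversion flow (placeholder checkpoint path): build a randomly
# initialised model from a config, then overwrite its weights from a TF 1.x checkpoint.
config = T5Config.from_pretrained("t5-small")
model = T5ForConditionalGeneration(config)
model = load_tf_weights_in_t5(model, config, "/path/to/tf_checkpoint")
model.save_pretrained("./t5-small-pt")  # persist the converted PyTorch weights
```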
+PARALLELIZE_DOCSTRING = r"""
+    This is an experimental feature and is subject to change at a moment's notice.
+
+    Uses a device map to distribute attention modules of the model across several devices. If no device map is given,
+    it will evenly distribute blocks across all devices.
+
+    Args:
+        device_map (`Dict[int, list]`, optional, defaults to None):
+            A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always
+            automatically mapped to the first device (for esoteric reasons). That means that the first device should
+            have fewer attention modules mapped to it than other devices. For reference, the t5 models have the
+            following number of attention modules:
+
+                - t5-small: 6
+                - t5-base: 12
+                - t5-large: 24
+                - t5-3b: 24
+                - t5-11b: 24
+
+    Example:
+
+    ```python
+    # Here is an example of a device map on a machine with 4 GPUs using t5-3b, which has a total of 24 attention modules:
+    model = T5ForConditionalGeneration.from_pretrained("t5-3b")
+    device_map = {
+        0: [0, 1, 2],
+        1: [3, 4, 5, 6, 7, 8, 9],
+        2: [10, 11, 12, 13, 14, 15, 16],
+        3: [17, 18, 19, 20, 21, 22, 23],
+    }
+    model.parallelize(device_map)
+    ```
+"""
+DEPARALLELIZE_DOCSTRING = r"""
+    Moves the model to cpu from a model parallel state.
+
+    Example:
+
+    ```python
+    # On a 4 GPU machine with t5-3b:
+    model = T5ForConditionalGeneration.from_pretrained("t5-3b")
+    device_map = {
+        0: [0, 1, 2],
+        1: [3, 4, 5, 6, 7, 8, 9],
+        2: [10, 11, 12, 13, 14, 15, 16],
+        3: [17, 18, 19, 20, 21, 22, 23],
+    }
+    model.parallelize(device_map)  # Splits the model across several devices
+    model.deparallelize()  # Puts the model back on cpu and cleans memory by calling torch.cuda.empty_cache()
+    ```
+"""
+
+
+class T5LayerNorm(nn.Module):
+    def __init__(self, hidden_size, eps=1e-6):
+        """
+        Construct a layernorm module in the T5 style. No bias and no subtraction of mean.
+        """
+        super().__init__()
+        self.weight = nn.Parameter(torch.ones(hidden_size))
+        self.variance_epsilon = eps
+
+    def forward(self, hidden_states):
+
+        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
+        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
+        # w/o mean and there is no bias.
Additionally we want to make sure that the accumulation for + # half-precision inputs is done in fp32 + + variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) + + # convert into half-precision if necessary + if self.weight.dtype in [torch.float16, torch.bfloat16]: + hidden_states = hidden_states.to(self.weight.dtype) + + return self.weight * hidden_states + + +try: + from apex.normalization import FusedRMSNorm + + T5LayerNorm = FusedRMSNorm # noqa + + logger.info( + "Discovered apex.normalization.FusedRMSNorm - will use it instead of T5LayerNorm" + ) +except ImportError: + # using the normal T5LayerNorm + pass +except Exception: + logger.warning("discovered apex but it failed to load, falling back to T5LayerNorm") + pass + +ALL_LAYERNORM_LAYERS.append(T5LayerNorm) + + +class T5DenseActDense(nn.Module): + def __init__(self, config: T5Config): + super().__init__() + self.wi = nn.Linear(config.d_model, config.d_ff, bias=False) + self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) + self.dropout = nn.Dropout(config.dropout_rate) + self.act = ACT2FN[config.dense_act_fn] + + def forward(self, hidden_states): + hidden_states = self.wi(hidden_states) + hidden_states = self.act(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.wo(hidden_states) + return hidden_states + + +class T5DenseGatedActDense(nn.Module): + def __init__(self, config: T5Config): + super().__init__() + self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) + self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) + self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) + self.dropout = nn.Dropout(config.dropout_rate) + self.act = ACT2FN[config.dense_act_fn] + + def forward(self, hidden_states): + hidden_gelu = self.act(self.wi_0(hidden_states)) + hidden_linear = self.wi_1(hidden_states) + hidden_states = hidden_gelu * hidden_linear + hidden_states = self.dropout(hidden_states) + hidden_states = self.wo(hidden_states) + return hidden_states + + +class T5LayerFF(nn.Module): + def __init__(self, config: T5Config): + super().__init__() + if config.is_gated_act: + self.DenseReluDense = T5DenseGatedActDense(config) + else: + self.DenseReluDense = T5DenseActDense(config) + + self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) + self.dropout = nn.Dropout(config.dropout_rate) + + def forward(self, hidden_states): + forwarded_states = self.layer_norm(hidden_states) + forwarded_states = self.DenseReluDense(forwarded_states) + hidden_states = hidden_states + self.dropout(forwarded_states) + return hidden_states + + +class T5Attention(nn.Module): + def __init__(self, config: T5Config, has_relative_attention_bias=False): + super().__init__() + self.is_decoder = config.is_decoder + self.has_relative_attention_bias = has_relative_attention_bias + self.relative_attention_num_buckets = config.relative_attention_num_buckets + self.relative_attention_max_distance = config.relative_attention_max_distance + self.d_model = config.d_model + self.key_value_proj_dim = config.d_kv + self.n_heads = config.num_heads + self.dropout = config.dropout_rate + self.inner_dim = self.n_heads * self.key_value_proj_dim + + # Mesh TensorFlow initialization to avoid scaling before softmax + self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) + self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) + self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) + self.o 
= nn.Linear(self.inner_dim, self.d_model, bias=False) + + if self.has_relative_attention_bias: + self.relative_attention_bias = nn.Embedding( + self.relative_attention_num_buckets, self.n_heads + ) + self.pruned_heads = set() + self.gradient_checkpointing = False + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads + ) + # Prune linear layers + self.q = prune_linear_layer(self.q, index) + self.k = prune_linear_layer(self.k, index) + self.v = prune_linear_layer(self.v, index) + self.o = prune_linear_layer(self.o, index, dim=1) + # Update hyper params + self.n_heads = self.n_heads - len(heads) + self.inner_dim = self.key_value_proj_dim * self.n_heads + self.pruned_heads = self.pruned_heads.union(heads) + + @staticmethod + def _relative_position_bucket( + relative_position, bidirectional=True, num_buckets=32, max_distance=128 + ): + """ + Adapted from Mesh Tensorflow: + https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 + + Translate relative position to a bucket number for relative attention. The relative position is defined as + memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to + position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for + small absolute relative_position and larger buckets for larger absolute relative_positions. All relative + positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. + This should allow for more graceful generalization to longer sequences than the model has been trained on + + Args: + relative_position: an int32 Tensor + bidirectional: a boolean - whether the attention is bidirectional + num_buckets: an integer + max_distance: an integer + + Returns: + a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) + """ + relative_buckets = 0 + if bidirectional: + num_buckets //= 2 + relative_buckets += (relative_position > 0).to(torch.long) * num_buckets + relative_position = torch.abs(relative_position) + else: + relative_position = -torch.min( + relative_position, torch.zeros_like(relative_position) + ) + # now relative_position is in the range [0, inf) + + # half of the buckets are for exact increments in positions + max_exact = num_buckets // 2 + is_small = relative_position < max_exact + + # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance + relative_position_if_large = max_exact + ( + torch.log(relative_position.float() / max_exact) + / math.log(max_distance / max_exact) + * (num_buckets - max_exact) + ).to(torch.long) + relative_position_if_large = torch.min( + relative_position_if_large, + torch.full_like(relative_position_if_large, num_buckets - 1), + ) + + relative_buckets += torch.where( + is_small, relative_position, relative_position_if_large + ) + return relative_buckets + + def compute_bias(self, query_length, key_length, device=None): + """Compute binned relative position bias""" + if device is None: + device = self.relative_attention_bias.weight.device + context_position = torch.arange(query_length, dtype=torch.long, device=device)[ + :, None + ] + memory_position = torch.arange(key_length, dtype=torch.long, device=device)[ + None, : + ] + relative_position = ( + memory_position - 
context_position + ) # shape (query_length, key_length) + relative_position_bucket = self._relative_position_bucket( + relative_position, # shape (query_length, key_length) + bidirectional=(not self.is_decoder), + num_buckets=self.relative_attention_num_buckets, + max_distance=self.relative_attention_max_distance, + ) + values = self.relative_attention_bias( + relative_position_bucket + ) # shape (query_length, key_length, num_heads) + values = values.permute([2, 0, 1]).unsqueeze( + 0 + ) # shape (1, num_heads, query_length, key_length) + return values + + def forward( + self, + hidden_states, + mask=None, + key_value_states=None, + position_bias=None, + past_key_value=None, + layer_head_mask=None, + query_length=None, + use_cache=False, + output_attentions=False, + ): + """ + Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). + """ + # Input is (batch_size, seq_length, dim) + # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length) + # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head) + batch_size, seq_length = hidden_states.shape[:2] + + real_seq_length = seq_length + + if past_key_value is not None: + assert ( + len(past_key_value) == 2 + ), f"past_key_value should have 2 past states: keys and values. Got { len(past_key_value)} past states" + real_seq_length += ( + past_key_value[0].shape[2] if query_length is None else query_length + ) + + key_length = ( + real_seq_length if key_value_states is None else key_value_states.shape[1] + ) + + def shape(states): + """projection""" + return states.view( + batch_size, -1, self.n_heads, self.key_value_proj_dim + ).transpose(1, 2) + + def unshape(states): + """reshape""" + return ( + states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim) + ) + + def project(hidden_states, proj_layer, key_value_states, past_key_value): + """projects hidden states correctly to key/query states""" + if key_value_states is None: + # self-attn + # (batch_size, n_heads, seq_length, dim_per_head) + hidden_states = shape(proj_layer(hidden_states)) + elif past_key_value is None: + # cross-attn + # (batch_size, n_heads, seq_length, dim_per_head) + hidden_states = shape(proj_layer(key_value_states)) + + if past_key_value is not None: + if key_value_states is None: + # self-attn + # (batch_size, n_heads, key_length, dim_per_head) + hidden_states = torch.cat([past_key_value, hidden_states], dim=2) + else: + # cross-attn + hidden_states = past_key_value + return hidden_states + + # get query states + query_states = shape( + self.q(hidden_states) + ) # (batch_size, n_heads, seq_length, dim_per_head) + + # get key/value states + key_states = project( + hidden_states, + self.k, + key_value_states, + past_key_value[0] if past_key_value is not None else None, + ) + value_states = project( + hidden_states, + self.v, + key_value_states, + past_key_value[1] if past_key_value is not None else None, + ) + + # compute scores + scores = torch.matmul( + query_states, key_states.transpose(3, 2) + ) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9 + + if position_bias is None: + if not self.has_relative_attention_bias: + position_bias = torch.zeros( + (1, self.n_heads, real_seq_length, key_length), + device=scores.device, + dtype=scores.dtype, + ) + if self.gradient_checkpointing and self.training: + position_bias.requires_grad = True + else: + position_bias = self.compute_bias( + real_seq_length, key_length, 
device=scores.device + ) + + # if key and values are already calculated + # we want only the last query position bias + if past_key_value is not None: + position_bias = position_bias[:, :, -hidden_states.size(1) :, :] + + if mask is not None: + position_bias = ( + position_bias + mask + ) # (batch_size, n_heads, seq_length, key_length) + + if self.pruned_heads: + mask = torch.ones(position_bias.shape[1]) + mask[list(self.pruned_heads)] = 0 + position_bias_masked = position_bias[:, mask.bool()] + else: + position_bias_masked = position_bias + + scores += position_bias_masked + attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as( + scores + ) # (batch_size, n_heads, seq_length, key_length) + attn_weights = nn.functional.dropout( + attn_weights, p=self.dropout, training=self.training + ) # (batch_size, n_heads, seq_length, key_length) + + # Mask heads if we want to + if layer_head_mask is not None: + attn_weights = attn_weights * layer_head_mask + + attn_output = unshape( + torch.matmul(attn_weights, value_states) + ) # (batch_size, seq_length, dim) + attn_output = self.o(attn_output) + + present_key_value_state = ( + (key_states, value_states) if (self.is_decoder and use_cache) else None + ) + outputs = (attn_output,) + (present_key_value_state,) + (position_bias,) + + if output_attentions: + outputs = outputs + (attn_weights,) + return outputs + + +class T5LayerSelfAttention(nn.Module): + def __init__(self, config, has_relative_attention_bias=False): + super().__init__() + self.SelfAttention = T5Attention( + config, has_relative_attention_bias=has_relative_attention_bias + ) + self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) + self.dropout = nn.Dropout(config.dropout_rate) + + def forward( + self, + hidden_states, + attention_mask=None, + position_bias=None, + layer_head_mask=None, + past_key_value=None, + use_cache=False, + output_attentions=False, + ): + normed_hidden_states = self.layer_norm(hidden_states) + attention_output = self.SelfAttention( + normed_hidden_states, + mask=attention_mask, + position_bias=position_bias, + layer_head_mask=layer_head_mask, + past_key_value=past_key_value, + use_cache=use_cache, + output_attentions=output_attentions, + ) + + hidden_states = hidden_states + self.dropout(attention_output[0]) + + if torch.isinf(hidden_states).any(): + clamp_value = torch.finfo(hidden_states.dtype).max - 1000 + hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) + + outputs = (hidden_states,) + attention_output[ + 1: + ] # add attentions if we output them + return outputs + + +class T5LayerCrossAttention(nn.Module): + def __init__(self, config): + super().__init__() + self.EncDecAttention = T5Attention(config, has_relative_attention_bias=False) + self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) + self.dropout = nn.Dropout(config.dropout_rate) + + def forward( + self, + hidden_states, + key_value_states, + attention_mask=None, + position_bias=None, + layer_head_mask=None, + past_key_value=None, + use_cache=False, + query_length=None, + output_attentions=False, + ): + normed_hidden_states = self.layer_norm(hidden_states) + attention_output = self.EncDecAttention( + normed_hidden_states, + mask=attention_mask, + key_value_states=key_value_states, + position_bias=position_bias, + layer_head_mask=layer_head_mask, + past_key_value=past_key_value, + use_cache=use_cache, + query_length=query_length, + output_attentions=output_attentions, + ) + layer_output = hidden_states + 
self.dropout(attention_output[0]) + + if torch.isinf(layer_output).any(): + clamp_value = torch.finfo(layer_output.dtype).max - 1000 + layer_output = torch.clamp(layer_output, min=-clamp_value, max=clamp_value) + + outputs = (layer_output,) + attention_output[ + 1: + ] # add attentions if we output them + return outputs + + +class T5Block(nn.Module): + def __init__(self, config, has_relative_attention_bias=False): + super().__init__() + self.is_decoder = config.is_decoder + self.layer = nn.ModuleList() + self.layer.append( + T5LayerSelfAttention( + config, has_relative_attention_bias=has_relative_attention_bias + ) + ) + if self.is_decoder: + self.layer.append(T5LayerCrossAttention(config)) + + self.layer.append(T5LayerFF(config)) + + def forward( + self, + hidden_states, + attention_mask=None, + position_bias=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + encoder_decoder_position_bias=None, + layer_head_mask=None, + cross_attn_layer_head_mask=None, + past_key_value=None, + use_cache=False, + output_attentions=False, + return_dict=True, + ): + + if past_key_value is not None: + if not self.is_decoder: + logger.warning( + "`past_key_values` is passed to the encoder. Please make sure this is intended." + ) + expected_num_past_key_values = 2 if encoder_hidden_states is None else 4 + + if len(past_key_value) != expected_num_past_key_values: + raise ValueError( + f"There should be {expected_num_past_key_values} past states. " + f"{'2 (past / key) for cross attention. ' if expected_num_past_key_values == 4 else ''}" + f"Got {len(past_key_value)} past key / value states" + ) + + self_attn_past_key_value = past_key_value[:2] + cross_attn_past_key_value = past_key_value[2:] + else: + self_attn_past_key_value, cross_attn_past_key_value = None, None + + self_attention_outputs = self.layer[0]( + hidden_states, + attention_mask=attention_mask, + position_bias=position_bias, + layer_head_mask=layer_head_mask, + past_key_value=self_attn_past_key_value, + use_cache=use_cache, + output_attentions=output_attentions, + ) + hidden_states, present_key_value_state = self_attention_outputs[:2] + attention_outputs = self_attention_outputs[ + 2: + ] # Keep self-attention outputs and relative position weights + + # clamp inf values to enable fp16 training + if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): + clamp_value = torch.finfo(hidden_states.dtype).max - 1000 + hidden_states = torch.clamp( + hidden_states, min=-clamp_value, max=clamp_value + ) + + do_cross_attention = self.is_decoder and encoder_hidden_states is not None + if do_cross_attention: + # the actual query length is unknown for cross attention + # if using past key value states. 
Need to inject it here + if present_key_value_state is not None: + query_length = present_key_value_state[0].shape[2] + else: + query_length = None + + cross_attention_outputs = self.layer[1]( + hidden_states, + key_value_states=encoder_hidden_states, + attention_mask=encoder_attention_mask, + position_bias=encoder_decoder_position_bias, + layer_head_mask=cross_attn_layer_head_mask, + past_key_value=cross_attn_past_key_value, + query_length=query_length, + use_cache=use_cache, + output_attentions=output_attentions, + ) + hidden_states = cross_attention_outputs[0] + + # clamp inf values to enable fp16 training + if ( + hidden_states.dtype == torch.float16 + and torch.isinf(hidden_states).any() + ): + clamp_value = torch.finfo(hidden_states.dtype).max - 1000 + hidden_states = torch.clamp( + hidden_states, min=-clamp_value, max=clamp_value + ) + + # Combine self attn and cross attn key value states + if present_key_value_state is not None: + present_key_value_state = ( + present_key_value_state + cross_attention_outputs[1] + ) + + # Keep cross-attention outputs and relative position weights + attention_outputs = attention_outputs + cross_attention_outputs[2:] + + # Apply Feed Forward layer + hidden_states = self.layer[-1](hidden_states) + + # clamp inf values to enable fp16 training + if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): + clamp_value = torch.finfo(hidden_states.dtype).max - 1000 + hidden_states = torch.clamp( + hidden_states, min=-clamp_value, max=clamp_value + ) + + outputs = (hidden_states,) + + if use_cache: + outputs = outputs + (present_key_value_state,) + attention_outputs + else: + outputs = outputs + attention_outputs + + return outputs # hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) + + +class T5PreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. 
+ """ + + config_class = T5Config + load_tf_weights = load_tf_weights_in_t5 + base_model_prefix = "transformer" + is_parallelizable = True + supports_gradient_checkpointing = True + _no_split_modules = ["T5Block"] + + @property + def dummy_inputs(self): + input_ids = torch.tensor(DUMMY_INPUTS) + input_mask = torch.tensor(DUMMY_MASK) + dummy_inputs = { + "decoder_input_ids": input_ids, + "input_ids": input_ids, + "decoder_attention_mask": input_mask, + } + return dummy_inputs + + def _init_weights(self, module): + """Initialize the weights""" + factor = ( + self.config.initializer_factor + ) # Used for testing weights initialization + if isinstance(module, T5LayerNorm): + module.weight.data.fill_(factor * 1.0) + elif isinstance(module, (T5Model, T5ForConditionalGeneration, T5EncoderModel)): + # Mesh TensorFlow embeddings initialization + # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 + module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) + if hasattr(module, "lm_head") and not self.config.tie_word_embeddings: + module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0) + elif isinstance(module, T5DenseActDense): + # Mesh TensorFlow FF initialization + # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 + # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 + module.wi.weight.data.normal_( + mean=0.0, std=factor * ((self.config.d_model) ** -0.5) + ) + if hasattr(module.wi, "bias") and module.wi.bias is not None: + module.wi.bias.data.zero_() + module.wo.weight.data.normal_( + mean=0.0, std=factor * ((self.config.d_ff) ** -0.5) + ) + if hasattr(module.wo, "bias") and module.wo.bias is not None: + module.wo.bias.data.zero_() + elif isinstance(module, T5DenseGatedActDense): + module.wi_0.weight.data.normal_( + mean=0.0, std=factor * ((self.config.d_model) ** -0.5) + ) + if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None: + module.wi_0.bias.data.zero_() + module.wi_1.weight.data.normal_( + mean=0.0, std=factor * ((self.config.d_model) ** -0.5) + ) + if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None: + module.wi_1.bias.data.zero_() + module.wo.weight.data.normal_( + mean=0.0, std=factor * ((self.config.d_ff) ** -0.5) + ) + if hasattr(module.wo, "bias") and module.wo.bias is not None: + module.wo.bias.data.zero_() + elif isinstance(module, T5Attention): + # Mesh TensorFlow attention initialization to avoid scaling before softmax + # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 + d_model = self.config.d_model + key_value_proj_dim = self.config.d_kv + n_heads = self.config.num_heads + module.q.weight.data.normal_( + mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5) + ) + module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) + module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) + module.o.weight.data.normal_( + mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5) + ) + if module.has_relative_attention_bias: + module.relative_attention_bias.weight.data.normal_( + mean=0.0, std=factor * ((d_model) ** -0.5) + ) + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, (T5Attention, T5Stack)): + module.gradient_checkpointing = value + + def _shift_right(self, input_ids): + decoder_start_token_id = 
self.config.decoder_start_token_id + pad_token_id = self.config.pad_token_id + + assert decoder_start_token_id is not None, ( + "self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id." + " See T5 docs for more information" + ) + + # shift inputs to the right + if is_torch_fx_proxy(input_ids): + # Item assignment is not supported natively for proxies. + shifted_input_ids = torch.full( + input_ids.shape[:-1] + (1,), decoder_start_token_id + ) + shifted_input_ids = torch.cat( + [shifted_input_ids, input_ids[..., :-1]], dim=-1 + ) + else: + shifted_input_ids = input_ids.new_zeros(input_ids.shape) + shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() + shifted_input_ids[..., 0] = decoder_start_token_id + + assert ( + pad_token_id is not None + ), "self.model.config.pad_token_id has to be defined." + # replace possible -100 values in labels by `pad_token_id` + shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) + + return shifted_input_ids + + +class T5Stack(T5PreTrainedModel): + def __init__(self, config, embed_tokens=None): + super().__init__(config) + + self.embed_tokens = embed_tokens + self.is_decoder = config.is_decoder + + self.block = nn.ModuleList( + [ + T5Block(config, has_relative_attention_bias=bool(i == 0)) + for i in range(config.num_layers) + ] + ) + self.final_layer_norm = T5LayerNorm( + config.d_model, eps=config.layer_norm_epsilon + ) + self.dropout = nn.Dropout(config.dropout_rate) + + # Initialize weights and apply final processing + self.post_init() + # Model parallel + self.model_parallel = False + self.device_map = None + self.gradient_checkpointing = False + + @add_start_docstrings(PARALLELIZE_DOCSTRING) + def parallelize(self, device_map=None): + # Check validity of device_map + self.device_map = ( + get_device_map(len(self.block), range(torch.cuda.device_count())) + if device_map is None + else device_map + ) + assert_device_map(self.device_map, len(self.block)) + self.model_parallel = True + self.first_device = ( + "cpu" + if "cpu" in self.device_map.keys() + else "cuda:" + str(min(self.device_map.keys())) + ) + self.last_device = "cuda:" + str(max(self.device_map.keys())) + # Load onto devices + for k, v in self.device_map.items(): + for layer in v: + cuda_device = "cuda:" + str(k) + self.block[layer] = self.block[layer].to(cuda_device) + + # Set embed_tokens to first layer + self.embed_tokens = self.embed_tokens.to(self.first_device) + # Set final layer norm to last device + self.final_layer_norm = self.final_layer_norm.to(self.last_device) + + @add_start_docstrings(PARALLELIZE_DOCSTRING) + def deparallelize(self): + self.model_parallel = False + self.device_map = None + self.first_device = "cpu" + self.last_device = "cpu" + for i in range(len(self.block)): + self.block[i] = self.block[i].to("cpu") + self.embed_tokens = self.embed_tokens.to("cpu") + self.final_layer_norm = self.final_layer_norm.to("cpu") + torch.cuda.empty_cache() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, new_embeddings): + self.embed_tokens = new_embeddings + + def forward( + self, + input_ids=None, + attention_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + inputs_embeds=None, + head_mask=None, + cross_attn_head_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + # Model parallel + if self.model_parallel: + torch.cuda.set_device(self.first_device) + 
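# Note: under model parallelism the token-embedding table lives on the stack's + # first device, so the lookup below has to run there before the hidden states + # are routed through the blocks spread across the remaining devices. +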
self.embed_tokens = self.embed_tokens.to(self.first_device) + use_cache = use_cache if use_cache is not None else self.config.use_cache + output_attentions = ( + output_attentions + if output_attentions is not None + else self.config.output_attentions + ) + output_hidden_states = ( + output_hidden_states + if output_hidden_states is not None + else self.config.output_hidden_states + ) + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + + if input_ids is not None and inputs_embeds is not None: + err_msg_prefix = "decoder_" if self.is_decoder else "" + raise ValueError( + f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time" + ) + elif input_ids is not None: + input_shape = input_ids.size() + input_ids = input_ids.view(-1, input_shape[-1]) + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + err_msg_prefix = "decoder_" if self.is_decoder else "" + raise ValueError( + f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds" + ) + + if inputs_embeds is None: + assert ( + self.embed_tokens is not None + ), "You have to initialize the model with valid token embeddings" + inputs_embeds = self.embed_tokens(input_ids) + + batch_size, seq_length = input_shape + + # required mask seq length can be calculated via length of past + mask_seq_length = ( + past_key_values[0][0].shape[2] + seq_length + if past_key_values is not None + else seq_length + ) + + if use_cache is True: + assert ( + self.is_decoder + ), f"`use_cache` can only be set to `True` if {self} is used as a decoder" + + if attention_mask is None: + attention_mask = torch.ones( + batch_size, mask_seq_length, device=inputs_embeds.device + ) + if ( + self.is_decoder + and encoder_attention_mask is None + and encoder_hidden_states is not None + ): + encoder_seq_length = encoder_hidden_states.shape[1] + encoder_attention_mask = torch.ones( + batch_size, + encoder_seq_length, + device=inputs_embeds.device, + dtype=torch.long, + ) + + # initialize past_key_values with `None` if past does not exist + if past_key_values is None: + past_key_values = [None] * len(self.block) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
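+ # A rough sketch of what `get_extended_attention_mask` (inherited from + # `PreTrainedModel`) returns: the [batch_size, seq_length] padding mask is + # broadcast to [batch_size, 1, 1, seq_length] (decoder stacks also fold in the + # causal triangle, giving [batch_size, 1, seq_length, seq_length]) and turned + # into an additive mask - 0.0 where attending is allowed, a large negative + # value where it is not - so it can simply be added to the raw attention scores.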
+ extended_attention_mask = self.get_extended_attention_mask( + attention_mask, input_shape + ) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if self.is_decoder and encoder_hidden_states is not None: + ( + encoder_batch_size, + encoder_sequence_length, + _, + ) = encoder_hidden_states.size() + encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) + if encoder_attention_mask is None: + encoder_attention_mask = torch.ones( + encoder_hidden_shape, device=inputs_embeds.device + ) + encoder_extended_attention_mask = self.invert_attention_mask( + encoder_attention_mask + ) + else: + encoder_extended_attention_mask = None + + # Prepare head mask if needed + head_mask = self.get_head_mask(head_mask, self.config.num_layers) + cross_attn_head_mask = self.get_head_mask( + cross_attn_head_mask, self.config.num_layers + ) + present_key_value_states = () if use_cache else None + all_hidden_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + all_cross_attentions = () if (output_attentions and self.is_decoder) else None + position_bias = None + encoder_decoder_position_bias = None + + hidden_states = self.dropout(inputs_embeds) + + for i, (layer_module, past_key_value) in enumerate( + zip(self.block, past_key_values) + ): + layer_head_mask = head_mask[i] + cross_attn_layer_head_mask = cross_attn_head_mask[i] + # Model parallel + if self.model_parallel: + torch.cuda.set_device(hidden_states.device) + # Ensure that attention_mask is always on the same device as hidden_states + if attention_mask is not None: + attention_mask = attention_mask.to(hidden_states.device) + if position_bias is not None: + position_bias = position_bias.to(hidden_states.device) + if encoder_hidden_states is not None: + encoder_hidden_states = encoder_hidden_states.to( + hidden_states.device + ) + if encoder_extended_attention_mask is not None: + encoder_extended_attention_mask = ( + encoder_extended_attention_mask.to(hidden_states.device) + ) + if encoder_decoder_position_bias is not None: + encoder_decoder_position_bias = encoder_decoder_position_bias.to( + hidden_states.device + ) + if layer_head_mask is not None: + layer_head_mask = layer_head_mask.to(hidden_states.device) + if cross_attn_layer_head_mask is not None: + cross_attn_layer_head_mask = cross_attn_layer_head_mask.to( + hidden_states.device + ) + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
+ ) + use_cache = False + + def create_custom_forward(module): + def custom_forward(*inputs): + return tuple(module(*inputs, use_cache, output_attentions)) + + return custom_forward + + layer_outputs = checkpoint( + create_custom_forward(layer_module), + hidden_states, + extended_attention_mask, + position_bias, + encoder_hidden_states, + encoder_extended_attention_mask, + encoder_decoder_position_bias, + layer_head_mask, + cross_attn_layer_head_mask, + None, # past_key_value is always None with gradient checkpointing + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask=extended_attention_mask, + position_bias=position_bias, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + encoder_decoder_position_bias=encoder_decoder_position_bias, + layer_head_mask=layer_head_mask, + cross_attn_layer_head_mask=cross_attn_layer_head_mask, + past_key_value=past_key_value, + use_cache=use_cache, + output_attentions=output_attentions, + ) + + # layer_outputs is a tuple with: + # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) + if use_cache is False: + layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:] + + hidden_states, present_key_value_state = layer_outputs[:2] + + # We share the position biases between the layers - the first layer store them + # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights), + # (cross-attention position bias), (cross-attention weights) + position_bias = layer_outputs[2] + if self.is_decoder and encoder_hidden_states is not None: + encoder_decoder_position_bias = layer_outputs[ + 4 if output_attentions else 3 + ] + # append next layer key value states + if use_cache: + present_key_value_states = present_key_value_states + ( + present_key_value_state, + ) + + if output_attentions: + all_attentions = all_attentions + (layer_outputs[3],) + if self.is_decoder: + all_cross_attentions = all_cross_attentions + (layer_outputs[5],) + + # Model Parallel: If it's the last layer for that device, put things on the next device + if self.model_parallel: + for k, v in self.device_map.items(): + if i == v[-1] and "cuda:" + str(k) != self.last_device: + hidden_states = hidden_states.to("cuda:" + str(k + 1)) + + hidden_states = self.final_layer_norm(hidden_states) + hidden_states = self.dropout(hidden_states) + + # Add last layer + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + present_key_value_states, + all_hidden_states, + all_attentions, + all_cross_attentions, + ] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=present_key_value_states, + hidden_states=all_hidden_states, + attentions=all_attentions, + cross_attentions=all_cross_attentions, + ) + + +T5_START_DOCSTRING = r""" + + The T5 model was proposed in [Exploring the Limits of Transfer Learning with a Unified Text-to-Text + Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan + Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It's an encoder decoder transformer pre-trained in a + text-to-text denoising generative setting. + + This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the + library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage + and behavior. + + Parameters: + config ([`T5Config`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +T5_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you + should be able to pad the inputs on both the right and the left. + + Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + + To know more on how to prepare `input_ids` for pretraining take a look at [T5 Training](./t5#training). + attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): + Indices of decoder input sequence tokens in the vocabulary. + + Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are decoder input IDs?](../glossary#decoder-input-ids) + + T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` + is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). + + To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5 + Training](./t5#training). + decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): + Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also + be used by default. + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0, + 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, + 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in + `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**.
+ + encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*): + Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*) + `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at + the output of the last layer of the encoder. Used in the cross-attention of the decoder. + past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded + representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be + input (see `past_key_values`). This is useful if you want more control over how to convert + `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. + + If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value + of `inputs_embeds`. + + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + +T5_ENCODER_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you + should be able to pad the inputs on both the right and the left. + + Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + To know more on how to prepare `input_ids` for pretraining take a look at [T5 Training](./t5#training). + attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**.
+ + [What are attention masks?](../glossary#attention-mask) + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + +# Warning message for FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask +__HEAD_MASK_WARNING_MSG = """ +The input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently, +`decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions. +If you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers, +num_heads)`. +""" + + +@add_start_docstrings( + "The bare T5 Model transformer outputting raw hidden-states without any specific head on top.", + T5_START_DOCSTRING, +) +class T5Model(T5PreTrainedModel): + _keys_to_ignore_on_load_missing = [ + r"encoder.embed_tokens.weight", + r"decoder.embed_tokens.weight", + ] + _keys_to_ignore_on_load_unexpected = [ + r"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight", + ] + + def __init__(self, config: T5Config): + super().__init__(config) + self.shared = nn.Embedding(config.vocab_size, config.d_model) + + encoder_config = copy.deepcopy(config) + encoder_config.is_decoder = False + encoder_config.use_cache = False + encoder_config.is_encoder_decoder = False + self.encoder = T5Stack(encoder_config, self.shared) + + decoder_config = copy.deepcopy(config) + decoder_config.is_decoder = True + decoder_config.is_encoder_decoder = False + decoder_config.num_layers = config.num_decoder_layers + self.decoder = T5Stack(decoder_config, self.shared) + + # Initialize weights and apply final processing + self.post_init() + + # Model parallel + self.model_parallel = False + self.device_map = None + + @add_start_docstrings(PARALLELIZE_DOCSTRING) + def parallelize(self, device_map=None): + self.device_map = ( + get_device_map(len(self.encoder.block), range(torch.cuda.device_count())) + if device_map is None + else device_map + ) + assert_device_map(self.device_map, len(self.encoder.block)) + self.encoder.parallelize(self.device_map) + self.decoder.parallelize(self.device_map) + self.model_parallel = True + + @add_start_docstrings(DEPARALLELIZE_DOCSTRING) + def deparallelize(self): + self.encoder.deparallelize() + self.decoder.deparallelize() + self.encoder = self.encoder.to("cpu") + self.decoder = self.decoder.to("cpu") + self.model_parallel = False + self.device_map = None + torch.cuda.empty_cache() + + def 
get_input_embeddings(self): + return self.shared + + def set_input_embeddings(self, new_embeddings): + self.shared = new_embeddings + self.encoder.set_input_embeddings(new_embeddings) + self.decoder.set_input_embeddings(new_embeddings) + + def get_encoder(self): + return self.encoder + + def get_decoder(self): + return self.decoder + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING) + @replace_return_docstrings( + output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + decoder_input_ids: Optional[torch.LongTensor] = None, + decoder_attention_mask: Optional[torch.BoolTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + decoder_head_mask: Optional[torch.FloatTensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + inputs_embeds: Optional[torch.Tensor] = None, + decoder_inputs_embeds: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]: + r""" + Returns: + + Example: + + ```python + >>> from transformers import T5Tokenizer, T5Model + + >>> tokenizer = T5Tokenizer.from_pretrained("t5-small") + >>> model = T5Model.from_pretrained("t5-small") + + >>> input_ids = tokenizer( + ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" + ... ).input_ids # Batch size 1 + >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 + + >>> # preprocess: Prepend decoder_input_ids with start token which is pad token for T5Model. + >>> # This is not needed for torch's T5ForConditionalGeneration as it does this internally using labels arg. 
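+ >>> # `_shift_right` prepends `decoder_start_token_id` (the pad token for T5) + >>> # and drops the last position, yielding teacher-forced decoder inputs.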
+ >>> decoder_input_ids = model._shift_right(decoder_input_ids) + + >>> # forward pass + >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) + >>> last_hidden_states = outputs.last_hidden_state + ```""" + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + + # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask + if head_mask is not None and decoder_head_mask is None: + if self.config.num_layers == self.config.num_decoder_layers: + warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) + decoder_head_mask = head_mask + + # Encode if needed (training, first prediction pass) + if encoder_outputs is None: + encoder_outputs = self.encoder( + input_ids=input_ids, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): + encoder_outputs = BaseModelOutput( + last_hidden_state=encoder_outputs[0], + hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, + attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, + ) + + hidden_states = encoder_outputs[0] + + # Set device for model parallelism + if self.model_parallel: + torch.cuda.set_device(self.decoder.first_device) + hidden_states = hidden_states.to(self.decoder.first_device) + if decoder_input_ids is not None: + decoder_input_ids = decoder_input_ids.to(self.decoder.first_device) + if attention_mask is not None: + attention_mask = attention_mask.to(self.decoder.first_device) + if decoder_attention_mask is not None: + decoder_attention_mask = decoder_attention_mask.to( + self.decoder.first_device + ) + + # Decode + decoder_outputs = self.decoder( + input_ids=decoder_input_ids, + attention_mask=decoder_attention_mask, + inputs_embeds=decoder_inputs_embeds, + past_key_values=past_key_values, + encoder_hidden_states=hidden_states, + encoder_attention_mask=attention_mask, + head_mask=decoder_head_mask, + cross_attn_head_mask=cross_attn_head_mask, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + if not return_dict: + return decoder_outputs + encoder_outputs + + return Seq2SeqModelOutput( + last_hidden_state=decoder_outputs.last_hidden_state, + past_key_values=decoder_outputs.past_key_values, + decoder_hidden_states=decoder_outputs.hidden_states, + decoder_attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + encoder_last_hidden_state=encoder_outputs.last_hidden_state, + encoder_hidden_states=encoder_outputs.hidden_states, + encoder_attentions=encoder_outputs.attentions, + ) + + +@add_start_docstrings( + """T5 Model with a `language modeling` head on top.""", T5_START_DOCSTRING +) +class T5ForConditionalGeneration(T5PreTrainedModel): + _keys_to_ignore_on_load_missing = [ + r"encoder.embed_tokens.weight", + r"decoder.embed_tokens.weight", + r"lm_head.weight", + ] + _keys_to_ignore_on_load_unexpected = [ + r"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight", + ] + + def __init__(self, config: T5Config): + super().__init__(config) + self.model_dim = config.d_model + + self.shared = nn.Embedding(config.vocab_size, config.d_model) + + encoder_config = copy.deepcopy(config) + 
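# The encoder and decoder share the `self.shared` token embeddings but are + # built from independent config copies: the encoder copy below disables + # decoder behavior and caching, while the decoder copy enables causal + # self-attention plus cross-attention and may use its own layer count. +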
encoder_config.is_decoder = False + encoder_config.use_cache = False + encoder_config.is_encoder_decoder = False + self.encoder = T5Stack(encoder_config, self.shared) + + decoder_config = copy.deepcopy(config) + decoder_config.is_decoder = True + decoder_config.is_encoder_decoder = False + decoder_config.num_layers = config.num_decoder_layers + self.decoder = T5Stack(decoder_config, self.shared) + + self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + # Model parallel + self.model_parallel = False + self.device_map = None + + @add_start_docstrings(PARALLELIZE_DOCSTRING) + def parallelize(self, device_map=None): + self.device_map = ( + get_device_map(len(self.encoder.block), range(torch.cuda.device_count())) + if device_map is None + else device_map + ) + assert_device_map(self.device_map, len(self.encoder.block)) + self.encoder.parallelize(self.device_map) + self.decoder.parallelize(self.device_map) + self.lm_head = self.lm_head.to(self.decoder.first_device) + self.model_parallel = True + + @add_start_docstrings(DEPARALLELIZE_DOCSTRING) + def deparallelize(self): + self.encoder.deparallelize() + self.decoder.deparallelize() + self.encoder = self.encoder.to("cpu") + self.decoder = self.decoder.to("cpu") + self.lm_head = self.lm_head.to("cpu") + self.model_parallel = False + self.device_map = None + torch.cuda.empty_cache() + + def get_input_embeddings(self): + return self.shared + + def set_input_embeddings(self, new_embeddings): + self.shared = new_embeddings + self.encoder.set_input_embeddings(new_embeddings) + self.decoder.set_input_embeddings(new_embeddings) + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def get_output_embeddings(self): + return self.lm_head + + def get_encoder(self): + return self.encoder + + def get_decoder(self): + return self.decoder + + @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING) + @replace_return_docstrings( + output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + decoder_input_ids: Optional[torch.LongTensor] = None, + decoder_attention_mask: Optional[torch.BoolTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + decoder_head_mask: Optional[torch.FloatTensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + decoder_inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + reduction: Optional[str] = "mean", + ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ..., + config.vocab_size - 1]`. 
All labels set to `-100` are ignored (masked), the loss is only computed for + labels in `[0, ..., config.vocab_size]` + + Returns: + + Examples: + + ```python + >>> from transformers import T5Tokenizer, T5ForConditionalGeneration + + >>> tokenizer = T5Tokenizer.from_pretrained("t5-small") + >>> model = T5ForConditionalGeneration.from_pretrained("t5-small") + + >>> # training + >>> input_ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park", return_tensors="pt").input_ids + >>> labels = tokenizer("<extra_id_0> cute dog <extra_id_1> the <extra_id_2>", return_tensors="pt").input_ids + >>> outputs = model(input_ids=input_ids, labels=labels) + >>> loss = outputs.loss + >>> logits = outputs.logits + + >>> # inference + >>> input_ids = tokenizer( + ... "summarize: studies have shown that owning a dog is good for you", return_tensors="pt" + ... ).input_ids # Batch size 1 + >>> outputs = model.generate(input_ids) + >>> print(tokenizer.decode(outputs[0], skip_special_tokens=True)) + >>> # studies have shown that owning a dog is good for you. + ```""" + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + + # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask + if head_mask is not None and decoder_head_mask is None: + if self.config.num_layers == self.config.num_decoder_layers: + warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) + decoder_head_mask = head_mask + + # Encode if needed (training, first prediction pass) + if encoder_outputs is None: + # Convert encoder inputs in embeddings if needed + encoder_outputs = self.encoder( + input_ids=input_ids, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): + encoder_outputs = BaseModelOutput( + last_hidden_state=encoder_outputs[0], + hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, + attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, + ) + + hidden_states = encoder_outputs[0] + + if self.model_parallel: + torch.cuda.set_device(self.decoder.first_device) + + if ( + labels is not None + and decoder_input_ids is None + and decoder_inputs_embeds is None + ): + # get decoder inputs from shifting lm labels to the right + decoder_input_ids = self._shift_right(labels) + + # Set device for model parallelism + if self.model_parallel: + torch.cuda.set_device(self.decoder.first_device) + hidden_states = hidden_states.to(self.decoder.first_device) + if decoder_input_ids is not None: + decoder_input_ids = decoder_input_ids.to(self.decoder.first_device) + if attention_mask is not None: + attention_mask = attention_mask.to(self.decoder.first_device) + if decoder_attention_mask is not None: + decoder_attention_mask = decoder_attention_mask.to( + self.decoder.first_device + ) + + # Decode + decoder_outputs = self.decoder( + input_ids=decoder_input_ids, + attention_mask=decoder_attention_mask, + inputs_embeds=decoder_inputs_embeds, + past_key_values=past_key_values, + encoder_hidden_states=hidden_states, + encoder_attention_mask=attention_mask, + head_mask=decoder_head_mask, + cross_attn_head_mask=cross_attn_head_mask, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = decoder_outputs[0] + + # Set
device for model parallelism + if self.model_parallel: + torch.cuda.set_device(self.encoder.first_device) + self.lm_head = self.lm_head.to(self.encoder.first_device) + sequence_output = sequence_output.to(self.lm_head.weight.device) + + if self.config.tie_word_embeddings: + # Rescale output before projecting on vocab + # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 + sequence_output = sequence_output * (self.model_dim**-0.5) + + lm_logits = self.lm_head(sequence_output) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss(ignore_index=-100, reduction=reduction) + loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) + if reduction == "none": + loss = loss.view(lm_logits.size(0), -1).sum(1) + + if not return_dict: + output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs + return ((loss,) + output) if loss is not None else output + + return Seq2SeqV2DialOutput( + loss=loss, + logits=lm_logits, + past_key_values=decoder_outputs.past_key_values, + decoder_hidden_states=decoder_outputs.hidden_states, + decoder_attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + encoder_last_hidden_state=encoder_outputs.last_hidden_state, + encoder_hidden_states=encoder_outputs.hidden_states, + encoder_attentions=encoder_outputs.attentions, + encoder_outputs=encoder_outputs + ) + + def prepare_inputs_for_generation( + self, + input_ids, + past=None, + attention_mask=None, + head_mask=None, + decoder_head_mask=None, + cross_attn_head_mask=None, + use_cache=None, + encoder_outputs=None, + **kwargs, + ): + + # cut decoder_input_ids if past is used + if past is not None: + input_ids = input_ids[:, -1:] + + return { + "decoder_input_ids": input_ids, + "past_key_values": past, + "encoder_outputs": encoder_outputs, + "attention_mask": attention_mask, + "head_mask": head_mask, + "decoder_head_mask": decoder_head_mask, + "cross_attn_head_mask": cross_attn_head_mask, + "use_cache": use_cache, + } + + def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): + return self._shift_right(labels) + + def _reorder_cache(self, past, beam_idx): + # if decoder past is not included in output + # speedy decoding is disabled and no need to reorder + if past is None: + logger.warning( + "You might want to consider setting `use_cache=True` to speed up decoding" + ) + return past + + reordered_decoder_past = () + for layer_past_states in past: + # get the correct batch idx from layer past batch dim + # batch dim of `past` is at 2nd position + reordered_layer_past_states = () + for layer_past_state in layer_past_states: + # need to set correct `past` for each of the four key / value states + reordered_layer_past_states = reordered_layer_past_states + ( + layer_past_state.index_select( + 0, beam_idx.to(layer_past_state.device) + ), + ) + + assert reordered_layer_past_states[0].shape == layer_past_states[0].shape + assert len(reordered_layer_past_states) == len(layer_past_states) + + reordered_decoder_past = reordered_decoder_past + ( + reordered_layer_past_states, + ) + return reordered_decoder_past + + +@add_start_docstrings( + "The bare T5 Model transformer outputting encoder's raw hidden-states without any specific head on top.", + T5_START_DOCSTRING, +) +class T5EncoderModel(T5PreTrainedModel): + authorized_missing_keys = [ + r"encoder.embed_tokens.weight", + ] + + def __init__(self, config: T5Config): + super().__init__(config) + self.shared = 
nn.Embedding(config.vocab_size, config.d_model) + + encoder_config = copy.deepcopy(config) + encoder_config.use_cache = False + encoder_config.is_encoder_decoder = False + self.encoder = T5Stack(encoder_config, self.shared) + + # Initialize weights and apply final processing + self.post_init() + + # Model parallel + self.model_parallel = False + self.device_map = None + + @add_start_docstrings(PARALLELIZE_DOCSTRING) + def parallelize(self, device_map=None): + self.device_map = ( + get_device_map(len(self.encoder.block), range(torch.cuda.device_count())) + if device_map is None + else device_map + ) + assert_device_map(self.device_map, len(self.encoder.block)) + self.encoder.parallelize(self.device_map) + self.model_parallel = True + + @add_start_docstrings(DEPARALLELIZE_DOCSTRING) + def deparallelize(self): + self.encoder.deparallelize() + self.encoder = self.encoder.to("cpu") + self.model_parallel = False + self.device_map = None + torch.cuda.empty_cache() + + def get_input_embeddings(self): + return self.shared + + def set_input_embeddings(self, new_embeddings): + self.shared = new_embeddings + self.encoder.set_input_embeddings(new_embeddings) + + def get_encoder(self): + return self.encoder + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads) + + @add_start_docstrings_to_model_forward(T5_ENCODER_INPUTS_DOCSTRING) + @replace_return_docstrings( + output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]: + r""" + Returns: + + Example: + + ```python + >>> from transformers import T5Tokenizer, T5EncoderModel + + >>> tokenizer = T5Tokenizer.from_pretrained("t5-small") + >>> model = T5EncoderModel.from_pretrained("t5-small") + >>> input_ids = tokenizer( + ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" + ... 
).input_ids # Batch size 1 + >>> outputs = model(input_ids=input_ids) + >>> last_hidden_states = outputs.last_hidden_state + ```""" + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + + encoder_outputs = self.encoder( + input_ids=input_ids, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + return encoder_outputs diff --git a/models/backbones/eva_vit.py b/models/backbones/eva_vit.py new file mode 100755 index 0000000..84e3fc3 --- /dev/null +++ b/models/backbones/eva_vit.py @@ -0,0 +1,455 @@ +# Based on EVA, BEIT, timm and DeiT code bases +# https://github.com/baaivision/EVA +# https://github.com/rwightman/pytorch-image-models/tree/master/timm +# https://github.com/microsoft/unilm/tree/master/beit +# https://github.com/facebookresearch/deit/ +# https://github.com/facebookresearch/dino +# --------------------------------------------------------' +import math +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint +from timm.models.layers import drop_path, to_2tuple, trunc_normal_ +from timm.models.registry import register_model + +from models.common.dist_utils import download_cached_file + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', + 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), + **kwargs + } + + +class DropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + """ + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) + + def extra_repr(self) -> str: + return 'p={}'.format(self.drop_prob) + + +class Mlp(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + # x = self.drop(x) + # commit this for the orignal BERT implement + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(nn.Module): + def __init__( + self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., + proj_drop=0., window_size=None, attn_head_dim=None): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + if attn_head_dim is not None: + head_dim = attn_head_dim + all_head_dim = head_dim * self.num_heads + self.scale = qk_scale or head_dim ** -0.5 + + self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False) + if qkv_bias: + self.q_bias = nn.Parameter(torch.zeros(all_head_dim)) + self.v_bias = nn.Parameter(torch.zeros(all_head_dim)) + else: + self.q_bias = None + self.v_bias = None + + if window_size: + self.window_size = window_size + self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 + self.relative_position_bias_table = nn.Parameter( + torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH + # cls to token & token 2 cls & cls to 
cls + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(window_size[0]) + coords_w = torch.arange(window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * window_size[1] - 1 + relative_position_index = \ + torch.zeros(size=(window_size[0] * window_size[1] + 1, ) * 2, dtype=relative_coords.dtype) + relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + relative_position_index[0, 0:] = self.num_relative_distance - 3 + relative_position_index[0:, 0] = self.num_relative_distance - 2 + relative_position_index[0, 0] = self.num_relative_distance - 1 + + self.register_buffer("relative_position_index", relative_position_index) + else: + self.window_size = None + self.relative_position_bias_table = None + self.relative_position_index = None + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(all_head_dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x, rel_pos_bias=None): + B, N, C = x.shape + qkv_bias = None + if self.q_bias is not None: + qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias)) + # qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) + qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + if self.relative_position_bias_table is not None: + relative_position_bias = \ + self.relative_position_bias_table[self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1] + 1, + self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if rel_pos_bias is not None: + attn = attn + rel_pos_bias + + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, -1) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm, + window_size=None, attn_head_dim=None): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, + attn_drop=attn_drop, proj_drop=drop, window_size=window_size, attn_head_dim=attn_head_dim) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + if init_values is not None and init_values > 0: + self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True) + self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True) + else: + self.gamma_1, self.gamma_2 = None, None + + def forward(self, x, rel_pos_bias=None): + if self.gamma_1 is None: + x = x + self.drop_path(self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + else: + x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias)) + x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) + return x + + +class PatchEmbed(nn.Module): + """ Image to Patch Embedding + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) + self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + + def forward(self, x, **kwargs): + B, C, H, W = x.shape + # FIXME look at relaxing size constraints + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." + x = self.proj(x).flatten(2).transpose(1, 2) + return x + + +class RelativePositionBias(nn.Module): + + def __init__(self, window_size, num_heads): + super().__init__() + self.window_size = window_size + self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 + self.relative_position_bias_table = nn.Parameter( + torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH + # cls to token & token 2 cls & cls to cls + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(window_size[0]) + coords_w = torch.arange(window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * window_size[1] - 1 + relative_position_index = \ + torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype) + relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + relative_position_index[0, 0:] = self.num_relative_distance - 3 + relative_position_index[0:, 0] = self.num_relative_distance - 2 + relative_position_index[0, 0] = self.num_relative_distance - 1 + + self.register_buffer("relative_position_index", relative_position_index) + + # trunc_normal_(self.relative_position_bias_table, std=.02) + + def forward(self): + relative_position_bias = \ + self.relative_position_bias_table[self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1] + 1, + self.window_size[0] * self.window_size[1] + 1, -1) # 
Wh*Ww,Wh*Ww,nH + return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + + +class VisionTransformer(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, + num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., + drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None, + use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, + use_mean_pooling=True, init_scale=0.001, use_checkpoint=False): + super().__init__() + self.image_size = img_size + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) + num_patches = self.patch_embed.num_patches + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + if use_abs_pos_emb: + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) + else: + self.pos_embed = None + self.pos_drop = nn.Dropout(p=drop_rate) + + if use_shared_rel_pos_bias: + self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads) + else: + self.rel_pos_bias = None + self.use_checkpoint = use_checkpoint + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.use_rel_pos_bias = use_rel_pos_bias + self.blocks = nn.ModuleList([ + Block( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None) + for i in range(depth)]) +# self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim) +# self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None +# self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + if self.pos_embed is not None: + trunc_normal_(self.pos_embed, std=.02) + trunc_normal_(self.cls_token, std=.02) + # trunc_normal_(self.mask_token, std=.02) +# if isinstance(self.head, nn.Linear): +# trunc_normal_(self.head.weight, std=.02) + self.apply(self._init_weights) + self.fix_init_weight() +# if isinstance(self.head, nn.Linear): +# self.head.weight.data.mul_(init_scale) +# self.head.bias.data.mul_(init_scale) + + def fix_init_weight(self): + def rescale(param, layer_id): + param.div_(math.sqrt(2.0 * layer_id)) + + for layer_id, layer in enumerate(self.blocks): + rescale(layer.attn.proj.weight.data, layer_id + 1) + rescale(layer.mlp.fc2.weight.data, layer_id + 1) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + batch_size, seq_len, _ = x.size() + + cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks + x = 
torch.cat((cls_tokens, x), dim=1) + if self.pos_embed is not None: + x = x + self.pos_embed + x = self.pos_drop(x) + + rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None + for blk in self.blocks: + if self.use_checkpoint: + x = checkpoint.checkpoint(blk, x, rel_pos_bias) + else: + x = blk(x, rel_pos_bias) + return x +# x = self.norm(x) + +# if self.fc_norm is not None: +# t = x[:, 1:, :] +# return self.fc_norm(t.mean(1)) +# else: +# return x[:, 0] + + def forward(self, x): + x = self.forward_features(x) +# x = self.head(x) + return x + + def get_intermediate_layers(self, x): + x = self.patch_embed(x) + batch_size, seq_len, _ = x.size() + + cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks + x = torch.cat((cls_tokens, x), dim=1) + if self.pos_embed is not None: + x = x + self.pos_embed + x = self.pos_drop(x) + + features = [] + rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None + for blk in self.blocks: + x = blk(x, rel_pos_bias) + features.append(x) + + return features + + def get_num_layer(self, var_name=""): + if var_name in ("cls_token", "mask_token", "pos_embed"): + return 0 + elif var_name.startswith("patch_embed"): + return 0 + elif var_name.startswith("rel_pos_bias"): + return len(self.blocks) - 1 + elif var_name.startswith("blocks"): + layer_id = int(var_name.split('.')[1]) + return layer_id + 1 + else: + return len(self.blocks) + +def interpolate_pos_embed(model, checkpoint_model): + if 'pos_embed' in checkpoint_model: + pos_embed_checkpoint = checkpoint_model['pos_embed'].float() + embedding_size = pos_embed_checkpoint.shape[-1] + num_patches = model.patch_embed.num_patches + num_extra_tokens = model.pos_embed.shape[-2] - num_patches + # height (== width) for the checkpoint position embedding + orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5) + # height (== width) for the new position embedding + new_size = int(num_patches ** 0.5) + # class_token and dist_token are kept unchanged + if orig_size != new_size: + print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size)) + extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] + # only the position tokens are interpolated + pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] + pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2) + pos_tokens = torch.nn.functional.interpolate( + pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False) + pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2) + new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) + checkpoint_model['pos_embed'] = new_pos_embed + + +def convert_weights_to_fp16(model: nn.Module): + """Convert applicable model parameters to fp16""" + + def _convert_weights_to_fp16(l): + if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)): + l.weight.data = l.weight.data.half() + if l.bias is not None: + l.bias.data = l.bias.data.half() + +# if isinstance(l, (nn.MultiheadAttention, Attention)): +# for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]: +# tensor = getattr(l, attr) +# if tensor is not None: +# tensor.data = tensor.data.half() + + model.apply(_convert_weights_to_fp16) + + +def create_eva_vit_g(img_size=224,drop_path_rate=0.4,use_checkpoint=False,precision="fp16"): + model = VisionTransformer( + img_size=img_size, + patch_size=14, + use_mean_pooling=False, + embed_dim=1408, + depth=39, + 
# depth = 37, + num_heads=1408//88, + mlp_ratio=4.3637, + qkv_bias=True, + drop_path_rate=drop_path_rate, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + use_checkpoint=use_checkpoint, + ) + url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/eva_vit_g.pth" + cached_file = download_cached_file( + url, check_hash=False, progress=True + ) + state_dict = torch.load(cached_file, map_location="cpu") + interpolate_pos_embed(model,state_dict) + + incompatible_keys = model.load_state_dict(state_dict, strict=False) +# print(incompatible_keys) + + if precision == "fp16": +# model.to("cuda") + convert_weights_to_fp16(model) + return model \ No newline at end of file diff --git a/models/backbones/mini_gpt4_llama_v2.py b/models/backbones/mini_gpt4_llama_v2.py new file mode 100755 index 0000000..175fbeb --- /dev/null +++ b/models/backbones/mini_gpt4_llama_v2.py @@ -0,0 +1,895 @@ +import logging +import random + +import torch +from torch.cuda.amp import autocast as autocast +import torch.nn as nn + +from minigpt4.common.registry import registry +from minigpt4.models.blip2 import Blip2Base, disabled_train +# from minigpt4.models.modeling_llama_v2 import LlamaForCausalLM as llm_model +# minigpt4.models.modeling_mistral import MistralForCausalLM as llm_model +from minigpt4.conversation.conversation import Conversation, SeparatorStyle, StoppingCriteriaList, StoppingCriteriaSub + +from transformers import LlamaTokenizer +from transformers import BitsAndBytesConfig + +from peft import ( + LoraConfig, + get_peft_model, + get_peft_model_state_dict, + prepare_model_for_int8_training, + set_peft_model_state_dict, +) +import time +import numpy as np + +from minigpt4.models import policies + + +@registry.register_model("mini_gpt4_llama_v2") +class MiniGPT4_llama_v2(Blip2Base): + """ + BLIP2 GPT-LLAMA model. 
+    """
+
+    PRETRAINED_MODEL_CONFIG_DICT = {
+        "pretrain_vicuna": "configs/models/minigpt4.yaml",
+    }
+
+    def __init__(
+        self,
+        vit_model="eva_clip_g",
+        img_size=224,
+        drop_path_rate=0,
+        use_grad_checkpoint=False,
+        vit_precision="fp16",
+        freeze_vit=True,
+        llama_model="",
+        prompt_path="",
+        prompt_template="",
+        max_txt_len=32,
+        low_resource=False,  # use 8-bit quantization and put the ViT on CPU
+        end_sym='\n',
+        lora_r=8,
+        lora_target_modules=["q_proj", "v_proj"],
+        lora_alpha=16,
+        lora_dropout=0.05,
+        ckpt_path="",
+        system_prompt=False,
+        chat_template=False,
+        token_pooling=True,
+        use_grad_checkpoint_llm=False,
+        max_context_len=3800,
+        remove_template=False,
+    ):
+        super().__init__()
+        if "Mistral" in llama_model:
+            from minigpt4.models.modeling_mistral import MistralForCausalLM as llm_model
+            print("Mistral model")
+            self.model_type = "Mistral"
+        else:
+            from minigpt4.models.modeling_llama_v2 import LlamaForCausalLM as llm_model
+            print("Llama model")
+            self.model_type = "Llama"
+        self.tokenizer = self.init_tokenizer()
+        self.low_resource = low_resource
+        self.token_pooling = token_pooling
+        self.remove_template = remove_template
+
+        print("token pooling", self.token_pooling)
+
+        self.use_grad_checkpoint_llm = use_grad_checkpoint_llm
+        self.max_context_len = max_context_len
+        self.chat_template = chat_template
+
+        if freeze_vit:
+            print("vit precision", vit_precision)
+            self.visual_encoder, self.ln_vision = self.init_vision_encoder(
+                vit_model, img_size, drop_path_rate, use_grad_checkpoint, vit_precision
+            )
+            for name, param in self.visual_encoder.named_parameters():
+                param.requires_grad = False
+            self.visual_encoder = self.visual_encoder.eval()
+            self.visual_encoder.train = disabled_train
+            for name, param in self.ln_vision.named_parameters():
+                param.requires_grad = False
+            self.ln_vision = self.ln_vision.eval()
+            self.ln_vision.train = disabled_train
+            logging.info("freeze vision encoder")
+            print("freeze the vision encoder")
+        else:
+            vit_precision = "fp32"
+            self.visual_encoder, self.ln_vision = self.init_vision_encoder(
+                vit_model, img_size, drop_path_rate, use_grad_checkpoint, vit_precision
+            )
+            print("unfreeze the vision encoder")
+
+        print('Loading VIT Done')
+
+        print('Loading LLAMA')
+
+        self.B_SYS, self.E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
+
+        self.llama_tokenizer = LlamaTokenizer.from_pretrained(llama_model, use_fast=False)
+        self.llama_tokenizer.pad_token = "$$"
+
+        self.system_prompt = system_prompt
+
+        print("self.low_resource", self.low_resource)
+        if self.low_resource:
+            self.llama_model = llm_model.from_pretrained(
+                llama_model,
+                torch_dtype=torch.float16,
+                load_in_8bit=True,
+                device_map={'': torch.cuda.current_device()},
+            )
+        else:
+            self.llama_model = llm_model.from_pretrained(
+                llama_model,
+                torch_dtype=torch.float16,
+            )
+
+        self.llama_model = prepare_model_for_int8_training(self.llama_model)
+
+        loraconfig = LoraConfig(
+            r=lora_r,
+            lora_alpha=lora_alpha,
+            target_modules=lora_target_modules,
+            lora_dropout=lora_dropout,
+            bias="none",
+            task_type="CAUSAL_LM"
+        )
+        self.llama_model = get_peft_model(self.llama_model, loraconfig)
+
+        # if ckpt_path:
+        #     print('load the llm under lora')
+        #     ckpt = torch.load(ckpt_path)
+        #     set_peft_model_state_dict(self.llama_model, ckpt)
+
+        self.llama_model.print_trainable_parameters()
+
+        if self.use_grad_checkpoint_llm:
+            self.llama_model.gradient_checkpointing_enable()
+
+        print('Loading LLAMA Done')
+
+        if self.token_pooling:
+            # pooled visual tokens: 4 neighbouring ViT features (1408 each) are concatenated
+            self.llama_proj = nn.Linear(
+                1408 * 4, self.llama_model.config.hidden_size
+            )
+        else:
+            self.llama_proj = nn.Linear(
+                1408, self.llama_model.config.hidden_size
+            )
+
+        self.max_txt_len = max_txt_len
+        self.end_sym = end_sym
+
+        if prompt_path:
+            with open(prompt_path, 'r') as f:
+                raw_prompts = f.read().splitlines()
+            filtered_prompts = [raw_prompt for raw_prompt in raw_prompts if "<ImageHere>" in raw_prompt]
+            self.prompt_list = [prompt_template.format(p) for p in filtered_prompts]
+            print('Load {} training prompts'.format(len(self.prompt_list)))
+            print('Prompt Example \n{}'.format(random.choice(self.prompt_list)))
+        else:
+            self.prompt_list = []
+
+    def encode_img(self, image):
+        device = image.device
+        if len(image.shape) > 4:
+            # video input: flatten the batch and time dimensions, e.g. (4, 50, 3, 224, 224) -> (200, 3, 224, 224)
+            image = image.reshape(-1, *image.shape[-3:])
+        with self.maybe_autocast():
+            # ViT + layer norm: (200, 3, 224, 224) -> (200, 257, 1408)
+            image_embeds = self.ln_vision(self.visual_encoder(image)).to(device)
+            image_embeds = image_embeds[:, 1:, :]  # drop the first (CLS) token: (200, 256, 1408)
+            bs, pn, hs = image_embeds.shape
+            if self.token_pooling:
+                # concatenate every 4 adjacent tokens into one: (200, 64, 5632)
+                image_embeds = image_embeds.view(bs, int(pn / 4), int(hs * 4))
+
+            inputs_llama = self.llama_proj(image_embeds)  # project to the LLM input size: (200, 64, 5632) -> (200, 64, 4096)
+        atts_llama = torch.ones(inputs_llama.size()[:-1], dtype=torch.long).to(image.device)
+        return inputs_llama, atts_llama
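+    # Illustrative example (not from the original source): for a prompt such as
+    #   "<ImageHere> Describe the first clip. <ImageHere> What changed afterwards?"
+    # split('<ImageHere>') yields 3 text segments, so get_context_emb below expects
+    # len(img_list) == 2 and interleaves them as [seg0, img0, seg1, img1, seg2]
+    # before concatenating along the sequence dimension.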
+    def get_context_emb(self, prompt, img_list):
+        img_device = img_list[0].device
+        prompt_segs = prompt.split('<ImageHere>')
+        assert len(prompt_segs) == len(img_list) + 1, "Unmatched numbers of image placeholders and images."
+        seg_tokens = [
+            self.llama_tokenizer(
+                seg, return_tensors="pt", add_special_tokens=i == 0).to(img_device).input_ids  # only add bos to the first seg
+            for i, seg in enumerate(prompt_segs)
+        ]
+
+        seg_embs = [self.embed_tokens(seg_t) for seg_t in seg_tokens]
+
+        mixed_embs = [emb for pair in zip(seg_embs[:-1], img_list) for emb in pair] + [seg_embs[-1]]
+
+        mixed_embs = torch.cat(mixed_embs, dim=1)
+        return mixed_embs
+
+    def prompt_wrap(self, img_embeds, atts_img, prompts, lengths=None):
+        if prompts is None or len(prompts) == 0:
+            # prompts are not provided; just return the original image embeddings
+            return img_embeds, atts_img
+        elif img_embeds is None:
+            # prompts are provided but there are no image embeddings:
+            # return the right-padded prompt embeddings
+            self.llama_tokenizer.padding_side = "right"
+            prompt_tokens = self.llama_tokenizer(
+                prompts,
+                return_tensors="pt",
+                padding="longest",
+                add_special_tokens=False
+            ).to(self.device)
+            prompt_embeds = self.embed_tokens(prompt_tokens.input_ids)
+            atts_prompt = prompt_tokens.attention_mask
+            return prompt_embeds, atts_prompt
+        else:
+            # return the multi-modal embeddings in right padding
+            emb_lists = []
+
+            for idx, (each_img_embed, each_prompt) in enumerate(zip(img_embeds, prompts)):
+                pn = each_img_embed.shape[-2]
+                if lengths is not None:
+                    each_img_embed = each_img_embed.reshape(-1, each_img_embed.shape[-1])
+                    each_img_embed = each_img_embed[:lengths[idx] * pn]
+
+                p_segs = each_prompt.split('<ImageHere>')
+                interleave_emb = []
+                # NOTE: use a separate loop variable so the outer `idx` (the batch index) is not shadowed
+                for seg_idx, seg in enumerate(p_segs[:-1]):
+                    p_tokens = self.llama_tokenizer(seg, return_tensors="pt", add_special_tokens=False).to(img_embeds.device)
+                    p_embed = self.embed_tokens(p_tokens.input_ids)
+                    interleave_emb.append(torch.cat([p_embed, each_img_embed[None][:, seg_idx * pn:(seg_idx + 1) * pn]], dim=1))
+
+                wrapped_emb = torch.cat(interleave_emb, dim=1)
+                p_tokens = self.llama_tokenizer(p_segs[-1], return_tensors="pt", add_special_tokens=False).to(img_embeds.device)
+                p_embed = self.embed_tokens(p_tokens.input_ids)
+                wrapped_emb = torch.cat([wrapped_emb, p_embed], dim=1)
+                emb_lists.append(wrapped_emb)
+
+            emb_lens = [emb.shape[1] for emb in emb_lists]
+            pad_emb = self.embed_tokens(torch.tensor(self.llama_tokenizer.pad_token_id, device=img_embeds.device))
+
+            max_length = self.max_context_len
+            wrapped_embs = pad_emb.expand(len(emb_lens), max_length, -1).clone()
+            wrapped_atts = torch.zeros([len(emb_lens), max_length], dtype=torch.int, device=img_embeds.device)
+
+            for i, emb in
enumerate(emb_lists): + length = emb_lens[i] if emb_lens[i] < self.max_context_len else self.max_context_len + wrapped_embs[i, :length] = emb[:, :length] + wrapped_atts[i, :length] = 1 + + return wrapped_embs, wrapped_atts + + def concat_emb_input_output(self, input_embs, input_atts, output_embs, output_atts): + """ + Concatenate the batched input embedding and batched output embedding together. + Both the input and the output embedding should be right padded. + """ + + input_lens = [] + cat_embs = [] + cat_atts = [] + + for i in range(input_embs.size(0)): + input_len = input_atts[i].sum() + input_lens.append(input_len) + + cat_embs.append( + torch.cat([ + input_embs[i][:input_len], + output_embs[i], + input_embs[i][input_len:] + ]) + ) + cat_atts.append( + torch.cat([ + input_atts[i][:input_len], + output_atts[i], + input_atts[i][input_len:] + ]) + ) + # print('===================================') + # print('check input emb: ', input_embs[i][this_input_ones-2:this_input_ones]) + # print('check pad emb: ', input_embs[i][this_input_ones:this_input_ones+2]) + # print('check out emb: ', output_embs[i][:2]) + # print('check out pad emb: ', output_embs[i][-2:]) + # print('+++++++++++++++++++++++++++++++++++') + # + # print('check attn before: ', input_atts[i][:this_input_ones]) + # print('check attn after: ', input_atts[i][this_input_ones:]) + # print('check attn gt before: ', output_atts[i][:3]) + # print('check attn gt after: ', output_atts[i][-3:]) + + cat_embs = torch.stack(cat_embs) + cat_atts = torch.stack(cat_atts) + return cat_embs, cat_atts, input_lens + + def get_conv_emb(self, conv_q, conv_a, conv_img): + """concatenate conversation and make sure the model is only trained to regress the answer""" + + regress_embs_list = [] + targets_list = [] + + batch_size = len(conv_q) + for batch_idx in range(batch_size): + questions, answers = conv_q[batch_idx], conv_a[batch_idx] + assigned_imgs = conv_img[batch_idx] + questions = [self.prompt_wrap( + img_embeds=img, + atts_img=None, + prompts=[q], + lengths=[img.shape[1]] if img is not None else None) for q, img in zip(questions, assigned_imgs)] + q_embs = [emb for emb, _ in questions] + + answers = [self.llama_tokenizer(a, return_tensors="pt", add_special_tokens=False).to(self.device) for a in answers] + cur_emb = [] + cur_target = [] + for i in range(len(questions)): + cur_emb.append(q_embs[i]) + cur_target.append(torch.ones_like(q_embs[i][..., 0], dtype=torch.int) * -100) + + cur_emb.append(self.embed_tokens(answers[i].input_ids)) + cur_target.append(answers[i].input_ids) + + cur_emb = torch.cat(cur_emb, dim=1) + cur_target = torch.cat(cur_target, dim=1) + + regress_embs_list.append(cur_emb) + targets_list.append(cur_target) + + max_len = min(max([target.shape[1] for target in targets_list]), self.max_txt_len) + + regress_embeds = torch.zeros([batch_size, max_len, cur_emb.shape[-1]], device=self.device) + regress_attn = torch.zeros([batch_size, max_len], dtype=torch.int, device=self.device) + targets = torch.ones([batch_size, max_len], dtype=torch.long, device=self.device) * -100 + + for batch_idx in range(batch_size): + cur_len = regress_embs_list[batch_idx].shape[1] + regress_embeds[batch_idx, :cur_len] = regress_embs_list[batch_idx][0, :max_len] + regress_attn[batch_idx, :cur_len] = 1 + targets[batch_idx, :cur_len] = targets_list[batch_idx][0, :max_len] + + return regress_embeds, regress_attn, targets + + def preparing_embedding(self, samples): + def remove_special_tokens(data): + + # if "instruction_input" in data: + data = 
[instruct.replace(" [caption]", "") for instruct in data]
+            data = [instruct.replace(" [vqa]", "") for instruct in data]
+            data = [instruct.replace(" [grounding]", "") for instruct in data]
+            data = [instruct.replace(" [identify]", "") for instruct in data]
+            data = [instruct.replace(" [refer]", "") for instruct in data]
+            return data
+
+        ### prepare input tokens
+        if 'image' in samples:
+            img_embeds, img_atts = self.encode_img(samples["image"])
+        else:
+            img_embeds = img_atts = None
+
+        if 'conv_q' in samples:
+            # handling conversation datasets
+            conv_q, conv_a = samples['conv_q'], samples['conv_a']
+
+            connect_sym = samples['connect_sym'][0]
+            conv_q = [q.split(connect_sym) for q in conv_q]
+            conv_a = [a.split(connect_sym) for a in conv_a]
+            conv_img = assign_imgs(conv_q, img_embeds)
+
+            if self.chat_template:
+                conv_q = [["[INST] " + item + "[/INST]" for item in items] for items in conv_q]
+
+            regress_embeds, regress_atts, part_targets = self.get_conv_emb(conv_q, conv_a, conv_img)
+            cond_embeds, cond_atts = regress_embeds[:, :0], regress_atts[:, :0]
+
+        else:
+            instruction = samples["instruction_input"] if "instruction_input" in samples else None
+
+            if self.remove_template:
+                instruction = remove_special_tokens(instruction)
+
+            if self.chat_template:
+                instruction = ["[INST] " + instruct + "[/INST]" for instruct in instruction]
+
+            if 'length' in samples:
+                # the input is an image sequence (i.e. video frames)
+                bsz, pn, hs = img_embeds.shape
+                img_embeds = img_embeds.reshape(len(samples['image']), -1, pn, hs)  # (200, 64, 4096) -> (4, 50, 64, 4096)
+                cond_embeds, cond_atts = self.prompt_wrap(img_embeds, img_atts, instruction, samples['length'])
+            else:
+                cond_embeds, cond_atts = self.prompt_wrap(img_embeds, img_atts, instruction)
+
+        ### prepare target tokens
+        self.llama_tokenizer.padding_side = "right"
+        text = [t + self.end_sym for t in samples["answer"]]
+
+        regress_tokens = self.llama_tokenizer(
+            text,
+            return_tensors="pt",
+            padding="max_length",
+            truncation=True,
+            max_length=self.max_txt_len,
+            add_special_tokens=False
+        ).to(self.device)
+
+        regress_token_ids = regress_tokens.input_ids
+        regress_atts = regress_tokens.attention_mask
+        # padding positions are masked out of the loss
+        part_targets = regress_token_ids.masked_fill(
+            regress_token_ids == self.llama_tokenizer.pad_token_id, -100
+        )
+
+        regress_embeds = self.embed_tokens(regress_token_ids)
+
+        return cond_embeds, cond_atts, regress_embeds, regress_atts, part_targets
+
+    def forward(self, samples, reduction="mean"):
+        # prepare the embeddings to condition on and the embeddings to regress
+        cond_embeds, cond_atts, regress_embeds, regress_atts, part_targets = \
+            self.preparing_embedding(samples)
+
+        # concatenate the conditioning embeddings and the regression embeddings
+        inputs_embeds, attention_mask, input_lens = \
+            self.concat_emb_input_output(cond_embeds, cond_atts, regress_embeds, regress_atts)
+
+        # get the bos token embedding
+        bos = torch.ones_like(part_targets[:, :1]) * self.llama_tokenizer.bos_token_id
+        bos_embeds = self.embed_tokens(bos)
+        bos_atts = attention_mask[:, :1]
+
+        # prepend the bos token
+        inputs_embeds = torch.cat([bos_embeds, inputs_embeds], dim=1)
+        attention_mask = torch.cat([bos_atts, attention_mask], dim=1)
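+        # Layout from here on (illustrative, not from the original source):
+        #   inputs_embeds = [BOS][cond: prompt + image tokens][regress: answer tokens][pad]
+        # concat_emb_input_output keeps both halves right-padded, so input_lens[i]
+        # marks where the answer of sample i starts (offset by 1 once BOS is prepended).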
+        # assemble the final targets: -100 everywhere except the answer tokens
+        targets = torch.ones([inputs_embeds.shape[0], inputs_embeds.shape[1]],
+                             dtype=torch.long).to(self.device).fill_(-100)
+        for i, target in enumerate(part_targets):
+            targets[i, input_lens[i] + 1:input_lens[i] + len(target) + 1] = target  # plus 1 for bos
+
+        with self.maybe_autocast():
+            outputs = self.llama_model(
+                inputs_embeds=inputs_embeds,
+                attention_mask=attention_mask,
+                return_dict=True,
+                labels=targets,
+                reduction=reduction
+            )
+        loss = outputs.loss
+
+        return {"loss": loss}
+
+    @torch.no_grad()
+    def generate(
+        self,
+        images,
+        texts,
+        use_nucleus_sampling=False,
+        num_beams=1,
+        max_new_tokens=20,
+        min_length=1,
+        top_p=0.9,
+        repetition_penalty=1.5,
+        length_penalty=1,
+        temperature=1,
+        do_sample=False,
+        stop_words_ids=[2],
+        lengths=None,
+        return_video_temporal_features=False,
+        img_embeds=None,
+    ):
+        '''
+        function for generation at test time
+        '''
+
+        stopping_criteria = StoppingCriteriaList([StoppingCriteriaSub(
+            stops=[torch.tensor([i]).to(self.device) for i in stop_words_ids])])
+        if img_embeds is None:
+            img_embeds, atts_img = self.encode_img(images.to(self.device))
+        else:
+            # use the image features passed in, e.g. (4, 45, 64, 5632)
+            img_embeds = img_embeds.reshape(-1, *img_embeds.shape[-2:])
+            img_embeds = img_embeds.to(self.device)
+            img_embeds = self.llama_proj(img_embeds)  # project to the LLM input size: (200, 64, 5632) -> (200, 64, 4096)
+            atts_img = torch.ones(img_embeds.size()[:-1], dtype=torch.long).to(self.device)
+
+        if lengths is not None:
+            image_lists = []
+            img_embeds = img_embeds.reshape(len(lengths), -1, img_embeds.shape[-2], img_embeds.shape[-1])
+            for idx, img_embed in enumerate(img_embeds):
+                image_lists.append([img_embed[i][None] for i in range(lengths[idx])])
+        else:
+            image_lists = [[image_emb[None]] for image_emb in img_embeds]
+        assert len(texts) == len(image_lists)
+        batch_embs = [self.get_context_emb(text, img_list) for text, img_list in zip(texts, image_lists)]
+
+        batch_size = len(batch_embs)
+        max_len = max([emb.shape[1] for emb in batch_embs])
+        emb_dim = batch_embs[0].shape[2]
+        dtype = batch_embs[0].dtype
+        device = batch_embs[0].device
+
+        # left-pad so generation starts right after the last prompt token
+        embs = torch.zeros([batch_size, max_len, emb_dim], dtype=dtype, device=device)
+        attn_mask = torch.zeros([batch_size, max_len], dtype=torch.int, device=device)
+        for i, emb in enumerate(batch_embs):
+            emb_len = emb.shape[1]
+            embs[i, -emb_len:] = emb[0]
+            attn_mask[i, -emb_len:] = 1
+
+        # if the input exceeds the model's context window, truncate it from the left
+        if self.model_type == "Llama":
+            context_window = 3700
+        else:
+            context_window = 7500
+        if embs.shape[1] > context_window:
+            embs = embs[:, -context_window:]
+            attn_mask = attn_mask[:, -context_window:]
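+        # Example (illustrative, not from the original source): with context_window = 3700
+        # and a left-padded batch of length 4000, embs[:, -3700:] keeps the 3700 most
+        # recent positions, so padding and the oldest prompt tokens are dropped first
+        # and the text immediately preceding generation is always preserved.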
+        with self.maybe_autocast():
+            if return_video_temporal_features:
+                last_hidden_state = self.llama_model(
+                    inputs_embeds=embs,
+                    attention_mask=attn_mask,
+                    output_hidden_states=True,
+                ).hidden_states[-1]
+                # mean-pool the last hidden states over time as the video-level feature
+                video_temporal_features = last_hidden_state.mean(dim=1)
+                # optionally normalize the temporal features using the L2 norm:
+                # video_temporal_features = video_temporal_features / video_temporal_features.norm(dim=-1, keepdim=True)
+            outputs = self.llama_model.generate(
+                inputs_embeds=embs,
+                attention_mask=attn_mask,
+                max_new_tokens=max_new_tokens,
+                num_beams=num_beams,
+                do_sample=do_sample,
+                temperature=temperature,
+                repetition_penalty=repetition_penalty,
+                # stopping_criteria=stopping_criteria,
+            )
+
+        answers = []
+        for output_token in outputs:
+            if output_token[0] == 0:
+                output_token = output_token[1:]
+            output_texts = self.llama_tokenizer.decode(output_token, skip_special_tokens=True)
+            output_texts = output_texts.split('</s>')[0]  # remove the stop sign
+            output_texts = output_texts.replace("<s>", "")
+            output_texts = output_texts.split(r'[/INST]')[-1].strip()
+            answers.append(output_texts)
+        if return_video_temporal_features:
+            return answers, video_temporal_features
+        else:
+            return answers
+
+    @torch.no_grad()
+    def generate_text_only(
+        self,
+        images,
+        seg_tokens,
+        use_nucleus_sampling=False,
+        num_beams=1,
+        max_new_tokens=20,
+        min_length=1,
+        top_p=0.9,
+        repetition_penalty=1.5,
+        length_penalty=1,
+        temperature=1,
+        do_sample=False,
+        stop_words_ids=[2],
+        lengths=None,
+        return_video_temporal_features=False,
+        img_embeds=None,
+    ):
+        '''
+        function for text-only generation at test time
+        '''
+
+        stopping_criteria = StoppingCriteriaList([StoppingCriteriaSub(
+            stops=[torch.tensor([i]).to(self.device) for i in stop_words_ids])])
+
+        batch_embs = [torch.cat([self.embed_tokens(seg_t)]) for seg_t in seg_tokens]
+
+        batch_size = len(batch_embs)
+        max_len = max([emb.shape[1] for emb in batch_embs])
+        emb_dim = batch_embs[0].shape[2]
+        dtype = batch_embs[0].dtype
+        device = batch_embs[0].device
+
+        embs = torch.zeros([batch_size, max_len, emb_dim], dtype=dtype, device=device)
+        attn_mask = torch.zeros([batch_size, max_len], dtype=torch.int, device=device)
+        for i, emb in enumerate(batch_embs):
+            emb_len = emb.shape[1]
+            embs[i, -emb_len:] = emb[0]
+            attn_mask[i, -emb_len:] = 1
+
+        with self.maybe_autocast():
+            outputs = self.llama_model.generate(
+                inputs_embeds=embs,
+                attention_mask=attn_mask,
+                max_new_tokens=max_new_tokens,
+                num_beams=num_beams,
+                do_sample=do_sample,
+                temperature=temperature,
+                repetition_penalty=repetition_penalty,
+                # stopping_criteria=stopping_criteria,
+            )
+
+        answers = []
+        for output_token in outputs:
+            if output_token[0] == 0:
+                output_token = output_token[1:]
+            output_texts = self.llama_tokenizer.decode(output_token, skip_special_tokens=True)
+            output_texts = output_texts.split('</s>')[0]  # remove the stop sign
+            output_texts = output_texts.replace("<s>", "")
+            output_texts = output_texts.split(r'[/INST]')[-1].strip()
+            answers.append(output_texts)
+        return answers
+
+    @torch.no_grad()
+    def multi_select(self, images, texts, answers, num_cand=None):
+        all_losses = []
+        for answer in answers:
+            choice_samples = {
+                'image': images,
+                'instruction_input': texts,
+                'answer': answer
+            }
+            loss = self.forward(choice_samples, reduction='none')['loss'].reshape(-1, 1)
+            all_losses.append(loss)
+            torch.cuda.empty_cache()
+        all_losses = torch.cat(all_losses, dim=-1)
+        if num_cand is not None:
+            for i in
range(all_losses.shape[0]): + all_losses[i, num_cand[i]:] = 9999 + output_class_ranks = torch.argsort(all_losses, dim=-1) + return output_class_ranks.tolist() + + def predict_answers( + self, + samples, + num_beams=5, + inference_method="generate", + max_len=10, + min_len=1, + num_ans_candidates=128, + answer_list=None, + prompt="", + length_penalty=0, + **kwargs + ): + ''' + function for open-ended VQA + ''' + images = samples["image"].cuda() + texts = samples["instruction_input"] + + output_text = self.generate( + images=images, + texts=texts, + num_beams=num_beams, + max_new_tokens=max_len, + min_length=min_len, + length_penalty=length_penalty + ) + + if "apply_lemmatizer" in samples.keys() and samples["apply_lemmatizer"]: + output_text = self._lemmatize(output_text) + + return output_text + + def predict_class( + self, + samples, + num_beams=5, + inference_method="generate", + max_len=10, + min_len=1, + num_ans_candidates=5, + answer_list=None, + prompt="", + length_penalty=0, + **kwargs + ): + ''' + function for multi-choice VQA + ''' + + image = samples["image"].cuda() + instruction = samples['instruction_input'] + answers = samples["choices"] + num_cand = samples["num_choices"] + + ranks = self.multi_select(image, instruction, answers, num_cand) + + pred_ans = [] + for i, rank in enumerate(ranks): + pred = answers[rank[0]][i] + pred_ans.append(pred) + return pred_ans + + def embed_tokens(self, token_ids): + try: + embeds = self.llama_model.base_model.model.model.embed_tokens(token_ids) + except AttributeError: + embeds = self.llama_model.model.embed_tokens(token_ids) + + return embeds + + @classmethod + def from_config(cls, cfg): + vit_model = cfg.get("vit_model", "eva_clip_g") + q_former_model = cfg.get("q_former_model", "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_flant5xxl.pth") + img_size = cfg.get("image_size") + num_query_token = cfg.get("num_query_token") + llama_model = cfg.get("llama_model") + + drop_path_rate = cfg.get("drop_path_rate", 0) + use_grad_checkpoint = cfg.get("use_grad_checkpoint", False) + vit_precision = cfg.get("vit_precision", "fp16") + freeze_vit = cfg.get("freeze_vit", True) + freeze_qformer = cfg.get("freeze_qformer", True) + low_resource = cfg.get("low_resource", False) + + prompt_path = cfg.get("prompt_path", "") + prompt_template = cfg.get("prompt_template", "") + max_txt_len = cfg.get("max_txt_len", 300) + end_sym = cfg.get("end_sym", '\n') + + lora_r = cfg.get("lora_r",64) + lora_alpha = cfg.get("lora_alpha",16) + chat_template = cfg.get("chat_template",False) + system_prompt = cfg.get("system_prompt", False) + token_pooling = cfg.get("token_pooling",True) + + use_grad_checkpoint_llm = cfg.get("use_grad_checkpoint_llm", False) + max_context_len = cfg.get("max_context_len", 3800) + remove_template = cfg.get("remove_template", False) + + + model = cls( + vit_model=vit_model, + img_size=img_size, + drop_path_rate=drop_path_rate, + use_grad_checkpoint=use_grad_checkpoint, + vit_precision=vit_precision, + freeze_vit=freeze_vit, + llama_model=llama_model, + prompt_path=prompt_path, + prompt_template=prompt_template, + max_txt_len=max_txt_len, + low_resource=low_resource, + end_sym=end_sym, + lora_r = lora_r, + lora_alpha = lora_alpha, + chat_template = chat_template, + system_prompt = system_prompt, + token_pooling = token_pooling, + use_grad_checkpoint_llm=use_grad_checkpoint_llm, + max_context_len=max_context_len, + remove_template = remove_template + ) + + ckpt_path = cfg.get("ckpt", "") # load 
weights of MiniGPT-4
+        if ckpt_path:
+            print("Load Minigpt-4-LLM Checkpoint: {}".format(ckpt_path))
+            ckpt = torch.load(ckpt_path, map_location="cpu")
+            msg = model.load_state_dict(ckpt['model'], strict=False)
+
+        return model
+
+
+def assign_imgs(batched_instruct_list, batched_img_embeds):
+    '''This function is used when the data is interleaved:
+    the interleaved data is separated, and this function assigns the
+    corresponding image embeddings to each segment.'''
+    if len(batched_img_embeds.shape) == 3:
+        batched_img_embeds = batched_img_embeds[:, None]
+
+    batched_assigned = []
+
+    for instruct_list, img_embeds in zip(batched_instruct_list, batched_img_embeds):
+        img_idx = 0
+        assigned_img = []
+        n_assigned = []
+        for instruct in instruct_list:
+            n_img = instruct.count('<ImageHere>')
+            if n_img > 0:  # this instruction includes images
+                assigned_img.append(img_embeds[None, img_idx:img_idx + n_img])
+                img_idx += n_img
+                n_assigned.append(n_img)
+            else:  # this instruction doesn't include images
+                assigned_img.append(None)
+                n_assigned.append(None)
+        batched_assigned.append(assigned_img)
+
+    return batched_assigned
\ No newline at end of file
diff --git a/models/backbones/mini_gpt4v.py b/models/backbones/mini_gpt4v.py
new file mode 100755
index 0000000..c3d5e4d
--- /dev/null
+++ b/models/backbones/mini_gpt4v.py
@@ -0,0 +1,709 @@
+import logging
+import random
+
+import torch
+from torch.cuda.amp import autocast as autocast
+import torch.nn as nn
+
+from minigpt4.common.registry import registry
+from minigpt4.models.blip2 import Blip2Base, disabled_train
+from minigpt4.models.modeling_llama_v2 import LlamaForCausalLM
+from minigpt4.conversation.conversation import Conversation, SeparatorStyle, StoppingCriteriaList, StoppingCriteriaSub
+
+from transformers import LlamaTokenizer, CodeLlamaTokenizer, BitsAndBytesConfig
+
+from peft import (
+    LoraConfig,
+    get_peft_model,
+    prepare_model_for_kbit_training
+)
+import time
+import numpy as np
+
+from minigpt4.models import policies
+
+
+@registry.register_model("mini_gpt4v")
+class MiniGPT4v(Blip2Base):
+    """
+    BLIP2 GPT-LLAMA model.
+    """
+
+    PRETRAINED_MODEL_CONFIG_DICT = {
+        "pretrain_vicuna": "configs/models/minigpt4.yaml",
+    }
+
+    def __init__(
+        self,
+        vit_model="eva_clip_g",
+        img_size=224,
+        drop_path_rate=0,
+        use_grad_checkpoint=False,
+        vit_precision="fp16",
+        freeze_vit=True,
+        llama_model="",
+        prompt_path="",
+        prompt_template="",
+        max_txt_len=32,
+        low_resource=False,  # use 8-bit quantization and put the ViT on CPU
+        end_sym='\n',
+        lora_r=8,
+        lora_target_modules=["q_proj", "v_proj"],
+        lora_alpha=16,
+        lora_dropout=0.05,
+        ckpt_path="",
+        system_prompt=False,
+        chat_template=False,
+        token_pooling=True,
+        use_grad_checkpoint_llm=False,
+        max_context_len=3800,
+        remove_template=False,
+    ):
+        super().__init__()
+
+        self.tokenizer = self.init_tokenizer()
+        self.low_resource = low_resource
+        self.token_pooling = token_pooling
+        self.remove_template = remove_template
+
+        print("token pooling", self.token_pooling)
+
+        self.use_grad_checkpoint_llm = use_grad_checkpoint_llm
+        self.max_context_len = max_context_len
+        self.chat_template = chat_template
+
+        print("vit precision", vit_precision)
+        # the vision encoder always operates on 224x224 tiles; larger inputs are tiled in encode_img
+        self.visual_encoder, self.ln_vision = self.init_vision_encoder(
+            vit_model, 224, drop_path_rate, use_grad_checkpoint, vit_precision
+        )
+        for name, param in self.visual_encoder.named_parameters():
+            param.requires_grad = False
+        self.visual_encoder = self.visual_encoder.eval()
+        self.visual_encoder.train = disabled_train
+        for name, param in self.ln_vision.named_parameters():
+            param.requires_grad = False
+        self.ln_vision = self.ln_vision.eval()
+        self.ln_vision.train = disabled_train
+        logging.info("freeze vision encoder")
+        print("freeze the vision encoder")
+
+        print('Loading VIT Done')
+
+        print('Loading LLAMA')
+
+        self.B_SYS, self.E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
+
+        if 'CodeLlama' in llama_model:
+            self.llama_tokenizer = CodeLlamaTokenizer.from_pretrained(llama_model, use_fast=False)
+            self.llama_tokenizer.pad_token = "$$"
+        else:
+            self.llama_tokenizer = LlamaTokenizer.from_pretrained(llama_model, use_fast=False)
+            self.llama_tokenizer.pad_token = "$$"
+
+        self.system_prompt = system_prompt
+
+        bnb_config = BitsAndBytesConfig(
+            load_in_4bit=True,
+            bnb_4bit_use_double_quant=True,
+            bnb_4bit_quant_type="nf4",
+            bnb_4bit_compute_dtype=torch.bfloat16
+        )
+
+        self.llama_model = LlamaForCausalLM.from_pretrained(
+            llama_model,
+            quantization_config=bnb_config,
+            device_map={"": 0}
+        )
+
+        self.llama_model = prepare_model_for_kbit_training(self.llama_model)
+
+        print('Loading LLAMA Done')
+
+        self.merge_n = 3
+
+        # merge_n**2 = 9 neighbouring visual tokens are concatenated before projection
+        self.llama_proj = nn.Linear(
+            1408 * self.merge_n**2, self.llama_model.config.hidden_size
+        )
+
+        self.max_txt_len = max_txt_len
+        self.end_sym = end_sym
+
+        if prompt_path:
+            with open(prompt_path, 'r') as f:
+                raw_prompts = f.read().splitlines()
+            filtered_prompts = [raw_prompt for raw_prompt in raw_prompts if "<ImageHere>" in raw_prompt]
+            self.prompt_list = [prompt_template.format(p) for p in filtered_prompts]
+            print('Load {} training prompts'.format(len(self.prompt_list)))
+            print('Prompt Example \n{}'.format(random.choice(self.prompt_list)))
+        else:
+            self.prompt_list = []
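+    # Shape walk-through (illustrative, not from the original source): a 672x672 input
+    # is tiled into 3x3 = 9 sub-images of 224x224; each yields 16x16 = 256 ViT tokens
+    # after the CLS token is dropped, giving 2304 tokens per image. With merge_n = 3,
+    # every 9 adjacent tokens are concatenated (1408 -> 12672 features) before
+    # llama_proj maps them to the LLM hidden size. This assumes the input height and
+    # width are multiples of 224, which encode_img asserts below.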
+    def encode_img(self, image):
+        device = image.device
+        if len(image.shape) > 4:
+            image = image.reshape(-1, *image.shape[-3:])
+
+        bs, ch, w, h = image.shape
+        assert w % 224 == 0
+        bw = w // 224
+        assert h % 224 == 0
+        bh = h // 224
+        # tile the image into bw x bh sub-images of 224x224
+        image_patches = image.view(bs, ch, bw, 224, bh, 224).permute(0, 2, 4, 1, 3, 5)  # bs, bw, bh, ch, 224, 224
+        image_patches = image_patches.reshape(bs * bw * bh, ch, 224, 224)
+
+        with self.maybe_autocast():
+            image_patch_embeds = self.ln_vision(self.visual_encoder(image_patches)).to(device)
+
+        # drop the CLS token and stitch the per-tile features back into one grid
+        image_patch_embeds = image_patch_embeds[:, 1:, :].reshape(bs, bw, bh, 16, 16, image_patch_embeds.shape[-1])
+        image_patch_embeds = image_patch_embeds.permute(0, 1, 3, 2, 4, 5)  # bs, bw, 16, bh, 16, hs
+        image_embeds = image_patch_embeds.reshape(bs, bw * 16 * bh * 16, image_patch_embeds.shape[-1])
+
+        bs, pn, hs = image_embeds.shape
+
+        # concatenate every merge_n**2 adjacent tokens into one
+        image_embeds = image_embeds.view(bs, int(pn / self.merge_n**2), int(hs * self.merge_n**2))
+
+        inputs_llama = self.llama_proj(image_embeds)
+        atts_llama = torch.ones(inputs_llama.size()[:-1], dtype=torch.long).to(image.device)
+        return inputs_llama, atts_llama
+
+    def get_context_emb(self, prompt, img_list):
+        img_device = img_list[0].device
+        prompt_segs = prompt.split('<ImageHere>')
+        assert len(prompt_segs) == len(img_list) + 1, "Unmatched numbers of image placeholders and images."
+        seg_tokens = [
+            self.llama_tokenizer(
+                seg, return_tensors="pt", add_special_tokens=i == 0).to(img_device).input_ids  # only add bos to the first seg
+            for i, seg in enumerate(prompt_segs)
+        ]
+
+        seg_embs = [self.embed_tokens(seg_t) for seg_t in seg_tokens]
+
+        mixed_embs = [emb for pair in zip(seg_embs[:-1], img_list) for emb in pair] + [seg_embs[-1]]
+
+        mixed_embs = torch.cat(mixed_embs, dim=1)
+        return mixed_embs
+    def prompt_wrap(self, img_embeds, atts_img, prompts, lengths=None):
+        if prompts is None or len(prompts) == 0:
+            # prompts are not provided; just return the original image embeddings
+            return img_embeds, atts_img
+        elif img_embeds is None:
+            # prompts are provided but there are no image embeddings:
+            # return the right-padded prompt embeddings
+            self.llama_tokenizer.padding_side = "right"
+            prompt_tokens = self.llama_tokenizer(
+                prompts,
+                return_tensors="pt",
+                padding="longest",
+                add_special_tokens=False
+            ).to(self.device)
+            prompt_embeds = self.embed_tokens(prompt_tokens.input_ids)
+            atts_prompt = prompt_tokens.attention_mask
+            return prompt_embeds, atts_prompt
+
+        else:
+            # return the multi-modal embeddings in right padding
+            emb_lists = []
+
+            for idx, (each_img_embed, each_prompt) in enumerate(zip(img_embeds, prompts)):
+                pn = each_img_embed.shape[-2]
+                if lengths is not None:
+                    each_img_embed = each_img_embed.reshape(-1, each_img_embed.shape[-1])
+                    each_img_embed = each_img_embed[:lengths[idx] * pn]
+
+                p_segs = each_prompt.split('<ImageHere>')
+                interleave_emb = []
+                # NOTE: use a separate loop variable so the outer `idx` (the batch index) is not shadowed
+                for seg_idx, seg in enumerate(p_segs[:-1]):
+                    p_tokens = self.llama_tokenizer(seg, return_tensors="pt", add_special_tokens=False).to(img_embeds.device)
+                    p_embed = self.embed_tokens(p_tokens.input_ids)
+                    interleave_emb.append(torch.cat([p_embed, each_img_embed[None][:, seg_idx * pn:(seg_idx + 1) * pn]], dim=1))
+
+                wrapped_emb = torch.cat(interleave_emb, dim=1)
+                p_tokens = self.llama_tokenizer(p_segs[-1], return_tensors="pt", add_special_tokens=False).to(img_embeds.device)
+                p_embed = self.embed_tokens(p_tokens.input_ids)
+                wrapped_emb = torch.cat([wrapped_emb, p_embed], dim=1)
+                emb_lists.append(wrapped_emb)
+
+            emb_lens = [emb.shape[1] for emb in emb_lists]
+            pad_emb = self.embed_tokens(torch.tensor(self.llama_tokenizer.pad_token_id, device=img_embeds.device))
+
+            max_length = max(emb_lens) if max(emb_lens) < self.max_context_len else self.max_context_len
+            wrapped_embs = pad_emb.expand(len(emb_lens), max_length, -1).clone()
+            wrapped_atts = torch.zeros([len(emb_lens), max_length], dtype=torch.int, device=img_embeds.device)
+
+            for i, emb in enumerate(emb_lists):
+                length = emb_lens[i] if emb_lens[i] < self.max_context_len else self.max_context_len
+                wrapped_embs[i, :length] = emb[:, :length]
+                wrapped_atts[i, :length] = 1
+
+            return wrapped_embs, wrapped_atts
+
+    def concat_emb_input_output(self, input_embs, input_atts, output_embs, output_atts):
+        """
+        Concatenate the batched input embeddings and the batched output embeddings.
+        Both the input and the output embeddings should be right padded.
+ """ + + input_lens = [] + cat_embs = [] + cat_atts = [] + + for i in range(input_embs.size(0)): + input_len = input_atts[i].sum() + input_lens.append(input_len) + + cat_embs.append( + torch.cat([ + input_embs[i][:input_len], + output_embs[i], + input_embs[i][input_len:] + ]) + ) + cat_atts.append( + torch.cat([ + input_atts[i][:input_len], + output_atts[i], + input_atts[i][input_len:] + ]) + ) + # print('===================================') + # print('check input emb: ', input_embs[i][this_input_ones-2:this_input_ones]) + # print('check pad emb: ', input_embs[i][this_input_ones:this_input_ones+2]) + # print('check out emb: ', output_embs[i][:2]) + # print('check out pad emb: ', output_embs[i][-2:]) + # print('+++++++++++++++++++++++++++++++++++') + # + # print('check attn before: ', input_atts[i][:this_input_ones]) + # print('check attn after: ', input_atts[i][this_input_ones:]) + # print('check attn gt before: ', output_atts[i][:3]) + # print('check attn gt after: ', output_atts[i][-3:]) + + cat_embs = torch.stack(cat_embs) + cat_atts = torch.stack(cat_atts) + return cat_embs, cat_atts, input_lens + + def get_conv_emb(self, conv_q, conv_a, conv_img): + """concatenate conversation and make sure the model is only trained to regress the answer""" + + regress_embs_list = [] + targets_list = [] + + batch_size = len(conv_q) + for batch_idx in range(batch_size): + questions, answers = conv_q[batch_idx], conv_a[batch_idx] + assigned_imgs = conv_img[batch_idx] + questions = [self.prompt_wrap( + img_embeds=img, + atts_img=None, + prompts=[q], + lengths=[img.shape[1]] if img is not None else None) for q, img in zip(questions, assigned_imgs)] + q_embs = [emb for emb, _ in questions] + + answers = [self.llama_tokenizer(a, return_tensors="pt", add_special_tokens=False).to(self.device) for a in answers] + cur_emb = [] + cur_target = [] + for i in range(len(questions)): + cur_emb.append(q_embs[i]) + cur_target.append(torch.ones_like(q_embs[i][..., 0], dtype=torch.int) * -100) + + cur_emb.append(self.embed_tokens(answers[i].input_ids)) + cur_target.append(answers[i].input_ids) + + cur_emb = torch.cat(cur_emb, dim=1) + cur_target = torch.cat(cur_target, dim=1) + + regress_embs_list.append(cur_emb) + targets_list.append(cur_target) + + max_len = min(max([target.shape[1] for target in targets_list]), self.max_txt_len) + + regress_embeds = torch.zeros([batch_size, max_len, cur_emb.shape[-1]], device=self.device) + regress_attn = torch.zeros([batch_size, max_len], dtype=torch.int, device=self.device) + targets = torch.ones([batch_size, max_len], dtype=torch.long, device=self.device) * -100 + + for batch_idx in range(batch_size): + cur_len = regress_embs_list[batch_idx].shape[1] + regress_embeds[batch_idx, :cur_len] = regress_embs_list[batch_idx][0, :max_len] + regress_attn[batch_idx, :cur_len] = 1 + targets[batch_idx, :cur_len] = targets_list[batch_idx][0, :max_len] + + return regress_embeds, regress_attn, targets + + def preparing_embedding(self, samples): + def remove_special_tokens(data): + + # if "instruction_input" in data: + data = [instruct.replace(" [caption]","") for instruct in data] + data = [instruct.replace(" [vqa]","") for instruct in data] + data = [instruct.replace(" [grounding]","") for instruct in data] + data = [instruct.replace(" [identify]","") for instruct in data] + data = [instruct.replace(" [refer]","") for instruct in data] + return data + + ### prepare input tokens + if 'image' in samples: + img_embeds, img_atts = self.encode_img(samples["image"]) + else: + img_embeds = img_atts = 
None
+
+        if 'conv_q' in samples:
+            # handling conversation datasets
+            conv_q, conv_a = samples['conv_q'], samples['conv_a']
+
+            connect_sym = samples['connect_sym'][0]
+            conv_q = [q.split(connect_sym) for q in conv_q]
+            conv_a = [a.split(connect_sym) for a in conv_a]
+            conv_img = assign_imgs(conv_q, img_embeds)
+
+            if self.chat_template:
+                conv_q = [["[INST] " + item + "[/INST]" for item in items] for items in conv_q]
+
+            regress_embeds, regress_atts, part_targets = self.get_conv_emb(conv_q, conv_a, conv_img)
+            cond_embeds, cond_atts = regress_embeds[:, :0], regress_atts[:, :0]
+
+        else:
+            instruction = samples["instruction_input"] if "instruction_input" in samples else None
+
+            if self.remove_template:
+                instruction = remove_special_tokens(instruction)
+
+            if self.chat_template:
+                instruction = ["[INST] " + instruct + "[/INST]" for instruct in instruction]
+
+            if 'length' in samples:
+                # the input is an image sequence (i.e. video frames)
+                bsz, pn, hs = img_embeds.shape
+                img_embeds = img_embeds.reshape(len(samples['image']), -1, pn, hs)
+                cond_embeds, cond_atts = self.prompt_wrap(img_embeds, img_atts, instruction, samples['length'])
+            else:
+                cond_embeds, cond_atts = self.prompt_wrap(img_embeds, img_atts, instruction)
+
+        ### prepare target tokens
+        self.llama_tokenizer.padding_side = "right"
+        text = [t + self.end_sym for t in samples["answer"]]
+
+        regress_tokens = self.llama_tokenizer(
+            text,
+            return_tensors="pt",
+            padding="longest",
+            truncation=True,
+            max_length=self.max_txt_len,
+            add_special_tokens=False
+        ).to(self.device)
+
+        regress_token_ids = regress_tokens.input_ids
+        regress_atts = regress_tokens.attention_mask
+        part_targets = regress_token_ids.masked_fill(
+            regress_token_ids == self.llama_tokenizer.pad_token_id, -100
+        )
+
+        regress_embeds = self.embed_tokens(regress_token_ids)
+
+        return cond_embeds, cond_atts, regress_embeds, regress_atts, part_targets
+
+    def forward(self, samples, reduction="mean"):
+        # prepare the embeddings to condition on and the embeddings to regress
+        cond_embeds, cond_atts, regress_embeds, regress_atts, part_targets = \
+            self.preparing_embedding(samples)
+
+        # concatenate the conditioning embeddings and the regression embeddings
+        inputs_embeds, attention_mask, input_lens = \
+            self.concat_emb_input_output(cond_embeds, cond_atts, regress_embeds, regress_atts)
+
+        # get the bos token embedding
+        bos = torch.ones_like(part_targets[:, :1]) * self.llama_tokenizer.bos_token_id
+        bos_embeds = self.embed_tokens(bos)
+        bos_atts = attention_mask[:, :1]
+
+        # prepend the bos token
+        inputs_embeds = torch.cat([bos_embeds, inputs_embeds], dim=1)
+        attention_mask = torch.cat([bos_atts, attention_mask], dim=1)
+
+        # assemble the final targets: -100 everywhere except the answer tokens
+        targets = torch.ones([inputs_embeds.shape[0], inputs_embeds.shape[1]],
+                             dtype=torch.long).to(self.device).fill_(-100)
+        for i, target in enumerate(part_targets):
+            targets[i, input_lens[i] + 1:input_lens[i] + len(target) + 1] = target  # plus 1 for bos
+
+        with self.maybe_autocast():
+            outputs = self.llama_model(
+                inputs_embeds=inputs_embeds,
+                attention_mask=attention_mask,
+                return_dict=True,
+                labels=targets,
+                reduction=reduction
+            )
+        loss = outputs.loss
+
+        return {"loss": loss}
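+    # Target masking example (illustrative, not from the original source): for a sample
+    # with 5 conditioning positions and answer tokens [a1, a2, a3], the targets row is
+    #   [-100, -100, -100, -100, -100, -100, a1, a2, a3, -100, ...]
+    # (six -100s: one for the prepended BOS plus five conditioning positions), so the
+    # cross-entropy loss is computed only on the answer tokens.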
+    @torch.no_grad()
+    def generate(
+        self,
+        images,
+        texts,
+        use_nucleus_sampling=False,
+        num_beams=1,
+        max_new_tokens=20,
+        min_length=1,
+        top_p=0.9,
+        repetition_penalty=1,
+        length_penalty=1,
+        temperature=1,
+        do_sample=False,
+        stop_words_ids=[2],
+        lengths=None,
+    ):
+        '''
+        function for generation at test time
+        '''
+
+        stopping_criteria = StoppingCriteriaList([StoppingCriteriaSub(
+            stops=[torch.tensor([i]).to(self.device) for i in stop_words_ids])])
+
+        img_embeds, atts_img = self.encode_img(images.to(self.device))
+        if lengths is not None:
+            image_lists = []
+            img_embeds = img_embeds.reshape(len(lengths), -1, img_embeds.shape[-2], img_embeds.shape[-1])
+            for idx, img_embed in enumerate(img_embeds):
+                image_lists.append([img_embed[i][None] for i in range(lengths[idx])])
+        else:
+            image_lists = [[image_emb[None]] for image_emb in img_embeds]
+        assert len(texts) == len(image_lists)
+        batch_embs = [self.get_context_emb(text, img_list) for text, img_list in zip(texts, image_lists)]
+
+        batch_size = len(batch_embs)
+        max_len = max([emb.shape[1] for emb in batch_embs])
+        emb_dim = batch_embs[0].shape[2]
+        dtype = batch_embs[0].dtype
+        device = batch_embs[0].device
+
+        # left-pad so generation starts right after the last prompt token
+        embs = torch.zeros([batch_size, max_len, emb_dim], dtype=dtype, device=device)
+        attn_mask = torch.zeros([batch_size, max_len], dtype=torch.int, device=device)
+        for i, emb in enumerate(batch_embs):
+            emb_len = emb.shape[1]
+            embs[i, -emb_len:] = emb[0]
+            attn_mask[i, -emb_len:] = 1
+
+        with self.maybe_autocast():
+            outputs = self.llama_model.generate(
+                inputs_embeds=embs,
+                attention_mask=attn_mask,
+                max_new_tokens=max_new_tokens,
+                num_beams=num_beams,
+                do_sample=do_sample,
+                # stopping_criteria=stopping_criteria,
+            )
+
+        answers = []
+        for output_token in outputs:
+            if output_token[0] == 0:
+                output_token = output_token[1:]
+            output_texts = self.llama_tokenizer.decode(output_token, skip_special_tokens=True)
+            output_texts = output_texts.split('</s>')[0]  # remove the stop sign
+            output_texts = output_texts.replace("<s>", "")
+            output_texts = output_texts.split(r'[/INST]')[-1].strip()
+            answers.append(output_texts)
+
+        return answers
+
+    @torch.no_grad()
+    def multi_select(self, images, texts, answers, num_cand=None):
+        all_losses = []
+        for answer in answers:
+            choice_samples = {
+                'image': images,
+                'instruction_input': texts,
+                'answer': answer
+            }
+            # per-sample (unreduced) loss for this candidate answer
+            loss = self.forward(choice_samples, reduction='none')['loss'].reshape(-1, 1)
+            all_losses.append(loss)
+            torch.cuda.empty_cache()
+        all_losses = torch.cat(all_losses, dim=-1)
+        if num_cand is not None:
+            for i in range(all_losses.shape[0]):
+                # mask out padded (non-existent) candidates with a large loss
+                all_losses[i, num_cand[i]:] = 9999
+        output_class_ranks = torch.argsort(all_losses, dim=-1)
+        return output_class_ranks.tolist()
+
+    def predict_answers(
+        self,
+        samples,
+        num_beams=5,
+        inference_method="generate",
+        max_len=10,
+        min_len=1,
+        num_ans_candidates=128,
+        answer_list=None,
+        prompt="",
+        length_penalty=0,
+        **kwargs
+    ):
+        '''
+        function for open-ended VQA
+        '''
+        images = samples["image"].cuda()
+        texts = samples["instruction_input"]
+
+        output_text = self.generate(
+            images=images,
+            texts=texts,
+            num_beams=num_beams,
+            max_new_tokens=max_len,
+            min_length=min_len,
+            length_penalty=length_penalty
+        )
+
+        if "apply_lemmatizer" in samples.keys() and samples["apply_lemmatizer"]:
+            output_text = self._lemmatize(output_text)
+
+        return output_text
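+    # Ranking example (illustrative, not from the original source): with per-candidate
+    # losses [[2.1, 0.7, 3.4]] for one sample, argsort yields [[1, 0, 2]], so
+    # predict_class below picks answers[1] (the lowest-loss candidate) as the prediction.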
+        ranks = self.multi_select(image, instruction, answers, num_cand)
+
+        pred_ans = []
+        for i, rank in enumerate(ranks):
+            pred = answers[rank[0]][i]
+            pred_ans.append(pred)
+        return pred_ans
+
+    def embed_tokens(self, token_ids):
+        try:
+            embeds = self.llama_model.base_model.model.model.embed_tokens(token_ids)
+        except AttributeError:
+            embeds = self.llama_model.model.embed_tokens(token_ids)
+
+        return embeds
+
+    @classmethod
+    def from_config(cls, cfg):
+        vit_model = cfg.get("vit_model", "eva_clip_g")
+        q_former_model = cfg.get("q_former_model", "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_flant5xxl.pth")
+        img_size = cfg.get("image_size")
+        num_query_token = cfg.get("num_query_token")
+        llama_model = cfg.get("llama_model")
+
+        drop_path_rate = cfg.get("drop_path_rate", 0)
+        use_grad_checkpoint = cfg.get("use_grad_checkpoint", False)
+        vit_precision = cfg.get("vit_precision", "fp16")
+        freeze_vit = cfg.get("freeze_vit", True)
+        freeze_qformer = cfg.get("freeze_qformer", True)
+        low_resource = cfg.get("low_resource", False)
+
+        prompt_path = cfg.get("prompt_path", "")
+        prompt_template = cfg.get("prompt_template", "")
+        max_txt_len = cfg.get("max_txt_len", 300)
+        end_sym = cfg.get("end_sym", '\n')
+
+        lora_r = cfg.get("lora_r", 64)
+        lora_alpha = cfg.get("lora_alpha", 16)
+        chat_template = cfg.get("chat_template", False)
+        system_prompt = cfg.get("system_prompt", False)
+        token_pooling = cfg.get("token_pooling", True)
+
+        use_grad_checkpoint_llm = cfg.get("use_grad_checkpoint_llm", False)
+        max_context_len = cfg.get("max_context_len", 3800)
+        remove_template = cfg.get("remove_template", False)
+
+        model = cls(
+            vit_model=vit_model,
+            img_size=img_size,
+            drop_path_rate=drop_path_rate,
+            use_grad_checkpoint=use_grad_checkpoint,
+            vit_precision=vit_precision,
+            freeze_vit=freeze_vit,
+            llama_model=llama_model,
+            prompt_path=prompt_path,
+            prompt_template=prompt_template,
+            max_txt_len=max_txt_len,
+            low_resource=low_resource,
+            end_sym=end_sym,
+            lora_r=lora_r,
+            lora_alpha=lora_alpha,
+            chat_template=chat_template,
+            system_prompt=system_prompt,
+            token_pooling=token_pooling,
+            use_grad_checkpoint_llm=use_grad_checkpoint_llm,
+            max_context_len=max_context_len,
+            remove_template=remove_template
+        )
+
+        ckpt_path = cfg.get("ckpt", "")  # load weights of MiniGPT-4
+        if ckpt_path:
+            print("Load MiniGPT-4-LLM Checkpoint: {}".format(ckpt_path))
+            ckpt = torch.load(ckpt_path, map_location="cpu")
+            msg = model.load_state_dict(ckpt['model'], strict=False)
+
+        return model
+
+
+def assign_imgs(batched_instruct_list, batched_img_embeds):
+    '''Used when the data is interleaved: the interleaved instructions are split into
+    segments, and this function assigns the corresponding image embeddings to each segment.'''
+    if len(batched_img_embeds.shape) == 3:
+        batched_img_embeds = batched_img_embeds[:, None]
+
+    batched_assigned = []
+
+    for instruct_list, img_embeds in zip(batched_instruct_list, batched_img_embeds):
+        img_idx = 0
+        assigned_img = []
+        n_assigned = []
+        for instruct in instruct_list:
+            n_img = instruct.count('<ImageHere>')  # count the image placeholder tokens
+            if n_img > 0:  # this instruction includes images
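+                # slice out the n_img embeddings consumed by this segment, keeping a leading batch dim of 1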
+ assigned_img.append(img_embeds[None, img_idx:img_idx+n_img]) + img_idx += n_img + n_assigned.append(n_img) + else: # this instruction doesn't include images + assigned_img.append(None) + n_assigned.append(None) + batched_assigned.append(assigned_img) + + return batched_assigned \ No newline at end of file diff --git a/models/backbones/mistral.py b/models/backbones/mistral.py new file mode 100644 index 0000000..43095ff --- /dev/null +++ b/models/backbones/mistral.py @@ -0,0 +1,25 @@ +from transformers import AutoModelForCausalLM, AutoTokenizer + +device = "cuda" # the device to load the model onto + +model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2") +tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2") + +messages = [ + {"role": "user", "content": "What is your favourite condiment?"}, + {"role": "assistant", "content": "Well, I'm quite partial to a good squeeze of fresh lemon juice. It adds just the right amount of zesty flavour to whatever I'm cooking up in the kitchen!"}, + {"role": "user", "content": "Do you have mayonnaise recipes?"} +] +p="Well, I'm quite partial to a good squeeze of fresh lemon juice." +encoded_input = tokenizer(p, return_tensors='pt') +embeds = model.model.embed_tokens(encoded_input.input_ids) +print(embeds.shape) + + +encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt") +model_inputs = encodeds.to(device) +model.to(device) + +generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True) +decoded = tokenizer.batch_decode(generated_ids) +print(decoded[0]) diff --git a/models/backbones/modeling_llama_v2.py b/models/backbones/modeling_llama_v2.py new file mode 100644 index 0000000..3043af0 --- /dev/null +++ b/models/backbones/modeling_llama_v2.py @@ -0,0 +1,112 @@ +import math +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +from torch.nn import CrossEntropyLoss + +from transformers.utils import add_start_docstrings_to_model_forward, replace_return_docstrings +from transformers.modeling_outputs import CausalLMOutputWithPast +from transformers.models.llama.modeling_llama import LLAMA_INPUTS_DOCSTRING, _CONFIG_FOR_DOC +from transformers.models.llama.modeling_llama import LlamaForCausalLM as LlamaForCausalLMOrig + + +class LlamaForCausalLM(LlamaForCausalLMOrig): + + @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + reduction: Optional[str] = "mean", + ) -> Union[Tuple, CausalLMOutputWithPast]: + r""" + Args: + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
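+            reduction (`str`, *optional*, defaults to `"mean"`):
+                Reduction mode passed to `CrossEntropyLoss`. This kwarg is the only change relative to the
+                upstream `LlamaForCausalLM`: with `"none"`, the flattened token losses are re-averaged per
+                sequence below, so the forward returns one loss value per batch element (used in this repo
+                for loss-based answer ranking).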
+ + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, LlamaForCausalLM + + >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS) + >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER) + + >>> prompt = "Hey, are you conscious? Can you talk to me?" + >>> inputs = tokenizer(prompt, return_tensors="pt") + + >>> # Generate + >>> generate_ids = model.generate(inputs.input_ids, max_length=30) + >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] + "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." + ```""" + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + + if self.config.pretraining_tp > 1: + lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0) + logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)] + logits = torch.cat(logits, dim=-1) + else: + logits = self.lm_head(hidden_states) + logits = logits.float() + + loss = None + if labels is not None: + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss(reduction=reduction) + shift_logits = shift_logits.view(-1, self.config.vocab_size) + shift_labels = shift_labels.view(-1) + # Enable model parallelism + shift_labels = shift_labels.to(shift_logits.device) + loss = loss_fct(shift_logits, shift_labels) + if reduction == "none": + loss = loss.view(logits.size(0), -1).mean(1) + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) diff --git a/models/backbones/modeling_llama_v3.py b/models/backbones/modeling_llama_v3.py new file mode 100644 index 0000000..3043af0 --- /dev/null +++ b/models/backbones/modeling_llama_v3.py @@ -0,0 +1,112 @@ +import math +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +from torch.nn import CrossEntropyLoss + +from transformers.utils import add_start_docstrings_to_model_forward, replace_return_docstrings +from transformers.modeling_outputs import CausalLMOutputWithPast +from transformers.models.llama.modeling_llama import LLAMA_INPUTS_DOCSTRING, _CONFIG_FOR_DOC +from transformers.models.llama.modeling_llama import LlamaForCausalLM as LlamaForCausalLMOrig + + +class LlamaForCausalLM(LlamaForCausalLMOrig): + + @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) + 
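+    # NOTE: this forward is identical to the one in modeling_llama_v2.py; the module is
+    # presumably kept separate so Llama-3 backbones can be patched independently later.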
def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + reduction: Optional[str] = "mean", + ) -> Union[Tuple, CausalLMOutputWithPast]: + r""" + Args: + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, LlamaForCausalLM + + >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS) + >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER) + + >>> prompt = "Hey, are you conscious? Can you talk to me?" + >>> inputs = tokenizer(prompt, return_tensors="pt") + + >>> # Generate + >>> generate_ids = model.generate(inputs.input_ids, max_length=30) + >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] + "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." + ```""" + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + + if self.config.pretraining_tp > 1: + lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0) + logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)] + logits = torch.cat(logits, dim=-1) + else: + logits = self.lm_head(hidden_states) + logits = logits.float() + + loss = None + if labels is not None: + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss(reduction=reduction) + shift_logits = shift_logits.view(-1, self.config.vocab_size) + shift_labels = shift_labels.view(-1) + # Enable model parallelism + shift_labels = shift_labels.to(shift_logits.device) + loss = loss_fct(shift_logits, shift_labels) + if reduction == "none": + loss = loss.view(logits.size(0), -1).mean(1) + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + 
attentions=outputs.attentions, + ) diff --git a/models/backbones/modeling_mistral.py b/models/backbones/modeling_mistral.py new file mode 100644 index 0000000..3a98c7d --- /dev/null +++ b/models/backbones/modeling_mistral.py @@ -0,0 +1,1388 @@ +# coding=utf-8 +# Copyright 2023 Mistral AI and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch Mistral model.""" +import inspect +import math +import warnings +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from transformers.activations import ACT2FN +from transformers.cache_utils import Cache, DynamicCache +from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa +from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast +from transformers.modeling_utils import PreTrainedModel +from transformers.utils import ( + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_flash_attn_2_available, + is_flash_attn_greater_or_equal_2_10, + logging, + replace_return_docstrings, +) +from transformers.models.mistral.configuration_mistral import MistralConfig + + +if is_flash_attn_2_available(): + from flash_attn import flash_attn_func, flash_attn_varlen_func + from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa + + _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters) + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "MistralConfig" + + +# Copied from transformers.models.llama.modeling_llama._get_unpad_data +def _get_unpad_data(attention_mask): + seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) + indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() + max_seqlen_in_batch = seqlens_in_batch.max().item() + cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)) + return ( + indices, + cu_seqlens, + max_seqlen_in_batch, + ) + + +# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Mistral +class MistralRMSNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-6): + """ + MistralRMSNorm is equivalent to T5LayerNorm + """ + super().__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.variance_epsilon = eps + + def forward(self, hidden_states): + input_dtype = hidden_states.dtype + hidden_states = hidden_states.to(torch.float32) + variance = hidden_states.pow(2).mean(-1, keepdim=True) + hidden_states 
= hidden_states * torch.rsqrt(variance + self.variance_epsilon) + return self.weight * hidden_states.to(input_dtype) + + +# copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with Llama->Mistral +# TODO @Arthur no longer copied from LLama after static cache +class MistralRotaryEmbedding(nn.Module): + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): + super().__init__() + + self.dim = dim + self.max_position_embeddings = max_position_embeddings + self.base = base + inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim)) + self.register_buffer("inv_freq", inv_freq, persistent=False) + + # Build here to make `torch.jit.trace` work. + self._set_cos_sin_cache( + seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype() + ) + + def _set_cos_sin_cache(self, seq_len, device, dtype): + self.max_seq_len_cached = seq_len + t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq) + + freqs = torch.outer(t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) + + def forward(self, x, seq_len=None): + # x: [bs, num_attention_heads, seq_len, head_size] + if seq_len > self.max_seq_len_cached: + self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) + + return ( + self.cos_cached[:seq_len].to(dtype=x.dtype), + self.sin_cached[:seq_len].to(dtype=x.dtype), + ) + + +# Copied from transformers.models.llama.modeling_llama.rotate_half +def rotate_half(x): + """Rotates half the hidden dims of the input.""" + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2 :] + return torch.cat((-x2, x1), dim=-1) + + +# copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb +# TODO @Arthur no longer copied from LLama after static cache +def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1): + """Applies Rotary Position Embedding to the query and key tensors. + + Args: + q (`torch.Tensor`): The query tensor. + k (`torch.Tensor`): The key tensor. + cos (`torch.Tensor`): The cosine part of the rotary embedding. + sin (`torch.Tensor`): The sine part of the rotary embedding. + position_ids (`torch.Tensor`): + The position indices of the tokens corresponding to the query and key tensors. For example, this can be + used to pass offsetted position ids when working with a KV-cache. + unsqueeze_dim (`int`, *optional*, defaults to 1): + The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and + sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note + that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and + k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes + cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have + the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. + Returns: + `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. 
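+        Example (shape sketch with illustrative sizes):
+            >>> q = k = torch.randn(2, 8, 16, 64)              # [batch, heads, seq_len, head_dim]
+            >>> cos = sin = torch.randn(32, 64)                # [max_seq_len, head_dim] caches
+            >>> position_ids = torch.arange(16).expand(2, 16)
+            >>> q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin, position_ids)
+            >>> tuple(q_rot.shape)
+            (2, 8, 16, 64)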
+ """ + cos = cos[position_ids].unsqueeze(unsqueeze_dim) + sin = sin[position_ids].unsqueeze(unsqueeze_dim) + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + return q_embed, k_embed + + +class MistralMLP(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.hidden_size = config.hidden_size + self.intermediate_size = config.intermediate_size + self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) + self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) + self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) + self.act_fn = ACT2FN[config.hidden_act] + + def forward(self, x): + return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) + + +# Copied from transformers.models.llama.modeling_llama.repeat_kv +def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: + """ + This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, + num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) + """ + batch, num_key_value_heads, slen, head_dim = hidden_states.shape + if n_rep == 1: + return hidden_states + hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) + return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) + + +class MistralAttention(nn.Module): + """ + Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer + and "Generating Long Sequences with Sparse Transformers". + """ + + def __init__(self, config: MistralConfig, layer_idx: Optional[int] = None): + super().__init__() + self.config = config + self.layer_idx = layer_idx + if layer_idx is None: + logger.warning_once( + f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will " + "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` " + "when creating this class." + ) + + self.hidden_size = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.hidden_size // self.num_heads + self.num_key_value_heads = config.num_key_value_heads + self.num_key_value_groups = self.num_heads // self.num_key_value_heads + self.max_position_embeddings = config.max_position_embeddings + self.rope_theta = config.rope_theta + self.is_causal = True + self.attention_dropout = config.attention_dropout + + if (self.head_dim * self.num_heads) != self.hidden_size: + raise ValueError( + f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" + f" and `num_heads`: {self.num_heads})." 
+ ) + self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) + self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False) + self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False) + self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) + + self.rotary_emb = MistralRotaryEmbedding( + self.head_dim, + max_position_embeddings=self.max_position_embeddings, + base=self.rope_theta, + ) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + **kwargs, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + if "padding_mask" in kwargs: + warnings.warn( + "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" + ) + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + if self.layer_idx is None: + raise ValueError( + f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} " + "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class " + "with a layer index." 
+ ) + kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) + cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) + + if past_key_value is not None: + cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + # repeat k/v heads if n_kv_heads < n_heads + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + + attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) + + if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): + raise ValueError( + f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" + ) + + attn_weights = attn_weights + attention_mask + + # upcast attention to fp32 + attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) + attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) + attn_output = torch.matmul(attn_weights, value_states) + + if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) + + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +class MistralFlashAttention2(MistralAttention): + """ + Mistral flash attention module. This module inherits from `MistralAttention` as the weights of the module stays + untouched. The only required change would be on the forward pass where it needs to correctly call the public API of + flash attention and deal with padding tokens in case the input contains any of them. + """ + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. + # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. + # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). + self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + **kwargs, + ): + if "padding_mask" in kwargs: + warnings.warn( + "Passing `padding_mask` is deprecated and will be removed in v4.37. 
Please make sure use `attention_mask` instead.`" + ) + + # overwrite attention_mask with padding_mask + attention_mask = kwargs.pop("padding_mask") + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + if self.layer_idx is None: + raise ValueError( + f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} " + "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class " + "with a layer index." + ) + kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) + + # Because the input can be padded, the absolute sequence length depends on the max position id. + rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1 + cos, sin = self.rotary_emb(value_states, seq_len=rotary_seq_len) + + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) + + use_sliding_windows = ( + _flash_supports_window_size + and getattr(self.config, "sliding_window", None) is not None + and kv_seq_len > self.config.sliding_window + ) + + if not _flash_supports_window_size: + logger.warning_once( + "The current flash attention version does not support sliding window attention, for a more memory efficient implementation" + " make sure to upgrade flash-attn library." + ) + + if past_key_value is not None: + # Activate slicing cache only if the config has a value `sliding_windows` attribute + cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0 + if ( + getattr(self.config, "sliding_window", None) is not None + and kv_seq_len > self.config.sliding_window + and cache_has_contents + ): + slicing_tokens = 1 - self.config.sliding_window + + past_key = past_key_value[self.layer_idx][0] + past_value = past_key_value[self.layer_idx][1] + + past_key = past_key[:, :, slicing_tokens:, :].contiguous() + past_value = past_value[:, :, slicing_tokens:, :].contiguous() + + if past_key.shape[-2] != self.config.sliding_window - 1: + raise ValueError( + f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got" + f" {past_key.shape}" + ) + + if attention_mask is not None: + attention_mask = attention_mask[:, slicing_tokens:] + attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1) + + cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + # repeat k/v heads if n_kv_heads < n_heads + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + dropout_rate = 0.0 if not self.training else self.attention_dropout + + # In PEFT, usually we cast the layer norms in float32 for training stability reasons + # therefore the input hidden states gets silently casted in float32. Hence, we need + # cast them back in float16 just to be sure everything works as expected. 
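+        # dtype resolution order below: the active autocast dtype, then the dtype the weights had
+        # before quantization (stored on the config), then the dtype of the q_proj weights.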
+ input_dtype = query_states.dtype + if input_dtype == torch.float32: + if torch.is_autocast_enabled(): + target_dtype = torch.get_autocast_gpu_dtype() + # Handle the case where the model is quantized + elif hasattr(self.config, "_pre_quantization_dtype"): + target_dtype = self.config._pre_quantization_dtype + else: + target_dtype = self.q_proj.weight.dtype + + logger.warning_once( + f"The input hidden states seems to be silently casted in float32, this might be related to" + f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" + f" {target_dtype}." + ) + + query_states = query_states.to(target_dtype) + key_states = key_states.to(target_dtype) + value_states = value_states.to(target_dtype) + + # Reashape to the expected shape for Flash Attention + query_states = query_states.transpose(1, 2) + key_states = key_states.transpose(1, 2) + value_states = value_states.transpose(1, 2) + + attn_output = self._flash_attention_forward( + query_states, + key_states, + value_states, + attention_mask, + q_len, + dropout=dropout_rate, + use_sliding_windows=use_sliding_windows, + ) + + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous() + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + def _flash_attention_forward( + self, + query_states, + key_states, + value_states, + attention_mask, + query_length, + dropout=0.0, + softmax_scale=None, + use_sliding_windows=False, + ): + """ + Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token + first unpad the input, then computes the attention scores and pad the final attention scores. + + Args: + query_states (`torch.Tensor`): + Input query states to be passed to Flash Attention API + key_states (`torch.Tensor`): + Input key states to be passed to Flash Attention API + value_states (`torch.Tensor`): + Input value states to be passed to Flash Attention API + attention_mask (`torch.Tensor`): + The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the + position of padding tokens and 1 for the position of non-padding tokens. + dropout (`int`, *optional*): + Attention dropout + softmax_scale (`float`, *optional*): + The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim) + use_sliding_windows (`bool`, *optional*): + Whether to activate sliding window attention. + """ + if not self._flash_attn_uses_top_left_mask: + causal = self.is_causal + else: + # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__. 
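+            # flash_attn<2.1 aligns the causal mask to the top-left rather than the bottom-right corner,
+            # which is wrong whenever q_seqlen != k_seqlen; disabling `causal` for single-token decoding
+            # (query_length == 1) is safe, since a one-row mask is causal either way.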
+ causal = self.is_causal and query_length != 1 + + # Contains at least one padding token in the sequence + if attention_mask is not None: + batch_size = query_states.shape[0] + query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( + query_states, key_states, value_states, attention_mask, query_length + ) + + cu_seqlens_q, cu_seqlens_k = cu_seq_lens + max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens + + if not use_sliding_windows: + attn_output_unpad = flash_attn_varlen_func( + query_states, + key_states, + value_states, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_in_batch_q, + max_seqlen_k=max_seqlen_in_batch_k, + dropout_p=dropout, + softmax_scale=softmax_scale, + causal=causal, + ) + else: + attn_output_unpad = flash_attn_varlen_func( + query_states, + key_states, + value_states, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_in_batch_q, + max_seqlen_k=max_seqlen_in_batch_k, + dropout_p=dropout, + softmax_scale=softmax_scale, + causal=causal, + window_size=(self.config.sliding_window, self.config.sliding_window), + ) + + attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) + else: + if not use_sliding_windows: + attn_output = flash_attn_func( + query_states, + key_states, + value_states, + dropout, + softmax_scale=softmax_scale, + causal=causal, + ) + else: + attn_output = flash_attn_func( + query_states, + key_states, + value_states, + dropout, + softmax_scale=softmax_scale, + causal=causal, + window_size=(self.config.sliding_window, self.config.sliding_window), + ) + + return attn_output + + def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): + batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape + + # On the first iteration we need to properly re-create the padding mask + # by slicing it on the proper place + if kv_seq_len != attention_mask.shape[-1]: + attention_mask_num_tokens = attention_mask.shape[-1] + attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :] + + indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) + + key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k) + value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k) + + if query_length == kv_seq_len: + query_layer = index_first_axis( + query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k + ) + cu_seqlens_q = cu_seqlens_k + max_seqlen_in_batch_q = max_seqlen_in_batch_k + indices_q = indices_k + elif query_length == 1: + max_seqlen_in_batch_q = 1 + cu_seqlens_q = torch.arange( + batch_size + 1, dtype=torch.int32, device=query_layer.device + ) # There is a memcpy here, that is very bad. + indices_q = cu_seqlens_q[:-1] + query_layer = query_layer.squeeze(1) + else: + # The -q_len: slice assumes left padding. 
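+            # e.g. attention_mask = [[0, 0, 1, 1, 1]] with query_length == 3 keeps [[1, 1, 1]];
+            # right-padded inputs would lose real tokens here, hence the left-padding assumption above.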
+ attention_mask = attention_mask[:, -query_length:] + query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) + + return ( + query_layer, + key_layer, + value_layer, + indices_q, + (cu_seqlens_q, cu_seqlens_k), + (max_seqlen_in_batch_q, max_seqlen_in_batch_k), + ) + + +# copied from transformers.models.llama.modeling_llama.LlamaSdpaAttention with Llama->Mistral +# TODO @Arthur no longer copied from LLama after static cache +class MistralSdpaAttention(MistralAttention): + """ + Mistral attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from + `MistralAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to + SDPA API. + """ + + # Adapted from MistralAttention.forward + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + if output_attentions: + # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented. + logger.warning_once( + "MistralModel is using MistralSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, " + 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' + ) + return super().forward( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) + cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) + + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) + + if past_key_value is not None: + cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" + ) + + # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask, + # Reference: https://github.com/pytorch/pytorch/issues/112577. 
+ if query_states.device.type == "cuda" and attention_mask is not None: + query_states = query_states.contiguous() + key_states = key_states.contiguous() + value_states = value_states.contiguous() + + attn_output = torch.nn.functional.scaled_dot_product_attention( + query_states, + key_states, + value_states, + attn_mask=attention_mask, + dropout_p=self.attention_dropout if self.training else 0.0, + # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1. + is_causal=self.is_causal and attention_mask is None and q_len > 1, + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + attn_output = attn_output.view(bsz, q_len, self.hidden_size) + + attn_output = self.o_proj(attn_output) + + return attn_output, None, past_key_value + + +MISTRAL_ATTENTION_CLASSES = { + "eager": MistralAttention, + "flash_attention_2": MistralFlashAttention2, + "sdpa": MistralSdpaAttention, +} + + +class MistralDecoderLayer(nn.Module): + def __init__(self, config: MistralConfig, layer_idx: int): + super().__init__() + self.hidden_size = config.hidden_size + + self.self_attn = MISTRAL_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx) + + self.mlp = MistralMLP(config) + self.input_layernorm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + self.post_attention_layernorm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + **kwargs, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + if "padding_mask" in kwargs: + warnings.warn( + "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" + ) + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`, *optional*): attention mask of size + `(batch, sequence_length)` where padding elements are indicated by 0. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). 
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states + """ + + residual = hidden_states + + hidden_states = self.input_layernorm(hidden_states) + + # Self Attention + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + hidden_states = residual + hidden_states + + # Fully Connected + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +MISTRAL_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`MistralConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + + +@add_start_docstrings( + "The bare Mistral Model outputting raw hidden-states without any specific head on top.", + MISTRAL_START_DOCSTRING, +) +class MistralPreTrainedModel(PreTrainedModel): + config_class = MistralConfig + base_model_prefix = "model" + supports_gradient_checkpointing = True + _no_split_modules = ["MistralDecoderLayer"] + _skip_keys_device_placement = "past_key_values" + _supports_flash_attn_2 = True + _supports_sdpa = True + _supports_cache_class = True + + def _init_weights(self, module): + std = self.config.initializer_range + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + +MISTRAL_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. 
+ + If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see + `past_key_values`). + + If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] + and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more + information on the default strategy. + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.n_positions - 1]`. + + [What are position IDs?](../glossary#position-ids) + past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*): + Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values` + returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. + + Two formats are allowed: + - a [`~cache_utils.Cache`] instance; + - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of + shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy + cache format. + + The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the + legacy cache format will be returned. + + If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't + have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` + of shape `(batch_size, sequence_length)`. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare Mistral Model outputting raw hidden-states without any specific head on top.", + MISTRAL_START_DOCSTRING, +) +class MistralModel(MistralPreTrainedModel): + """ + Transformer decoder consisting of *config.num_hidden_layers* layers. 
Each layer is a [`MistralDecoderLayer`] + + Args: + config: MistralConfig + """ + + def __init__(self, config: MistralConfig): + super().__init__(config) + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + + self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) + self.layers = nn.ModuleList( + [MistralDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] + ) + self._attn_implementation = config._attn_implementation + self.norm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") + elif input_ids is not None: + batch_size, seq_length = input_ids.shape + elif inputs_embeds is not None: + batch_size, seq_length, _ = inputs_embeds.shape + else: + raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
+ ) + use_cache = False + + past_key_values_length = 0 + + if use_cache: + use_legacy_cache = not isinstance(past_key_values, Cache) + if use_legacy_cache: + past_key_values = DynamicCache.from_legacy_cache(past_key_values) + past_key_values_length = past_key_values.get_usable_length(seq_length) + + if position_ids is None: + device = input_ids.device if input_ids is not None else inputs_embeds.device + position_ids = torch.arange( + past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device + ) + position_ids = position_ids.unsqueeze(0).view(-1, seq_length) + else: + position_ids = position_ids.view(-1, seq_length).long() + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) + + if attention_mask is not None and self._attn_implementation == "flash_attention_2" and use_cache: + is_padding_right = attention_mask[:, -1].sum().item() != batch_size + if is_padding_right: + raise ValueError( + "You are attempting to perform batched generation with padding_side='right'" + " this may lead to unexpected behaviour for Flash Attention version of Mistral. Make sure to " + " call `tokenizer.padding_side = 'left'` before tokenizing the input. " + ) + + if self._attn_implementation == "flash_attention_2": + # 2d mask is passed through the layers + attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None + elif self._attn_implementation == "sdpa" and not output_attentions: + # output_attentions=True can not be supported when using SDPA, and we fall back on + # the manual implementation that requires a 4D causal mask in all cases. + attention_mask = _prepare_4d_causal_attention_mask_for_sdpa( + attention_mask, + (batch_size, seq_length), + inputs_embeds, + past_key_values_length, + ) + else: + # 4d mask is passed through the layers + attention_mask = _prepare_4d_causal_attention_mask( + attention_mask, + (batch_size, seq_length), + inputs_embeds, + past_key_values_length, + sliding_window=self.config.sliding_window, + ) + + hidden_states = inputs_embeds + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + next_decoder_cache = None + + for decoder_layer in self.layers: + if output_hidden_states: + all_hidden_states += (hidden_states,) + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + decoder_layer.__call__, + hidden_states, + attention_mask, + position_ids, + past_key_values, + output_attentions, + use_cache, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_values, + output_attentions=output_attentions, + use_cache=use_cache, + ) + + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache = layer_outputs[2 if output_attentions else 1] + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + hidden_states = self.norm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = None + if use_cache: + next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache + + if not return_dict: + return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + 
attentions=all_self_attns, + ) + + +class MistralForCausalLM(MistralPreTrainedModel): + _tied_weights_keys = ["lm_head.weight"] + + def __init__(self, config): + super().__init__(config) + self.model = MistralModel(config) + self.vocab_size = config.vocab_size + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def set_decoder(self, decoder): + self.model = decoder + + def get_decoder(self): + return self.model + + @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + reduction: Optional[str] = "mean", + ) -> Union[Tuple, CausalLMOutputWithPast]: + r""" + Args: + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, MistralForCausalLM + + >>> model = MistralForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1") + >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1") + + >>> prompt = "Hey, are you conscious? Can you talk to me?" + >>> inputs = tokenizer(prompt, return_tensors="pt") + + >>> # Generate + >>> generate_ids = model.generate(inputs.input_ids, max_length=30) + >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] + "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." 
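+
+        >>> # The `reduction` kwarg is specific to this fork's `forward` (see the signature
+        >>> # above, not part of the upstream Mistral API); `reduction="none"` returns one
+        >>> # loss per sample instead of a scalar mean -- a minimal sketch of reading out
+        >>> # per-sample LM losses:
+        >>> out = model(inputs.input_ids, labels=inputs.input_ids, reduction="none")
+        >>> out.loss.shape
+        torch.Size([1])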
+ ```""" + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + logits = self.lm_head(hidden_states) + logits = logits.float() + + loss = None + if labels is not None: + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss(reduction=reduction) + shift_logits = shift_logits.view(-1, self.config.vocab_size) + shift_labels = shift_labels.view(-1) + # Enable model parallelism + shift_labels = shift_labels.to(shift_logits.device) + loss = loss_fct(shift_logits, shift_labels) + if reduction == "none": + loss = loss.view(logits.size(0), -1).mean(1) + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def prepare_inputs_for_generation( + self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs + ): + # Omit tokens covered by past_key_values + if past_key_values is not None: + if isinstance(past_key_values, Cache): + cache_length = past_key_values.get_seq_length() + past_length = past_key_values.seen_tokens + max_cache_length = past_key_values.get_max_length() + else: + cache_length = past_length = past_key_values[0][0].shape[2] + max_cache_length = None + + # Keep only the unprocessed tokens: + # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where + # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as + # input) + if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]: + input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :] + # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard + # input_ids based on the past_length. + elif past_length < input_ids.shape[1]: + input_ids = input_ids[:, past_length:] + # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens. + + # If we are about to go beyond the maximum cache length, we need to crop the input attention mask. 
+ if ( + max_cache_length is not None + and attention_mask is not None + and cache_length + input_ids.shape[1] > max_cache_length + ): + attention_mask = attention_mask[:, -max_cache_length:] + + position_ids = kwargs.get("position_ids", None) + if attention_mask is not None and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + if past_key_values: + position_ids = position_ids[:, -input_ids.shape[1] :] + + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and past_key_values is None: + model_inputs = {"inputs_embeds": inputs_embeds} + else: + model_inputs = {"input_ids": input_ids} + + model_inputs.update( + { + "position_ids": position_ids, + "past_key_values": past_key_values, + "use_cache": kwargs.get("use_cache"), + "attention_mask": attention_mask, + } + ) + return model_inputs + + @staticmethod + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += ( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), + ) + return reordered_past + + +@add_start_docstrings( + """ + The Mistral Model transformer with a sequence classification head on top (linear layer). + + [`MistralForSequenceClassification`] uses the last token in order to do the classification, as other causal models + (e.g. GPT-2) do. + + Since it does classification on the last token, it requires to know the position of the last token. If a + `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If + no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the + padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in + each row of the batch). + """, + MISTRAL_START_DOCSTRING, +) +# Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with Llama->Mistral, LLAMA->MISTRAL +class MistralForSequenceClassification(MistralPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.model = MistralModel(config) + self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SequenceClassifierOutputWithPast]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. 
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + transformer_outputs = self.model( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = transformer_outputs[0] + logits = self.score(hidden_states) + + if input_ids is not None: + batch_size = input_ids.shape[0] + else: + batch_size = inputs_embeds.shape[0] + + if self.config.pad_token_id is None and batch_size != 1: + raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") + if self.config.pad_token_id is None: + sequence_lengths = -1 + else: + if input_ids is not None: + # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility + sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 + sequence_lengths = sequence_lengths % input_ids.shape[-1] + sequence_lengths = sequence_lengths.to(logits.device) + else: + sequence_lengths = -1 + + pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] + + loss = None + if labels is not None: + labels = labels.to(logits.device) + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(pooled_logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(pooled_logits, labels) + if not return_dict: + output = (pooled_logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutputWithPast( + loss=loss, + logits=pooled_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) \ No newline at end of file diff --git a/models/backbones/moes.py b/models/backbones/moes.py new file mode 100644 index 0000000..3f0914b --- /dev/null +++ b/models/backbones/moes.py @@ -0,0 +1,287 @@ +import torch +import torch.nn as nn +from transformers.models.llama.modeling_llama import LlamaRMSNorm +from timm.models.layers import DropPath + +class Mlp(nn.Module): + def __init__( + self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.0, + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = 
self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(nn.Module): + def __init__( + self, + dim, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop=0.0, + proj_drop=0.0, + ): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights + self.scale = qk_scale or head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x, mask=None): + B, N, C = x.shape + qkv = ( + self.qkv(x) + .reshape(B, N, 3, self.num_heads, C // self.num_heads) + .permute(2, 0, 3, 1, 4) + ) + q, k, v = ( + qkv[0], + qkv[1], + qkv[2], + ) # make torchscript happy (cannot use tensor as tuple) + + attn = (q @ k.transpose(-2, -1)) * self.scale + if mask is not None: + # if mask.dim() != x.dim(): + # expanded_mask = mask[:, None, None, :].expand(B, 1, N, N) + # else: + # expanded_mask = mask + mask = mask.bool() + attn = attn.masked_fill(~mask, float("-inf")) + + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x, attn + + +class MoELayer(nn.Module): + def __init__( + self, + dim, + num_heads, + expert_type, + use_sep_spatial_temp_experts=True, + has_hist=False, + mlp_ratio=4.0, + qkv_bias=False, + qk_scale=None, + drop=0.0, + attn_drop=0.0, + drop_path=0.0, + act_layer=nn.GELU, + norm_layer=LlamaRMSNorm, + ): + super().__init__() + self.has_hist = has_hist + self.use_sep_spatial_temp_experts = use_sep_spatial_temp_experts + self.norm_att = norm_layer(dim) + self.attn = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop, + ) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + + mlp_hidden_dim = int(dim * mlp_ratio) + + if expert_type == 'modalities': + # EXPERT CONSTRUCTION + if use_sep_spatial_temp_experts: + # Spatial expert + self.norm_spatial = norm_layer(dim) + self.mlp_spatial = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop, + ) + + # Temporal expert + self.norm_temp = norm_layer(dim) + self.mlp_temp = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop, + ) + + # Vis expert + self.norm_vis = norm_layer(dim) + self.mlp_vis = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop, + ) + + # caption expert + self.norm_cap = norm_layer(dim) + self.mlp_cap = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop, + ) + + # history expert + if has_hist: + self.norm_hist = norm_layer(dim) + self.mlp_hist = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop, + ) + + elif expert_type == 'fusion': + # Fusion expert + self.norm_fusion = norm_layer(dim) + self.mlp_fusion = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop, + ) + else: + raise ValueError + + + def forward(self, x, vis_feat_len, cap_feat_len, expert_flag, hist_feat_len=None, is_vid=False, mask=None, only_text=False, expert_permutation=None): + + if self.has_hist: + assert hist_feat_len is not None + + x_shortcut, 
attn = self.attn(self.norm_att(x), mask=mask) + x = x + self.drop_path(x_shortcut) + len_init = x.size(1) + # bs, h_dim = x.size(0), x.size(-1) + # device = x.device + # if only_text: + # # end_idx_caption = special_toks_indices.get('', special_toks_indices[''] + 1) + # # x = x[:, special_toks_indices['']: end_idx_caption, :] + # x = x + self.drop_path(self.mlp_cap(self.norm_cap(x))) + + if expert_flag == 'modalities': + if self.use_sep_spatial_temp_experts: + x_spatial = x[:, :vis_feat_len] + if expert_permutation is not None: + if expert_permutation['spatial'] == 'temporal': + x_spatial = x_spatial + self.drop_path(self.mlp_temp(self.norm_temp(x_spatial))) + elif expert_permutation['spatial'] == 'caption': + x_spatial = x_spatial + self.drop_path(self.mlp_cap(self.norm_cap(x_spatial))) + elif expert_permutation['spatial'] == 'history': + x_spatial = x_spatial + self.drop_path(self.mlp_hist(self.norm_hist(x_spatial))) + elif expert_permutation['spatial'] == 'spatial': + x_spatial = x_spatial + self.drop_path(self.mlp_spatial(self.norm_spatial(x_spatial))) + x_vis = x_spatial + + else: + x_spatial = x_spatial + self.drop_path(self.mlp_spatial(self.norm_spatial(x_spatial))) + x_vis = x_spatial + + if is_vid: + x_temporal = x[:, vis_feat_len:2*vis_feat_len] + if expert_permutation is not None: + if expert_permutation['temporal'] == 'spatial': + x_temporal = x_temporal + self.drop_path(self.mlp_spatial(self.norm_spatial(x_temporal))) + elif expert_permutation['temporal'] == 'caption': + x_temporal = x_temporal + self.drop_path(self.mlp_cap(self.norm_cap(x_temporal))) + elif expert_permutation['temporal'] == 'history': + x_temporal = x_temporal + self.drop_path(self.mlp_hist(self.norm_hist(x_temporal))) + elif expert_permutation['temporal'] == 'temporal': + x_temporal = x_temporal + self.drop_path(self.mlp_temp(self.norm_temp(x_temporal))) + else: + x_temporal = x_temporal + self.drop_path(self.mlp_temp(self.norm_temp(x_temporal))) + x_vis = torch.concat([x_spatial, x_temporal], dim=1) + x_vis = x_vis + self.drop_path(self.mlp_vis(self.norm_vis(x_vis))) + else: + x_vis = x[:, :vis_feat_len] + x_vis = x_vis + self.drop_path(self.mlp_vis(self.norm_vis(x_vis))) + + if self.has_hist: + x_caption = x[:, -(cap_feat_len + hist_feat_len): -hist_feat_len] + if expert_permutation is not None: + if expert_permutation['caption'] == 'spatial': + x_caption = x_caption + self.drop_path(self.mlp_spatial(self.norm_spatial(x_caption))) + elif expert_permutation['caption'] == 'temporal': + x_caption = x_caption + self.drop_path(self.mlp_temp(self.norm_temp(x_caption))) + elif expert_permutation['caption'] == 'history': + x_caption = x_caption + self.drop_path(self.mlp_hist(self.norm_hist(x_caption))) + elif expert_permutation['caption'] == 'caption': + x_caption = x_caption + self.drop_path(self.mlp_cap(self.norm_cap(x_caption))) + else: + x_caption = x_caption + self.drop_path(self.mlp_cap(self.norm_cap(x_caption))) + + + x_history = x[:, -hist_feat_len:] + if expert_permutation is not None: + if expert_permutation['history'] == 'spatial': + x_history = x_history + self.drop_path(self.mlp_spatial(self.norm_spatial(x_history))) + elif expert_permutation['history'] == 'temporal': + x_history = x_history + self.drop_path(self.mlp_temp(self.norm_temp(x_history))) + elif expert_permutation['history'] == 'caption': + x_history = x_history + self.drop_path(self.mlp_cap(self.norm_cap(x_history))) + elif expert_permutation['history'] == 'history': + x_history = x_history + 
self.drop_path(self.mlp_hist(self.norm_hist(x_history))) + else: + x_history = x_history + self.drop_path(self.mlp_hist(self.norm_hist(x_history))) + # concat the features back + x = torch.cat([x_vis, x_caption, x_history], dim=1) + else: + x_caption = x[:, -cap_feat_len:] + x_caption = x_caption + self.drop_path(self.mlp_cap(self.norm_cap(x_caption))) + x = torch.cat([x_vis, x_caption], dim=1) + + assert x.size(1) == len_init, 'Reconstructed features length is {} != original features len = {}'.format( + x.size(1), len_init + ) + + elif expert_flag == 'fusion': + x = x + self.drop_path(self.mlp_fusion(self.norm_fusion(x))) + + return x + + +class Pooler(nn.Module): + def __init__(self, hidden_size): + super(Pooler, self).__init__() + + self.dense = nn.Linear(hidden_size, hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states): + pooled_states = hidden_states[:, 0] + pooled_output = self.dense(pooled_states) + pooled_output = self.activation(pooled_output) + return pooled_output \ No newline at end of file diff --git a/models/backbones/moes_huggingface.py b/models/backbones/moes_huggingface.py new file mode 100644 index 0000000..d3e6e45 --- /dev/null +++ b/models/backbones/moes_huggingface.py @@ -0,0 +1,234 @@ +import torch +import torch.nn as nn +from transformers.models.llama.modeling_llama import LlamaRMSNorm +from timm.models.layers import DropPath +import warnings +from torch import Tensor +from typing import Optional, Tuple + + +from .bert.xbert import BertLayer, BertAttention, BertIntermediate, BertOutput, BertConfig + +class MoELayer(nn.Module): + def __init__(self, config, expert_type): + super(MoELayer, self).__init__() + self.config = config + self.expert_type = expert_type + self.bert_config = BertConfig.from_pretrained('bert-large-uncased') + + # Shared across all experts + self.attention = BertAttention(self.bert_config) + + # One for each expert + if expert_type == 'modalities': + # Spatial expert + self.intermediate_spatial = BertIntermediate(self.bert_config) + self.output_spatial = BertOutput(self.bert_config) + + # Temporal expert + self.intermediate_temporal = BertIntermediate(self.bert_config) + self.output_temporal = BertOutput(self.bert_config) + + # Vis Expert + self.intermediate_vis = BertIntermediate(self.bert_config) + self.output_vis = BertOutput(self.bert_config) + + # Caption Expert + self.intermediate_caption = BertIntermediate(self.bert_config) + self.output_caption = BertOutput(self.bert_config) + + if config.stage != 'stage_1': + # History Expert + self.intermediate_history = BertIntermediate(self.bert_config) + self.output_history = BertOutput(self.bert_config) + + # Fusion expert + elif expert_type == 'fusion': + self.intermediate_fusion = BertIntermediate(self.bert_config) + self.output_fusion = BertOutput(self.bert_config) + else: + raise ValueError + + self._init_weights() + + def _init_weights(self): + for _, m in dict(self.named_modules()).items(): + if isinstance(m, (nn.Linear, nn.Embedding)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + m.weight.data.normal_(mean=0.0, std=self.bert_config.initializer_range) + elif isinstance(m, nn.LayerNorm): + m.bias.data.zero_() + m.weight.data.fill_(1.0) + if isinstance(m, nn.Linear) and m.bias is not None: + m.bias.data.zero_() + + + def get_extended_attention_mask( + self, attention_mask: Tensor, input_shape: Tuple[int], device: torch.device = None, dtype: torch.float = None + ) -> Tensor: 
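+        # (trimmed-down copy of `ModuleUtilsMixin.get_extended_attention_mask` from
+        # Hugging Face transformers; the causal-decoder branch is left commented out
+        # below because these MoE layers only need a broadcastable padding mask)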
+ """ + Makes broadcastable attention and causal masks so that future and masked tokens are ignored. + + Arguments: + attention_mask (`torch.Tensor`): + Mask with ones indicating tokens to attend to, zeros for tokens to ignore. + input_shape (`Tuple[int]`): + The shape of the input to the model. + + Returns: + `torch.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`. + """ + if dtype is None: + dtype = self.dtype + + if not (attention_mask.dim() == 2 and self.bert_config.is_decoder): + # show warning only if it won't be shown in `create_extended_attention_mask_for_decoder` + if device is not None: + warnings.warn( + "The `device` argument is deprecated and will be removed in v5 of Transformers.", FutureWarning + ) + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. + if attention_mask.dim() == 3: + extended_attention_mask = attention_mask[:, None, :, :] + elif attention_mask.dim() == 2: + # Provided a padding mask of dimensions [batch_size, seq_length] + # - if the model is a decoder, apply a causal mask in addition to the padding mask + # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] + # if self.config.is_decoder: + # extended_attention_mask = ModuleUtilsMixin.create_extended_attention_mask_for_decoder( + # input_shape, attention_mask, device + # ) + # else: + extended_attention_mask = attention_mask[:, None, None, :] + else: + raise ValueError( + f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})" + ) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and the dtype's smallest value for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. 
+ extended_attention_mask = extended_attention_mask.to(dtype=dtype) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(dtype).min + return extended_attention_mask + + + def forward(self, hidden_states, special_toks_indices, expert_flag, mask=None, only_text=False, output_attentions=False): + + input_shape = hidden_states.size()[:-1] + # dtype = mask.dtype + # device = mask.device + extended_attention_mask = self.get_extended_attention_mask(mask, input_shape, dtype=torch.float32) + + self_attention_outputs = self.attention( + hidden_states, + attention_mask=extended_attention_mask, + output_attentions=output_attentions, + head_mask=None + ) + attention_output = self_attention_outputs[0] + # outputs = self_attention_outputs[1:] + + len_init = attention_output.size(1) + # bs, h_dim = x.size(0), x.size(-1) + # device = x.device + + + if expert_flag == 'modalities': + if only_text: + intermediate_output = self.intermediate_caption(attention_output) + layer_output = self.output_caption(intermediate_output, attention_output) + else: + # split the input first into different parts/modalities + unchanged = attention_output[:, :special_toks_indices[''], :] + end_idx_spatial = special_toks_indices.get('', special_toks_indices['']) + attention_spatial = attention_output[:, special_toks_indices['']:end_idx_spatial, :] + + end_idx_caption = special_toks_indices.get('', special_toks_indices[''] + 1) + attention_caption = attention_output[:, special_toks_indices['']: end_idx_caption, :] + + attention_temporal, attention_history = None, None + + if '' in special_toks_indices: + end_idx_temporal = special_toks_indices[''] + attention_temporal = attention_output[:, special_toks_indices['']:end_idx_temporal, :] + + if '' in special_toks_indices: + end_idx_history = special_toks_indices[''] + 1 + attention_history = attention_output[:, special_toks_indices['']:end_idx_history, :] + + # Expert activation + # 1- Spatial + intermediate_spatial = self.intermediate_spatial(attention_spatial) + output_sapatial = self.output_spatial(intermediate_spatial, attention_spatial) + + output_vis = output_sapatial + + # 2- Temporal + if attention_temporal is not None: + intermediate_temporal = self.intermediate_temporal(attention_temporal) + output_temporal = self.output_temporal(intermediate_temporal, attention_temporal) + + attention_vis = torch.concat([output_sapatial, output_temporal], dim=1) + intermediate_vis = self.intermediate_vis(attention_vis) + output_vis = self.output_vis(intermediate_vis, attention_vis) + + # 3- Caption + intermediate_caption = self.intermediate_caption(attention_caption) + output_caption = self.output_caption(intermediate_caption, attention_caption) + + # 4- History + if attention_history is not None: + intermediate_history = self.intermediate_history(attention_history) + output_history = self.output_history(intermediate_history, attention_history) + + output_list = [unchanged, output_vis, output_caption] + + if attention_history is not None: + output_list.append(output_history) + + # Concat the features back + layer_output = torch.concat(output_list, dim=1) + assert layer_output.size(1) == len_init, 'Reconstructed features length is {} != original features len = {}'.format( + layer_output.size(1), len_init + ) + + elif expert_flag == 'fusion': + intermediate_output = self.intermediate_fusion(attention_output) + layer_output = self.output_fusion(intermediate_output, attention_output) + + return layer_output + + +class MoEPooler(nn.Module): + def 
__init__(self): + super(MoEPooler, self).__init__() + + self.bert_config = BertConfig.from_pretrained('bert-large-uncased') + hidden_size = self.bert_config.hidden_size + + self.dense = nn.Linear(hidden_size, hidden_size) + self.activation = nn.Tanh() + self._init_weights() + + def _init_weights(self): + for _, m in dict(self.named_modules()).items(): + if isinstance(m, (nn.Linear, nn.Embedding)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + m.weight.data.normal_(mean=0.0, std=self.bert_config.initializer_range) + elif isinstance(m, nn.LayerNorm): + m.bias.data.zero_() + m.weight.data.fill_(1.0) + if isinstance(m, nn.Linear) and m.bias is not None: + m.bias.data.zero_() + + def forward(self, hidden_states, idx): + pooled_states = hidden_states[:, idx] + pooled_output = self.dense(pooled_states) + pooled_output = self.activation(pooled_output) + return pooled_output diff --git a/models/backbones/moes_original.py b/models/backbones/moes_original.py new file mode 100644 index 0000000..6f7c737 --- /dev/null +++ b/models/backbones/moes_original.py @@ -0,0 +1,247 @@ +import torch +import torch.nn as nn +from transformers.models.llama.modeling_llama import LlamaRMSNorm +from timm.models.layers import DropPath + +class Mlp(nn.Module): + def __init__( + self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.0, + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(nn.Module): + def __init__( + self, + dim, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop=0.0, + proj_drop=0.0, + ): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights + self.scale = qk_scale or head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x, mask=None): + B, N, C = x.shape + qkv = ( + self.qkv(x) + .reshape(B, N, 3, self.num_heads, C // self.num_heads) + .permute(2, 0, 3, 1, 4) + ) + q, k, v = ( + qkv[0], + qkv[1], + qkv[2], + ) # make torchscript happy (cannot use tensor as tuple) + + attn = (q @ k.transpose(-2, -1)) * self.scale + if mask is not None: + if mask.dim() != x.dim(): + expanded_mask = mask[:, None, None, :].expand(B, 1, N, N) + else: + expanded_mask = mask + expanded_mask = expanded_mask.bool() + attn = attn.masked_fill(~expanded_mask, float("-inf")) + + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x, attn + + +class MoELayer(nn.Module): + def __init__( + self, + dim, + num_heads, + mlp_ratio=4.0, + qkv_bias=False, + qk_scale=None, + drop=0.0, + attn_drop=0.0, + drop_path=0.0, + act_layer=nn.SiLU, + norm_layer=LlamaRMSNorm, + ): + super().__init__() + self.norm_att = norm_layer(dim) + self.attn = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + 
attn_drop=attn_drop, + proj_drop=drop, + ) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + + # EXPERT CONSTRUCTION + mlp_hidden_dim = int(dim * mlp_ratio) + + + # Spatial expert + self.norm_spatial = norm_layer(dim) + self.mlp_spatial = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop, + ) + + # Temporal expert + self.norm_temp = norm_layer(dim) + self.mlp_temp = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop, + ) + + # Vis expert + self.norm_vis = norm_layer(dim) + self.mlp_vis = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop, + ) + + # caption expert + self.norm_cap = norm_layer(dim) + self.mlp_cap = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop, + ) + + # history expert + self.norm_hist = norm_layer(dim) + self.mlp_hist = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop, + ) + + # Fusion expert + self.norm_fusion = norm_layer(dim) + self.mlp_fusion = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop, + ) + + # expert_flag:{Only Text : 00 , Only Image : 01, Fusion : 10, Text & Image : 11} (BINARY) + + # expert_flag: + # 0: + + def forward(self, x, special_toks_indices, expert_flag, mask=None): + x_shortcut, attn = self.attn(self.norm_att(x), mask=mask) + x = x + self.drop_path(x_shortcut) + bs, h_dim = x.size(0), x.size(-1) + device = x.device + + if expert_flag == 'modalities': + end_index = special_toks_indices.get('', special_toks_indices['']) + spatial_feats = x[:, special_toks_indices['']: end_index, :] + spatial_feats = spatial_feats + self.drop_path(self.mlp_spatial(self.norm_spatial(spatial_feats))) + spatial_index = torch.arange(special_toks_indices[''], end_index, device=device) + spatial_index = spatial_index.unsqueeze(0).unsqueeze(-1) + spatial_index = spatial_index.repeat(bs, 1, h_dim) + x = x.scatter(1, spatial_index, spatial_feats) + # x[:, special_toks_indices['']: special_toks_indices[''], :] = spatial_feats + + end_index = special_toks_indices.get('', special_toks_indices['']) + caption_feats = x[:, special_toks_indices['']: end_index, :] + caption_feats = caption_feats + self.drop_path(self.mlp_cap(self.norm_cap(caption_feats))) + caption_index = torch.arange(special_toks_indices[''], end_index, device=device) + caption_index = caption_index.unsqueeze(0).unsqueeze(-1) + caption_index = caption_index.repeat(bs, 1, h_dim) + x = x.scatter(1, caption_index, caption_feats) + + # x[:, special_toks_indices['']: special_toks_indices[''], :] = caption_feats + + if '' in special_toks_indices: + temporal_feats = x[:, special_toks_indices['']: special_toks_indices[''], :] + temporal_feats = temporal_feats + self.drop_path(self.mlp_temp(self.norm_temp(temporal_feats))) + temporal_index = torch.arange(special_toks_indices[''], special_toks_indices[''], device=device) + temporal_index = temporal_index.unsqueeze(0).unsqueeze(-1) + temporal_index = temporal_index.repeat(bs, 1, h_dim) + x = x.scatter(1, temporal_index, temporal_feats) + + # x[:, special_toks_indices['']: special_toks_indices[''], :] = temporal_feats + + vis_feats = x[:, special_toks_indices['']: special_toks_indices[''], :] + vis_feats = vis_feats + self.drop_path(self.mlp_vis(self.norm_vis(vis_feats))) + vis_index = 
torch.arange(special_toks_indices[''], special_toks_indices[''], device=device) + vis_index = vis_index.unsqueeze(0).unsqueeze(-1) + vis_index = vis_index.repeat(bs, 1, h_dim) + x = x.scatter(1, vis_index, vis_feats) + + # x[:, special_toks_indices['']: special_toks_indices[''], :] = vis_feats + + if '' in special_toks_indices: + history_feats = x[:, special_toks_indices['']: special_toks_indices[''], :] + history_feats = history_feats + self.drop_path(self.mlp_hist(self.norm_hist(history_feats))) + history_index = torch.arange(special_toks_indices[''], special_toks_indices[''], device=device) + history_index = history_index.unsqueeze(0).unsqueeze(-1) + history_index = history_index.repeat(bs, 1, h_dim) + x = x.scatter(1, history_index, history_feats) + + elif expert_flag == 'fusion': + x = x + self.drop_path(self.mlp_fusion(self.norm_fusion(x))) + + return x, attn + + # if expert_flag == 2: + # x = x + self.drop_path(self.mlp(self.norm2(x))) + # elif expert_flag == 0: + # x = (x[:, -it_split:]) + # x = x + self.drop_path(self.sentence_mlp(self.sentence_norm(x))) + # elif expert_flag == 1: + # x = (x[:, :-it_split ]) + # x = x + self.drop_path(self.image_mlp(self.image_norm(x))) + # elif expert_flag == 3: + # text, image = (x[:, :it_split], x[:, it_split:],) + # text = text + self.drop_path(self.sentence_mlp(self.sentence_norm(text))) + # image = image + self.drop_path(self.image_mlp(self.image_norm(image))) + # x = torch.cat([text, image], dim=1) + # elif expert_flag == 4: + # x = x + self.drop_path(self.generation_mlp(self.generation_norm(x))) + # return x, attn \ No newline at end of file diff --git a/models/common/__init__.py b/models/common/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/models/common/config.py b/models/common/config.py new file mode 100755 index 0000000..0d092a3 --- /dev/null +++ b/models/common/config.py @@ -0,0 +1,474 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. + SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +import logging +import json +from typing import Dict + +from omegaconf import OmegaConf +from minigpt4.common.registry import registry + + +class Config: + def __init__(self, args): + self.config = {} + + self.args = args + + # Register the config and configuration for setup + registry.register("configuration", self) + + user_config = self._build_opt_list(self.args.options) + + config = OmegaConf.load(self.args.cfg_path) + + runner_config = self.build_runner_config(config) + model_config = self.build_model_config(config, **user_config) + dataset_config = self.build_dataset_config(config) + + # Validate the user-provided runner configuration + # model and dataset configuration are supposed to be validated by the respective classes + # [TODO] validate the model/dataset configuration + # self._validate_runner_config(runner_config) + + # Override the default configuration with user options. + self.config = OmegaConf.merge( + runner_config, model_config, dataset_config, user_config + ) + + def _validate_runner_config(self, runner_config): + """ + This method validates the configuration, such that + 1) all the user specified options are valid; + 2) no type mismatches between the user specified options and the config. 
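+        (Note: this check is currently disabled -- the call to it in `__init__` above is
+        commented out.)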
+ """ + runner_config_validator = create_runner_config_validator() + runner_config_validator.validate(runner_config) + + def _build_opt_list(self, opts): + opts_dot_list = self._convert_to_dot_list(opts) + return OmegaConf.from_dotlist(opts_dot_list) + + @staticmethod + def build_model_config(config, **kwargs): + model = config.get("model", None) + assert model is not None, "Missing model configuration file." + + model_cls = registry.get_model_class(model.arch) + assert model_cls is not None, f"Model '{model.arch}' has not been registered." + + model_type = kwargs.get("model.model_type", None) + if not model_type: + model_type = model.get("model_type", None) + # else use the model type selected by user. + + assert model_type is not None, "Missing model_type." + + print("--------------") + print("model arch",model.arch) + print("model cls",model_cls) + + model_config_path = model_cls.default_config_path(model_type=model_type) + + model_config = OmegaConf.create() + # hierarchy override, customized config > default config + model_config = OmegaConf.merge( + model_config, + OmegaConf.load(model_config_path), + {"model": config["model"]}, + ) + + return model_config + + @staticmethod + def build_runner_config(config): + return {"run": config.run} + + @staticmethod + def build_dataset_config(config): + datasets = config.get("datasets", None) + if datasets is None: + raise KeyError( + "Expecting 'datasets' as the root key for dataset configuration." + ) + + dataset_config = OmegaConf.create() + + for dataset_name in datasets: + + print("dataset name", dataset_name) + builder_cls = registry.get_builder_class(dataset_name) + + dataset_config_type = datasets[dataset_name].get("type", "default") + dataset_config_path = builder_cls.default_config_path( + type=dataset_config_type + ) + + # hierarchy override, customized config > default config + dataset_config = OmegaConf.merge( + dataset_config, + OmegaConf.load(dataset_config_path), + {"datasets": {dataset_name: config["datasets"][dataset_name]}}, + ) + + return dataset_config + + def _convert_to_dot_list(self, opts): + if opts is None: + opts = [] + + if len(opts) == 0: + return opts + + has_equal = opts[0].find("=") != -1 + + if has_equal: + return opts + + return [(opt + "=" + value) for opt, value in zip(opts[0::2], opts[1::2])] + + def get_config(self): + return self.config + + @property + def run_cfg(self): + return self.config.run + + @property + def datasets_cfg(self): + return self.config.datasets + + @property + def model_cfg(self): + return self.config.model + + def pretty_print(self): + logging.info("\n===== Running Parameters =====") + logging.info(self._convert_node_to_json(self.config.run)) + + logging.info("\n====== Dataset Attributes ======") + datasets = self.config.datasets + + for dataset in datasets: + if dataset in self.config.datasets: + logging.info(f"\n======== {dataset} =======") + dataset_config = self.config.datasets[dataset] + logging.info(self._convert_node_to_json(dataset_config)) + else: + logging.warning(f"No dataset named '{dataset}' in config. 
Skipping") + + logging.info(f"\n====== Model Attributes ======") + logging.info(self._convert_node_to_json(self.config.model)) + + def _convert_node_to_json(self, node): + container = OmegaConf.to_container(node, resolve=True) + return json.dumps(container, indent=4, sort_keys=True) + + def to_dict(self): + return OmegaConf.to_container(self.config) + + +def node_to_dict(node): + return OmegaConf.to_container(node) + + +class ConfigValidator: + """ + This is a preliminary implementation to centralize and validate the configuration. + May be altered in the future. + + A helper class to validate configurations from yaml file. + + This serves the following purposes: + 1. Ensure all the options in the yaml are defined, raise error if not. + 2. when type mismatches are found, the validator will raise an error. + 3. a central place to store and display helpful messages for supported configurations. + + """ + + class _Argument: + def __init__(self, name, choices=None, type=None, help=None): + self.name = name + self.val = None + self.choices = choices + self.type = type + self.help = help + + def __str__(self): + s = f"{self.name}={self.val}" + if self.type is not None: + s += f", ({self.type})" + if self.choices is not None: + s += f", choices: {self.choices}" + if self.help is not None: + s += f", ({self.help})" + return s + + def __init__(self, description): + self.description = description + + self.arguments = dict() + + self.parsed_args = None + + def __getitem__(self, key): + assert self.parsed_args is not None, "No arguments parsed yet." + + return self.parsed_args[key] + + def __str__(self) -> str: + return self.format_help() + + def add_argument(self, *args, **kwargs): + """ + Assume the first argument is the name of the argument. + """ + self.arguments[args[0]] = self._Argument(*args, **kwargs) + + def validate(self, config=None): + """ + Convert yaml config (dict-like) to list, required by argparse. + """ + for k, v in config.items(): + assert ( + k in self.arguments + ), f"""{k} is not a valid argument. Support arguments are {self.format_arguments()}.""" + + if self.arguments[k].type is not None: + try: + self.arguments[k].val = self.arguments[k].type(v) + except ValueError: + raise ValueError(f"{k} is not a valid {self.arguments[k].type}.") + + if self.arguments[k].choices is not None: + assert ( + v in self.arguments[k].choices + ), f"""{k} must be one of {self.arguments[k].choices}.""" + + return config + + def format_arguments(self): + return str([f"{k}" for k in sorted(self.arguments.keys())]) + + def format_help(self): + # description + key-value pair string for each argument + help_msg = str(self.description) + return help_msg + ", available arguments: " + self.format_arguments() + + def print_help(self): + # display help message + print(self.format_help()) + + +def create_runner_config_validator(): + validator = ConfigValidator(description="Runner configurations") + + validator.add_argument( + "runner", + type=str, + choices=["runner_base", "runner_iter"], + help="""Runner to use. The "runner_base" uses epoch-based training while iter-based + runner runs based on iters. Default: runner_base""", + ) + # add argumetns for training dataset ratios + validator.add_argument( + "train_dataset_ratios", + type=Dict[str, float], + help="""Ratios of training dataset. This is used in iteration-based runner. + Do not support for epoch-based runner because how to define an epoch becomes tricky. 
+ Default: None""", + ) + validator.add_argument( + "max_iters", + type=float, + help="Maximum number of iterations to run.", + ) + validator.add_argument( + "max_epoch", + type=int, + help="Maximum number of epochs to run.", + ) + # add arguments for iters_per_inner_epoch + validator.add_argument( + "iters_per_inner_epoch", + type=float, + help="Number of iterations per inner epoch. This is required when runner is runner_iter.", + ) + lr_scheds_choices = registry.list_lr_schedulers() + validator.add_argument( + "lr_sched", + type=str, + choices=lr_scheds_choices, + help="Learning rate scheduler to use, from {}".format(lr_scheds_choices), + ) + task_choices = registry.list_tasks() + validator.add_argument( + "task", + type=str, + choices=task_choices, + help="Task to use, from {}".format(task_choices), + ) + # add arguments for init_lr + validator.add_argument( + "init_lr", + type=float, + help="Initial learning rate. This will be the learning rate after warmup and before decay.", + ) + # add arguments for min_lr + validator.add_argument( + "min_lr", + type=float, + help="Minimum learning rate (after decay).", + ) + # add arguments for warmup_lr + validator.add_argument( + "warmup_lr", + type=float, + help="Starting learning rate for warmup.", + ) + # add arguments for learning rate decay rate + validator.add_argument( + "lr_decay_rate", + type=float, + help="Learning rate decay rate. Required if using a decaying learning rate scheduler.", + ) + # add arguments for weight decay + validator.add_argument( + "weight_decay", + type=float, + help="Weight decay rate.", + ) + # add arguments for training batch size + validator.add_argument( + "batch_size_train", + type=int, + help="Training batch size.", + ) + # add arguments for evaluation batch size + validator.add_argument( + "batch_size_eval", + type=int, + help="Evaluation batch size, including validation and testing.", + ) + # add arguments for number of workers for data loading + validator.add_argument( + "num_workers", + help="Number of workers for data loading.", + ) + # add arguments for warm up steps + validator.add_argument( + "warmup_steps", + type=int, + help="Number of warmup steps. Required if a warmup schedule is used.", + ) + # add arguments for random seed + validator.add_argument( + "seed", + type=int, + help="Random seed.", + ) + # add arguments for output directory + validator.add_argument( + "output_dir", + type=str, + help="Output directory to save checkpoints and logs.", + ) + # add arguments for whether only use evaluation + validator.add_argument( + "evaluate", + help="Whether to only evaluate the model. If true, training will not be performed.", + ) + # add arguments for splits used for training, e.g. ["train", "val"] + validator.add_argument( + "train_splits", + type=list, + help="Splits to use for training.", + ) + # add arguments for splits used for validation, e.g. ["val"] + validator.add_argument( + "valid_splits", + type=list, + help="Splits to use for validation. If not provided, will skip the validation.", + ) + # add arguments for splits used for testing, e.g. ["test"] + validator.add_argument( + "test_splits", + type=list, + help="Splits to use for testing. 
If not provided, will skip the testing.", + ) + # add arguments for accumulating gradient for iterations + validator.add_argument( + "accum_grad_iters", + type=int, + help="Number of iterations to accumulate gradient for.", + ) + + # ====== distributed training ====== + validator.add_argument( + "device", + type=str, + choices=["cpu", "cuda"], + help="Device to use. Support 'cuda' or 'cpu' as for now.", + ) + validator.add_argument( + "world_size", + type=int, + help="Number of processes participating in the job.", + ) + validator.add_argument("dist_url", type=str) + validator.add_argument("distributed", type=bool) + # add arguments to opt using distributed sampler during evaluation or not + validator.add_argument( + "use_dist_eval_sampler", + type=bool, + help="Whether to use distributed sampler during evaluation or not.", + ) + + # ====== task specific ====== + # generation task specific arguments + # add arguments for maximal length of text output + validator.add_argument( + "max_len", + type=int, + help="Maximal length of text output.", + ) + # add arguments for minimal length of text output + validator.add_argument( + "min_len", + type=int, + help="Minimal length of text output.", + ) + # add arguments number of beams + validator.add_argument( + "num_beams", + type=int, + help="Number of beams used for beam search.", + ) + + # vqa task specific arguments + # add arguments for number of answer candidates + validator.add_argument( + "num_ans_candidates", + type=int, + help="""For ALBEF and BLIP, these models first rank answers according to likelihood to select answer candidates.""", + ) + # add arguments for inference method + validator.add_argument( + "inference_method", + type=str, + choices=["genearte", "rank"], + help="""Inference method to use for question answering. If rank, requires a answer list.""", + ) + + # ====== model specific ====== + validator.add_argument( + "k_test", + type=int, + help="Number of top k most similar samples from ITC/VTC selection to be tested.", + ) + + return validator diff --git a/models/common/dist_utils.py b/models/common/dist_utils.py new file mode 100755 index 0000000..07919b0 --- /dev/null +++ b/models/common/dist_utils.py @@ -0,0 +1,203 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. 
+ SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +import datetime +import functools +import os + +import torch +import torch.distributed as dist +import timm.models.hub as timm_hub + + +def setup_for_distributed(is_master): + """ + This function disables printing when not in master process + """ + import builtins as __builtin__ + + builtin_print = __builtin__.print + + def print(*args, **kwargs): + force = kwargs.pop("force", False) + if is_master or force: + builtin_print(*args, **kwargs) + + __builtin__.print = print + + +def is_dist_avail_and_initialized(): + if not dist.is_available(): + return False + if not dist.is_initialized(): + return False + return True + + +def get_world_size(): + if not is_dist_avail_and_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank(): + if not is_dist_avail_and_initialized(): + return 0 + return dist.get_rank() + + +def is_main_process(): + return get_rank() == 0 + + +def init_distributed_mode(args): + if args.distributed is False: + print("Not using distributed mode") + args.rank = 0 + return + + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + + if "RANK" in os.environ and "WORLD_SIZE" in os.environ: + args.rank = int(os.environ["RANK"]) + args.world_size = int(os.environ["WORLD_SIZE"]) + args.gpu = int(os.environ["LOCAL_RANK"]) + elif "SLURM_PROCID" in os.environ: + args.rank = int(os.environ["SLURM_PROCID"]) + args.gpu = args.rank % torch.cuda.device_count() + else: + print("Not using distributed mode") + args.distributed = False + args.rank = 0 + return + + args.distributed = True + + torch.cuda.set_device(args.gpu) + args.dist_backend = "nccl" + print( + "| distributed init (rank {}, world {}): {}".format( + args.rank, args.world_size, args.dist_url + ), + flush=True, + ) + torch.distributed.init_process_group( + backend=args.dist_backend, + init_method=args.dist_url, + world_size=args.world_size, + rank=args.rank, + timeout=datetime.timedelta( + days=365 + ), # allow auto-downloading and de-compressing + ) + torch.distributed.barrier() + setup_for_distributed(args.rank == 0) + + +def get_dist_info(): + if torch.__version__ < "1.0": + initialized = dist._initialized + else: + initialized = dist.is_initialized() + if initialized: + rank = dist.get_rank() + world_size = dist.get_world_size() + else: # non-distributed training + rank = 0 + world_size = 1 + return rank, world_size + + +def main_process(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + rank, _ = get_dist_info() + if rank == 0: + return func(*args, **kwargs) + + return wrapper + + +def download_cached_file(url, check_hash=True, progress=False): + """ + Download a file from a URL and cache it locally. If the file already exists, it is not downloaded again. + If distributed, only the main process downloads the file, and the other processes wait for the file to be downloaded. 
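+    (Rank 0 downloads via `timm.models.hub.download_cached_file`; the other ranks block
+    on `dist.barrier()` until the file is in the shared cache.)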
+ """ + + def get_cached_file_path(): + # a hack to sync the file path across processes + parts = torch.hub.urlparse(url) + filename = os.path.basename(parts.path) + cached_file = os.path.join(timm_hub.get_cache_dir(), filename) + + return cached_file + + if is_main_process(): + timm_hub.download_cached_file(url, check_hash, progress) + + if is_dist_avail_and_initialized(): + dist.barrier() + + return get_cached_file_path() + + +class GatherLayer(torch.autograd.Function): + """ + Gather tensors from all workers with support for backward propagation: + This implementation does not cut the gradients as torch.distributed.all_gather does. + """ + + @staticmethod + def forward(ctx, x): + output = [ + torch.zeros_like(x) for _ in range(torch.distributed.get_world_size()) + ] + torch.distributed.all_gather(output, x) + return tuple(output) + + @staticmethod + def backward(ctx, *grads): + all_gradients = torch.stack(grads) + torch.distributed.all_reduce(all_gradients) + return all_gradients[torch.distributed.get_rank()] + + +def all_gather_with_grad(tensors): + """ + Performs all_gather operation on the provided tensors. + Graph remains connected for backward grad computation. + """ + # Queue the gathered tensors + world_size = torch.distributed.get_world_size() + # There is no need for reduction in the single-proc case + if world_size == 1: + return tensors + + # tensor_all = GatherLayer.apply(tensors) + tensor_all = GatherLayer.apply(tensors) + + return torch.cat(tensor_all, dim=0) + + +@torch.no_grad() +def concat_all_gather(tensor): + """ + Performs all_gather operation on the provided tensors. + *** Warning ***: torch.distributed.all_gather has no gradient. + """ + # if use distributed training + if not is_dist_avail_and_initialized(): + return tensor + + tensors_gather = [ + torch.ones_like(tensor) for _ in range(torch.distributed.get_world_size()) + ] + torch.distributed.all_gather(tensors_gather, tensor, async_op=False) + + output = torch.cat(tensors_gather, dim=0) + return output diff --git a/models/common/eval_utils.py b/models/common/eval_utils.py new file mode 100644 index 0000000..0450873 --- /dev/null +++ b/models/common/eval_utils.py @@ -0,0 +1,224 @@ +import argparse +import numpy as np +from nltk.translate.bleu_score import sentence_bleu +import sys +sys.path.append('/home/ataallka/minigpt_video/minigpt_multi_img') +from minigpt4.common.registry import registry +from minigpt4.common.config import Config + +# imports modules for registration +from minigpt4.datasets.builders import * +from minigpt4.models import * +from minigpt4.processors import * +# from minigpt4.runners import * +from minigpt4.tasks import * +from pycocoevalcap.cider.cider import Cider +import os +import openai +from tqdm import tqdm +import json +import ast +import time + +def eval_parser(): + parser = argparse.ArgumentParser(description="Demo") + parser.add_argument("--cfg-path", help="path to configuration file.",default="test_configs/llama2_test_config.yaml") + parser.add_argument("--ckpt", type=str,default='checkpoints/video_llama_checkpoint_last.pth', help="path to checkpoint") + parser.add_argument("--eval_opt", type=str, default='all', help="path to configuration file.") + parser.add_argument("--max_new_tokens", type=int, default=512, help="max number of generated tokens") + parser.add_argument("--lora_r", type=int, default=64, help="lora rank of the model") + parser.add_argument("--lora_alpha", type=int, default=16, help="lora alpha") + parser.add_argument( + "--options", + nargs="+", + help="override 
some settings in the used config, the key-value pair " + "in xxx=yyy format will be merged into config file (deprecate), " + "change to --cfg-options instead.", + ) + return parser + + +def prepare_texts(texts, conv_temp, template='', lengths=None): + convs = [conv_temp.copy() for _ in range(len(texts))] + if lengths is None: + [conv.append_message(conv.roles[0], '{} {}'.format(template, text)) for conv, text in zip(convs, texts)] + else: + templates = [template * length for length in lengths] + [conv.append_message(conv.roles[0], '{} {}'.format(template, text)) for template, conv, text in zip(templates, convs, texts)] + [conv.append_message(conv.roles[1], None) for conv in convs] + texts = [conv.get_prompt() for conv in convs] + return texts + + +def init_model(args): + print('Initialization Model') + cfg = Config(args) + cfg.model_cfg.ckpt = args.ckpt + cfg.model_cfg.lora_r = args.lora_r + cfg.model_cfg.lora_alpha = args.lora_alpha + + model_config = cfg.model_cfg + model_config.low_resource = True + model_cls = registry.get_model_class(model_config.arch) + model = model_cls.from_config(model_config).to('cuda:0') + +# import pudb; pudb.set_trace() + key = list(cfg.datasets_cfg.keys())[0] + vis_processor_cfg = cfg.datasets_cfg.get(key).vis_processor.train + print(vis_processor_cfg) + vis_processor = registry.get_processor_class(vis_processor_cfg.name).from_config(vis_processor_cfg) + print('Initialization Finished') + return model, vis_processor + +def computeIoU(bbox1, bbox2): + x1, y1, x2, y2 = bbox1 + x3, y3, x4, y4 = bbox2 + intersection_x1 = max(x1, x3) + intersection_y1 = max(y1, y3) + intersection_x2 = min(x2, x4) + intersection_y2 = min(y2, y4) + intersection_area = max(0, intersection_x2 - intersection_x1 + 1) * max(0, intersection_y2 - intersection_y1 + 1) + bbox1_area = (x2 - x1 + 1) * (y2 - y1 + 1) + bbox2_area = (x4 - x3 + 1) * (y4 - y3 + 1) + union_area = bbox1_area + bbox2_area - intersection_area + iou = intersection_area / union_area + return iou + +def eval_bleu(results): + bleus1,bleus2,bleus3,bleus4 = [],[],[],[] + for result in tqdm (results,desc="bleu_eval"): + gt = result['gt'] + pred = result['pred'] + bleus1.append(sentence_bleu([gt.split()], pred.split(), weights=(1,0,0,0))) + bleus2.append(sentence_bleu([gt.split()], pred.split(), weights=(0.5,0.5,0,0))) + bleus3.append(sentence_bleu([gt.split()], pred.split(), weights=(0.33,0.33,0.33,0))) + bleus4.append(sentence_bleu([gt.split()], pred.split())) + # print(np.mean(bleus1),np.mean(bleus2),np.mean(bleus3),np.mean(bleus4),flush=True) + return {'bleu1':np.mean(bleus1),'bleu2':np.mean(bleus2),'bleu3':np.mean(bleus3),'bleu4':np.mean(bleus4)} + +# Create a Cider object +cider_scorer = Cider() +def eval_cider(pred_result,gt_result): + # Compute CIDEr scores + mean_cider_scores, cider_scores = cider_scorer.compute_score(gt_result, pred_result) + cider_scores_dict={} + for score,pred_vid_id,gt_vid_id in tqdm(zip(cider_scores.tolist(),pred_result,gt_result),desc="cider_eval") : + assert pred_vid_id==gt_vid_id + cider_scores_dict[pred_vid_id] = score + return {'mean_cider_scores':mean_cider_scores,'cider_scores':cider_scores_dict} + + +openai.api_key_path = "/home/ataallka/chatgpt_api.txt" + + +def chat_gpt_eval(results,output_path): + trial=0 + gpt_results=[] + avg_chatgpt_score=0 + existed_files={} + # read previous results from output path + for file in os.listdir(output_path): + if file.endswith(".json"): + with open(f'{output_path}/{file}') as json_file: + data = json.load(json_file) + gpt_results.append(data[0]) + 
avg_chatgpt_score+=float(data[0]['chatgpt_score']) + existed_files[data[0]['video_name']]=True + length_output_path=len(os.listdir(output_path)) + while len (results)!= length_output_path: + for res in tqdm(results,desc="chatgpt_eval"): + if existed_files.get(res['video_name'],False): + continue + video_name=res['video_name'] + sentence_1=res['A'] + sentence_2=res['pred'] + try: + # prompt=f"given these 2 sentences the first one is the ground truth text and the second sentence is the generated text ,give me a score from 0 to 1 to evaluate how much they are similar to each other, and have the same context and related to each other to evaluate the quality of this generated text.the output should be only the score float number without any additional information\nfirst sentence: {sentence_1}\nsecond sentence: {sentence_2}\nscore:" + prompt=f"given these 2 sentences the first one is the ground truth descrption of a video and the second sentence is the generated text from a video summarization model,give it a score from 0 to 5 to evaluate the model summarization performance.the output should be only the score number without any additional information\nfirst sentence: {sentence_1}\nsecond sentence: {sentence_2}\nscore:" + response = openai.ChatCompletion.create( + model="gpt-3.5-turbo", + messages=[ + { + "role": "user", + "content": prompt + }], + ) + res['chatgpt_score']=response.choices[0].message['content'] + out={'video_name':video_name,'chatgpt_score':response.choices[0].message['content']} + gpt_results.append(out) + # save each video result in a json file + with open(f'{output_path}/{video_name}.json', 'w') as f: + json.dump([out], f) + avg_chatgpt_score+=float(response.choices[0].message['content']) + except Exception as e: + print("chat gpt error",e) + print ("Finished chat gpt evaluation in trial",trial) + trial+=1 + length_output_path=len(os.listdir(output_path)) + return results,avg_chatgpt_score/len(results) +def GPT4_answer(question, answer,pred): + try: + # Compute the correctness score + completion = openai.ChatCompletion.create( + # model="gpt-3.5-turbo", + model='gpt-4', + messages=[ + { + "role": "system", + "content": + "You are an intelligent chatbot designed for evaluating the correctness of generative outputs for question-answer pairs. " + "Your task is to compare the predicted answer with the correct answer and determine if they match meaningfully. Here's how you can accomplish the task:" + "------" + "##INSTRUCTIONS: " + "- Focus on the meaningful match between the predicted answer and the correct answer.\n" + "- Consider synonyms or paraphrases as valid matches.\n" + "- Evaluate the correctness of the prediction compared to the answer." + }, + { + "role": "user", + "content": + "Please evaluate the following video-based question-answer pair:\n\n" + f"Question: {question}\n" + f"Correct Answer: {answer}\n" + f"Predicted Answer: {pred}\n\n" + "Provide your evaluation only as a yes/no and score where the score is an integer value between 0 and 5, with 5 indicating the highest meaningful match. " + "Please generate the response in the form of a Python dictionary string with keys 'pred' and 'score', where value of 'pred' is a string of 'yes' or 'no' and value of 'score' is in INTEGER, not STRING." + "DO NOT PROVIDE ANY OTHER OUTPUT TEXT OR EXPLANATION. Only provide the Python dictionary string. " + "For example, your response should look like this: {'pred': 'yes', 'score': 4.8}." + } + ] + ) + # Convert response to a Python dictionary. 
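+        # The prompt above requests a Python-dict string, so the reply is parsed
+        # with ast.literal_eval; malformed replies raise and fall through to the
+        # except branch below, which returns None.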
+        response_message = completion["choices"][0]["message"]["content"]
+        response_dict = ast.literal_eval(response_message)
+        return response_dict
+    except Exception as e:
+        print(f"Error: {e}")
+        return None
+
+
+def GPT4_evaluation(val_result):
+    scores = []
+    yes_count = 0
+    no_count = 0
+    for res in val_result:
+        gpt_response = GPT4_answer(res['Q'], res['A'], res['pred'])
+        if gpt_response is None:
+            continue
+        try:
+            scores.append(float(gpt_response['score']))
+            if 'yes' in gpt_response['pred'].lower():
+                yes_count += 1
+            elif 'no' in gpt_response['pred'].lower():
+                no_count += 1
+        except Exception:
+            continue
+    # guard against empty results so a failed run does not crash on division by zero
+    avg_score = sum(scores) / len(scores) if scores else 0.0
+    accuracy = (yes_count / (yes_count + no_count)) * 100 if (yes_count + no_count) > 0 else 0.0
+    print(f"chatgpt score: {avg_score} accuracy: {accuracy}")
+    return avg_score, accuracy
+
+# with open('results/ckpt_15_res89_res32_Video_validation_Dataset_subtitles.json','r') as f:
+#     results = json.load(f)
+# t1 = time.time()
+# avg_score, accuracy = GPT4_evaluation(results)
+# print(f"chatgpt score: {avg_score} accuracy: {accuracy}")
+# print(f"Time taken: {time.time()-t1}")
\ No newline at end of file
diff --git a/models/common/gradcam.py b/models/common/gradcam.py
new file mode 100755
index 0000000..d53a525
--- /dev/null
+++ b/models/common/gradcam.py
@@ -0,0 +1,24 @@
+import numpy as np
+from matplotlib import pyplot as plt
+from scipy.ndimage import gaussian_filter
+from skimage import transform as skimage_transform
+
+
+def getAttMap(img, attMap, blur=True, overlap=True):
+    # normalize the attention map to [0, 1]
+    attMap -= attMap.min()
+    if attMap.max() > 0:
+        attMap /= attMap.max()
+    # resize to the image resolution and optionally smooth it
+    attMap = skimage_transform.resize(attMap, (img.shape[:2]), order=3, mode="constant")
+    if blur:
+        attMap = gaussian_filter(attMap, 0.02 * max(img.shape[:2]))
+        attMap -= attMap.min()
+        attMap /= attMap.max()
+    # colorize with the jet colormap and drop the alpha channel
+    cmap = plt.get_cmap("jet")
+    attMapV = cmap(attMap)
+    attMapV = np.delete(attMapV, 3, 2)
+    if overlap:
+        # alpha-blend the heatmap over the input image
+        attMap = (
+            1 * (1 - attMap**0.7).reshape(attMap.shape + (1,)) * img
+            + (attMap**0.7).reshape(attMap.shape + (1,)) * attMapV
+        )
+    return attMap
diff --git a/models/common/logger.py b/models/common/logger.py
new file mode 100755
index 0000000..9a5a727
--- /dev/null
+++ b/models/common/logger.py
@@ -0,0 +1,195 @@
+"""
+ Copyright (c) 2022, salesforce.com, inc.
+ All rights reserved.
+ SPDX-License-Identifier: BSD-3-Clause
+ For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
+"""
+
+import datetime
+import logging
+import time
+from collections import defaultdict, deque
+
+import torch
+import torch.distributed as dist
+
+from models.common import dist_utils
+
+
+class SmoothedValue(object):
+    """Track a series of values and provide access to smoothed values over a
+    window or the global series average.
+    """
+
+    def __init__(self, window_size=20, fmt=None):
+        if fmt is None:
+            fmt = "{median:.4f} ({global_avg:.4f})"
+        self.deque = deque(maxlen=window_size)
+        self.total = 0.0
+        self.count = 0
+        self.fmt = fmt
+
+    def update(self, value, n=1):
+        self.deque.append(value)
+        self.count += n
+        self.total += value * n
+
+    def synchronize_between_processes(self):
+        """
+        Warning: does not synchronize the deque!
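+        Only count and total (and hence the global average) are all-reduced
+        across workers; the windowed median/avg remain rank-local.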
+ """ + if not dist_utils.is_dist_avail_and_initialized(): + return + t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda") + dist.barrier() + dist.all_reduce(t) + t = t.tolist() + self.count = int(t[0]) + self.total = t[1] + + @property + def median(self): + d = torch.tensor(list(self.deque)) + return d.median().item() + + @property + def avg(self): + d = torch.tensor(list(self.deque), dtype=torch.float32) + return d.mean().item() + + @property + def global_avg(self): + return self.total / self.count + + @property + def max(self): + return max(self.deque) + + @property + def value(self): + return self.deque[-1] + + def __str__(self): + return self.fmt.format( + median=self.median, + avg=self.avg, + global_avg=self.global_avg, + max=self.max, + value=self.value, + ) + + +class MetricLogger(object): + def __init__(self, delimiter="\t"): + self.meters = defaultdict(SmoothedValue) + self.delimiter = delimiter + + def update(self, **kwargs): + for k, v in kwargs.items(): + if isinstance(v, torch.Tensor): + v = v.item() + assert isinstance(v, (float, int)) + self.meters[k].update(v) + + def __getattr__(self, attr): + if attr in self.meters: + return self.meters[attr] + if attr in self.__dict__: + return self.__dict__[attr] + raise AttributeError( + "'{}' object has no attribute '{}'".format(type(self).__name__, attr) + ) + + def __str__(self): + loss_str = [] + for name, meter in self.meters.items(): + loss_str.append("{}: {}".format(name, str(meter))) + return self.delimiter.join(loss_str) + + def global_avg(self): + loss_str = [] + for name, meter in self.meters.items(): + loss_str.append("{}: {:.4f}".format(name, meter.global_avg)) + return self.delimiter.join(loss_str) + + def synchronize_between_processes(self): + for meter in self.meters.values(): + meter.synchronize_between_processes() + + def add_meter(self, name, meter): + self.meters[name] = meter + + def log_every(self, iterable, print_freq, header=None): + i = 0 + if not header: + header = "" + start_time = time.time() + end = time.time() + iter_time = SmoothedValue(fmt="{avg:.4f}") + data_time = SmoothedValue(fmt="{avg:.4f}") + space_fmt = ":" + str(len(str(len(iterable)))) + "d" + log_msg = [ + header, + "[{0" + space_fmt + "}/{1}]", + "eta: {eta}", + "{meters}", + "time: {time}", + "data: {data}", + ] + if torch.cuda.is_available(): + log_msg.append("max mem: {memory:.0f}") + log_msg = self.delimiter.join(log_msg) + MB = 1024.0 * 1024.0 + for obj in iterable: + data_time.update(time.time() - end) + yield obj + iter_time.update(time.time() - end) + if i % print_freq == 0 or i == len(iterable) - 1: + eta_seconds = iter_time.global_avg * (len(iterable) - i) + eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) + if torch.cuda.is_available(): + print( + log_msg.format( + i, + len(iterable), + eta=eta_string, + meters=str(self), + time=str(iter_time), + data=str(data_time), + memory=torch.cuda.max_memory_allocated() / MB, + ) + ) + else: + print( + log_msg.format( + i, + len(iterable), + eta=eta_string, + meters=str(self), + time=str(iter_time), + data=str(data_time), + ) + ) + i += 1 + end = time.time() + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print( + "{} Total time: {} ({:.4f} s / it)".format( + header, total_time_str, total_time / len(iterable) + ) + ) + + +class AttrDict(dict): + def __init__(self, *args, **kwargs): + super(AttrDict, self).__init__(*args, **kwargs) + self.__dict__ = self + + +def setup_logger(): + 
logging.basicConfig(
+        level=logging.INFO if dist_utils.is_main_process() else logging.WARN,
+        format="%(asctime)s [%(levelname)s] %(message)s",
+        handlers=[logging.StreamHandler()],
+    )
diff --git a/models/common/optims.py b/models/common/optims.py
new file mode 100755
index 0000000..270e66b
--- /dev/null
+++ b/models/common/optims.py
@@ -0,0 +1,119 @@
+"""
+ Copyright (c) 2022, salesforce.com, inc.
+ All rights reserved.
+ SPDX-License-Identifier: BSD-3-Clause
+ For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
+"""
+
+import math
+
+from models.common.registry import registry
+
+
+@registry.register_lr_scheduler("linear_warmup_step_lr")
+class LinearWarmupStepLRScheduler:
+    def __init__(
+        self,
+        optimizer,
+        max_epoch,
+        min_lr,
+        init_lr,
+        decay_rate=1,
+        warmup_start_lr=-1,
+        warmup_steps=0,
+        **kwargs
+    ):
+        self.optimizer = optimizer
+
+        self.max_epoch = max_epoch
+        self.min_lr = min_lr
+
+        self.decay_rate = decay_rate
+
+        self.init_lr = init_lr
+        self.warmup_steps = warmup_steps
+        self.warmup_start_lr = warmup_start_lr if warmup_start_lr >= 0 else init_lr
+
+    def step(self, cur_epoch, cur_step):
+        if cur_epoch == 0:
+            warmup_lr_schedule(
+                step=cur_step,
+                optimizer=self.optimizer,
+                max_step=self.warmup_steps,
+                init_lr=self.warmup_start_lr,
+                max_lr=self.init_lr,
+            )
+        else:
+            step_lr_schedule(
+                epoch=cur_epoch,
+                optimizer=self.optimizer,
+                init_lr=self.init_lr,
+                min_lr=self.min_lr,
+                decay_rate=self.decay_rate,
+            )
+
+
+@registry.register_lr_scheduler("linear_warmup_cosine_lr")
+class LinearWarmupCosineLRScheduler:
+    def __init__(
+        self,
+        optimizer,
+        max_epoch,
+        iters_per_epoch,
+        min_lr,
+        init_lr,
+        warmup_steps=0,
+        warmup_start_lr=-1,
+        **kwargs
+    ):
+        self.optimizer = optimizer
+
+        self.max_epoch = max_epoch
+        self.iters_per_epoch = iters_per_epoch
+        self.min_lr = min_lr
+
+        self.init_lr = init_lr
+        self.warmup_steps = warmup_steps
+        self.warmup_start_lr = warmup_start_lr if warmup_start_lr >= 0 else init_lr
+
+    def step(self, cur_epoch, cur_step):
+        total_cur_step = cur_epoch * self.iters_per_epoch + cur_step
+        if total_cur_step < self.warmup_steps:
+            warmup_lr_schedule(
+                step=total_cur_step,
+                optimizer=self.optimizer,
+                max_step=self.warmup_steps,
+                init_lr=self.warmup_start_lr,
+                max_lr=self.init_lr,
+            )
+        else:
+            cosine_lr_schedule(
+                epoch=total_cur_step,
+                optimizer=self.optimizer,
+                max_epoch=self.max_epoch * self.iters_per_epoch,
+                init_lr=self.init_lr,
+                min_lr=self.min_lr,
+            )
+
+
+def cosine_lr_schedule(optimizer, epoch, max_epoch, init_lr, min_lr):
+    """Cosine decay of the learning rate from init_lr to min_lr"""
+    lr = (init_lr - min_lr) * 0.5 * (
+        1.0 + math.cos(math.pi * epoch / max_epoch)
+    ) + min_lr
+    for param_group in optimizer.param_groups:
+        param_group["lr"] = lr
+
+
+def warmup_lr_schedule(optimizer, step, max_step, init_lr, max_lr):
+    """Linearly warm up the learning rate from init_lr to max_lr over max_step steps"""
+    lr = min(max_lr, init_lr + (max_lr - init_lr) * step / max(max_step, 1))
+    for param_group in optimizer.param_groups:
+        param_group["lr"] = lr
+
+
+def step_lr_schedule(optimizer, epoch, init_lr, min_lr, decay_rate):
+    """Exponentially decay the learning rate by decay_rate every epoch, floored at min_lr"""
+    lr = max(min_lr, init_lr * (decay_rate**epoch))
+    for param_group in optimizer.param_groups:
+        param_group["lr"] = lr
diff --git a/models/common/registry.py b/models/common/registry.py
new file mode 100755
index 0000000..c953097
--- /dev/null
+++ b/models/common/registry.py
@@ -0,0 +1,330 @@
+"""
+ Copyright (c) 2022, salesforce.com, inc.
+ All rights reserved.
+ SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + + +class Registry: + mapping = { + "builder_name_mapping": {}, + "task_name_mapping": {}, + "processor_name_mapping": {}, + "model_name_mapping": {}, + "lr_scheduler_name_mapping": {}, + "runner_name_mapping": {}, + "state": {}, + "paths": {}, + } + + @classmethod + def register_builder(cls, name): + r"""Register a dataset builder to registry with key 'name' + + Args: + name: Key with which the builder will be registered. + + Usage: + + from minigpt4.common.registry import registry + from minigpt4.datasets.base_dataset_builder import BaseDatasetBuilder + """ + + def wrap(builder_cls): + from minigpt4.datasets.builders.base_dataset_builder import BaseDatasetBuilder + + assert issubclass( + builder_cls, BaseDatasetBuilder + ), "All builders must inherit BaseDatasetBuilder class, found {}".format( + builder_cls + ) + if name in cls.mapping["builder_name_mapping"]: + raise KeyError( + "Name '{}' already registered for {}.".format( + name, cls.mapping["builder_name_mapping"][name] + ) + ) + cls.mapping["builder_name_mapping"][name] = builder_cls + return builder_cls + + return wrap + + @classmethod + def register_task(cls, name): + r"""Register a task to registry with key 'name' + + Args: + name: Key with which the task will be registered. + + Usage: + + from minigpt4.common.registry import registry + """ + + def wrap(task_cls): + from minigpt4.tasks.base_task import BaseTask + + assert issubclass( + task_cls, BaseTask + ), "All tasks must inherit BaseTask class" + if name in cls.mapping["task_name_mapping"]: + raise KeyError( + "Name '{}' already registered for {}.".format( + name, cls.mapping["task_name_mapping"][name] + ) + ) + cls.mapping["task_name_mapping"][name] = task_cls + return task_cls + + return wrap + + @classmethod + def register_model(cls, name): + r"""Register a task to registry with key 'name' + + Args: + name: Key with which the task will be registered. + + Usage: + + from minigpt4.common.registry import registry + """ + + def wrap(model_cls): + # from minigpt4.models import BaseModel + + # assert issubclass( + # model_cls, BaseModel + # ), "All models must inherit BaseModel class" + + if name in cls.mapping["model_name_mapping"]: + raise KeyError( + "Name '{}' already registered for {}.".format( + name, cls.mapping["model_name_mapping"][name] + ) + ) + cls.mapping["model_name_mapping"][name] = model_cls + return model_cls + + return wrap + + @classmethod + def register_processor(cls, name): + r"""Register a processor to registry with key 'name' + + Args: + name: Key with which the task will be registered. + + Usage: + + from minigpt4.common.registry import registry + """ + + def wrap(processor_cls): + from minigpt4.processors import BaseProcessor + + assert issubclass( + processor_cls, BaseProcessor + ), "All processors must inherit BaseProcessor class" + if name in cls.mapping["processor_name_mapping"]: + raise KeyError( + "Name '{}' already registered for {}.".format( + name, cls.mapping["processor_name_mapping"][name] + ) + ) + cls.mapping["processor_name_mapping"][name] = processor_cls + return processor_cls + + return wrap + + @classmethod + def register_lr_scheduler(cls, name): + r"""Register a model to registry with key 'name' + + Args: + name: Key with which the task will be registered. 
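+
+        Example (a sketch; the scheduler name is hypothetical):
+
+            @registry.register_lr_scheduler("my_constant_lr")
+            class ConstantLRScheduler:
+                ...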
+ + Usage: + + from minigpt4.common.registry import registry + """ + + def wrap(lr_sched_cls): + if name in cls.mapping["lr_scheduler_name_mapping"]: + raise KeyError( + "Name '{}' already registered for {}.".format( + name, cls.mapping["lr_scheduler_name_mapping"][name] + ) + ) + cls.mapping["lr_scheduler_name_mapping"][name] = lr_sched_cls + return lr_sched_cls + + return wrap + + @classmethod + def register_runner(cls, name): + r"""Register a model to registry with key 'name' + + Args: + name: Key with which the task will be registered. + + Usage: + + from minigpt4.common.registry import registry + """ + + def wrap(runner_cls): + if name in cls.mapping["runner_name_mapping"]: + raise KeyError( + "Name '{}' already registered for {}.".format( + name, cls.mapping["runner_name_mapping"][name] + ) + ) + cls.mapping["runner_name_mapping"][name] = runner_cls + return runner_cls + + return wrap + + @classmethod + def register_path(cls, name, path): + r"""Register a path to registry with key 'name' + + Args: + name: Key with which the path will be registered. + + Usage: + + from minigpt4.common.registry import registry + """ + assert isinstance(path, str), "All path must be str." + if name in cls.mapping["paths"]: + raise KeyError("Name '{}' already registered.".format(name)) + cls.mapping["paths"][name] = path + + @classmethod + def register(cls, name, obj): + r"""Register an item to registry with key 'name' + + Args: + name: Key with which the item will be registered. + + Usage:: + + from minigpt4.common.registry import registry + + registry.register("config", {}) + """ + path = name.split(".") + current = cls.mapping["state"] + + for part in path[:-1]: + if part not in current: + current[part] = {} + current = current[part] + + current[path[-1]] = obj + + # @classmethod + # def get_trainer_class(cls, name): + # return cls.mapping["trainer_name_mapping"].get(name, None) + + @classmethod + def get_builder_class(cls, name): + return cls.mapping["builder_name_mapping"].get(name, None) + + @classmethod + def get_model_class(cls, name): + return cls.mapping["model_name_mapping"].get(name, None) + + @classmethod + def get_task_class(cls, name): + return cls.mapping["task_name_mapping"].get(name, None) + + @classmethod + def get_processor_class(cls, name): + return cls.mapping["processor_name_mapping"].get(name, None) + + @classmethod + def get_lr_scheduler_class(cls, name): + return cls.mapping["lr_scheduler_name_mapping"].get(name, None) + + @classmethod + def get_runner_class(cls, name): + return cls.mapping["runner_name_mapping"].get(name, None) + + @classmethod + def list_runners(cls): + return sorted(cls.mapping["runner_name_mapping"].keys()) + + @classmethod + def list_models(cls): + return sorted(cls.mapping["model_name_mapping"].keys()) + + @classmethod + def list_tasks(cls): + return sorted(cls.mapping["task_name_mapping"].keys()) + + @classmethod + def list_processors(cls): + return sorted(cls.mapping["processor_name_mapping"].keys()) + + @classmethod + def list_lr_schedulers(cls): + return sorted(cls.mapping["lr_scheduler_name_mapping"].keys()) + + @classmethod + def list_datasets(cls): + return sorted(cls.mapping["builder_name_mapping"].keys()) + + @classmethod + def get_path(cls, name): + return cls.mapping["paths"].get(name, None) + + @classmethod + def get(cls, name, default=None, no_warning=False): + r"""Get an item from registry with key 'name' + + Args: + name (string): Key whose value needs to be retrieved. 
+ default: If passed and key is not in registry, default value will + be returned with a warning. Default: None + no_warning (bool): If passed as True, warning when key doesn't exist + will not be generated. Useful for MMF's + internal operations. Default: False + """ + original_name = name + name = name.split(".") + value = cls.mapping["state"] + for subname in name: + value = value.get(subname, default) + if value is default: + break + + if ( + "writer" in cls.mapping["state"] + and value == default + and no_warning is False + ): + cls.mapping["state"]["writer"].warning( + "Key {} is not present in registry, returning default value " + "of {}".format(original_name, default) + ) + return value + + @classmethod + def unregister(cls, name): + r"""Remove an item from registry with key 'name' + + Args: + name: Key which needs to be removed. + Usage:: + + from mmf.common.registry import registry + + config = registry.unregister("config") + """ + return cls.mapping["state"].pop(name, None) + + +registry = Registry() diff --git a/models/common/utils.py b/models/common/utils.py new file mode 100755 index 0000000..f665d5b --- /dev/null +++ b/models/common/utils.py @@ -0,0 +1,424 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. + SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +import io +import json +import logging +import os +import pickle +import re +import shutil +import urllib +import urllib.error +import urllib.request +from typing import Optional +from urllib.parse import urlparse + +import numpy as np +import pandas as pd +import yaml +from iopath.common.download import download +from iopath.common.file_io import file_lock, g_pathmgr +from models.common.registry import registry +from torch.utils.model_zoo import tqdm +from torchvision.datasets.utils import ( + check_integrity, + download_file_from_google_drive, + extract_archive, +) + + +def now(): + from datetime import datetime + + return datetime.now().strftime("%Y%m%d%H%M") + + +def is_url(url_or_filename): + parsed = urlparse(url_or_filename) + return parsed.scheme in ("http", "https") + + +def get_cache_path(rel_path): + return os.path.expanduser(os.path.join(registry.get_path("cache_root"), rel_path)) + + +def get_abs_path(rel_path): + return os.path.join(registry.get_path("library_root"), rel_path) + + +def load_json(filename): + with open(filename, "r") as f: + return json.load(f) + + +# The following are adapted from torchvision and vissl +# torchvision: https://github.com/pytorch/vision +# vissl: https://github.com/facebookresearch/vissl/blob/main/vissl/utils/download.py + + +def makedir(dir_path): + """ + Create the directory if it does not exist. 
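+    Returns True if the directory exists or was created successfully,
+    False otherwise.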
+ """ + is_success = False + try: + if not g_pathmgr.exists(dir_path): + g_pathmgr.mkdirs(dir_path) + is_success = True + except BaseException: + print(f"Error creating directory: {dir_path}") + return is_success + + +def get_redirected_url(url: str): + """ + Given a URL, returns the URL it redirects to or the + original URL in case of no indirection + """ + import requests + + with requests.Session() as session: + with session.get(url, stream=True, allow_redirects=True) as response: + if response.history: + return response.url + else: + return url + + +def to_google_drive_download_url(view_url: str) -> str: + """ + Utility function to transform a view URL of google drive + to a download URL for google drive + Example input: + https://drive.google.com/file/d/137RyRjvTBkBiIfeYBNZBtViDHQ6_Ewsp/view + Example output: + https://drive.google.com/uc?export=download&id=137RyRjvTBkBiIfeYBNZBtViDHQ6_Ewsp + """ + splits = view_url.split("/") + assert splits[-1] == "view" + file_id = splits[-2] + return f"https://drive.google.com/uc?export=download&id={file_id}" + + +def download_google_drive_url(url: str, output_path: str, output_file_name: str): + """ + Download a file from google drive + Downloading an URL from google drive requires confirmation when + the file of the size is too big (google drive notifies that + anti-viral checks cannot be performed on such files) + """ + import requests + + with requests.Session() as session: + + # First get the confirmation token and append it to the URL + with session.get(url, stream=True, allow_redirects=True) as response: + for k, v in response.cookies.items(): + if k.startswith("download_warning"): + url = url + "&confirm=" + v + + # Then download the content of the file + with session.get(url, stream=True, verify=True) as response: + makedir(output_path) + path = os.path.join(output_path, output_file_name) + total_size = int(response.headers.get("Content-length", 0)) + with open(path, "wb") as file: + from tqdm import tqdm + + with tqdm(total=total_size) as progress_bar: + for block in response.iter_content( + chunk_size=io.DEFAULT_BUFFER_SIZE + ): + file.write(block) + progress_bar.update(len(block)) + + +def _get_google_drive_file_id(url: str) -> Optional[str]: + parts = urlparse(url) + + if re.match(r"(drive|docs)[.]google[.]com", parts.netloc) is None: + return None + + match = re.match(r"/file/d/(?P[^/]*)", parts.path) + if match is None: + return None + + return match.group("id") + + +def _urlretrieve(url: str, filename: str, chunk_size: int = 1024) -> None: + with open(filename, "wb") as fh: + with urllib.request.urlopen( + urllib.request.Request(url, headers={"User-Agent": "vissl"}) + ) as response: + with tqdm(total=response.length) as pbar: + for chunk in iter(lambda: response.read(chunk_size), ""): + if not chunk: + break + pbar.update(chunk_size) + fh.write(chunk) + + +def download_url( + url: str, + root: str, + filename: Optional[str] = None, + md5: Optional[str] = None, +) -> None: + """Download a file from a url and place it in root. + Args: + url (str): URL to download file from + root (str): Directory to place downloaded file in + filename (str, optional): Name to save the file under. + If None, use the basename of the URL. + md5 (str, optional): MD5 checksum of the download. 
If None, do not check + """ + root = os.path.expanduser(root) + if not filename: + filename = os.path.basename(url) + fpath = os.path.join(root, filename) + + makedir(root) + + # check if file is already present locally + if check_integrity(fpath, md5): + print("Using downloaded and verified file: " + fpath) + return + + # expand redirect chain if needed + url = get_redirected_url(url) + + # check if file is located on Google Drive + file_id = _get_google_drive_file_id(url) + if file_id is not None: + return download_file_from_google_drive(file_id, root, filename, md5) + + # download the file + try: + print("Downloading " + url + " to " + fpath) + _urlretrieve(url, fpath) + except (urllib.error.URLError, IOError) as e: # type: ignore[attr-defined] + if url[:5] == "https": + url = url.replace("https:", "http:") + print( + "Failed download. Trying https -> http instead." + " Downloading " + url + " to " + fpath + ) + _urlretrieve(url, fpath) + else: + raise e + + # check integrity of downloaded file + if not check_integrity(fpath, md5): + raise RuntimeError("File not found or corrupted.") + + +def download_and_extract_archive( + url: str, + download_root: str, + extract_root: Optional[str] = None, + filename: Optional[str] = None, + md5: Optional[str] = None, + remove_finished: bool = False, +) -> None: + download_root = os.path.expanduser(download_root) + if extract_root is None: + extract_root = download_root + if not filename: + filename = os.path.basename(url) + + download_url(url, download_root, filename, md5) + + archive = os.path.join(download_root, filename) + print("Extracting {} to {}".format(archive, extract_root)) + extract_archive(archive, extract_root, remove_finished) + + +def cache_url(url: str, cache_dir: str) -> str: + """ + This implementation downloads the remote resource and caches it locally. + The resource will only be downloaded if not previously requested. + """ + parsed_url = urlparse(url) + dirname = os.path.join(cache_dir, os.path.dirname(parsed_url.path.lstrip("/"))) + makedir(dirname) + filename = url.split("/")[-1] + cached = os.path.join(dirname, filename) + with file_lock(cached): + if not os.path.isfile(cached): + logging.info(f"Downloading {url} to {cached} ...") + cached = download(url, dirname, filename=filename) + logging.info(f"URL {url} cached in {cached}") + return cached + + +# TODO (prigoyal): convert this into RAII-style API +def create_file_symlink(file1, file2): + """ + Simply create the symlinks for a given file1 to file2. + Useful during model checkpointing to symlinks to the + latest successful checkpoint. + """ + try: + if g_pathmgr.exists(file2): + g_pathmgr.rm(file2) + g_pathmgr.symlink(file1, file2) + except Exception as e: + logging.info(f"Could NOT create symlink. Error: {e}") + + +def save_file(data, filename, append_to_json=True, verbose=True): + """ + Common i/o utility to handle saving data to various file formats. + Supported: + .pkl, .pickle, .npy, .json + Specifically for .json, users have the option to either append (default) + or rewrite by passing in Boolean value to append_to_json. 
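+    Example (a minimal sketch; the path is illustrative):
+        save_file({"bleu4": 0.31}, "results/metrics.json", append_to_json=True)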
+ """ + if verbose: + logging.info(f"Saving data to file: {filename}") + file_ext = os.path.splitext(filename)[1] + if file_ext in [".pkl", ".pickle"]: + with g_pathmgr.open(filename, "wb") as fopen: + pickle.dump(data, fopen, pickle.HIGHEST_PROTOCOL) + elif file_ext == ".npy": + with g_pathmgr.open(filename, "wb") as fopen: + np.save(fopen, data) + elif file_ext == ".json": + if append_to_json: + with g_pathmgr.open(filename, "a") as fopen: + fopen.write(json.dumps(data, sort_keys=True) + "\n") + fopen.flush() + else: + with g_pathmgr.open(filename, "w") as fopen: + fopen.write(json.dumps(data, sort_keys=True) + "\n") + fopen.flush() + elif file_ext == ".yaml": + with g_pathmgr.open(filename, "w") as fopen: + dump = yaml.dump(data) + fopen.write(dump) + fopen.flush() + else: + raise Exception(f"Saving {file_ext} is not supported yet") + + if verbose: + logging.info(f"Saved data to file: {filename}") + + +def load_file(filename, mmap_mode=None, verbose=True, allow_pickle=False): + """ + Common i/o utility to handle loading data from various file formats. + Supported: + .pkl, .pickle, .npy, .json + For the npy files, we support reading the files in mmap_mode. + If the mmap_mode of reading is not successful, we load data without the + mmap_mode. + """ + if verbose: + logging.info(f"Loading data from file: {filename}") + + file_ext = os.path.splitext(filename)[1] + if file_ext == ".txt": + with g_pathmgr.open(filename, "r") as fopen: + data = fopen.readlines() + elif file_ext in [".pkl", ".pickle"]: + with g_pathmgr.open(filename, "rb") as fopen: + data = pickle.load(fopen, encoding="latin1") + elif file_ext == ".npy": + if mmap_mode: + try: + with g_pathmgr.open(filename, "rb") as fopen: + data = np.load( + fopen, + allow_pickle=allow_pickle, + encoding="latin1", + mmap_mode=mmap_mode, + ) + except ValueError as e: + logging.info( + f"Could not mmap {filename}: {e}. Trying without g_pathmgr" + ) + data = np.load( + filename, + allow_pickle=allow_pickle, + encoding="latin1", + mmap_mode=mmap_mode, + ) + logging.info("Successfully loaded without g_pathmgr") + except Exception: + logging.info("Could not mmap without g_pathmgr. Trying without mmap") + with g_pathmgr.open(filename, "rb") as fopen: + data = np.load(fopen, allow_pickle=allow_pickle, encoding="latin1") + else: + with g_pathmgr.open(filename, "rb") as fopen: + data = np.load(fopen, allow_pickle=allow_pickle, encoding="latin1") + elif file_ext == ".json": + with g_pathmgr.open(filename, "r") as fopen: + data = json.load(fopen) + elif file_ext == ".yaml": + with g_pathmgr.open(filename, "r") as fopen: + data = yaml.load(fopen, Loader=yaml.FullLoader) + elif file_ext == ".csv": + with g_pathmgr.open(filename, "r") as fopen: + data = pd.read_csv(fopen) + else: + raise Exception(f"Reading from {file_ext} is not supported yet") + return data + + +def abspath(resource_path: str): + """ + Make a path absolute, but take into account prefixes like + "http://" or "manifold://" + """ + regex = re.compile(r"^\w+://") + if regex.match(resource_path) is None: + return os.path.abspath(resource_path) + else: + return resource_path + + +def makedir(dir_path): + """ + Create the directory if it does not exist. + """ + is_success = False + try: + if not g_pathmgr.exists(dir_path): + g_pathmgr.mkdirs(dir_path) + is_success = True + except BaseException: + logging.info(f"Error creating directory: {dir_path}") + return is_success + + +def is_url(input_url): + """ + Check if an input string is a url. 
look for http(s):// and ignoring the case + """ + is_url = re.match(r"^(?:http)s?://", input_url, re.IGNORECASE) is not None + return is_url + + +def cleanup_dir(dir): + """ + Utility for deleting a directory. Useful for cleaning the storage space + that contains various training artifacts like checkpoints, data etc. + """ + if os.path.exists(dir): + logging.info(f"Deleting directory: {dir}") + shutil.rmtree(dir) + logging.info(f"Deleted contents of directory: {dir}") + + +def get_file_size(filename): + """ + Given a file, get the size of file in MB + """ + size_in_mb = os.path.getsize(filename) / float(1024**2) + return size_in_mb diff --git a/models/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvalDemo.py b/models/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvalDemo.py new file mode 100644 index 0000000..07ca21d --- /dev/null +++ b/models/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvalDemo.py @@ -0,0 +1,89 @@ +# coding: utf-8 + +import sys +dataDir = '../../VQA' +sys.path.insert(0, '%s/PythonHelperTools/vqaTools' %(dataDir)) +from vqa import VQA +from vqaEvaluation.vqaEval import VQAEval +import matplotlib.pyplot as plt +import skimage.io as io +import json +import random +import os + +# set up file names and paths +versionType ='v2_' # this should be '' when using VQA v2.0 dataset +taskType ='OpenEnded' # 'OpenEnded' only for v2.0. 'OpenEnded' or 'MultipleChoice' for v1.0 +dataType ='mscoco' # 'mscoco' only for v1.0. 'mscoco' for real and 'abstract_v002' for abstract for v1.0. +dataSubType ='train2014' +annFile ='%s/Annotations/%s%s_%s_annotations.json'%(dataDir, versionType, dataType, dataSubType) +quesFile ='%s/Questions/%s%s_%s_%s_questions.json'%(dataDir, versionType, taskType, dataType, dataSubType) +imgDir ='%s/Images/%s/%s/' %(dataDir, dataType, dataSubType) +resultType ='fake' +fileTypes = ['results', 'accuracy', 'evalQA', 'evalQuesType', 'evalAnsType'] + +# An example result json file has been provided in './Results' folder. 
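+# The templates below follow the directory layout documented in the VQA README
+# (./Annotations, ./Questions, ./Images and ./Results under dataDir).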
+
+[resFile, accuracyFile, evalQAFile, evalQuesTypeFile, evalAnsTypeFile] = ['%s/Results/%s%s_%s_%s_%s_%s.json'%(dataDir, versionType, taskType, dataType, dataSubType, \
+resultType, fileType) for fileType in fileTypes]
+
+# create vqa object and vqaRes object
+vqa = VQA(annFile, quesFile)
+vqaRes = vqa.loadRes(resFile, quesFile)
+
+# create vqaEval object by taking vqa and vqaRes
+vqaEval = VQAEval(vqa, vqaRes, n=2)  # n is precision of accuracy (number of places after decimal), default is 2
+
+# evaluate results
+"""
+If you have a list of question ids on which you would like to evaluate your results, pass it as a list to below function
+By default it uses all the question ids in annotation file
+"""
+vqaEval.evaluate()
+
+# print accuracies
+print("\n")
+print("Overall Accuracy is: %.02f\n" % (vqaEval.accuracy['overall']))
+print("Per Question Type Accuracy is the following:")
+for quesType in vqaEval.accuracy['perQuestionType']:
+    print("%s : %.02f" % (quesType, vqaEval.accuracy['perQuestionType'][quesType]))
+print("\n")
+print("Per Answer Type Accuracy is the following:")
+for ansType in vqaEval.accuracy['perAnswerType']:
+    print("%s : %.02f" % (ansType, vqaEval.accuracy['perAnswerType'][ansType]))
+print("\n")
+# demo how to use evalQA to retrieve low score result
+evals = [quesId for quesId in vqaEval.evalQA if vqaEval.evalQA[quesId] < 35]  # 35 is per question percentage accuracy
+if len(evals) > 0:
+    print('ground truth answers')
+    randomEval = random.choice(evals)
+    randomAnn = vqa.loadQA(randomEval)
+    vqa.showQA(randomAnn)
+
+    print('\n')
+    print('generated answer (accuracy %.02f)' % (vqaEval.evalQA[randomEval]))
+    ann = vqaRes.loadQA(randomEval)[0]
+    print("Answer: %s\n" % (ann['answer']))
+
+    imgId = randomAnn[0]['image_id']
+    imgFilename = 'COCO_' + dataSubType + '_'+ str(imgId).zfill(12) + '.jpg'
+    if os.path.isfile(imgDir + imgFilename):
+        I = io.imread(imgDir + imgFilename)
+        plt.imshow(I)
+        plt.axis('off')
+        plt.show()
+
+# plot accuracy for various question types
+plt.bar(range(len(vqaEval.accuracy['perQuestionType'])), list(vqaEval.accuracy['perQuestionType'].values()), align='center')
+plt.xticks(range(len(vqaEval.accuracy['perQuestionType'])), list(vqaEval.accuracy['perQuestionType'].keys()), rotation='0', fontsize=10)
+plt.title('Per Question Type Accuracy', fontsize=10)
+plt.xlabel('Question Types', fontsize=10)
+plt.ylabel('Accuracy', fontsize=10)
+plt.show()
+
+# save evaluation results to ./Results folder
+json.dump(vqaEval.accuracy, open(accuracyFile, 'w'))
+json.dump(vqaEval.evalQA, open(evalQAFile, 'w'))
+json.dump(vqaEval.evalQuesType, open(evalQuesTypeFile, 'w'))
+json.dump(vqaEval.evalAnsType, open(evalAnsTypeFile, 'w'))
+
diff --git a/models/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvaluation/__init__.py b/models/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvaluation/__init__.py
new file mode 100644
index 0000000..148424d
--- /dev/null
+++ b/models/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvaluation/__init__.py
@@ -0,0 +1 @@
+author='aagrawal'
diff --git a/models/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvaluation/vqaEval.py b/models/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvaluation/vqaEval.py
new file mode 100644
index 0000000..8a65604
--- /dev/null
+++ b/models/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvaluation/vqaEval.py
@@ -0,0 +1,192 @@
+# coding=utf-8
+
+__author__ = 'aagrawal'
+
+import re
+# This code is based on the code written by Tsung-Yi Lin for MSCOCO Python API available at the following link:
+#
(https://github.com/tylin/coco-caption/blob/master/pycocoevalcap/eval.py). +import sys + + +class VQAEval: + def __init__(self, vqa, vqaRes, n=2): + self.n = n + self.accuracy = {} + self.evalQA = {} + self.evalQuesType = {} + self.evalAnsType = {} + self.vqa = vqa + self.vqaRes = vqaRes + self.params = {'question_id': vqa.getQuesIds()} + self.contractions = {"aint": "ain't", "arent": "aren't", "cant": "can't", "couldve": "could've", "couldnt": "couldn't", \ + "couldn'tve": "couldn't've", "couldnt've": "couldn't've", "didnt": "didn't", "doesnt": "doesn't", "dont": "don't", "hadnt": "hadn't", \ + "hadnt've": "hadn't've", "hadn'tve": "hadn't've", "hasnt": "hasn't", "havent": "haven't", "hed": "he'd", "hed've": "he'd've", \ + "he'dve": "he'd've", "hes": "he's", "howd": "how'd", "howll": "how'll", "hows": "how's", "Id've": "I'd've", "I'dve": "I'd've", \ + "Im": "I'm", "Ive": "I've", "isnt": "isn't", "itd": "it'd", "itd've": "it'd've", "it'dve": "it'd've", "itll": "it'll", "let's": "let's", \ + "maam": "ma'am", "mightnt": "mightn't", "mightnt've": "mightn't've", "mightn'tve": "mightn't've", "mightve": "might've", \ + "mustnt": "mustn't", "mustve": "must've", "neednt": "needn't", "notve": "not've", "oclock": "o'clock", "oughtnt": "oughtn't", \ + "ow's'at": "'ow's'at", "'ows'at": "'ow's'at", "'ow'sat": "'ow's'at", "shant": "shan't", "shed've": "she'd've", "she'dve": "she'd've", \ + "she's": "she's", "shouldve": "should've", "shouldnt": "shouldn't", "shouldnt've": "shouldn't've", "shouldn'tve": "shouldn't've", \ + "somebody'd": "somebodyd", "somebodyd've": "somebody'd've", "somebody'dve": "somebody'd've", "somebodyll": "somebody'll", \ + "somebodys": "somebody's", "someoned": "someone'd", "someoned've": "someone'd've", "someone'dve": "someone'd've", \ + "someonell": "someone'll", "someones": "someone's", "somethingd": "something'd", "somethingd've": "something'd've", \ + "something'dve": "something'd've", "somethingll": "something'll", "thats": "that's", "thered": "there'd", "thered've": "there'd've", \ + "there'dve": "there'd've", "therere": "there're", "theres": "there's", "theyd": "they'd", "theyd've": "they'd've", \ + "they'dve": "they'd've", "theyll": "they'll", "theyre": "they're", "theyve": "they've", "twas": "'twas", "wasnt": "wasn't", \ + "wed've": "we'd've", "we'dve": "we'd've", "weve": "we've", "werent": "weren't", "whatll": "what'll", "whatre": "what're", \ + "whats": "what's", "whatve": "what've", "whens": "when's", "whered": "where'd", "wheres": "where's", "whereve": "where've", \ + "whod": "who'd", "whod've": "who'd've", "who'dve": "who'd've", "wholl": "who'll", "whos": "who's", "whove": "who've", "whyll": "why'll", \ + "whyre": "why're", "whys": "why's", "wont": "won't", "wouldve": "would've", "wouldnt": "wouldn't", "wouldnt've": "wouldn't've", \ + "wouldn'tve": "wouldn't've", "yall": "y'all", "yall'll": "y'all'll", "y'allll": "y'all'll", "yall'd've": "y'all'd've", \ + "y'alld've": "y'all'd've", "y'all'dve": "y'all'd've", "youd": "you'd", "youd've": "you'd've", "you'dve": "you'd've", \ + "youll": "you'll", "youre": "you're", "youve": "you've"} + self.manualMap = { 'none': '0', + 'zero': '0', + 'one': '1', + 'two': '2', + 'three': '3', + 'four': '4', + 'five': '5', + 'six': '6', + 'seven': '7', + 'eight': '8', + 'nine': '9', + 'ten': '10' + } + self.articles = ['a', + 'an', + 'the' + ] + + + self.periodStrip = re.compile("(?!<=\d)(\.)(?!\d)") + self.commaStrip = re.compile("(\d)(\,)(\d)") + self.punct = [';', r"/", '[', ']', '"', '{', '}', + '(', ')', '=', '+', '\\', '_', '-', + 
'>', '<', '@', '`', ',', '?', '!'] + + + def evaluate(self, quesIds=None): + if quesIds == None: + quesIds = [quesId for quesId in self.params['question_id']] + gts = {} + res = {} + for quesId in quesIds: + gts[quesId] = self.vqa.qa[quesId] + res[quesId] = self.vqaRes.qa[quesId] + + # ================================================= + # Compute accuracy + # ================================================= + accQA = [] + accQuesType = {} + accAnsType = {} + # print "computing accuracy" + step = 0 + for quesId in quesIds: + for ansDic in gts[quesId]['answers']: + ansDic['answer'] = ansDic['answer'].replace('\n', ' ') + ansDic['answer'] = ansDic['answer'].replace('\t', ' ') + ansDic['answer'] = ansDic['answer'].strip() + resAns = res[quesId]['answer'] + resAns = resAns.replace('\n', ' ') + resAns = resAns.replace('\t', ' ') + resAns = resAns.strip() + gtAcc = [] + gtAnswers = [ans['answer'] for ans in gts[quesId]['answers']] + + if len(set(gtAnswers)) > 1: + for ansDic in gts[quesId]['answers']: + ansDic['answer'] = self.processPunctuation(ansDic['answer']) + ansDic['answer'] = self.processDigitArticle(ansDic['answer']) + resAns = self.processPunctuation(resAns) + resAns = self.processDigitArticle(resAns) + + for gtAnsDatum in gts[quesId]['answers']: + otherGTAns = [item for item in gts[quesId]['answers'] if item!=gtAnsDatum] + matchingAns = [item for item in otherGTAns if item['answer'].lower()==resAns.lower()] + acc = min(1, float(len(matchingAns))/3) + gtAcc.append(acc) + quesType = gts[quesId]['question_type'] + ansType = gts[quesId]['answer_type'] + avgGTAcc = float(sum(gtAcc))/len(gtAcc) + accQA.append(avgGTAcc) + if quesType not in accQuesType: + accQuesType[quesType] = [] + accQuesType[quesType].append(avgGTAcc) + if ansType not in accAnsType: + accAnsType[ansType] = [] + accAnsType[ansType].append(avgGTAcc) + self.setEvalQA(quesId, avgGTAcc) + self.setEvalQuesType(quesId, quesType, avgGTAcc) + self.setEvalAnsType(quesId, ansType, avgGTAcc) + if step%100 == 0: + self.updateProgress(step/float(len(quesIds))) + step = step + 1 + + self.setAccuracy(accQA, accQuesType, accAnsType) + # print "Done computing accuracy" + + def processPunctuation(self, inText): + outText = inText + for p in self.punct: + if (p + ' ' in inText or ' ' + p in inText) or (re.search(self.commaStrip, inText) != None): + outText = outText.replace(p, '') + else: + outText = outText.replace(p, ' ') + outText = self.periodStrip.sub("", + outText, + re.UNICODE) + return outText + + def processDigitArticle(self, inText): + outText = [] + tempText = inText.lower().split() + for word in tempText: + word = self.manualMap.setdefault(word, word) + if word not in self.articles: + outText.append(word) + else: + pass + for wordId, word in enumerate(outText): + if word in self.contractions: + outText[wordId] = self.contractions[word] + outText = ' '.join(outText) + return outText + + def setAccuracy(self, accQA, accQuesType, accAnsType): + self.accuracy['overall'] = round(100*float(sum(accQA))/len(accQA), self.n) + self.accuracy['perQuestionType'] = {quesType: round(100*float(sum(accQuesType[quesType]))/len(accQuesType[quesType]), self.n) for quesType in accQuesType} + self.accuracy['perAnswerType'] = {ansType: round(100*float(sum(accAnsType[ansType]))/len(accAnsType[ansType]), self.n) for ansType in accAnsType} + + def setEvalQA(self, quesId, acc): + self.evalQA[quesId] = round(100*acc, self.n) + + def setEvalQuesType(self, quesId, quesType, acc): + if quesType not in self.evalQuesType: + self.evalQuesType[quesType] = {} + 
self.evalQuesType[quesType][quesId] = round(100*acc, self.n) + + def setEvalAnsType(self, quesId, ansType, acc): + if ansType not in self.evalAnsType: + self.evalAnsType[ansType] = {} + self.evalAnsType[ansType][quesId] = round(100*acc, self.n) + + def updateProgress(self, progress): + barLength = 20 + status = "" + if isinstance(progress, int): + progress = float(progress) + if not isinstance(progress, float): + progress = 0 + status = "error: progress var must be float\r\n" + if progress < 0: + progress = 0 + status = "Halt...\r\n" + if progress >= 1: + progress = 1 + status = "Done...\r\n" + block = int(round(barLength*progress)) + text = "\rFinshed Percent: [{0}] {1}% {2}".format( "#"*block + "-"*(barLength-block), int(progress*100), status) + sys.stdout.write(text) + sys.stdout.flush() diff --git a/models/common/vqa_tools/VQA/PythonHelperTools/vqaDemo.py b/models/common/vqa_tools/VQA/PythonHelperTools/vqaDemo.py new file mode 100644 index 0000000..406b596 --- /dev/null +++ b/models/common/vqa_tools/VQA/PythonHelperTools/vqaDemo.py @@ -0,0 +1,73 @@ +# coding: utf-8 + +from vqaTools.vqa import VQA +import random +import skimage.io as io +import matplotlib.pyplot as plt +import os + +dataDir ='../../VQA' +versionType ='v2_' # this should be '' when using VQA v2.0 dataset +taskType ='OpenEnded' # 'OpenEnded' only for v2.0. 'OpenEnded' or 'MultipleChoice' for v1.0 +dataType ='mscoco' # 'mscoco' only for v1.0. 'mscoco' for real and 'abstract_v002' for abstract for v1.0. +dataSubType ='train2014' +annFile ='%s/Annotations/%s%s_%s_annotations.json'%(dataDir, versionType, dataType, dataSubType) +quesFile ='%s/Questions/%s%s_%s_%s_questions.json'%(dataDir, versionType, taskType, dataType, dataSubType) +imgDir = '%s/Images/%s/%s/' %(dataDir, dataType, dataSubType) + +# initialize VQA api for QA annotations +vqa=VQA(annFile, quesFile) + +# load and display QA annotations for given question types +""" +All possible quesTypes for abstract and mscoco has been provided in respective text files in ../QuestionTypes/ folder. +""" +annIds = vqa.getQuesIds(quesTypes='how many'); +anns = vqa.loadQA(annIds) +randomAnn = random.choice(anns) +vqa.showQA([randomAnn]) +imgId = randomAnn['image_id'] +imgFilename = 'COCO_' + dataSubType + '_'+ str(imgId).zfill(12) + '.jpg' +if os.path.isfile(imgDir + imgFilename): + I = io.imread(imgDir + imgFilename) + plt.imshow(I) + plt.axis('off') + plt.show() + +# load and display QA annotations for given answer types +""" +ansTypes can be one of the following +yes/no +number +other +""" +annIds = vqa.getQuesIds(ansTypes='yes/no'); +anns = vqa.loadQA(annIds) +randomAnn = random.choice(anns) +vqa.showQA([randomAnn]) +imgId = randomAnn['image_id'] +imgFilename = 'COCO_' + dataSubType + '_'+ str(imgId).zfill(12) + '.jpg' +if os.path.isfile(imgDir + imgFilename): + I = io.imread(imgDir + imgFilename) + plt.imshow(I) + plt.axis('off') + plt.show() + +# load and display QA annotations for given images +""" +Usage: vqa.getImgIds(quesIds=[], quesTypes=[], ansTypes=[]) +Above method can be used to retrieve imageIds for given question Ids or given question types or given answer types. 
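+The three filters compose conjunctively; an empty list means that filter is skipped.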
+""" +ids = vqa.getImgIds() +annIds = vqa.getQuesIds(imgIds=random.sample(ids,5)); +anns = vqa.loadQA(annIds) +randomAnn = random.choice(anns) +vqa.showQA([randomAnn]) +imgId = randomAnn['image_id'] +imgFilename = 'COCO_' + dataSubType + '_'+ str(imgId).zfill(12) + '.jpg' +if os.path.isfile(imgDir + imgFilename): + I = io.imread(imgDir + imgFilename) + plt.imshow(I) + plt.axis('off') + plt.show() + diff --git a/models/common/vqa_tools/VQA/PythonHelperTools/vqaTools/__init__.py b/models/common/vqa_tools/VQA/PythonHelperTools/vqaTools/__init__.py new file mode 100644 index 0000000..072d8d9 --- /dev/null +++ b/models/common/vqa_tools/VQA/PythonHelperTools/vqaTools/__init__.py @@ -0,0 +1 @@ +__author__ = 'aagrawal' diff --git a/models/common/vqa_tools/VQA/PythonHelperTools/vqaTools/vqa.py b/models/common/vqa_tools/VQA/PythonHelperTools/vqaTools/vqa.py new file mode 100644 index 0000000..4f76961 --- /dev/null +++ b/models/common/vqa_tools/VQA/PythonHelperTools/vqaTools/vqa.py @@ -0,0 +1,179 @@ +__author__ = 'aagrawal' +__version__ = '0.9' + +# Interface for accessing the VQA dataset. + +# This code is based on the code written by Tsung-Yi Lin for MSCOCO Python API available at the following link: +# (https://github.com/pdollar/coco/blob/master/PythonAPI/pycocotools/coco.py). + +# The following functions are defined: +# VQA - VQA class that loads VQA annotation file and prepares data structures. +# getQuesIds - Get question ids that satisfy given filter conditions. +# getImgIds - Get image ids that satisfy given filter conditions. +# loadQA - Load questions and answers with the specified question ids. +# showQA - Display the specified questions and answers. +# loadRes - Load result file and create result object. + +# Help on each function can be accessed by: "help(COCO.function)" + +import json +import datetime +import copy + + +class VQA: + def __init__(self, annotation_file=None, question_file=None): + """ + Constructor of VQA helper class for reading and visualizing questions and answers. + :param annotation_file (str): location of VQA annotation file + :return: + """ + # load dataset + self.dataset = {} + self.questions = {} + self.qa = {} + self.qqa = {} + self.imgToQA = {} + if not annotation_file == None and not question_file == None: + # print 'loading VQA annotations and questions into memory...' + time_t = datetime.datetime.utcnow() + dataset = json.load(open(annotation_file, 'r')) + questions = json.load(open(question_file, 'r')) + # print datetime.datetime.utcnow() - time_t + self.dataset = dataset + self.questions = questions + self.createIndex() + + def createIndex(self): + imgToQA = {ann['image_id']: [] for ann in self.dataset['annotations']} + qa = {ann['question_id']: [] for ann in self.dataset['annotations']} + qqa = {ann['question_id']: [] for ann in self.dataset['annotations']} + for ann in self.dataset['annotations']: + imgToQA[ann['image_id']] += [ann] + qa[ann['question_id']] = ann + for ques in self.questions['questions']: + qqa[ques['question_id']] = ques + # print 'index created!' + + # create class members + self.qa = qa + self.qqa = qqa + self.imgToQA = imgToQA + + def info(self): + """ + Print information about the VQA annotation file. + :return: + """ + + # for key, value in self.datset['info'].items(): + # print '%s: %s'%(key, value) + + def getQuesIds(self, imgIds=[], quesTypes=[], ansTypes=[]): + """ + Get question ids that satisfy given filter conditions. 
default skips that filter + :param imgIds (int array) : get question ids for given imgs + quesTypes (str array) : get question ids for given question types + ansTypes (str array) : get question ids for given answer types + :return: ids (int array) : integer array of question ids + """ + imgIds = imgIds if type(imgIds) == list else [imgIds] + quesTypes = quesTypes if type(quesTypes) == list else [quesTypes] + ansTypes = ansTypes if type(ansTypes) == list else [ansTypes] + + if len(imgIds) == len(quesTypes) == len(ansTypes) == 0: + anns = self.dataset['annotations'] + else: + if not len(imgIds) == 0: + anns = sum([self.imgToQA[imgId] for imgId in imgIds if imgId in self.imgToQA], []) + else: + anns = self.dataset['annotations'] + anns = anns if len(quesTypes) == 0 else [ann for ann in anns if ann['question_type'] in quesTypes] + anns = anns if len(ansTypes) == 0 else [ann for ann in anns if ann['answer_type'] in ansTypes] + ids = [ann['question_id'] for ann in anns] + return ids + + def getImgIds(self, quesIds=[], quesTypes=[], ansTypes=[]): + """ + Get image ids that satisfy given filter conditions. default skips that filter + :param quesIds (int array) : get image ids for given question ids + quesTypes (str array) : get image ids for given question types + ansTypes (str array) : get image ids for given answer types + :return: ids (int array) : integer array of image ids + """ + quesIds = quesIds if type(quesIds) == list else [quesIds] + quesTypes = quesTypes if type(quesTypes) == list else [quesTypes] + ansTypes = ansTypes if type(ansTypes) == list else [ansTypes] + + if len(quesIds) == len(quesTypes) == len(ansTypes) == 0: + anns = self.dataset['annotations'] + else: + if not len(quesIds) == 0: + anns = sum([self.qa[quesId] for quesId in quesIds if quesId in self.qa], []) + else: + anns = self.dataset['annotations'] + anns = anns if len(quesTypes) == 0 else [ann for ann in anns if ann['question_type'] in quesTypes] + anns = anns if len(ansTypes) == 0 else [ann for ann in anns if ann['answer_type'] in ansTypes] + ids = [ann['image_id'] for ann in anns] + return ids + + def loadQA(self, ids=[]): + """ + Load questions and answers with the specified question ids. + :param ids (int array) : integer ids specifying question ids + :return: qa (object array) : loaded qa objects + """ + if type(ids) == list: + return [self.qa[id] for id in ids] + elif type(ids) == int: + return [self.qa[ids]] + + def showQA(self, anns): + """ + Display the specified annotations. + :param anns (array of object): annotations to display + :return: None + """ + if len(anns) == 0: + return 0 + for ann in anns: + quesId = ann['question_id'] + print("Question: %s" % (self.qqa[quesId]['question'])) + for ans in ann['answers']: + print("Answer %d: %s" % (ans['answer_id'], ans['answer'])) + + def loadRes(self, resFile, quesFile): + """ + Load result file and return a result object. + :param resFile (str) : file name of result file + :return: res (obj) : result api object + """ + res = VQA() + res.questions = json.load(open(quesFile)) + res.dataset['info'] = copy.deepcopy(self.questions['info']) + res.dataset['task_type'] = copy.deepcopy(self.questions['task_type']) + res.dataset['data_type'] = copy.deepcopy(self.questions['data_type']) + res.dataset['data_subtype'] = copy.deepcopy(self.questions['data_subtype']) + res.dataset['license'] = copy.deepcopy(self.questions['license']) + + # print 'Loading and preparing results... 
+        time_t = datetime.datetime.utcnow()
+        anns = json.load(open(resFile))
+        assert isinstance(anns, list), 'results is not an array of objects'
+        annsQuesIds = [ann['question_id'] for ann in anns]
+        assert set(annsQuesIds) == set(self.getQuesIds()), \
+            'Results do not correspond to the current VQA set. Either the results do not have predictions for all question ids in the annotation file, or there is at least one question id that does not belong to the question ids in the annotation file.'
+        for ann in anns:
+            quesId = ann['question_id']
+            if res.dataset['task_type'] == 'Multiple Choice':
+                assert ann['answer'] in self.qqa[quesId]['multiple_choices'], 'predicted answer is not one of the multiple choices'
+            qaAnn = self.qa[quesId]
+            ann['image_id'] = qaAnn['image_id']
+            ann['question_type'] = qaAnn['question_type']
+            ann['answer_type'] = qaAnn['answer_type']
+        # print 'DONE (t=%0.2fs)'%((datetime.datetime.utcnow() - time_t).total_seconds())
+
+        res.dataset['annotations'] = anns
+        res.createIndex()
+        return res
diff --git a/models/common/vqa_tools/VQA/README.md b/models/common/vqa_tools/VQA/README.md
new file mode 100644
index 0000000..439d59d
--- /dev/null
+++ b/models/common/vqa_tools/VQA/README.md
@@ -0,0 +1,80 @@
+Python API and Evaluation Code for v2.0 and v1.0 releases of the VQA dataset.
+===================
+## VQA v2.0 release ##
+This release consists of
+- Real
+  - 82,783 MS COCO training images, 40,504 MS COCO validation images and 81,434 MS COCO testing images (images are obtained from the [MS COCO website](http://mscoco.org/dataset/#download))
+  - 443,757 questions for training, 214,354 questions for validation and 447,793 questions for testing
+  - 4,437,570 answers for training and 2,143,540 answers for validation (10 per question)
+
+There is only one type of task
+- Open-ended task
+
+## VQA v1.0 release ##
+This release consists of
+- Real
+  - 82,783 MS COCO training images, 40,504 MS COCO validation images and 81,434 MS COCO testing images (images are obtained from the [MS COCO website](http://mscoco.org/dataset/#download))
+  - 248,349 questions for training, 121,512 questions for validation and 244,302 questions for testing (3 per image)
+  - 2,483,490 answers for training and 1,215,120 answers for validation (10 per question)
+- Abstract
+  - 20,000 training images, 10,000 validation images and 20,000 testing images
+  - 60,000 questions for training, 30,000 questions for validation and 60,000 questions for testing (3 per image)
+  - 600,000 answers for training and 300,000 answers for validation (10 per question)
+
+There are two types of tasks
+- Open-ended task
+- Multiple-choice task (18 choices per question)
+
+## Requirements ##
+- python 2.7
+- scikit-image (visit [this page](http://scikit-image.org/docs/dev/install.html) for installation)
+- matplotlib (visit [this page](http://matplotlib.org/users/installing.html) for installation)
+
+## Files ##
+./Questions
+- For v2.0, download the question files from the [VQA download page](http://www.visualqa.org/download.html), extract them and place in this folder.
+- For v1.0, for both real and abstract, question files can be found on the [VQA v1 download page](http://www.visualqa.org/vqa_v1_download.html).
+- Question files from Beta v0.9 release (123,287 MSCOCO train and val images, 369,861 questions, 3,698,610 answers) can be found below
+  - [training question files](http://visualqa.org/data/mscoco/prev_rel/Beta_v0.9/Questions_Train_mscoco.zip)
+  - [validation question files](http://visualqa.org/data/mscoco/prev_rel/Beta_v0.9/Questions_Val_mscoco.zip)
+- Question files from Beta v0.1 release (10k MSCOCO images, 30k questions, 300k answers) can be found [here](http://visualqa.org/data/mscoco/prev_rel/Beta_v0.1/Questions_Train_mscoco.zip).
+
+./Annotations
+- For v2.0, download the annotation files from the [VQA download page](http://www.visualqa.org/download.html), extract them and place in this folder.
+- For v1.0, for both real and abstract, annotation files can be found on the [VQA v1 download page](http://www.visualqa.org/vqa_v1_download.html).
+- Annotation files from Beta v0.9 release (123,287 MSCOCO train and val images, 369,861 questions, 3,698,610 answers) can be found below
+  - [training annotation files](http://visualqa.org/data/mscoco/prev_rel/Beta_v0.9/Annotations_Train_mscoco.zip)
+  - [validation annotation files](http://visualqa.org/data/mscoco/prev_rel/Beta_v0.9/Annotations_Val_mscoco.zip)
+- Annotation files from Beta v0.1 release (10k MSCOCO images, 30k questions, 300k answers) can be found [here](http://visualqa.org/data/mscoco/prev_rel/Beta_v0.1/Annotations_Train_mscoco.zip).
+
+./Images
+- For real images, create a directory named mscoco inside this directory. Inside it, create directories named train2014, val2014 and test2015 for the train, val and test splits respectively, download the respective images from the [MS COCO website](http://mscoco.org/dataset/#download) and place them in the respective folders.
+- For abstract scenes, create a directory named abstract_v002 inside this directory. Inside it, create directories named train2015, val2015 and test2015 for the train, val and test splits respectively, download the respective images from the [VQA download page](http://www.visualqa.org/download.html) and place them in the respective folders.
+
+./PythonHelperTools
+- This directory contains the Python API to read and visualize the VQA dataset
+- vqaDemo.py (demo script)
+- vqaTools (API to read and visualize data)
+
+./PythonEvaluationTools
+- This directory contains the Python evaluation code
+- vqaEvalDemo.py (evaluation demo script)
+- vqaEvaluation (evaluation code)
+
+./Results
+- OpenEnded_mscoco_train2014_fake_results.json (an example of a fake results file for v1.0 to run the demo)
+- Visit the [VQA evaluation page](http://visualqa.org/evaluation) for more details.
+
+./QuestionTypes
+- This directory contains the following lists of question types for both real and abstract questions (question types are unchanged from v1.0 to v2.0). In a list, if there are question types of length n+k and length n with the same first n words, then the question type of length n does not include questions that belong to the question type of length n+k.
+- mscoco_question_types.txt
+- abstract_v002_question_types.txt
+
+## References ##
+- [VQA: Visual Question Answering](http://visualqa.org/)
+- [Microsoft COCO](http://mscoco.org/)
+
+## Developers ##
+- Aishwarya Agrawal (Virginia Tech)
+- Code for the API is based on the [MSCOCO API code](https://github.com/pdollar/coco).
+- The format of the code for evaluation is based on the [MSCOCO evaluation code](https://github.com/tylin/coco-caption).
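+
+## Usage Example ##
+A minimal sketch of reading data with the Python API above (the file names are illustrative; substitute the release and split you downloaded):
+
+```python
+import random
+from vqaTools.vqa import VQA
+
+annFile = './Annotations/v2_mscoco_val2014_annotations.json'         # assumed path
+quesFile = './Questions/v2_OpenEnded_mscoco_val2014_questions.json'  # assumed path
+
+vqa = VQA(annFile, quesFile)                        # loads and indexes both files
+quesIds = vqa.getQuesIds(quesTypes=['what color'])  # filter by question type
+anns = vqa.loadQA(random.sample(quesIds, 3))        # load three random QA entries
+vqa.showQA(anns)                                    # print each question and its 10 answers
+```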
diff --git a/models/common/vqa_tools/__init__.py b/models/common/vqa_tools/__init__.py new file mode 100644 index 0000000..9b98da8 --- /dev/null +++ b/models/common/vqa_tools/__init__.py @@ -0,0 +1,8 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. + SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +__author__ = "aagrawal" diff --git a/models/common/vqa_tools/aokvqa/LICENSE b/models/common/vqa_tools/aokvqa/LICENSE new file mode 100644 index 0000000..663d675 --- /dev/null +++ b/models/common/vqa_tools/aokvqa/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2022 Allen Institute for Artificial Intelligence + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/models/common/vqa_tools/aokvqa/README.md b/models/common/vqa_tools/aokvqa/README.md new file mode 100644 index 0000000..21caefa --- /dev/null +++ b/models/common/vqa_tools/aokvqa/README.md @@ -0,0 +1,207 @@ +# A-OKVQA + +Official repository for **A-OKVQA: A Benchmark for Visual Question Answering using World Knowledge**. + +Links: [[Paper]](https://arxiv.org/abs/2206.01718) [[Website]](http://a-okvqa.allenai.org) [[Leaderboard]](https://leaderboard.allenai.org/a-okvqa/submissions/public) + +### Abstract + +The Visual Question Answering (VQA) task aspires to provide a meaningful testbed for the development of AI models that can jointly reason over visual and natural language inputs. Despite a proliferation of VQA datasets, this goal is hindered by a set of common limitations. These include a reliance on relatively simplistic questions that are repetitive in both concepts and linguistic structure, little world knowledge needed outside of the paired image, and limited reasoning required to arrive at the correct answer. We introduce A-OKVQA, a crowdsourced dataset composed of a diverse set of about 25K questions requiring a broad base of commonsense and world knowledge to answer. In contrast to the existing knowledge-based VQA datasets, the questions generally cannot be answered by simply querying a knowledge base, and instead require some form of commonsense reasoning about the scene depicted in the image. We demonstrate the potential of this new dataset through a detailed analysis of its contents and baseline performance measurements over a variety of state-of-the-art vision–language models. + +![dataset_web](https://user-images.githubusercontent.com/28768645/170799740-f0d9ea60-6aff-4322-98d5-cae8e05983f4.svg) + +


+ +#### Table of Contents + +- [Getting started](#getting-started) + * [Downloading the dataset](#downloading-the-dataset) +- [Evaluation & Leaderboard](#evaluation) +- [Codebase](#codebase) + * [Preparing data](#preparing-data) + * [Models and Predictions](#models-and-predictions) + +
+ +## Getting started + +```bash +git clone --single-branch --recurse-submodules https://github.com/allenai/aokvqa.git + +cd aokvqa +export PYTHONPATH=. + +conda env create --name aokvqa +conda activate aokvqa +``` + +### Downloading the dataset + +```bash +export AOKVQA_DIR=./datasets/aokvqa/ +mkdir -p ${AOKVQA_DIR} + +curl -fsSL https://prior-datasets.s3.us-east-2.amazonaws.com/aokvqa/aokvqa_v1p0.tar.gz | tar xvz -C ${AOKVQA_DIR} +``` + +
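+
+As a quick sanity check (a sketch; the file-name pattern follows `load_aokvqa.py`), the archive should leave one JSON file per split in `${AOKVQA_DIR}`:
+
+```python
+import os
+
+aokvqa_dir = os.getenv('AOKVQA_DIR')
+print(sorted(os.listdir(aokvqa_dir)))
+# expected to include aokvqa_v1p0_{train,val,test}.json
+```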
+#### Downloading COCO 2017
+
+```bash
+export COCO_DIR=./datasets/coco/
+mkdir -p ${COCO_DIR}
+
+for split in train val test; do
+    wget "http://images.cocodataset.org/zips/${split}2017.zip"
+    unzip "${split}2017.zip" -d ${COCO_DIR}; rm "${split}2017.zip"
+done
+
+wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip
+unzip annotations_trainval2017.zip -d ${COCO_DIR}; rm annotations_trainval2017.zip
+```
+
+ +Loading our dataset is easy! Just grab our [load_aokvqa.py](https://github.com/allenai/aokvqa/blob/main/load_aokvqa.py) file and refer to the following code. + +```python +import os +aokvqa_dir = os.getenv('AOKVQA_DIR') + +from load_aokvqa import load_aokvqa, get_coco_path +train_dataset = load_aokvqa(aokvqa_dir, 'train') # also 'val' or 'test' +``` + +
+#### Example dataset entry
+
+```python
+dataset_example = train_dataset[0]
+
+print(dataset_example['question_id'])
+# 22MexNkBPpdZGX6sxbxVBH
+
+coco_dir = os.getenv('COCO_DIR')
+image_path = get_coco_path('train', dataset_example['image_id'], coco_dir)
+print(image_path)
+# ./datasets/coco/train2017/000000299207.jpg
+
+print(dataset_example['question'])
+print(dataset_example['choices'])
+# What is the man by the bags awaiting?
+# ['skateboarder', 'train', 'delivery', 'cab']
+
+correct_choice = dataset_example['choices'][dataset_example['correct_choice_idx']]
+# Correct: cab
+
+print(dataset_example['rationales'][0])
+# A train would not be on the street, he would not have luggage waiting for a delivery, and the skateboarder is there and not paying attention to him so a cab is the only possible answer.
+```
+
+
+## Evaluation
+
+Please prepare `predictions_{split}.json` files (for `split: {val,test}`) in the format below. You may omit either the `multiple_choice` or the `direct_answer` field if you only want to evaluate one setting.
+
+```python
+{
+    '<question_id>' : {
+        'multiple_choice' : '<prediction>',
+        'direct_answer' : '<prediction>'
+    }
+}
+```
+
+You can run evaluation on the validation set as follows.
+
+```bash
+python evaluation/eval_predictions.py --aokvqa-dir ${AOKVQA_DIR} --split val --preds ./predictions_val.json
+```
+
+### Leaderboard
+
+You may submit `predictions_test.json` to the [leaderboard](https://leaderboard.allenai.org/a-okvqa/submissions/get-started).
+
+## Codebase
+
+We provide all code and pretrained models necessary to replicate our experiments for Large-Scale Pretrained Models (sec. 5.2) and Rationale Generation (sec. 5.3).
+
+### Preparing data
+
+```bash
+export FEATURES_DIR=./features/
+mkdir -p ${FEATURES_DIR}
+```
+
+You can compute CLIP features for our vocabulary and dataset. These are most commonly used by our other experiments.
+
+```bash
+python data_scripts/encode_vocab_clip.py --vocab ${AOKVQA_DIR}/large_vocab_train.csv --model-type ViT-B/32 --out ${FEATURES_DIR}/clip-ViT-B-32_large_vocab.pt
+
+for split in train val test; do
+    python data_scripts/extract_clip_features.py --aokvqa-dir ${AOKVQA_DIR} --coco-dir ${COCO_DIR} --split ${split} --model-type ViT-B/32 --out ${FEATURES_DIR}/clip-ViT-B-32_${split}.pt
+done
+```
+
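+
+Each feature file written above is a plain `torch.save` dictionary keyed by question id, with a `question` and an `image` tensor per entry; a quick inspection sketch (paths assume the commands above were run with ViT-B/32, whose embeddings are 512-d):
+
+```python
+import torch
+
+feats = torch.load('./features/clip-ViT-B-32_val.pt')
+qid = next(iter(feats))
+print(feats[qid]['question'].shape, feats[qid]['image'].shape)
+# torch.Size([512]) torch.Size([512])
+```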
+#### For training ClipCap with a transformer mapping network
+
+If you want to train our ClipCap models with the transformer mapping network (instead of an MLP, like we do), you'll also need to run `extract_clip_features.py` with `--model-type RN50x4`.
+
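+
+That is, repeat the feature-extraction loop above with a different model type, e.g. `python data_scripts/extract_clip_features.py --aokvqa-dir ${AOKVQA_DIR} --coco-dir ${COCO_DIR} --split val --model-type RN50x4 --out ${FEATURES_DIR}/clip-RN50x4_val.pt` (the output file name here is only a suggestion).
+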
+ +
+#### For ResNet and BERT input features
+
+Our ResNet and BERT classification experiments require ResNet and BERT features, respectively, instead of CLIP features. To generate these, run the following commands:
+
+```bash
+# ResNet
+for split in train val test; do
+    python data_scripts/extract_resnet_features.py --aokvqa-dir ${AOKVQA_DIR} --coco-dir ${COCO_DIR} --split ${split} --out ${FEATURES_DIR}/resnet_${split}.pt
+done
+
+# BERT
+for split in train val test; do
+    python data_scripts/extract_bert_features.py --aokvqa-dir ${AOKVQA_DIR} --split ${split} --out ${FEATURES_DIR}/bert_${split}.pt
+done
+```
+
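+
+For reference (taken from the extraction scripts rather than the original text): the ResNet files store one 2048-d pooled ResNet-50 image embedding per question, the BERT files one 768-d mean-pooled question embedding, and both use the same question-id-keyed dictionary layout as the CLIP features above.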
+ +### Models and Predictions + +```bash +export LOG_DIR=./logs/ +export PREDS_DIR=./predictions/ +export PT_MODEL_DIR=./pretrained_models/ +mkdir -p ${LOG_DIR} ${PREDS_DIR} ${PT_MODEL_DIR} +``` + +
+#### Download our pretrained model weights
+
+```bash
+# Checkpoints for transfer learning experiments
+curl -fsSL https://prior-model-weights.s3.us-east-2.amazonaws.com/aokvqa/transfer_exp_checkpoints.tar.gz | tar xvz -C ${PT_MODEL_DIR}/aokvqa_models
+
+# Checkpoints for ClipCap models (generating answers and rationales)
+curl -fsSL https://prior-model-weights.s3.us-east-2.amazonaws.com/aokvqa/clipcap_checkpoints.tar.gz | tar xvz -C ${PT_MODEL_DIR}/aokvqa_models
+```
+
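+
+Note that `tar -C` does not create its target directory; if the commands above fail with a missing-directory error, create it first (e.g. `mkdir -p ${PT_MODEL_DIR}/aokvqa_models`).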
+
+We have included instructions for replicating each of our experiments (see the README.md files below).
+
+All Python scripts should be run from the root of this repository. Please be sure to first run the installation and data preparation as directed above.
+
+- [Heuristics](./heuristics/README.md)
+- [Transfer Learning Experiments](./transfer_experiments/README.md)
+- [Querying GPT-3](./gpt3/README.md)
+- [ClipCap](https://github.com/allenai/aokvqa/blob/ClipCap/README.md)
+- [Generating Captions & Rationales](https://github.com/allenai/aokvqa/blob/ClipCap/README.md)
+
+For each experiment, we follow this prediction file naming scheme: `{model-name}_{split}-{setting}.json` (e.g. `random-weighted_val-mc.json` or `random-weighted_test-da.json`). As examples in these README files, we produce predictions on the validation set.
+
+We unify predictions for each split before evaluation. (You can omit the `--mc` or the `--da` prediction file if you only want to evaluate one setting.)
+
+```bash
+python evaluation/prepare_predictions.py --aokvqa-dir ${AOKVQA_DIR} --split val --mc ./predictions_val-mc.json --da ./predictions_val-da.json --out ./predictions_val.json
+# repeat for test split ...
+```
diff --git a/models/common/vqa_tools/aokvqa/data_scripts/build_vocab.py b/models/common/vqa_tools/aokvqa/data_scripts/build_vocab.py
new file mode 100644
index 0000000..2c44686
--- /dev/null
+++ b/models/common/vqa_tools/aokvqa/data_scripts/build_vocab.py
@@ -0,0 +1,45 @@
+import os
+import argparse
+from collections import Counter
+import pathlib
+
+from load_aokvqa import load_aokvqa
+
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
+parser.add_argument('--out', type=pathlib.Path, required=True, dest='output_file')
+args = parser.parse_args()
+
+
+# Build vocab from the train set: correct choices, plus any choice or direct answer appearing in >= 3 questions
+
+train_set = load_aokvqa(args.aokvqa_dir, 'train')
+
+vocab = []
+all_choices = Counter()
+direct_answers = Counter()
+
+for i in train_set:
+    vocab.append(i['choices'][i['correct_choice_idx']])
+    all_choices.update(i['choices'])
+    direct_answers.update(set(i['direct_answers']))
+vocab += [k for k, v in all_choices.items() if v >= 3]
+vocab += [k for k, v in direct_answers.items() if v >= 3]
+
+vocab = sorted(set(vocab))
+print(f"Vocab size: {len(vocab)}")
+
+# Save vocabulary output
+
+with open(args.output_file, 'w') as f:
+    for v in vocab:
+        print(v, file=f)
+
+## Check validation set coverage
+
+val_set = load_aokvqa(args.aokvqa_dir, 'val')
+
+val_acc = [v['choices'][v['correct_choice_idx']] in vocab for v in val_set]
+val_acc = sum(val_acc) / len(val_acc) * 100
+print(f"Val set coverage: {val_acc:.2f}%")
diff --git a/models/common/vqa_tools/aokvqa/data_scripts/encode_vocab_clip.py b/models/common/vqa_tools/aokvqa/data_scripts/encode_vocab_clip.py
new file mode 100644
index 0000000..1dce760
--- /dev/null
+++ b/models/common/vqa_tools/aokvqa/data_scripts/encode_vocab_clip.py
@@ -0,0 +1,26 @@
+import json
+from tqdm import tqdm
+import argparse
+import pathlib
+
+import torch
+import clip
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--vocab', type=pathlib.Path, required=True, dest='vocab_file')
+parser.add_argument('--model-type', type=str, choices=['RN50', 'RN50x4', 'RN50x16', 'RN50x64', 'RN101', 'ViT-B/32', 'ViT-B/16', 'ViT-L/14', 'ViT-L/14@336px'], required=True, dest='model_type')
+parser.add_argument('--out', type=pathlib.Path, required=True, dest='output_file')
+args = 
parser.parse_args() + +assert args.output_file.suffix == '.pt' + +device = "cuda" if torch.cuda.is_available() else "cpu" +model, preprocess = clip.load(args.model_type, device=device) + +with torch.no_grad(): + a = open(args.vocab_file).read().splitlines() + mc_text = clip.tokenize(a).to(device) + mc_text_features = torch.stack([model.encode_text(mct.unsqueeze(0)).cpu() for mct in tqdm(mc_text)], dim=1)[0] + mc_text_features = mc_text_features.float() + model_name = args.model_type.replace('/', '-').replace('@', '-') + torch.save(mc_text_features, args.output_file) diff --git a/models/common/vqa_tools/aokvqa/data_scripts/extract_bert_features.py b/models/common/vqa_tools/aokvqa/data_scripts/extract_bert_features.py new file mode 100644 index 0000000..60cd40f --- /dev/null +++ b/models/common/vqa_tools/aokvqa/data_scripts/extract_bert_features.py @@ -0,0 +1,50 @@ +import os +import argparse +import pathlib +from tqdm import tqdm + +import torch +from transformers import AutoTokenizer, AutoModel + +from load_aokvqa import load_aokvqa + + +parser = argparse.ArgumentParser() +parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir') +parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True) +parser.add_argument('--out', type=pathlib.Path, required=True, dest='output_file') +args = parser.parse_args() + +assert args.output_file.suffix == '.pt' + +## Load dataset + +dataset = load_aokvqa(args.aokvqa_dir, args.split) + +## Load model + +tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/bert-base-nli-mean-tokens') +model = AutoModel.from_pretrained('sentence-transformers/bert-base-nli-mean-tokens') +device = "cuda" if torch.cuda.is_available() else "cpu" +model = model.to(device) +model.eval() + +def mean_pooling(model_output, attention_mask): + token_embeddings = model_output[0] # First element of model_output contains all token embeddings + input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() + return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) + +## Encoding loop + +with torch.no_grad(): + embeddings = {} + + for d in tqdm(dataset): + encoded_input = tokenizer([d['question']], padding=True, return_tensors='pt') + encoded_input = {k:v.to(device) for k,v in encoded_input.items()} + e = mean_pooling(model(**encoded_input), encoded_input['attention_mask']) + embeddings[d['question_id']] = { + 'question' : e[0].cpu() + } + + torch.save(embeddings, args.output_file) diff --git a/models/common/vqa_tools/aokvqa/data_scripts/extract_clip_features.py b/models/common/vqa_tools/aokvqa/data_scripts/extract_clip_features.py new file mode 100644 index 0000000..20d0455 --- /dev/null +++ b/models/common/vqa_tools/aokvqa/data_scripts/extract_clip_features.py @@ -0,0 +1,51 @@ +import os +from PIL import Image +from tqdm import tqdm +import argparse +import pathlib + +import torch +import clip + +from load_aokvqa import load_aokvqa, get_coco_path + + +parser = argparse.ArgumentParser() +parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir') +parser.add_argument('--coco-dir', type=pathlib.Path, required=True, dest='coco_dir') +parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True) +parser.add_argument('--model-type', type=str, choices=['RN50', 'RN50x4', 'RN50x16', 'RN50x64', 'RN101', 'ViT-B/32', 'ViT-B/16', 'ViT-L/14', 'ViT-L/14@336px'], required=True, dest='model_type') 
+parser.add_argument('--out', type=pathlib.Path, required=True, dest='output_file')
+args = parser.parse_args()
+
+assert args.output_file.suffix == '.pt'
+
+## Load dataset
+
+dataset = load_aokvqa(args.aokvqa_dir, args.split)
+
+## Load model
+
+device = "cuda" if torch.cuda.is_available() else "cpu"
+model, preprocess = clip.load(args.model_type, device=device)
+
+## Encoding loop
+
+with torch.no_grad():
+    embeddings = {}
+
+    for d in tqdm(dataset):
+        q = d["question"]
+        q_text = clip.tokenize(q).to(device)
+        q_text_features = model.encode_text(q_text)
+
+        img = Image.open(get_coco_path(args.split, d['image_id'], args.coco_dir))
+        img = preprocess(img).unsqueeze(0).to(device)
+        image_features = model.encode_image(img)
+
+        embeddings[d['question_id']] = {
+            'question' : q_text_features[0].float().cpu(),
+            'image' : image_features[0].float().cpu(),
+        }
+
+    torch.save(embeddings, args.output_file)
diff --git a/models/common/vqa_tools/aokvqa/data_scripts/extract_resnet_features.py b/models/common/vqa_tools/aokvqa/data_scripts/extract_resnet_features.py
new file mode 100644
index 0000000..0d7277b
--- /dev/null
+++ b/models/common/vqa_tools/aokvqa/data_scripts/extract_resnet_features.py
@@ -0,0 +1,62 @@
+import os
+import argparse
+import pathlib
+from tqdm import tqdm
+from PIL import Image
+
+import torch
+import torch.nn as nn
+from torchvision import models
+from torchvision import transforms as T
+
+from load_aokvqa import load_aokvqa, get_coco_path
+
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
+parser.add_argument('--coco-dir', type=pathlib.Path, required=True, dest='coco_dir')
+parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True)
+parser.add_argument('--out', type=pathlib.Path, required=True, dest='output_file')
+args = parser.parse_args()
+
+assert args.output_file.suffix == '.pt'
+
+## Load dataset
+
+dataset = load_aokvqa(args.aokvqa_dir, args.split)
+
+## Load model
+
+resnet_preprocess = T.Compose([
+    T.Resize(size=224, interpolation=T.InterpolationMode.BICUBIC),
+    T.CenterCrop(size=(224, 224)),
+    T.ToTensor(),
+    T.Normalize(
+        mean=[0.485, 0.456, 0.406],
+        std=[0.229, 0.224, 0.225]
+    )
+])
+
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+resnet_model = models.resnet50(pretrained=True)
+resnet_model = torch.nn.Sequential(
+    *list(resnet_model.children())[:-1],
+    nn.Flatten()
+)  # strip classification layer
+resnet_model = resnet_model.to(device).eval()  # eval mode: BatchNorm must use running stats for deterministic features
+
+## Encoding loop
+
+with torch.no_grad():
+    embeddings = {}
+
+    for d in tqdm(dataset):
+        img = Image.open(get_coco_path(args.split, d['image_id'], args.coco_dir)).convert('RGB')
+        resnet_input = resnet_preprocess(img).unsqueeze(0).to(device)
+        resnet_features = resnet_model(resnet_input)
+        embeddings[d['question_id']] = {
+            'image' : resnet_features[0].cpu()
+        }
+
+    torch.save(embeddings, args.output_file)
diff --git a/models/common/vqa_tools/aokvqa/environment.yml b/models/common/vqa_tools/aokvqa/environment.yml
new file mode 100644
index 0000000..58284ec
--- /dev/null
+++ b/models/common/vqa_tools/aokvqa/environment.yml
@@ -0,0 +1,36 @@
+name: aokvqa
+channels:
+  - pytorch
+  - nvidia
+  - huggingface
+  - conda-forge
+  - defaults
+dependencies:
+  - python=3.7
+  - cudatoolkit=11.3
+  - numpy=1.21.6
+  - pytorch=1.11.0
+  - torchvision=0.12.0
+  - pytorch-lightning=1.6.3
+  - torchmetrics=0.8.1
+  - gdown=4.4.0
+  - pip=22.0.4
+  - pip:
+    - argparse==1.4.0
+    - Pillow==9.0.1
+    - tensorboard==2.9.0
+    - ftfy==6.1.1
+    - regex==2022.3.15
+    - tqdm==4.64.0
+    - clip @ git+https://github.com/openai/CLIP.git@b46f5ac7587d2e1862f8b7b1573179d80dcdd620
+    - openai==0.18.1
+    - nltk==3.7
+    - sacrebleu==2.0.0
+    - sacremoses==0.0.53
+    - sentence-transformers==2.2.0
+    - datasets==2.1.0
+    - tokenizers==0.10.3
+    - transformers==4.10.3
+
+# Next: resolve conflict between sentence-transformers and pytorch-lightning
+# pip uninstall sentencepiece
diff --git a/models/common/vqa_tools/aokvqa/evaluation/eval_predictions.py b/models/common/vqa_tools/aokvqa/evaluation/eval_predictions.py
new file mode 100644
index 0000000..a7b5dbe
--- /dev/null
+++ b/models/common/vqa_tools/aokvqa/evaluation/eval_predictions.py
@@ -0,0 +1,97 @@
+import argparse
+import pathlib
+import json
+import glob
+
+from load_aokvqa import load_aokvqa
+
+
+def eval_aokvqa(dataset, preds, multiple_choice=False, strict=True):
+
+    if isinstance(dataset, list):
+        dataset = { dataset[i]['question_id'] : dataset[i] for i in range(len(dataset)) }
+
+    if multiple_choice is False:
+        dataset = {k:v for k,v in dataset.items() if v['difficult_direct_answer'] is False}
+
+    if strict:
+        dataset_qids = set(dataset.keys())
+        preds_qids = set(preds.keys())
+        assert dataset_qids.issubset(preds_qids)
+
+    # dataset = q_id (str) : dataset element (dict)
+    # preds = q_id (str) : prediction (str)
+
+    acc = []
+
+    for q in dataset.keys():
+        if q not in preds.keys():
+            acc.append(0.0)
+            continue
+
+        pred = preds[q]
+        choices = dataset[q]['choices']
+        direct_answers = dataset[q]['direct_answers']
+
+        ## Multiple Choice setting
+        if multiple_choice:
+            if strict:
+                assert pred in choices, 'Prediction must be a valid choice'
+            correct_choice_idx = dataset[q]['correct_choice_idx']
+            acc.append(float(pred == choices[correct_choice_idx]))
+        ## Direct Answer setting
+        else:
+            # Soft VQA accuracy: full credit once the prediction matches 3 of the direct answers
+            num_match = sum([pred.lower() == da.lower() for da in direct_answers])
+            vqa_acc = min(1.0, num_match / 3.0)
+            acc.append(vqa_acc)
+
+    acc = sum(acc) / len(acc) * 100
+
+    return acc
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
+    parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True)
+    parser.add_argument('--preds', type=str, required=True, dest='prediction_files')
+    args = parser.parse_args()
+
+    dataset = load_aokvqa(args.aokvqa_dir, args.split)
+
+    for prediction_file in glob.glob(args.prediction_files):
+        predictions = json.load(open(prediction_file, 'r'))
+
+        # Multiple choice
+
+        mc_predictions = {}
+
+        for q in predictions.keys():
+            if 'multiple_choice' in predictions[q].keys():
+                mc_predictions[q] = predictions[q]['multiple_choice']
+
+        if mc_predictions != {}:
+            mc_acc = eval_aokvqa(
+                dataset,
+                mc_predictions,
+                multiple_choice=True,
+                strict=False
+            )
+            print(prediction_file, 'MC', mc_acc)
+
+        # Direct Answer
+
+        da_predictions = {}
+
+        for q in predictions.keys():
+            if 'direct_answer' in predictions[q].keys():
+                da_predictions[q] = predictions[q]['direct_answer']
+
+        if da_predictions != {}:
+            da_acc = eval_aokvqa(
+                dataset,
+                da_predictions,
+                multiple_choice=False,
+                strict=False
+            )
+            print(prediction_file, 'DA', da_acc)
diff --git a/models/common/vqa_tools/aokvqa/evaluation/load_aokvqa.py b/models/common/vqa_tools/aokvqa/evaluation/load_aokvqa.py
new file mode 100644
index 0000000..3e3dd49
--- /dev/null
+++ b/models/common/vqa_tools/aokvqa/evaluation/load_aokvqa.py
@@ -0,0 +1,13 @@
+import os
+import json
+
+
+def load_aokvqa(aokvqa_dir, split, 
version='v1p0'): + assert split in ['train', 'val', 'test', 'test_w_ans'] + dataset = json.load(open( + os.path.join(aokvqa_dir, f"aokvqa_{version}_{split}.json") + )) + return dataset + +def get_coco_path(split, image_id, coco_dir): + return os.path.join(coco_dir, f"{split}2017", f"{image_id:012}.jpg") diff --git a/models/common/vqa_tools/aokvqa/evaluation/prepare_predictions.py b/models/common/vqa_tools/aokvqa/evaluation/prepare_predictions.py new file mode 100644 index 0000000..202f00c --- /dev/null +++ b/models/common/vqa_tools/aokvqa/evaluation/prepare_predictions.py @@ -0,0 +1,31 @@ +import argparse +import pathlib +import json + +from load_aokvqa import load_aokvqa + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir') + parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True) + parser.add_argument('--mc', type=argparse.FileType('r'), dest='mc_pred_file') + parser.add_argument('--da', type=argparse.FileType('r'), dest='da_pred_file') + parser.add_argument('--out', type=argparse.FileType('w'), dest='output_file') + args = parser.parse_args() + assert args.mc_pred_file or args.da_pred_file + + dataset = load_aokvqa(args.aokvqa_dir, args.split) + mc_preds = json.load(args.mc_pred_file) if args.mc_pred_file else None + da_preds = json.load(args.da_pred_file) if args.da_pred_file else None + predictions = {} + + for d in dataset: + q = d['question_id'] + predictions[q] = {} + if mc_preds and q in mc_preds.keys(): + predictions[q]['multiple_choice'] = mc_preds[q] + if da_preds and q in da_preds.keys(): + predictions[q]['direct_answer'] = da_preds[q] + + json.dump(predictions, args.output_file) diff --git a/models/common/vqa_tools/aokvqa/evaluation/remap_predictions.py b/models/common/vqa_tools/aokvqa/evaluation/remap_predictions.py new file mode 100644 index 0000000..40ba155 --- /dev/null +++ b/models/common/vqa_tools/aokvqa/evaluation/remap_predictions.py @@ -0,0 +1,44 @@ +import argparse +import pathlib +import json +from tqdm import tqdm + +from sentence_transformers import SentenceTransformer +from sentence_transformers.util import cos_sim + +from load_aokvqa import load_aokvqa + + +def map_to_choices(dataset, predictions, device='cpu'): + if isinstance(dataset, list): + dataset = { dataset[i]['question_id'] : dataset[i] for i in range(len(dataset)) } + + if all([p in dataset[q]['choices'] for q, p in predictions.items()]): + return predictions + + model = SentenceTransformer('sentence-transformers/average_word_embeddings_glove.6B.300d') + model.to(device) + for q in tqdm(predictions.keys()): + choices = dataset[q]['choices'] + if predictions[q] not in choices: + choice_embeddings = model.encode([predictions[q]] + choices, convert_to_tensor=True) + a_idx = cos_sim(choice_embeddings[0], choice_embeddings[1:]).argmax().item() + predictions[q] = choices[a_idx] + + return predictions + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir') + parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True) + parser.add_argument('--pred', type=argparse.FileType('r'), required=True, dest='prediction_file') + parser.add_argument('--out', type=argparse.FileType('w'), required=True, dest='output_file') + args = parser.parse_args() + + + dataset = load_aokvqa(args.aokvqa_dir, args.split) + predictions = json.load(args.prediction_file) 
+ predictions = map_to_choices(dataset, predictions) + + json.dump(predictions, args.output_file) diff --git a/models/common/vqa_tools/aokvqa/gpt3/README.md b/models/common/vqa_tools/aokvqa/gpt3/README.md new file mode 100644 index 0000000..fc1fd6b --- /dev/null +++ b/models/common/vqa_tools/aokvqa/gpt3/README.md @@ -0,0 +1,14 @@ +## Querying GPT-3 + +To follow our experiments which use GPT-3, you must have access to the [OpenAI API](https://openai.com/api/) (at cost). Please retrieve your [organization](https://beta.openai.com/account/org-settings) and [API](https://beta.openai.com/account/api-keys) keys and set them in your environment variables. + +```bash +export OPENAI_ORG=.... +export OPENAI_API_KEY=... +``` + +For producing predictions for both DA and MC settings, run: +```bash +python gpt3/query_gpt3.py --aokvqa-dir ${AOKVQA_DIR} --split val --out ${PREDS_DIR}/gpt3_val-da.json +python remap_predictions.py --aokvqa-dir ${AOKVQA_DIR} --split val --pred ${PREDS_DIR}/gpt3_val-da.json --out ${PREDS_DIR}/gpt3_val-mc.json +``` diff --git a/models/common/vqa_tools/aokvqa/gpt3/caption_inputs.py b/models/common/vqa_tools/aokvqa/gpt3/caption_inputs.py new file mode 100644 index 0000000..2117434 --- /dev/null +++ b/models/common/vqa_tools/aokvqa/gpt3/caption_inputs.py @@ -0,0 +1,23 @@ +import os +import json +import argparse +import pathlib + +from load_aokvqa import load_aokvqa + + +parser = argparse.ArgumentParser() +parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir') +parser.add_argument('--coco-dir', type=pathlib.Path, required=True, dest='coco_dir') +parser.add_argument('--split', type=str, choices=['train', 'val'], required=True) +parser.add_argument('--out', type=argparse.FileType('w'), required=True, dest='output_file') +args = parser.parse_args() + +aokvqa_set = load_aokvqa(args.aokvqa_dir, args.split) + +coco_captions = json.load(open(os.path.join(args.coco_dir, 'annotations', f'captions_{args.split}2017.json')))['annotations'] +coco_captions = {c['image_id'] : c['caption'] for c in coco_captions} + +captions = { d['question_id'] : coco_captions[d['image_id']] for d in aokvqa_set } + +json.dump(captions, args.output_file) diff --git a/models/common/vqa_tools/aokvqa/gpt3/query_gpt3.py b/models/common/vqa_tools/aokvqa/gpt3/query_gpt3.py new file mode 100644 index 0000000..4a08900 --- /dev/null +++ b/models/common/vqa_tools/aokvqa/gpt3/query_gpt3.py @@ -0,0 +1,79 @@ +import os +import random +import json +from tqdm import tqdm +import argparse +import pathlib + +import openai +openai.organization = os.getenv('OPENAI_ORG') +openai.api_key = os.getenv('OPENAI_API_KEY') + +from load_aokvqa import load_aokvqa + + +random.seed(0) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir') + parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True) + parser.add_argument('--n', type=int, default=10, dest='num_examples') + parser.add_argument('--train-context', type=argparse.FileType('r'), dest='train_context_file') + parser.add_argument('--prefix', type=str, default='', dest='prompt_prefix') + parser.add_argument('--include-choices', action='store_true', dest='include_choices') + parser.add_argument('--context', type=argparse.FileType('r'), dest='context_file') + parser.add_argument('--out', type=argparse.FileType('w'), required=True, dest='output_file') + args = parser.parse_args() + + + train_set = load_aokvqa(args.aokvqa_dir, 'train') + 
eval_set = load_aokvqa(args.aokvqa_dir, args.split)
+
+    train_context = {}
+    context = {}
+    if args.context_file is not None:
+        train_context = json.load(args.train_context_file)
+        context = json.load(args.context_file)
+
+    predictions = {}
+
+    for d in tqdm(eval_set):
+        q = d['question_id']
+
+        prompt = args.prompt_prefix
+        for e in random.sample(train_set, args.num_examples):
+            prompt += prompt_element(e,
+                context=train_context.get(e['question_id'], None),  # context for the exemplar itself, keyed by its own question id
+                include_choices=args.include_choices,
+                answer=True
+            )
+            prompt += '\n\n'
+
+        prompt += prompt_element(d,
+            context=context.get(q, None),
+            include_choices=args.include_choices,
+            answer=False
+        )
+
+        response = openai.Completion.create(
+            engine="text-curie-001",
+            prompt=prompt,
+            temperature=0.0,
+            max_tokens=10,
+        )
+
+        predictions[q] = response.choices[0].text.strip()
+
+    json.dump(predictions, args.output_file)
+
+
+def prompt_element(d, context=None, include_choices=False, answer=False):
+    return (f"Context: {context}\n" if context is not None else '') + \
+        f"Q: {d['question']}\n" + \
+        (f"Choices: {', '.join(d['choices'])}.\n" if include_choices else '') + \
+        f"A:" + (f" {d['choices'][d['correct_choice_idx']]}" if answer else '')
+
+if __name__ == '__main__':
+    main()
diff --git a/models/common/vqa_tools/aokvqa/gpt3/rationale_inputs.py b/models/common/vqa_tools/aokvqa/gpt3/rationale_inputs.py
new file mode 100644
index 0000000..411d1ee
--- /dev/null
+++ b/models/common/vqa_tools/aokvqa/gpt3/rationale_inputs.py
@@ -0,0 +1,16 @@
+import json
+import argparse
+import pathlib
+
+from load_aokvqa import load_aokvqa
+
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir')
+parser.add_argument('--split', type=str, choices=['train', 'val', 'test_w_ans'], required=True)
+parser.add_argument('--out', type=argparse.FileType('w'), required=True, dest='output_file')
+args = parser.parse_args()
+
+aokvqa_set = load_aokvqa(args.aokvqa_dir, args.split)
+rationales = {d['question_id'] : d['rationales'][0] for d in aokvqa_set}
+json.dump(rationales, args.output_file)
diff --git a/models/common/vqa_tools/aokvqa/heuristics/README.md b/models/common/vqa_tools/aokvqa/heuristics/README.md
new file mode 100644
index 0000000..67c8632
--- /dev/null
+++ b/models/common/vqa_tools/aokvqa/heuristics/README.md
@@ -0,0 +1,11 @@
+## Heuristics
+
+```bash
+# These scripts accept the same arguments.
+# heuristics/random_unweighted.py +# heuristics/random_weighted.py +# heuristics/most_common_answer.py + +python heuristics/random_unweighted.py --aokvqa-dir ${AOKVQA_DIR} --split val --mc --out ${PREDS_DIR}/random-unweighted_val-mc.json +# Exclude --mc for the direct answer setting +``` diff --git a/models/common/vqa_tools/aokvqa/heuristics/most_common_answer.py b/models/common/vqa_tools/aokvqa/heuristics/most_common_answer.py new file mode 100644 index 0000000..59a27bc --- /dev/null +++ b/models/common/vqa_tools/aokvqa/heuristics/most_common_answer.py @@ -0,0 +1,39 @@ +import os +import json +import argparse +import pathlib +from collections import Counter + +from load_aokvqa import load_aokvqa + + +parser = argparse.ArgumentParser() +parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir') +parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True) +parser.add_argument('--mc', action='store_true', dest='multiple_choice') +parser.add_argument('--out', type=argparse.FileType('w'), required=True, dest='output_file') +args = parser.parse_args() + + +train_set = load_aokvqa(args.aokvqa_dir, 'train') +train_freq = dict(Counter( + [d['choices'][d['correct_choice_idx']] for d in train_set] +)) +most_common_answer = max(train_freq.keys(), key=train_freq.get) + +## + +eval_set = load_aokvqa(args.aokvqa_dir, args.split) + +predictions = {} + +for d in eval_set: + q = d['question_id'] + predictions[q] = most_common_answer + + if args.multiple_choice: + choices = [c for c in d['choices'] if c in train_freq.keys()] + if len(choices) > 0: + predictions[q] = max(choices, key=train_freq.get) + +json.dump(predictions, args.output_file) diff --git a/models/common/vqa_tools/aokvqa/heuristics/random_unweighted.py b/models/common/vqa_tools/aokvqa/heuristics/random_unweighted.py new file mode 100644 index 0000000..cfcf900 --- /dev/null +++ b/models/common/vqa_tools/aokvqa/heuristics/random_unweighted.py @@ -0,0 +1,38 @@ +import os +import json +from random import seed, sample +import argparse +import pathlib + +from load_aokvqa import load_aokvqa + + +parser = argparse.ArgumentParser() +parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir') +parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True) +parser.add_argument('--mc', action='store_true', dest='multiple_choice') +parser.add_argument('--out', type=argparse.FileType('w'), required=True, dest='output_file') +args = parser.parse_args() + +seed(0) + +train_set = load_aokvqa(args.aokvqa_dir, 'train') + +if args.multiple_choice is False: + choices = list(set( + [d['choices'][d['correct_choice_idx']] for d in train_set] + )) + +## + +predictions = {} + +eval_set = load_aokvqa(args.aokvqa_dir, args.split) + +for d in eval_set: + q = d['question_id'] + if args.multiple_choice: + choices = d['choices'] + predictions[q] = sample(choices, 1)[0] + +json.dump(predictions, args.output_file) diff --git a/models/common/vqa_tools/aokvqa/heuristics/random_weighted.py b/models/common/vqa_tools/aokvqa/heuristics/random_weighted.py new file mode 100644 index 0000000..2ccfa61 --- /dev/null +++ b/models/common/vqa_tools/aokvqa/heuristics/random_weighted.py @@ -0,0 +1,46 @@ +import os +import json +import numpy as np +import argparse +import pathlib +from collections import Counter + +from load_aokvqa import load_aokvqa + + +parser = argparse.ArgumentParser() +parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir') 
+parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True)
+parser.add_argument('--mc', action='store_true', dest='multiple_choice')
+parser.add_argument('--out', type=argparse.FileType('w'), required=True, dest='output_file')
+args = parser.parse_args()
+
+np.random.seed(0)
+
+train_set = load_aokvqa(args.aokvqa_dir, 'train')
+train_freq = dict(Counter(
+    [d['choices'][d['correct_choice_idx']] for d in train_set]
+))
+
+if args.multiple_choice is False:
+    choices = list(train_freq.keys())
+    probs = [f / len(train_set) for f in train_freq.values()]
+
+##
+
+predictions = {}
+
+eval_set = load_aokvqa(args.aokvqa_dir, args.split)
+
+for d in eval_set:
+    if args.multiple_choice:
+        choices = d['choices']
+        probs = [train_freq.get(c, 0) for c in choices]
+        if probs == [0, 0, 0, 0]:
+            probs = [1, 1, 1, 1]
+        probs = [p / sum(probs) for p in probs]
+
+    q = d['question_id']
+    predictions[q] = np.random.choice(choices, size=1, p=probs)[0]
+
+json.dump(predictions, args.output_file)
diff --git a/models/common/vqa_tools/aokvqa/load_aokvqa.py b/models/common/vqa_tools/aokvqa/load_aokvqa.py
new file mode 100644
index 0000000..3e3dd49
--- /dev/null
+++ b/models/common/vqa_tools/aokvqa/load_aokvqa.py
@@ -0,0 +1,13 @@
+import os
+import json
+
+
+def load_aokvqa(aokvqa_dir, split, version='v1p0'):
+    assert split in ['train', 'val', 'test', 'test_w_ans']
+    dataset = json.load(open(
+        os.path.join(aokvqa_dir, f"aokvqa_{version}_{split}.json")
+    ))
+    return dataset
+
+def get_coco_path(split, image_id, coco_dir):
+    return os.path.join(coco_dir, f"{split}2017", f"{image_id:012}.jpg")
diff --git a/models/common/vqa_tools/aokvqa/transfer_experiments/README.md b/models/common/vqa_tools/aokvqa/transfer_experiments/README.md
new file mode 100644
index 0000000..dc5138d
--- /dev/null
+++ b/models/common/vqa_tools/aokvqa/transfer_experiments/README.md
@@ -0,0 +1,41 @@
+## Transfer Learning Experiments
+
+We use the following training/prediction scripts for the classifier, zero-shot, and contrastive experiments in Table 3.
+
+```bash
+## Training
+python transfer_experiments/train.py --aokvqa-dir ${AOKVQA_DIR} --vocab ${AOKVQA_DIR}/large_vocab_train.csv --log-dir ${LOG_DIR}
+
+--backbone clip --clip-model-type ViT-B/32 --train-features ${FEATURES_DIR}/clip-ViT-B-32_train.pt --val-features ${FEATURES_DIR}/clip-ViT-B-32_val.pt
+--inputs question # OR --inputs image # OR --inputs question image
+# OR
+--backbone resnet --train-features ${FEATURES_DIR}/resnet_train.pt --val-features ${FEATURES_DIR}/resnet_val.pt --inputs image
+# OR
+--backbone bert --train-features ${FEATURES_DIR}/bert_train.pt --val-features ${FEATURES_DIR}/bert_val.pt --inputs question
+
+--objective classifier
+# OR
+--objective contrastive --vocab-features ${FEATURES_DIR}/clip-ViT-B-32_large_vocab.pt
+```
+
+You can make predictions for CLIP zero-shot or from a classifier/contrastive checkpoint trained above.
+
+```bash
+## Predicting
+python transfer_experiments/predict.py --aokvqa-dir ${AOKVQA_DIR} --out ${PREDS_DIR}/clip-classifier_val-mc.json
+
+--split val # or test
+--features ${FEATURES_DIR}/clip-ViT-B-32_val.pt # adjust for backbone and eval split
+
+--ckpt path/to/model.ckpt
+# OR
+--zero-shot --clip-model-type ViT-B/32
+--inputs question # OR --inputs image # OR --inputs question image
+
+--mc # Multiple-choice. Exclude for direct-answer.
+ +# IF classifier OR direct-answer +--vocab ${AOKVQA_DIR}/large_vocab_train.csv +# IF contrastive/zero-shot AND direct-answer +--vocab-features ${FEATURES_DIR}/clip-ViT-B-32_large_vocab.pt +``` diff --git a/models/common/vqa_tools/aokvqa/transfer_experiments/predict.py b/models/common/vqa_tools/aokvqa/transfer_experiments/predict.py new file mode 100644 index 0000000..d2fbb42 --- /dev/null +++ b/models/common/vqa_tools/aokvqa/transfer_experiments/predict.py @@ -0,0 +1,126 @@ +import sys +import os +import argparse +import pathlib +from tqdm import tqdm +import json + +import torch +import torch.nn as nn + +# https://github.com/PyTorchLightning/pytorch-lightning/issues/11663 +import sentencepiece; import pytorch_lightning as pl; import clip + +from transfer_experiments.train import LinearClassifier +from load_aokvqa import load_aokvqa +from evaluation.remap_predictions import map_to_choices + + +parser = argparse.ArgumentParser() +parser.add_argument('--split', type=str, choices=['train', 'val', 'test'], required=True) +parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir') +parser.add_argument('--features', type=pathlib.Path, required=True) +parser.add_argument('--out', type=argparse.FileType('w'), dest='output_file') +# +parser_weights = parser.add_mutually_exclusive_group(required=True) + +parser_weights.add_argument('--ckpt', type=pathlib.Path, dest='checkpoint_path') + +parser_weights.add_argument('--zero-shot', action='store_true', dest='clip_zero_shot') +parser.add_argument('--inputs', nargs='+', type=str, choices=['question', 'image'], required=('--zero-shot' in sys.argv)) +# +parser.add_argument('--vocab', type=argparse.FileType('r')) +parser.add_argument('--vocab-features', type=pathlib.Path, dest='vocab_features') +parser.add_argument('--mc', action='store_true', dest='multiple_choice') + +parser.add_argument('--clip-model-type', type=str, + choices=['RN50', 'RN50x4', 'RN50x16', 'RN50x64', 'RN101', 'ViT-B/32', 'ViT-B/16', 'ViT-L/14', 'ViT-L/14@336px'], + dest='clip_model_type', required=('--zero-shot' in sys.argv and '--mc' in sys.argv)) +# +args = parser.parse_args() + + +## Load dataset + +aokvqa_set = load_aokvqa(args.aokvqa_dir, args.split) + +## Load models + +device = "cuda" if torch.cuda.is_available() else "cpu" + +if args.checkpoint_path is not None: + classifier = LinearClassifier.load_from_checkpoint(args.checkpoint_path) + classifier.to(device) + hp = classifier.hparams +elif args.clip_zero_shot: + classifier = nn.Identity().to(device) + hp = pl.utilities.AttributeDict(backbone='clip', clip_model_type=args.clip_model_type, objective='zero-shot', inputs=args.inputs) + +# Load input features + +embeddings = torch.load(args.features) +if hp.backbone == 'clip': + for q in embeddings.keys(): + embeddings[q]['question'] = embeddings[q]['question'] / embeddings[q]['question'].norm(dim=-1, keepdim=True) + embeddings[q]['image'] = embeddings[q]['image'] / embeddings[q]['image'].norm(dim=-1, keepdim=True) + +# Load vocab, vocab features, clip + +if (hp.objective == 'classifier') or \ + (hp.objective in ['contrastive', 'zero-shot'] and args.multiple_choice is False): + vocab = args.vocab.read().splitlines() + +if hp.objective in ['contrastive', 'zero-shot']: + if args.multiple_choice is False: + vocab_features = torch.load(args.vocab_features).cpu() + vocab_features /= vocab_features.norm(dim=-1, keepdim=True) + else: + clip_model = clip.load(hp.clip_model_type, device=device)[0] + logit_scale = clip_model.logit_scale.exp().cpu() + +## 
Prediction loop + +predictions = {} + +with torch.no_grad(): + for o in tqdm(aokvqa_set): + q = o['question_id'] + + # Load input embedding (from question / image) + if hp.objective == 'zero-shot' and ('question' in hp.inputs and 'image' in hp.inputs): + e = embeddings[q]['question'] + embeddings[q]['image'] + elif 'question' in hp.inputs and 'image' in hp.inputs: + e = torch.cat((embeddings[q]['question'], embeddings[q]['image'])) + elif 'question' in hp.inputs: + e = embeddings[q]['question'] + elif 'image' in hp.inputs: + e = embeddings[q]['image'] + + # Pass inputs through model + e = e.unsqueeze(0).to(device) + x = classifier(e)[0].cpu() + + # Predict + if hp.objective in ['contrastive', 'zero-shot']: + if args.multiple_choice: + vocab = o['choices'] + # Encode choices + vocab_features = clip.tokenize(vocab).to(device) + vocab_features = torch.stack([ + clip_model.encode_text(v.unsqueeze(0)) for v in vocab_features + ], dim=1)[0] + vocab_features /= vocab_features.norm(dim=-1, keepdim=True) + vocab_features = vocab_features.float().cpu() + + x = logit_scale * x @ vocab_features.t() + x = x.softmax(dim=-1) + + predictions[q] = vocab[x.argmax().item()] + +## Save and evaluate predictions + +# Map prediction to nearest neighbor choice (by word embeddings) +if args.multiple_choice and hp.objective == 'classifier': + predictions = map_to_choices(aokvqa_set, predictions) + +json.dump(predictions, args.output_file) diff --git a/models/common/vqa_tools/aokvqa/transfer_experiments/train.py b/models/common/vqa_tools/aokvqa/transfer_experiments/train.py new file mode 100644 index 0000000..ac48b5a --- /dev/null +++ b/models/common/vqa_tools/aokvqa/transfer_experiments/train.py @@ -0,0 +1,263 @@ +import os +import sys +import json +import argparse +import pathlib +import random + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.data import Dataset, DataLoader + +# https://github.com/PyTorchLightning/pytorch-lightning/issues/11663 +import sentencepiece; import pytorch_lightning as pl + +import torchmetrics.functional as MF + +from load_aokvqa import load_aokvqa + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('--aokvqa-dir', type=pathlib.Path, required=True, dest='aokvqa_dir') + parser.add_argument('--vocab', type=argparse.FileType('r'), required=True) + parser.add_argument('--log-dir', type=pathlib.Path, dest='log_dir', required=True) + # + parser.add_argument('--backbone', type=str, choices=['clip', 'resnet', 'bert'], required=True) + parser.add_argument('--clip-model-type', type=str, + choices=['RN50', 'RN50x4', 'RN50x16', 'RN50x64', 'RN101', 'ViT-B/32', 'ViT-B/16', 'ViT-L/14', 'ViT-L/14@336px'], + dest='clip_model_type', required=('clip' in sys.argv)) + parser.add_argument('--train-features', type=pathlib.Path, required=True, dest='train_features') + parser.add_argument('--val-features', type=pathlib.Path, required=True, dest='val_features') + parser.add_argument('--vocab-features', type=pathlib.Path, required=('contrastive' in sys.argv), dest='vocab_features') + # + parser.add_argument('--objective', type=str, choices=['classifier', 'contrastive'], required=True) + parser.add_argument('--inputs', nargs='+', type=str, choices=['question', 'image'], required=True) + # Defaults + parser.add_argument('--bs', type=int, default=128, dest='batch_size') + parser.add_argument('--lr', type=float, default=0.01) + parser.add_argument('--epochs', type=int, default=500) + parser.add_argument('--gpus', type=int, default=1) + args = 
parser.parse_args() + + pl.seed_everything(1) + vocab = args.vocab.read().splitlines() + + ## Data loading + + dm = AokvqaEmbeddingsDataModule( + args.aokvqa_dir, + args.train_features, + args.val_features, + args.objective, + args.backbone, + args.inputs, + vocab, + args.vocab_features, + batch_size=args.batch_size, + num_workers=16 + ) + + ## Model definition + + model = LinearClassifier( + args.objective, + args.backbone, + args.clip_model_type, + args.inputs, + len(vocab), + args.lr + ) + + ## Training and testing loops + + logger = pl.loggers.TensorBoardLogger( + args.log_dir, + name=f'{args.backbone}-{args.objective}', + version=f"inputs:{'+'.join(args.inputs)}" + ) + + trainer = pl.Trainer( + logger=logger, + gpus=args.gpus, + max_epochs=args.epochs, + callbacks=[ + pl.callbacks.ModelCheckpoint( + monitor="val_acc", + filename="{epoch:02d}-{val_acc:.2f}", + mode="max" + ) + ], + ) + + trainer.fit(model, dm) + + +class AokvqaEmbeddingsDataset(Dataset): + def __init__(self, aokvqa_dir, split, input_features, objective, backbone, inputs, vocab, vocab_features): + + aokvqa_set = load_aokvqa(aokvqa_dir, split) + + assert ( backbone == 'resnet' and inputs == ['image'] and objective == 'classifier' ) \ + or ( backbone == 'bert' and inputs == ['question'] and objective == 'classifier' ) \ + or ( backbone == 'clip' ) + + embeddings = torch.load(input_features) + if backbone == 'clip': + for q in embeddings.keys(): + embeddings[q]['question'] /= embeddings[q]['question'].norm(dim=-1, keepdim=True) + embeddings[q]['image'] /= embeddings[q]['image'].norm(dim=-1, keepdim=True) + if objective == 'contrastive': + vocab_embeddings = torch.load(vocab_features) + vocab_embeddings /= vocab_embeddings.norm(dim=-1, keepdim=True) + + self.objective = objective + self.vocab_len = len(vocab) + + self.embeddings = [] + self.answers = [] + + for o in aokvqa_set: + correct_answers = set([o['choices'][o['correct_choice_idx']]] + o['direct_answers']) + correct_answers = [vocab.index(a) for a in correct_answers if a in vocab] + if self.objective == 'contrastive': + correct_answers = [vocab_embeddings[a] for a in correct_answers] + if len(correct_answers) == 0: continue + self.answers.append(correct_answers) + + q = o['question_id'] + if 'question' in inputs and 'image' in inputs: + e = torch.cat((embeddings[q]['question'], embeddings[q]['image'])) + elif 'question' in inputs and 'image' not in inputs: + e = embeddings[q]['question'] + elif 'question' not in inputs and 'image' in inputs: + e = embeddings[q]['image'] + self.embeddings.append(e) + + def __getitem__(self, index): + e = self.embeddings[index] + a = self.answers[index] + if self.objective == 'classifier': + a = torch.sum(F.one_hot(torch.tensor(a), num_classes=self.vocab_len), dim=0) + elif self.objective == 'contrastive': + a = random.sample(a, 1)[0] + return e, a + + def __len__(self): + return len(self.embeddings) + + +class AokvqaEmbeddingsDataModule(pl.LightningDataModule): + + def __init__(self, aokvqa_dir, train_features, val_features, objective, backbone, inputs, vocab, vocab_features, batch_size=1, num_workers=0): + super().__init__() + self.aokvqa_dir = aokvqa_dir + self.train_features = train_features + self.val_features = val_features + self.objective = objective + self.backbone = backbone + self.inputs = inputs + self.vocab = vocab + self.vocab_features = vocab_features + self.batch_size = batch_size + self.num_workers = num_workers + + def setup(self, stage=None): + self.train_dataset = AokvqaEmbeddingsDataset( + self.aokvqa_dir, 'train', 
self.train_features, self.objective, + self.backbone, self.inputs, self.vocab, self.vocab_features + ) + self.val_dataset = AokvqaEmbeddingsDataset( + self.aokvqa_dir, 'val', self.val_features, self.objective, + self.backbone, self.inputs, self.vocab, self.vocab_features + ) + + def train_dataloader(self): + return DataLoader( + self.train_dataset, batch_size=self.batch_size, shuffle=True, + num_workers=int(0.8 * self.num_workers) + ) + + def val_dataloader(self): + return DataLoader( + self.val_dataset, batch_size=self.batch_size, shuffle=False, + num_workers=int(0.2 * self.num_workers) + ) + + +class LinearClassifier(pl.LightningModule): + def __init__(self, objective, backbone, clip_model_type, inputs, vocab_len, lr=0.001): + super().__init__() + self.save_hyperparameters(ignore=['lr']) + self.lr = lr + + if self.hparams.backbone == 'clip': + clip_dim = { + 'RN50' : 1024, + 'RN50x4' : 640, + 'RN50x16' : 768, + 'RN50x64' : 1024, + 'RN101' : 512, + 'ViT-B/32' : 512, + 'ViT-B/16' : 512, + 'ViT-L/14' : 768, + 'ViT-L/14@336px' : 768, + }[clip_model_type] + emb_dim = clip_dim * len(inputs) + elif self.hparams.backbone == 'resnet': + emb_dim = 2048 + elif self.hparams.backbone == 'bert': + emb_dim = 768 + + if self.hparams.objective == 'classifier': + out_dim = vocab_len + elif self.hparams.objective == 'contrastive': + out_dim = clip_dim + + self.linear = nn.Linear(emb_dim, out_dim) + + def forward(self, x): + x = self.linear(x) + if self.hparams.objective == 'classifier': + x = torch.sigmoid(x) + return x + + def compute_loss(self, batch): + x, y = batch + + y_pred = self.forward(x) + + if self.hparams.objective == 'classifier': + loss = F.binary_cross_entropy(y_pred, y.float()) + elif self.hparams.objective == 'contrastive': + indices = torch.arange(0, x.shape[0], dtype=torch.int64, device=self.device) + sim = (y_pred @ y.T).softmax(dim=-1) + loss = F.cross_entropy(sim, indices) + + if self.hparams.objective == 'classifier': + acc = MF.f1_score(y_pred, y) + elif self.hparams.objective == 'contrastive': + acc = torch.mean(sim[indices, indices]) + + return loss, acc + + def training_step(self, batch, batch_idx): + loss, acc = self.compute_loss(batch) + self.log("train_loss", loss) + self.log("train_acc", acc) + return loss + + def validation_step(self, batch, batch_idx): + loss, acc = self.compute_loss(batch) + self.log("val_loss", loss) + self.log("val_acc", acc) + return loss + + def configure_optimizers(self): + optimizer = torch.optim.Adam(self.parameters(), lr=self.lr) + return optimizer + + +if __name__ == '__main__': + main() diff --git a/models/common/vqa_tools/vqa.py b/models/common/vqa_tools/vqa.py new file mode 100644 index 0000000..a386b90 --- /dev/null +++ b/models/common/vqa_tools/vqa.py @@ -0,0 +1,211 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. + SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +__author__ = "aagrawal" +__version__ = "0.9" + +# Interface for accessing the VQA dataset. + +# This code is based on the code written by Tsung-Yi Lin for MSCOCO Python API available at the following link: +# (https://github.com/pdollar/coco/blob/master/PythonAPI/pycocotools/coco.py). + +# The following functions are defined: +# VQA - VQA class that loads VQA annotation file and prepares data structures. +# getQuesIds - Get question ids that satisfy given filter conditions. +# getImgIds - Get image ids that satisfy given filter conditions. 
+# loadQA - Load questions and answers with the specified question ids. +# showQA - Display the specified questions and answers. +# loadRes - Load result file and create result object. + +# Help on each function can be accessed by: "help(VQA.function)" + +import json +import datetime +import copy + + +class VQA: + def __init__(self, annotation_file=None, question_file=None): + """ + Constructor of VQA helper class for reading and visualizing questions and answers. + :param annotation_file (str): location of VQA annotation file + :return: + """ + # load dataset + self.dataset = {} + self.questions = {} + self.qa = {} + self.qqa = {} + self.imgToQA = {} + if not annotation_file == None and not question_file == None: + print("loading VQA annotations and questions into memory...") + time_t = datetime.datetime.utcnow() + dataset = json.load(open(annotation_file, "r")) + questions = json.load(open(question_file, "r")) + self.dataset = dataset + self.questions = questions + self.createIndex() + + def createIndex(self): + # create index + print("creating index...") + imgToQA = {ann["image_id"]: [] for ann in self.dataset["annotations"]} + qa = {ann["question_id"]: [] for ann in self.dataset["annotations"]} + qqa = {ann["question_id"]: [] for ann in self.dataset["annotations"]} + for ann in self.dataset["annotations"]: + imgToQA[ann["image_id"]] += [ann] + qa[ann["question_id"]] = ann + for ques in self.questions["questions"]: + qqa[ques["question_id"]] = ques + print("index created!") + + # create class members + self.qa = qa + self.qqa = qqa + self.imgToQA = imgToQA + + def info(self): + """ + Print information about the VQA annotation file. + :return: + """ + for key, value in self.dataset["info"].items(): + print("%s: %s" % (key, value)) + + def getQuesIds(self, imgIds=[], quesTypes=[], ansTypes=[]): + """ + Get question ids that satisfy given filter conditions. default skips that filter + :param imgIds (int array) : get question ids for given imgs + quesTypes (str array) : get question ids for given question types + ansTypes (str array) : get question ids for given answer types + :return: ids (int array) : integer array of question ids + """ + imgIds = imgIds if type(imgIds) == list else [imgIds] + quesTypes = quesTypes if type(quesTypes) == list else [quesTypes] + ansTypes = ansTypes if type(ansTypes) == list else [ansTypes] + + if len(imgIds) == len(quesTypes) == len(ansTypes) == 0: + anns = self.dataset["annotations"] + else: + if not len(imgIds) == 0: + anns = sum( + [self.imgToQA[imgId] for imgId in imgIds if imgId in self.imgToQA], + [], + ) + else: + anns = self.dataset["annotations"] + anns = ( + anns + if len(quesTypes) == 0 + else [ann for ann in anns if ann["question_type"] in quesTypes] + ) + anns = ( + anns + if len(ansTypes) == 0 + else [ann for ann in anns if ann["answer_type"] in ansTypes] + ) + ids = [ann["question_id"] for ann in anns] + return ids + + def getImgIds(self, quesIds=[], quesTypes=[], ansTypes=[]): + """ + Get image ids that satisfy given filter conditions. 
default skips that filter + :param quesIds (int array) : get image ids for given question ids + quesTypes (str array) : get image ids for given question types + ansTypes (str array) : get image ids for given answer types + :return: ids (int array) : integer array of image ids + """ + quesIds = quesIds if type(quesIds) == list else [quesIds] + quesTypes = quesTypes if type(quesTypes) == list else [quesTypes] + ansTypes = ansTypes if type(ansTypes) == list else [ansTypes] + + if len(quesIds) == len(quesTypes) == len(ansTypes) == 0: + anns = self.dataset["annotations"] + else: + if not len(quesIds) == 0: + anns = sum( + [[self.qa[quesId]] for quesId in quesIds if quesId in self.qa], [] + ) + else: + anns = self.dataset["annotations"] + anns = ( + anns + if len(quesTypes) == 0 + else [ann for ann in anns if ann["question_type"] in quesTypes] + ) + anns = ( + anns + if len(ansTypes) == 0 + else [ann for ann in anns if ann["answer_type"] in ansTypes] + ) + ids = [ann["image_id"] for ann in anns] + return ids + + def loadQA(self, ids=[]): + """ + Load questions and answers with the specified question ids. + :param ids (int array) : integer ids specifying question ids + :return: qa (object array) : loaded qa objects + """ + if type(ids) == list: + return [self.qa[id] for id in ids] + elif type(ids) == int: + return [self.qa[ids]] + + def showQA(self, anns): + """ + Display the specified annotations. + :param anns (array of object): annotations to display + :return: None + """ + if len(anns) == 0: + return 0 + for ann in anns: + quesId = ann["question_id"] + print("Question: %s" % (self.qqa[quesId]["question"])) + for ans in ann["answers"]: + print("Answer %d: %s" % (ans["answer_id"], ans["answer"])) + + def loadRes(self, resFile, quesFile): + """ + Load result file and return a result object. + :param resFile (str) : file name of result file + :return: res (obj) : result api object + """ + res = VQA() + res.questions = json.load(open(quesFile)) + res.dataset["info"] = copy.deepcopy(self.questions["info"]) + res.dataset["task_type"] = copy.deepcopy(self.questions["task_type"]) + res.dataset["data_type"] = copy.deepcopy(self.questions["data_type"]) + res.dataset["data_subtype"] = copy.deepcopy(self.questions["data_subtype"]) + res.dataset["license"] = copy.deepcopy(self.questions["license"]) + + print("Loading and preparing results... ") + time_t = datetime.datetime.utcnow() + anns = json.load(open(resFile)) + assert type(anns) == list, "results is not an array of objects" + annsQuesIds = [ann["question_id"] for ann in anns] + assert set(annsQuesIds) == set( + self.getQuesIds() + ), "Results do not correspond to current VQA set. Either the results do not have predictions for all question ids in annotation file or there is at least one question id that does not belong to the question ids in the annotation file." 
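The `loadRes` method above pairs with the `VQAEval` class defined in `vqa_eval.py` below to form the standard VQA evaluation flow. A minimal usage sketch, assuming the repo root is on `PYTHONPATH`; the annotation, question, and result file names are illustrative placeholders, not files shipped with this repo:

```python
# Hedged sketch: evaluate a result file with the VQA API above and the
# VQAEval class from models/common/vqa_tools/vqa_eval.py (defined below).
from models.common.vqa_tools.vqa import VQA
from models.common.vqa_tools.vqa_eval import VQAEval

anno_file = "annotations.json"   # placeholder VQA annotation file
ques_file = "questions.json"     # placeholder VQA question file
res_file = "results.json"        # [{"question_id": ..., "answer": ...}, ...]

vqa = VQA(anno_file, ques_file)              # builds the qa/qqa/imgToQA indices
vqa_res = vqa.loadRes(res_file, ques_file)   # must cover every question id

evaluator = VQAEval(vqa, vqa_res, n=2)       # n = decimal places for accuracies
evaluator.evaluate()
print(evaluator.accuracy["overall"])         # also: perQuestionType, perAnswerType
```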
+ for ann in anns: + quesId = ann["question_id"] + if res.dataset["task_type"] == "Multiple Choice": + assert ( + ann["answer"] in self.qqa[quesId]["multiple_choices"] + ), "predicted answer is not one of the multiple choices" + qaAnn = self.qa[quesId] + ann["image_id"] = qaAnn["image_id"] + ann["question_type"] = qaAnn["question_type"] + ann["answer_type"] = qaAnn["answer_type"] + print( + "DONE (t=%0.2fs)" % ((datetime.datetime.utcnow() - time_t).total_seconds()) + ) + + res.dataset["annotations"] = anns + res.createIndex() + return res diff --git a/models/common/vqa_tools/vqa_eval.py b/models/common/vqa_tools/vqa_eval.py new file mode 100644 index 0000000..ee808b3 --- /dev/null +++ b/models/common/vqa_tools/vqa_eval.py @@ -0,0 +1,324 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. + SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +# coding=utf-8 + +__author__ = "aagrawal" + +# This code is based on the code written by Tsung-Yi Lin for MSCOCO Python API available at the following link: +# (https://github.com/tylin/coco-caption/blob/master/pycocoevalcap/eval.py). +import sys +import re + + +class VQAEval: + def __init__(self, vqa=None, vqaRes=None, n=2): + self.n = n + self.accuracy = {} + self.evalQA = {} + self.evalQuesType = {} + self.evalAnsType = {} + self.vqa = vqa + self.vqaRes = vqaRes + if vqa is not None: + self.params = {"question_id": vqa.getQuesIds()} + self.contractions = { + "aint": "ain't", + "arent": "aren't", + "cant": "can't", + "couldve": "could've", + "couldnt": "couldn't", + "couldn'tve": "couldn't've", + "couldnt've": "couldn't've", + "didnt": "didn't", + "doesnt": "doesn't", + "dont": "don't", + "hadnt": "hadn't", + "hadnt've": "hadn't've", + "hadn'tve": "hadn't've", + "hasnt": "hasn't", + "havent": "haven't", + "hed": "he'd", + "hed've": "he'd've", + "he'dve": "he'd've", + "hes": "he's", + "howd": "how'd", + "howll": "how'll", + "hows": "how's", + "Id've": "I'd've", + "I'dve": "I'd've", + "Im": "I'm", + "Ive": "I've", + "isnt": "isn't", + "itd": "it'd", + "itd've": "it'd've", + "it'dve": "it'd've", + "itll": "it'll", + "let's": "let's", + "maam": "ma'am", + "mightnt": "mightn't", + "mightnt've": "mightn't've", + "mightn'tve": "mightn't've", + "mightve": "might've", + "mustnt": "mustn't", + "mustve": "must've", + "neednt": "needn't", + "notve": "not've", + "oclock": "o'clock", + "oughtnt": "oughtn't", + "ow's'at": "'ow's'at", + "'ows'at": "'ow's'at", + "'ow'sat": "'ow's'at", + "shant": "shan't", + "shed've": "she'd've", + "she'dve": "she'd've", + "she's": "she's", + "shouldve": "should've", + "shouldnt": "shouldn't", + "shouldnt've": "shouldn't've", + "shouldn'tve": "shouldn't've", + "somebody'd": "somebodyd", + "somebodyd've": "somebody'd've", + "somebody'dve": "somebody'd've", + "somebodyll": "somebody'll", + "somebodys": "somebody's", + "someoned": "someone'd", + "someoned've": "someone'd've", + "someone'dve": "someone'd've", + "someonell": "someone'll", + "someones": "someone's", + "somethingd": "something'd", + "somethingd've": "something'd've", + "something'dve": "something'd've", + "somethingll": "something'll", + "thats": "that's", + "thered": "there'd", + "thered've": "there'd've", + "there'dve": "there'd've", + "therere": "there're", + "theres": "there's", + "theyd": "they'd", + "theyd've": "they'd've", + "they'dve": "they'd've", + "theyll": "they'll", + "theyre": "they're", + "theyve": "they've", + "twas": "'twas", + 
"wasnt": "wasn't", + "wed've": "we'd've", + "we'dve": "we'd've", + "weve": "we've", + "werent": "weren't", + "whatll": "what'll", + "whatre": "what're", + "whats": "what's", + "whatve": "what've", + "whens": "when's", + "whered": "where'd", + "wheres": "where's", + "whereve": "where've", + "whod": "who'd", + "whod've": "who'd've", + "who'dve": "who'd've", + "wholl": "who'll", + "whos": "who's", + "whove": "who've", + "whyll": "why'll", + "whyre": "why're", + "whys": "why's", + "wont": "won't", + "wouldve": "would've", + "wouldnt": "wouldn't", + "wouldnt've": "wouldn't've", + "wouldn'tve": "wouldn't've", + "yall": "y'all", + "yall'll": "y'all'll", + "y'allll": "y'all'll", + "yall'd've": "y'all'd've", + "y'alld've": "y'all'd've", + "y'all'dve": "y'all'd've", + "youd": "you'd", + "youd've": "you'd've", + "you'dve": "you'd've", + "youll": "you'll", + "youre": "you're", + "youve": "you've", + } + self.manualMap = { + "none": "0", + "zero": "0", + "one": "1", + "two": "2", + "three": "3", + "four": "4", + "five": "5", + "six": "6", + "seven": "7", + "eight": "8", + "nine": "9", + "ten": "10", + } + self.articles = ["a", "an", "the"] + + self.periodStrip = re.compile("(?!<=\d)(\.)(?!\d)") + self.commaStrip = re.compile("(\d)(,)(\d)") + self.punct = [ + ";", + r"/", + "[", + "]", + '"', + "{", + "}", + "(", + ")", + "=", + "+", + "\\", + "_", + "-", + ">", + "<", + "@", + "`", + ",", + "?", + "!", + ] + + def evaluate(self, quesIds=None): + if quesIds == None: + quesIds = [quesId for quesId in self.params["question_id"]] + gts = {} + res = {} + for quesId in quesIds: + gts[quesId] = self.vqa.qa[quesId] + res[quesId] = self.vqaRes.qa[quesId] + + # ================================================= + # Compute accuracy + # ================================================= + accQA = [] + accQuesType = {} + accAnsType = {} + print("computing accuracy") + step = 0 + for quesId in quesIds: + resAns = res[quesId]["answer"] + resAns = resAns.replace("\n", " ") + resAns = resAns.replace("\t", " ") + resAns = resAns.strip() + resAns = self.processPunctuation(resAns) + resAns = self.processDigitArticle(resAns) + gtAcc = [] + gtAnswers = [ans["answer"] for ans in gts[quesId]["answers"]] + if len(set(gtAnswers)) > 1: + for ansDic in gts[quesId]["answers"]: + ansDic["answer"] = self.processPunctuation(ansDic["answer"]) + for gtAnsDatum in gts[quesId]["answers"]: + otherGTAns = [ + item for item in gts[quesId]["answers"] if item != gtAnsDatum + ] + matchingAns = [item for item in otherGTAns if item["answer"] == resAns] + acc = min(1, float(len(matchingAns)) / 3) + gtAcc.append(acc) + quesType = gts[quesId]["question_type"] + ansType = gts[quesId]["answer_type"] + avgGTAcc = float(sum(gtAcc)) / len(gtAcc) + accQA.append(avgGTAcc) + if quesType not in accQuesType: + accQuesType[quesType] = [] + accQuesType[quesType].append(avgGTAcc) + if ansType not in accAnsType: + accAnsType[ansType] = [] + accAnsType[ansType].append(avgGTAcc) + self.setEvalQA(quesId, avgGTAcc) + self.setEvalQuesType(quesId, quesType, avgGTAcc) + self.setEvalAnsType(quesId, ansType, avgGTAcc) + if step % 100 == 0: + self.updateProgress(step / float(len(quesIds))) + step = step + 1 + + self.setAccuracy(accQA, accQuesType, accAnsType) + print("Done computing accuracy") + + def processPunctuation(self, inText): + outText = inText + for p in self.punct: + if (p + " " in inText or " " + p in inText) or ( + re.search(self.commaStrip, inText) != None + ): + outText = outText.replace(p, "") + else: + outText = outText.replace(p, " ") + outText = 
self.periodStrip.sub("", outText, re.UNICODE) + return outText + + def processDigitArticle(self, inText): + outText = [] + tempText = inText.lower().split() + for word in tempText: + word = self.manualMap.setdefault(word, word) + if word not in self.articles: + outText.append(word) + else: + pass + for wordId, word in enumerate(outText): + if word in self.contractions: + outText[wordId] = self.contractions[word] + outText = " ".join(outText) + return outText + + def setAccuracy(self, accQA, accQuesType, accAnsType): + self.accuracy["overall"] = round(100 * float(sum(accQA)) / len(accQA), self.n) + self.accuracy["perQuestionType"] = { + quesType: round( + 100 * float(sum(accQuesType[quesType])) / len(accQuesType[quesType]), + self.n, + ) + for quesType in accQuesType + } + self.accuracy["perAnswerType"] = { + ansType: round( + 100 * float(sum(accAnsType[ansType])) / len(accAnsType[ansType]), self.n + ) + for ansType in accAnsType + } + + def setEvalQA(self, quesId, acc): + self.evalQA[quesId] = round(100 * acc, self.n) + + def setEvalQuesType(self, quesId, quesType, acc): + if quesType not in self.evalQuesType: + self.evalQuesType[quesType] = {} + self.evalQuesType[quesType][quesId] = round(100 * acc, self.n) + + def setEvalAnsType(self, quesId, ansType, acc): + if ansType not in self.evalAnsType: + self.evalAnsType[ansType] = {} + self.evalAnsType[ansType][quesId] = round(100 * acc, self.n) + + def updateProgress(self, progress): + barLength = 20 + status = "" + if isinstance(progress, int): + progress = float(progress) + if not isinstance(progress, float): + progress = 0 + status = "error: progress var must be float\r\n" + if progress < 0: + progress = 0 + status = "Halt...\r\n" + if progress >= 1: + progress = 1 + status = "Done...\r\n" + block = int(round(barLength * progress)) + text = "\rFinished Percent: [{0}] {1}% {2}".format( + "#" * block + "-" * (barLength - block), int(progress * 100), status + ) + sys.stdout.write(text) + sys.stdout.flush() diff --git a/models/criteria.py b/models/criteria.py new file mode 100644 index 0000000..02a7726 --- /dev/null +++ b/models/criteria.py @@ -0,0 +1,654 @@ +from functools import lru_cache + +import torch +import torch.nn.functional as F +from torch import nn + +from models.utils import allgather_wgrad +from utils.dist import get_rank, get_world_size +from utils.easydict import EasyDict + + +def get_sim( + x_proj: torch.Tensor, + y_proj: torch.Tensor, + temp=1.0, +): + """calculate pair-wise similarity between two modalities x and y. + + Args: + x_proj (torch.Tensor): The representation of modality x. Shape: [B,T,C] or [B,C]. + y_proj (torch.Tensor): The representation of modality y. Shape: [B,C]. + temp (torch.Tensor): The temperature. Shape: []. + + Returns: The similarity between modality x and y. Shape: [B,B]. + + """ + x_proj = F.normalize(x_proj, dim=-1) + y_proj = F.normalize(y_proj, dim=-1) + assert x_proj.dim() in [2, 3] + assert y_proj.dim() == 2 + if x_proj.dim() == 2: + sim_x2y = torch.einsum("md,nd->mn", x_proj, y_proj) / temp # (B,B) + else: + sim_x2y = torch.einsum("mld,nd->mln", x_proj, y_proj).mean(1) / temp # (B,B) + sim_y2x = sim_x2y.T + return sim_x2y, sim_y2x + + +class ContMatchLoss(nn.Module): + def __init__(self): + super(ContMatchLoss, self).__init__() + + @torch.no_grad() + def get_mask(self, sim, idx=None, normalize=False): + """ + Args: + sim (torch.Tensor): The similarity between videos and texts. shape: (B, B). + idx (torch.Tensor): The index for each video. Shape: [B]. 
+ normalize (bool): If true, make row sum equal to 1 + """ + if idx is not None: + idx = idx.view(-1, 1) + mask = torch.eq(idx, idx.T).to(sim.dtype) + if normalize: + mask = mask / mask.sum(1, keepdim=True) + else: + mask = torch.zeros_like(sim) + mask.fill_diagonal_(1) + return mask # `1` mark valid/matched location + + @lru_cache(maxsize=16) + def get_gather_args(self): + """obtain the args for all_gather + Returns: dict. + + """ + return EasyDict({"world_size": get_world_size(), "rank": get_rank()}) + + +class STC_STM_Loss(ContMatchLoss): + """Contrastive and matching losses""" + + def __init__(self): + super(STC_STM_Loss, self).__init__() + + def stc_loss( + self, + temporal_proj: torch.Tensor, + spatial_proj: torch.Tensor, + idx: torch.Tensor, + temp=1.0, + all_gather=True + ): + + """forward to calculate the loss + + Args: + vision_proj (torch.Tensor): The vision representation. Shape: [B,T,C]. + text_proj (torch.Tensor): The text representation. Shape: [B,C]. + idx (torch.Tensor): The index for each example. Shape: [B,]. + temp (torch.Tensor): The temperature. Shape: []. + all_gather (bool): If true, will gather samples across all the GPUs and calculate loss across the gathered samples. + + Returns: loss_vtc (torch.Tensor): The video-text contrastive loss. Shape: []. + + """ + if all_gather: + gather_args = self.get_gather_args() + temporal_proj = allgather_wgrad(temporal_proj, gather_args) + spatial_proj = allgather_wgrad(spatial_proj, gather_args) + if idx is not None: + idx = allgather_wgrad(idx, gather_args) + + sim_t2s, sim_s2t = get_sim(temporal_proj, spatial_proj, temp) + + with torch.no_grad(): + sim_t2s_targets = self.get_mask(sim_t2s, idx=idx, normalize=True) + sim_s2t_targets = sim_t2s_targets + + loss_t2s = -torch.sum(F.log_softmax(sim_t2s, dim=1) * sim_t2s_targets, dim=1).mean() + loss_s2t = -torch.sum(F.log_softmax(sim_s2t, dim=1) * sim_s2t_targets, dim=1).mean() + + loss_stc = (loss_t2s + loss_s2t) / 2 + return loss_stc + + def stm_loss( + self, + grounding_expert, + stm_head, + # temp, + spatial_embeds_orig, + temporal_embeds_orig, + temporal_proj, + spatial_proj, + idx, + generation=False, + temp=1.0 + ): + spatial_embeds = spatial_embeds_orig.clone() + temporal_embeds = temporal_embeds_orig.clone() + with torch.no_grad(): + sim_s2t, sim_t2s = get_sim(temporal_proj, spatial_proj, temp) + spatial_atts = torch.ones( + spatial_embeds.size()[:-1], dtype=torch.long, device=spatial_embeds.device + ) + temporal_atts = torch.ones( + temporal_embeds.size()[:-1], dtype=torch.long, device=temporal_embeds.device + ) + weights_s2t = F.softmax(sim_s2t + 1e-4, dim=1) # (N, N) + weights_t2s = F.softmax(sim_t2s + 1e-4, dim=1) + + mask = self.get_mask(sim_s2t, idx=idx).bool() + weights_s2t.masked_fill_(mask, 0) + weights_t2s.masked_fill_(mask, 0) + weights_s2t = torch.nan_to_num_(weights_s2t, nan=1e-2, posinf=1e-2, neginf=1e-2) + weights_t2s = torch.nan_to_num_(weights_t2s, nan=1e-2, posinf=1e-2, neginf=1e-2) + + if generation: + with torch.no_grad(): + output = grounding_expert( + encoder_embeds=temporal_embeds, + attention_mask=temporal_atts, + encoder_hidden_states=spatial_embeds, + encoder_attention_mask=spatial_atts, + return_dict=True, + ) + pos_feats = output.last_hidden_state + return pos_feats + + else: + # select a hard negatives within the batch + spatial_neg_indices = torch.multinomial(weights_s2t, 1).squeeze() + temporal_neg_indices = torch.multinomial(weights_t2s, 1).squeeze() + + + spatial_embeds_neg = spatial_embeds[spatial_neg_indices] # [B, L, c] + 
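`stm_loss` draws its hard negatives with a masked, re-normalized softmax over the similarity matrix: entries that share an index with the anchor are zeroed out via `get_mask` before `torch.multinomial` samples one negative per row. A self-contained toy version of that recipe (batch size and values are made up, not repo data):

```python
# Toy illustration of the hard-negative sampling used above (not repo code).
import torch
import torch.nn.functional as F

torch.manual_seed(0)
B = 4
sim = torch.randn(B, B)                 # e.g. sim_s2t from get_sim()
weights = F.softmax(sim + 1e-4, dim=1)  # per-row sampling distribution

mask = torch.eye(B, dtype=torch.bool)   # get_mask() without idx: the diagonal
weights = weights.masked_fill(mask, 0)  # never sample the matched pair itself
weights = torch.nan_to_num(weights, nan=1e-2, posinf=1e-2, neginf=1e-2)

neg_indices = torch.multinomial(weights, 1).squeeze()  # one negative per row
assert not (neg_indices == torch.arange(B)).any()      # negatives differ from anchors
```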
temporal_embeds_neg = temporal_embeds[temporal_neg_indices] # [B, L, d] + # temporal_atts_neg = temporal_atts[temporal_neg_indices] + + # concat embeddings + spatial_embeds_all = torch.cat([spatial_embeds, spatial_embeds_neg, spatial_embeds], dim=0) + temporal_embeds_all = torch.cat([temporal_embeds, temporal_embeds, temporal_embeds_neg], dim=0) + spatial_atts_all = torch.cat([spatial_atts, spatial_atts, spatial_atts], dim=0) + temporal_atts_all = torch.cat([temporal_atts, temporal_atts, temporal_atts], dim=0) + + output = grounding_expert( + inputs_embeds=temporal_embeds_all, + attention_mask=temporal_atts_all, + cross_embeds=spatial_embeds_all, + cross_attention_mask=spatial_atts_all, + ) + + stm_embeds = output.last_hidden_state[:, 0] # pos (N, d) + neg (2N, d) + + stm_logits = stm_head(stm_embeds) # [3*B, 2] + + bs = stm_logits.shape[0] // 3 + stm_labels = stm_logits.new_ones(3 * bs, dtype=torch.long) + stm_labels[bs:] = 0 + loss_stm = F.cross_entropy(stm_logits, stm_labels) + pos_feats = output.last_hidden_state[:bs] + + return loss_stm, pos_feats + + +class VCC_VCM_Loss(ContMatchLoss): + """Contrastive and matching losses""" + + def __init__(self): + super(VCC_VCM_Loss, self).__init__() + + def vcc_loss( + self, + vis_proj: torch.Tensor, + cap_proj: torch.Tensor, + idx: torch.Tensor, + temp=1.0, + all_gather=True + ): + + """forward to calculate the loss + + Args: + vision_proj (torch.Tensor): The vision representation. Shape: [B,T,C]. + text_proj (torch.Tensor): The text representation. Shape: [B,C]. + idx (torch.Tensor): The index for each example. Shape: [B,]. + temp (torch.Tensor): The temperature. Shape: []. + all_gather (bool): If true, will gather samples across all the GPUs and calculate loss across the gathered samples. + + Returns: loss_vtc (torch.Tensor): The video-text contrastive loss. Shape: []. 
+ + """ + if all_gather: + gather_args = self.get_gather_args() + vis_proj = allgather_wgrad(vis_proj, gather_args) + cap_proj = allgather_wgrad(cap_proj, gather_args) + if idx is not None: + idx = allgather_wgrad(idx, gather_args) + + sim_v2c, sim_c2v = get_sim(vis_proj, cap_proj, temp) + + with torch.no_grad(): + sim_v2c_targets = self.get_mask(sim_v2c, idx=idx, normalize=True) + sim_c2v_targets = sim_v2c_targets + + loss_v2c = -torch.sum(F.log_softmax(sim_v2c, dim=1) * sim_v2c_targets, dim=1).mean() + loss_c2v = -torch.sum(F.log_softmax(sim_c2v, dim=1) * sim_c2v_targets, dim=1).mean() + + loss_vcc = (loss_v2c + loss_c2v) / 2 + return loss_vcc + + def vcm_loss( + self, + grounding_expert, + vcm_head, + vis_embeds_orig, + cap_embeds_orig, + vis_proj, + cap_proj, + cap_atts, + idx, + generation=False, + temp=1.0 + ): + vis_embeds = vis_embeds_orig.clone() + cap_embeds = cap_embeds_orig.clone() + + with torch.no_grad(): + sim_v2c, sim_c2v = get_sim(vis_proj, cap_proj, temp) + vis_atts = torch.ones( + vis_embeds.size()[:-1], dtype=torch.long, device=vis_embeds.device + ) + + weights_v2c = F.softmax(sim_v2c + 1e-4, dim=1) # (N, N) + weights_c2v = F.softmax(sim_c2v + 1e-4, dim=1) + + mask = self.get_mask(weights_v2c, idx=idx).bool() + weights_v2c.masked_fill_(mask, 0) + weights_c2v.masked_fill_(mask, 0) + weights_v2c = torch.nan_to_num_(weights_v2c, nan=1e-2, posinf=1e-2, neginf=1e-2) + weights_c2v = torch.nan_to_num_(weights_c2v, nan=1e-2, posinf=1e-2, neginf=1e-2) + + if generation: + with torch.no_grad(): + output = grounding_expert( + encoder_embeds=cap_embeds, + attention_mask=cap_atts, + encoder_hidden_states=vis_embeds, + encoder_attention_mask=vis_atts, + return_dict=True, + ) + pos_feats = output.last_hidden_state + return pos_feats + + else: + + + # select a hard negatives within the batch + vis_neg_indices = torch.multinomial(weights_v2c, 1).squeeze() + cap_neg_indices = torch.multinomial(weights_c2v, 1).squeeze() + + + vis_embeds_neg = vis_embeds[vis_neg_indices] # [B, L, c] + cap_embeds_neg = cap_embeds[cap_neg_indices] # [B, L, d] + cap_atts_neg = cap_atts[cap_neg_indices] + + # concat embeddings + vis_embeds_all = torch.cat([vis_embeds, vis_embeds_neg, vis_embeds], dim=0) + cap_embeds_all = torch.cat([cap_embeds, cap_embeds, cap_embeds_neg], dim=0) + vis_atts_all = torch.cat([vis_atts, vis_atts, vis_atts], dim=0) + cap_atts_all = torch.cat([cap_atts, cap_atts, cap_atts_neg], dim=0) + + output = grounding_expert( + inputs_embeds=cap_embeds_all, + attention_mask=cap_atts_all, + cross_embeds=vis_embeds_all, + cross_attention_mask=vis_atts_all, + ) + + vcm_embeds = output.last_hidden_state[:, 0] # pos (N, d) + neg (2N, d) + + vcm_logits = vcm_head(vcm_embeds) # [3*B, 2] + + bs = vcm_logits.shape[0] // 3 + vcm_labels = vcm_logits.new_ones(3 * bs, dtype=torch.long) + vcm_labels[bs:] = 0 + loss_vcm = F.cross_entropy(vcm_logits, vcm_labels) + pos_feats = output.last_hidden_state[:bs] + return loss_vcm, pos_feats + + +class VHC_VHM_Loss(ContMatchLoss): + """Contrastive and matching losses""" + + def __init__(self): + super(VHC_VHM_Loss, self).__init__() + + def vhc_loss( + self, + vis_proj: torch.Tensor, + hist_proj: torch.Tensor, + idx: torch.Tensor, + temp=1.0, + all_gather=True + ): + + """forward to calculate the loss + + Args: + vision_proj (torch.Tensor): The vision representation. Shape: [B,T,C]. + text_proj (torch.Tensor): The text representation. Shape: [B,C]. + idx (torch.Tensor): The index for each example. Shape: [B,]. + temp (torch.Tensor): The temperature. Shape: []. 
+ all_gather (bool): If true, will gather samples across all the GPUs and calculate loss across the gathered samples. + + Returns: loss_vtc (torch.Tensor): The video-text contrastive loss. Shape: []. + + """ + if all_gather: + gather_args = self.get_gather_args() + vis_proj = allgather_wgrad(vis_proj, gather_args) + hist_proj = allgather_wgrad(hist_proj, gather_args) + if idx is not None: + idx = allgather_wgrad(idx, gather_args) + + sim_v2h, sim_h2v = get_sim(vis_proj, hist_proj, temp) + + with torch.no_grad(): + sim_v2h_targets = self.get_mask(sim_v2h, idx=idx, normalize=True) + sim_h2v_targets = sim_v2h_targets + + loss_v2h = -torch.sum(F.log_softmax(sim_v2h, dim=1) * sim_v2h_targets, dim=1).mean() + loss_h2v = -torch.sum(F.log_softmax(sim_h2v, dim=1) * sim_h2v_targets, dim=1).mean() + + loss_vhc = (loss_v2h + loss_h2v) / 2 + return loss_vhc + + def vhm_loss( + self, + grounding_expert, + vhm_head, + vis_embeds_orig, + hist_embeds_orig, + vis_proj, + hist_proj, + hist_atts, + idx, + generation=False, + temp=1.0, + ): + vis_embeds = vis_embeds_orig.clone() + hist_embeds = hist_embeds_orig.clone() + with torch.no_grad(): + sim_v2h, sim_h2v = get_sim(vis_proj, hist_proj, temp) + vis_atts = torch.ones( + vis_embeds.size()[:-1], dtype=torch.long, device=vis_embeds.device + ) + + weights_v2h = F.softmax(sim_v2h + 1e-4, dim=1) # (N, N) + weights_h2v = F.softmax(sim_h2v + 1e-4, dim=1) + + mask = self.get_mask(weights_v2h, idx=idx).bool() + weights_v2h.masked_fill_(mask, 0) + weights_h2v.masked_fill_(mask, 0) + weights_v2h = torch.nan_to_num_(weights_v2h, nan=1e-2, posinf=1e-2, neginf=1e-2) + weights_h2v = torch.nan_to_num_(weights_h2v, nan=1e-2, posinf=1e-2, neginf=1e-2) + + if generation: + with torch.no_grad(): + output = grounding_expert( + encoder_embeds=hist_embeds, + attention_mask=hist_atts, + encoder_hidden_states=vis_embeds, + encoder_attention_mask=vis_atts, + return_dict=True, + # mode="fusion", + ) + pos_feats = output.last_hidden_state + return pos_feats + + else: + # select a hard negatives within the batch + vis_neg_indices = torch.multinomial(weights_v2h, 1).squeeze() + hist_neg_indices = torch.multinomial(weights_h2v, 1).squeeze() + + vis_embeds_neg = vis_embeds[vis_neg_indices] # [B, L, c] + hist_embeds_neg = hist_embeds[hist_neg_indices] # [B, L, d] + hist_atts_neg = hist_atts[hist_neg_indices] + + # concat embeddings + vis_embeds_all = torch.cat([vis_embeds, vis_embeds_neg, vis_embeds], dim=0) + hist_embeds_all = torch.cat([hist_embeds, hist_embeds, hist_embeds_neg], dim=0) + vis_atts_all = torch.cat([vis_atts, vis_atts, vis_atts], dim=0) + hist_atts_all = torch.cat([hist_atts, hist_atts, hist_atts_neg], dim=0) + + output = grounding_expert( + inputs_embeds=hist_embeds_all, + attention_mask=hist_atts_all, + cross_embeds=vis_embeds_all, + cross_attention_mask=vis_atts_all, + ) + + vhm_embeds = output.last_hidden_state[:, 0] # pos (N, d) + neg (2N, d) + + vhm_logits = vhm_head(vhm_embeds) # [3*B, 2] + + bs = vhm_logits.shape[0] // 3 + vhm_labels = vhm_logits.new_ones(3 * bs, dtype=torch.long) + vhm_labels[bs:] = 0 + loss_vhm = F.cross_entropy(vhm_logits, vhm_labels) + pos_feats = output.last_hidden_state[:bs] + + return loss_vhm, pos_feats + + +class CHC_CHM_Loss(ContMatchLoss): + """Contrastive and matching losses""" + + def __init__(self): + super(CHC_CHM_Loss, self).__init__() + + def chc_loss( + self, + cap_proj: torch.Tensor, + hist_proj: torch.Tensor, + idx: torch.Tensor, + temp=1.0, + all_gather=True + ): + + """forward to calculate the loss + + Args: + vision_proj 
(torch.Tensor): The vision representation. Shape: [B,T,C]. + text_proj (torch.Tensor): The text representation. Shape: [B,C]. + idx (torch.Tensor): The index for each example. Shape: [B,]. + temp (torch.Tensor): The temperature. Shape: []. + all_gather (bool): If true, will gather samples across all the GPUs and calculate loss across the gathered samples. + + Returns: loss_vtc (torch.Tensor): The video-text contrastive loss. Shape: []. + + """ + if all_gather: + gather_args = self.get_gather_args() + cap_proj = allgather_wgrad(cap_proj, gather_args) + hist_proj = allgather_wgrad(hist_proj, gather_args) + if idx is not None: + idx = allgather_wgrad(idx, gather_args) + + sim_c2h, sim_h2c = get_sim(cap_proj, hist_proj, temp) + + with torch.no_grad(): + sim_c2h_targets = self.get_mask(sim_c2h, idx=idx, normalize=True) + sim_h2c_targets = sim_c2h_targets + + loss_c2h = -torch.sum(F.log_softmax(sim_c2h, dim=1) * sim_c2h_targets, dim=1).mean() + loss_h2c = -torch.sum(F.log_softmax(sim_h2c, dim=1) * sim_h2c_targets, dim=1).mean() + + loss_chc = (loss_c2h + loss_h2c) / 2 + return loss_chc + + def chm_loss( + self, + grounding_expert, + chm_head, + cap_embeds_orig, + hist_embeds_orig, + cap_proj, + hist_proj, + cap_atts, + hist_atts, + idx, + generation=False, + temp=1.0 + ): + cap_embeds = cap_embeds_orig.clone() + hist_embeds = hist_embeds_orig.clone() + with torch.no_grad(): + sim_c2h, sim_h2c = get_sim(cap_proj, hist_proj, temp) + + weights_c2h = F.softmax(sim_c2h + 1e-4, dim=1) # (N, N) + weights_h2c = F.softmax(sim_h2c + 1e-4, dim=1) + + mask = self.get_mask(weights_c2h, idx=idx).bool() + weights_c2h.masked_fill_(mask, 0) + weights_h2c.masked_fill_(mask, 0) + weights_c2h = torch.nan_to_num_(weights_c2h, nan=1e-2, posinf=1e-2, neginf=1e-2) + weights_h2c = torch.nan_to_num_(weights_h2c, nan=1e-2, posinf=1e-2, neginf=1e-2) + + if generation: + with torch.no_grad(): + output = grounding_expert( + encoder_embeds=hist_embeds, + attention_mask=hist_atts, + encoder_hidden_states=cap_embeds, + encoder_attention_mask=cap_atts, + return_dict=True, + ) + pos_feats = output.last_hidden_state + return pos_feats + else: + # select a hard negatives within the batch + cap_neg_indices = torch.multinomial(weights_c2h, 1).squeeze() + hist_neg_indices = torch.multinomial(weights_h2c, 1).squeeze() + + cap_embeds_neg = cap_embeds[cap_neg_indices] # [B, L, c] + cap_atts_neg = cap_atts[cap_neg_indices] + hist_embeds_neg = hist_embeds[hist_neg_indices] # [B, L, d] + hist_atts_neg = hist_atts[hist_neg_indices] + + # concat embeddings + cap_embeds_all = torch.cat([cap_embeds, cap_embeds_neg, cap_embeds], dim=0) + hist_embeds_all = torch.cat([hist_embeds, hist_embeds, hist_embeds_neg], dim=0) + cap_atts_all = torch.cat([cap_atts, cap_atts_neg, cap_atts], dim=0) + hist_atts_all = torch.cat([hist_atts, hist_atts, hist_atts_neg], dim=0) + + output = grounding_expert( + inputs_embeds=hist_embeds_all, + attention_mask=hist_atts_all, + cross_embeds=cap_embeds_all, + cross_attention_mask=cap_atts_all, + ) + + chm_embeds = output.last_hidden_state[:, 0] # pos (N, d) + neg (2N, d) + + chm_logits = chm_head(chm_embeds) # [3*B, 2] + + bs = chm_logits.shape[0] // 3 + chm_labels = chm_logits.new_ones(3 * bs, dtype=torch.long) + chm_labels[bs:] = 0 + loss_chm = F.cross_entropy(chm_logits, chm_labels) + pos_feats = output.last_hidden_state[:bs] + return loss_chm, pos_feats + + +class MLMLoss(nn.Module): + """masked language modeling loss.""" + + def __init__(self, masking_prob, tokenizer): + super(MLMLoss, self).__init__() + 
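+ # masking_prob is the per-token probability that a token is selected for masking; + # mask() below replaces 80% of the selected tokens with the [MASK] token, + # 10% with a random token, and leaves the remaining 10% unchanged.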
self.tokenizer = tokenizer + self.masking_prob = masking_prob + + def mlm_loss( + self, + text_encoder, + text, + text_embeds, + vision_embeds, + vision_atts, + ): + input_ids = text.input_ids.clone() + labels = input_ids.clone() + probability_matrix = torch.full(labels.shape, self.masking_prob) + input_ids, labels = self.mask( + input_ids, + text_encoder.config.vocab_size, + input_ids.device, + targets=labels, + probability_matrix=probability_matrix, + ) + + # intermediate_mlm_output = text_encoder.bert( + # input_ids, + # attention_mask=text.attention_mask, + # encoder_hidden_states=vision_embeds, + # encoder_attention_mask=vision_atts, + # return_dict=True, + # # mode="text", + # ) + + # text_embeds = intermediate_mlm_output.last_hidden_state + + mlm_output = text_encoder( + encoder_embeds=text_embeds, + attention_mask=text.attention_mask, + encoder_hidden_states=vision_embeds, + encoder_attention_mask=vision_atts, + return_dict=True, + labels=labels, + soft_labels=None, + # mode="fusion", + ) + return mlm_output.loss + + def mask( + self, + input_ids, + vocab_size, + device, + targets=None, + masked_indices=None, + probability_matrix=None, + ): + if masked_indices is None: + masked_indices = torch.bernoulli(probability_matrix).bool() + + masked_indices[input_ids == self.tokenizer.pad_token_id] = False + masked_indices[input_ids == self.tokenizer.cls_token_id] = False + + if targets is not None: + # We only compute loss on masked tokens + targets[~masked_indices] = -100 + + # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) + indices_replaced = ( + torch.bernoulli(torch.full(input_ids.shape, 0.8)).bool() & masked_indices + ) + input_ids[indices_replaced] = self.tokenizer.mask_token_id + + # 10% of the time, we replace masked input tokens with random word + indices_random = ( + torch.bernoulli(torch.full(input_ids.shape, 0.5)).bool() + & masked_indices + & ~indices_replaced + ) + random_words = torch.randint(vocab_size, input_ids.shape, dtype=torch.long).to(device) + input_ids[indices_random] = random_words[indices_random] + # The rest of the time (10% of the time) we keep the masked input tokens unchanged + + if targets is not None: + return input_ids, targets + else: + return input_ids diff --git a/models/modules/__init__.py b/models/modules/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/models/modules/temporal_modelling.py b/models/modules/temporal_modelling.py new file mode 100644 index 0000000..55f2d50 --- /dev/null +++ b/models/modules/temporal_modelling.py @@ -0,0 +1,286 @@ +import logging +import math + +import einops +import torch +from einops import rearrange +from timm.models.layers.drop import DropPath +from torch import nn +from torch.nn import LayerNorm, Linear, MultiheadAttention + +logger = logging.getLogger(__name__) + + +class STAdapter(nn.Module): + """ST Adapter""" + + def __init__( + self, + kernel_size=(3, 3, 3), + input_dim=768, + hidden_dim=384, + img_size=224, + patch_size=16, + drop_prob=0.1, + ): + super(STAdapter, self).__init__() + self.kernel_size = kernel_size + self.input_dim = input_dim + self.hidden_dim = hidden_dim + + self.h = self.w = img_size // patch_size + + self.linear1 = nn.Linear(input_dim, hidden_dim) + self.linear2 = nn.Linear(hidden_dim, input_dim) + self.act = nn.ReLU() + self.conv = nn.Conv3d( + hidden_dim, hidden_dim, kernel_size=kernel_size, padding="same", groups=hidden_dim + ) + self.droppath = DropPath(drop_prob=drop_prob) + + self.scale = nn.parameter.Parameter(torch.zeros([])) + 
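`STAdapter.forward` (directly below) bottlenecks the tokens, applies a depthwise 3-D convolution across time and space, and adds the result back through a zero-initialized residual scale, so the adapter starts out as an identity. A quick shape check under assumed toy dimensions (batch size, frame count, and ViT-B/16 geometry here are illustrative, not repo config):

```python
# Hedged sketch: exercise STAdapter from models/modules/temporal_modelling.py.
import torch
from models.modules.temporal_modelling import STAdapter

adapter = STAdapter(input_dim=768, hidden_dim=384, img_size=224, patch_size=16)
x = torch.randn(2, 4, 1 + 14 * 14, 768)  # [bs, nframes, 1 + h*w, c], h = w = 224 // 16
out = adapter(x)
assert out.shape == x.shape              # residual adapter preserves the shape

img = torch.randn(2, 1, 1 + 14 * 14, 768)
assert adapter(img) is img               # single-frame (image) input is returned as-is
```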
+ def forward(self, x: torch.Tensor): + """forward + + Args: + x (torch.Tensor): input features. Shape: [bs, nframes, l, c]. l = 1 + h*w + + Returns: features after adapter. The same shape as input. + + """ + if x.shape[1] == 1: # for single frame, return itself. + return x + + shortcut = x + x = self.linear1(x) + cls = x[:, :, :1, :] + tokens = x[:, :, 1:, :] + tokens = einops.rearrange(tokens, "b t (h w) c -> b c t h w", h=self.h).contiguous() + tokens = self.conv(tokens) + tokens = einops.rearrange(tokens, "b c t h w -> b t (h w) c") + x = torch.cat([cls, tokens], dim=2) # [b, t, 1+h*w, c] + x = self.act(x) + x = self.linear2(x) + + return shortcut + self.scale * self.droppath(x) + + +class SpatialAttention(nn.Module): + """Perform spatial self-attention""" + + def __init__(self, input_dim=768, droppath_rate=0.1): + super(SpatialAttention, self).__init__() + self.attn = MultiheadAttention(input_dim, num_heads=input_dim // 64, batch_first=True) + self.norm = LayerNorm(input_dim, eps=1e-12) + self.linear = Linear(input_dim, input_dim) + self.droppath = DropPath(droppath_rate) + # self.scale = nn.parameter.Parameter(torch.zeros([])) + self.scale = 1.0 + + def forward(self, x: torch.Tensor): + if x.shape[1] == 1: + x = self.norm(x) + x = einops.rearrange(x, "b t l c -> b (t l) c") + return x # return self if media is image + + shortcut = x + x = einops.rearrange(x, 'b t l c -> (b t) l c') + x = self.norm(x) + x = self.attn(x, x, x)[0] + x = einops.rearrange(x, "(b t) l c -> b t l c", b=shortcut.shape[0]) + x = shortcut + self.scale * self.droppath(x) + x = einops.rearrange(x, "b t l c -> b (t l) c") + return x + + +class TemporalAttention(nn.Module): + + """perform temporal self-attention""" + + def __init__(self, input_dim=768, droppath_rate=0.1): + """ + + Kwargs: + input_dim (int): The input feature dimension. + + + """ + super(TemporalAttention, self).__init__() + + self._input_dim = input_dim + self.attn = MultiheadAttention(input_dim, num_heads=input_dim // 64, batch_first=True) + self.norm = LayerNorm(input_dim, eps=1e-12) + self.linear = Linear(input_dim, input_dim) + self.droppath = DropPath(droppath_rate) + # self.scale = nn.parameter.Parameter(torch.zeros([])) + self.scale = 1.0 + + def forward(self, x: torch.Tensor): + """forward + + Args: + x (torch.Tensor): input features. Shape: [bs, nframes, l, c]. l = 1 + h*w + + Returns: features after adapter. The same shape as input. + + """ + if x.shape[1] == 1: # for single frame, return itself. + x = self.norm(x) + x = einops.rearrange(x, "b t l c -> b (t l) c") + return x + + shortcut = x + x = einops.rearrange(x, "b t l c -> (b l) t c") + x = self.norm(x) + x = self.attn(x, x, x)[0] + x = einops.rearrange(x, "(b l) t c -> b t l c", b=shortcut.shape[0]) + x = shortcut + self.scale * self.droppath(x) + x = einops.rearrange(x, "b t l c -> b (t l) c") + return x + + +class WindowTemporalAttention(nn.Module): + + """perform windowed temporal self-attention""" + + def __init__(self, input_dim=768, droppath_rate=0.1, window_size=(2, 2)): + """ + + Kwargs: + input_dim (int): The input feature dimension. 
+ + + """ + super().__init__() + + self._input_dim = input_dim + self.temporal_attn = MultiheadAttention(input_dim, num_heads=input_dim // 64) + self.norm = LayerNorm(input_dim, eps=1e-12) + self.droppath = DropPath(droppath_rate) + self.scale = nn.parameter.Parameter(torch.zeros([])) + self.wh, self.ww = window_size + # logger.info(f"WindowTemporalAttention: window_size: {window_size}") + + def forward(self, x: torch.Tensor): + """forward + + Args: + x (torch.Tensor): input features. Shape: [bs, nframes, l, c]. l = 1 + h*w + + Returns: features after adapter. The same shape as input. + + """ + if x.shape[1] == 1: # for single frame, return itself. + return x + shortcut = x + + h = w = int(math.sqrt(x.shape[2] - 1)) + cls_token = x[:, :, :1, :] + x = einops.rearrange( + x[:, :, 1:, :], + "b t (nh wh nw ww) c -> (t wh ww) (b nh nw) c", + nh=h // self.wh, + wh=self.wh, + nw=w // self.ww, + ww=self.ww, + ) + x = self.norm(x) + x = self.temporal_attn(x, x, x)[0] + x = einops.rearrange( + x, + "(t wh ww) (b nh nw) c -> b t (nh wh nw ww) c", + wh=self.wh, + ww=self.ww, + nh=h // self.wh, + nw=w // self.ww, + ) + # add back cls token. + x = torch.concat([cls_token, x], dim=2) + return shortcut + self.scale * self.droppath(x) + + +class X_CLIP(nn.Module): + + """perform windowed temporal self-attention""" + + def __init__(self, input_dim=768, droppath_rate=0.1, num_prompts=1): + """ + + Kwargs: + input_dim (int): The input feature dimension. + + + """ + super().__init__() + + d_model = input_dim + + self.message_fc = nn.Linear(d_model, d_model) + self.message_ln = LayerNorm(d_model, eps=1e-12) + self.message_attn = nn.MultiheadAttention(d_model, d_model // 64) + self.num_prompts = num_prompts + + self.droppath = DropPath(droppath_rate) + + def forward(self, x: torch.Tensor): + """forward + + Args: + x (torch.Tensor): input features. Shape: [bs, nframes, l, c]. l = 1 + h*w + + Returns: features after adapter. The same shape as input. + + """ + if x.shape[1] == 1: # for single frame, return itself. + return x + msg_token = self.message_ln(self.message_fc(x[:, :, 0, :])) # [b, t, c] + msg_token = rearrange(msg_token, "b t c -> t b c") + msg_token = msg_token + self.droppath( + self.message_attn(msg_token, msg_token, msg_token)[0] + ) + msg_token = rearrange(msg_token, "t b c -> b t c") + # replace the last prompt token with msg_token. + x = torch.cat([x[:, :, :-1, :], msg_token.unsqueeze(2)], dim=2) # [b, t, l+1, c] + return x + + +class TemporalS4(nn.Module): + + """perform temporal self-attention""" + + def __init__(self, input_dim=768, droppath_rate=0.1): + """ + + Kwargs: + input_dim (int): The input feature dimension. + + + """ + super().__init__() + from .s4 import S4 + + self._input_dim = input_dim + self.norm = LayerNorm(input_dim, eps=1e-12) + self.droppath = DropPath(droppath_rate) + self.scale = nn.parameter.Parameter(torch.zeros([])) + self.s4 = S4(d_model=input_dim, bidirectional=True, transposed=True) + + def forward(self, x: torch.Tensor): + """forward + + Args: + x (torch.Tensor): input features. Shape: [bs, nframes, l, c]. l = 1 + h*w + + Returns: features after adapter. The same shape as input. + + """ + if x.shape[1] == 1: # for single frame, return itself. 
+ return x + + shortcut = x + x = self.norm(x) + x = einops.rearrange(x, "b t l c -> b c (t l)") + x, _ = self.s4(x) + x = einops.rearrange(x, "b c (t l) -> b t l c", t=shortcut.shape[1]) + return shortcut + self.scale * self.droppath(x) diff --git a/models/setup.py b/models/setup.py new file mode 100644 index 0000000..95d5be2 --- /dev/null +++ b/models/setup.py @@ -0,0 +1,358 @@ +import copy +import os.path as osp +import glog as logger + +import torch +from torch.utils.data import ConcatDataset +from models.backbones.beit.builder import interpolate_pos_embed_beit +from models.backbones.bert.tokenization_bert import BertTokenizer +from transformers import T5Tokenizer, BartTokenizer, LlamaTokenizer +from utils.optimizer import create_optimizer +from utils.scheduler import create_scheduler +from datasets.dataloader import load_dataloaders +from datasets.pretraining import load_datasets as load_datasets_stage_1 +from datasets.visdial_dataset import load_visdial_dataset +from datasets.champagne_dataset import load_champagne_dataset +from datasets.nextqa_dataset import load_nextqa_dataset +from datasets.avsd_dataset import load_avsd_dataset +# from datasets.avsd_dataset_like_mixer import load_avsd_dataset + +from processors.blip_processors import Blip2ImageTrainProcessor +from processors.blip_processors import BlipCaptionProcessor, BlipDialogProcessor + +from utils.init import set_training_steps +# from models.v2dial import V2Dial, V2DialBase +from models.v2dial import V2DialBase, V2Dial, V2DialNoMoes + +# from datasets.avsd_dataset import get_dataset, AVSDDataSet +from torch.utils.data import DataLoader + + +def setup_model( + config, has_decoder=False, pretrain=False, find_unused_parameters=True +): + logger.info("Creating model") + + if config['stage'] == 'stage_1': + config = copy.deepcopy(config) + + # tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') + # model = V2DialBase(config=config, expert_tokenizer=tokenizer) + model = V2DialBase(config) + model = model.to(torch.device('cuda')) + model_without_ddp = model + optimizer = create_optimizer(config, model) + scheduler = create_scheduler(config, optimizer) + scaler = torch.cuda.amp.GradScaler(enabled=config.fp16) + + if config['distributed']: + model = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[config['gpu']], + find_unused_parameters=find_unused_parameters, # `False` for image-only task + ) + + start_epoch = 0 + global_step = 0 + webvid_step = 0 + cc3m_step = 0 + + if osp.isfile(config['pretrained_path']): + logger.info(f"Loading checkpoint from {config['pretrained_path']}") + checkpoint = torch.load(config['pretrained_path'], map_location="cpu") + state_dict = checkpoint["model"] + + if config.resume: + optimizer.load_state_dict(checkpoint["optimizer"]) + scheduler.load_state_dict(checkpoint["scheduler"]) + scaler.load_state_dict(checkpoint["scaler"]) + start_epoch = checkpoint["epoch"] + 1 + global_step = checkpoint["global_step"] + elif not pretrain: # downstream init from pretrained ckpt + + # interpolate positional embeddings. + state_dict = interpolate_pos_embed_beit(state_dict, model_without_ddp)
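When a downstream model is initialized from a pretrained checkpoint, the re-keying loop that follows turns the top text-encoder layers into decoder layers by subtracting a fixed offset of 9 from the layer index and renaming the prefix. A toy illustration of that arithmetic; the key and the `fusion_layer` value are examples, not values from the repo config:

```python
# Illustrative only: mirrors the encoder-to-decoder key remapping below.
fusion_layer = 9  # assumed example value for config.model.text_encoder.fusion_layer
key = "text_encoder.bert.encoder.layer.11.attention.self.query.weight"

parts = key.split(".")
layer_num = int(parts[4])            # 11
if layer_num >= fusion_layer:        # layers below fusion_layer are dropped
    parts[4] = str(layer_num - 9)    # 11 -> 2
    decoder_key = ".".join(parts).replace("text_encoder", "text_decoder")
    print(decoder_key)               # text_decoder.bert.encoder.layer.2...query.weight
```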
+ + + #TODO Might need to update to match the MoEs + if not config.evaluate: # finetuning from pretrained weights. + for key in list(state_dict.keys()): + if "bert" in key: + encoder_key = key.replace("bert.", "") + state_dict[encoder_key] = state_dict[key] + if not has_decoder: + del state_dict[key] + + # init text decoder as multimodal encoder (last 6 layers of model.text_encoder) + # only for generation tasks like VQA + if has_decoder and "text_encoder" in key: + if "layer" in key: + encoder_keys = key.split(".") + layer_num = int(encoder_keys[4]) + if layer_num < config.model.text_encoder.fusion_layer: + del state_dict[key] + continue + else: + decoder_layer_num = layer_num - 9 + encoder_keys[4] = str(decoder_layer_num) + encoder_key = ".".join(encoder_keys) + else: + encoder_key = key + decoder_key = encoder_key.replace("text_encoder", "text_decoder") + state_dict[decoder_key] = state_dict[key] + del state_dict[key] + + msg = model_without_ddp.load_state_dict(state_dict, strict=False) + logger.info(msg) + logger.info(f"Loaded checkpoint from {config.pretrained_path}") + else: + logger.warning("No pretrained checkpoint provided, training from scratch") + + return ( + model, + model_without_ddp, + optimizer, + scheduler, + scaler, + start_epoch, + global_step, + webvid_step, + cc3m_step, + config + ) + else: + # config = copy.deepcopy(config) + # if config['use_original_feats']: + # model = AVSDBart(config) + # else: + # # model = V2Dial(config, tokenizer_experts, tokenizer_enc_dec) + # if config.use_moes: + model = V2Dial(config) + # else: + # model = V2DialNoMoes(config) + + model = model.to(torch.device('cuda')) + model_without_ddp = model + + optimizer = None + scheduler = None + scaler = None + + start_epoch = 0 + global_step = 0 + if config['stage'] == 'stage_3': + visdial_step = 0 + avsd_step = 0 + nextqa_step = 0 + + ckpt_path = config.pretrained_path_resume if config.resume else config.pretrained_path_prev_stage + if config.generating: + ckpt_path = config.best_ckpt_path + + if osp.isfile(ckpt_path): + logger.info(f"Loading checkpoint from {ckpt_path}") + checkpoint = torch.load(ckpt_path, map_location="cpu") + state_dict = checkpoint["model"] + + if config.resume: + optimizer.load_state_dict(checkpoint["optimizer"]) + scheduler.load_state_dict(checkpoint["scheduler"]) + scaler.load_state_dict(checkpoint["scaler"]) + start_epoch = checkpoint["epoch"] + 1 + global_step = checkpoint["global_step"] + if config['stage'] == 'stage_3': + visdial_step = checkpoint['visdial_step'] + avsd_step = checkpoint['avsd_step'] + nextqa_step = checkpoint['nextqa_step'] + + + if config['stage'] in ['stage_2', 'stage_3'] and config.use_moes: + # Init. the history expert weights with the caption expert weights
+                # Initialize the history expert weights with the caption expert weights
+                p_names = [
+                    'moe_layers.{}.norm_hist.weight',
+                    'moe_layers.{}.mlp_hist.fc1.weight',
+                    'moe_layers.{}.mlp_hist.fc1.bias',
+                    'moe_layers.{}.mlp_hist.fc2.weight',
+                    'moe_layers.{}.mlp_hist.fc2.bias',
+                ]
+
+                for moe_layer_idx in range(config.num_moe_modality_layers):
+                    for p_name in p_names:
+                        p_hist_name = p_name.format(moe_layer_idx)
+                        if p_hist_name not in state_dict:
+                            p_cap_name = p_hist_name.replace('hist', 'cap')
+                            state_dict[p_hist_name] = state_dict[p_cap_name].clone()
+
+            msg = model_without_ddp.load_state_dict(state_dict, strict=False)
+            logger.info(msg)
+
+            logger.info(f"Loaded checkpoint from {ckpt_path}")
+        else:
+            logger.warning("No pretrained checkpoint provided, training from scratch")
+
+        if config['training'] and optimizer is None:  # skip if already created while resuming
+            optimizer = create_optimizer(config, model_without_ddp)
+            scheduler = create_scheduler(config, optimizer)
+            scaler = torch.cuda.amp.GradScaler(enabled=config.fp16)
+
+        elif config['generating']:
+            model.llm.set_input_embeddings(model.text_embedding)
+
+        if config['distributed']:
+
+            static_graph = config.stage != 'stage_1'
+            if len(config.media_train) > 0:
+                static_graph = False
+
+            model = torch.nn.parallel.DistributedDataParallel(
+                model_without_ddp,
+                device_ids=[config['gpu']],
+                find_unused_parameters=find_unused_parameters,  # `False` for image-only task
+                static_graph=static_graph
+            )
+
+        if config['stage'] == 'stage_3':
+            return (
+                model,
+                model_without_ddp,
+                optimizer,
+                scheduler,
+                scaler,
+                start_epoch,
+                global_step,
+                visdial_step,
+                avsd_step,
+                nextqa_step,
+                config
+            )
+        return (
+            model,
+            model_without_ddp,
+            optimizer,
+            scheduler,
+            scaler,
+            start_epoch,
+            global_step,
+            config
+        )
+
+
+def setup_data(config):
+    logger.info("[INFO] Creating datasets")
+
+    # define the processors
+    vis_processor = Blip2ImageTrainProcessor(image_size=config.image_res)
+
+    if config['stage'] == 'stage_1':
+        text_processor = BlipCaptionProcessor(max_words=config.max_cap_len)
+
+        if config['debugging']:
+            train_datasets = load_datasets_stage_1(config, vis_processor, text_processor, 'val')
+        else:
+            train_datasets = load_datasets_stage_1(config, vis_processor, text_processor, 'train')
+
+        val_datasets = load_datasets_stage_1(config, vis_processor, text_processor, 'val')
+
+        # cc3m_dataset = ConcatDataset([train_datasets['cc3m'], val_datasets['cc3m']])
+
+        # webvid_dataset = ConcatDataset([train_datasets['webvid'], val_datasets['webvid']])
+
+        # train_datasets = [cc3m_dataset, webvid_dataset]
+        train_datasets = list(train_datasets.values())
+        val_datasets = list(val_datasets.values())
+
+        batch_sizes = [config['batch_size_cc3m'], config['batch_size_webvid']]
+        num_samples = [len(d) for d in train_datasets]
+        config = set_training_steps(config, num_samples, batch_sizes)
+
+        train_dataloaders = load_dataloaders(config, train_datasets, 'train', output_dict=True)
+        val_dataloaders = load_dataloaders(config, val_datasets, 'val', output_dict=True)
+
+        # val_datasets = load_datasets_stage_1(config, vis_processor, text_processor, 'test')
+
+        # val_dataloader = load_dataloaders(config, val_datasets, 'test', output_dict=True)
+
+    if config['stage'] == 'stage_2':
+        text_processor = BlipDialogProcessor(max_words=config.max_text_len)  # max_words = 50
+        train_datasets = [load_champagne_dataset(config, vis_processor, text_processor, 'train')]
+        val_datasets = [load_champagne_dataset(config, vis_processor, text_processor, 'val')]
+        batch_sizes = [config['batch_size_champagne']]
+        num_samples = [len(d) for d in
train_datasets] + config = set_training_steps(config, num_samples, batch_sizes) + + train_dataloaders = load_dataloaders(config, train_datasets, 'train', output_dict=True) + val_dataloaders = load_dataloaders(config, val_datasets, 'val', output_dict=True) + + + if config['stage'] == 'stage_3': + text_processor = BlipDialogProcessor(max_words=config.max_text_len) # max_words = 50 + train_datasets = [] + val_datasets = [] + for medium in config['media_train']: + if medium == 'visdial': + load_dataset_fn = load_visdial_dataset + elif medium == 'avsd': + load_dataset_fn = load_avsd_dataset + elif medium == 'nextqa': + load_dataset_fn = load_nextqa_dataset + # elif medium == 'champagne': + # load_dataset_fn = load_champagne_dataset + + train_datasets.append(load_dataset_fn(config, vis_processor, text_processor, 'train')) + + for medium in config['media_val']: + if medium == 'visdial': + load_dataset_fn = load_visdial_dataset + elif medium == 'avsd': + load_dataset_fn = load_avsd_dataset + elif medium == 'nextqa': + load_dataset_fn = load_nextqa_dataset + # elif medium == 'champagne': + # load_dataset_fn = load_champagne_dataset + + val_datasets.append(load_dataset_fn(config, vis_processor, text_processor, 'val')) + + batch_sizes = [d.batch_size for d in train_datasets] + num_samples = [len(d) for d in train_datasets] + config = set_training_steps(config, num_samples, batch_sizes) + + train_dataloaders = load_dataloaders(config, train_datasets, 'train', output_dict=True) + + val_dataloaders = load_dataloaders(config, val_datasets, 'val', output_dict=True) + + return train_dataloaders, val_dataloaders + + +def setup_data_test(config): + vis_processor = Blip2ImageTrainProcessor(image_size=config.image_res) + text_processor = BlipDialogProcessor(max_words=config.max_text_len) # max_words = 50 + + if config.media_test == 'visdial': + load_dataset_fn = load_visdial_dataset + elif config.media_test == 'avsd': + load_dataset_fn = load_avsd_dataset + elif config.media_test == 'nextqa': + load_dataset_fn = load_nextqa_dataset + test_dataset = load_dataset_fn(config, vis_processor, text_processor, 'test') + + test_dataloader = DataLoader( + test_dataset, shuffle=False, batch_size=test_dataset.batch_size) + + return test_dataloader + + +# def setup_data_test(config, args): +# tokenizer_experts = BertTokenizer.from_pretrained('bert-base-uncased') +# tokenizer_enc_dec = None +# if config.enc_dec_family == 'flan_t5': +# tokenizer_enc_dec = T5Tokenizer.from_pretrained(config.enc_dec_name) +# elif config.enc_dec_family == 'bart': +# tokenizer_enc_dec = BartTokenizer.from_pretrained(config.enc_dec_name) +# if config['tie_embeddings']: +# tokenizer_experts = tokenizer_enc_dec + +# if config['medium'] == 'avsd': +# test_dataset = AVSDDataSet(config, 'avsd', tokenizer_experts, tokenizer_enc_dec, 'test') +# test_dataloader = DataLoader( +# test_dataset, shuffle=False, batch_size=test_dataset.batch_size, collate_fn=test_dataset.collate_fn) +# return test_dataloader diff --git a/models/utils.py b/models/utils.py new file mode 100644 index 0000000..5799643 --- /dev/null +++ b/models/utils.py @@ -0,0 +1,266 @@ +import logging + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from scipy import interpolate +from typing import List + +logger = logging.getLogger(__name__) + + +class MLM: + def __init__( + self, + mask_token: int, + padding_token: int, + no_mask_tokens: List[int], + n_tokens: int, + masking_prob: float = 0.15, + randomize_prob: float = 0.1, + no_change_prob: float = 
0.1 + ): + self.mask_token = mask_token + self.padding_token = padding_token + self.no_mask_tokens = list(set(no_mask_tokens + [padding_token, mask_token])) + self.n_tokens = n_tokens + self.masking_prob = masking_prob + self.randomize_prob = randomize_prob + self.no_change_prob = no_change_prob + + def __call__(self, x: torch.Tensor): + full_mask = torch.rand(x.shape, device=x.device) < self.masking_prob + for tok in self.no_mask_tokens: + full_mask &= x != tok # unmask unwanted tokens --> 0 + + unchanged_mask = full_mask & (torch.rand(x.shape, device=x.device) < self.no_change_prob) + random_token_mask = full_mask & (torch.rand(x.shape, device=x.device) < self.randomize_prob) + random_token_idx = torch.nonzero(random_token_mask, as_tuple=True) + random_tokens = torch.randint(0, self.n_tokens, (len(random_token_idx[0]),), device=x.device) + mask = full_mask & ~random_token_mask & ~unchanged_mask + + y = x.clone().detach() + x.masked_fill_(mask, self.mask_token) + x[random_token_idx] = random_tokens + y.masked_fill_(~full_mask, self.padding_token) + + return x, y + + + +def _init_transformer_weights(module, initializer_range=0.02): + """Initialize the weights. Copied from transformers ViT/Bert model init""" + if isinstance(module, (nn.Linear, nn.Conv2d)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=initializer_range) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + +def load_temp_embed_with_mismatch(temp_embed_old, temp_embed_new, add_zero=True): + """ + Add/Remove extra temporal_embeddings as needed. + https://arxiv.org/abs/2104.00650 shows adding zero paddings works. + + temp_embed_old: (1, num_frames_old, 1, d) + temp_embed_new: (1, num_frames_new, 1, d) + add_zero: bool, if True, add zero, else, interpolate trained embeddings. + """ + # TODO zero pad + num_frms_new = temp_embed_new.shape[1] + num_frms_old = temp_embed_old.shape[1] + logger.info(f"Load temporal_embeddings, lengths: {num_frms_old}-->{num_frms_new}") + if num_frms_new > num_frms_old: + if add_zero: + temp_embed_new[ + :, :num_frms_old + ] = temp_embed_old # untrained embeddings are zeros. 
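+            # Positions beyond num_frms_old keep whatever temp_embed_new was
+            # initialized with; with a zero init this is exactly the zero-padding
+            # scheme the docstring above refers to (arXiv:2104.00650).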
+ else: + temp_embed_new = interpolate_temporal_pos_embed(temp_embed_old, num_frms_new) + elif num_frms_new < num_frms_old: + temp_embed_new = temp_embed_old[:, :num_frms_new] + else: # = + temp_embed_new = temp_embed_old + return temp_embed_new + + +def interpolate_temporal_pos_embed(temp_embed_old, num_frames_new): + """ + temp_embed_old: (1, num_frames_old, 1, d) + Returns: + temp_embed_new: (1, num_frames_new, 1, d) + """ + temp_embed_old = temp_embed_old.squeeze(2).permute( + 0, 2, 1 + ) # (1, d, num_frames_old) + temp_embed_new = F.interpolate( + temp_embed_old, num_frames_new, mode="linear" + ) # (1, d, num_frames_new) + temp_embed_new = temp_embed_new.permute(0, 2, 1).unsqueeze( + 2 + ) # (1, num_frames_new, 1, d) + return temp_embed_new + + +def interpolate_pos_embed(pos_embed_old, pos_embed_new, num_patches_new): + """ + Args: + pos_embed_old: (1, L_old, d), pre-trained + pos_embed_new: (1, L_new, d), newly initialized, to be replaced by interpolated weights + num_patches_new: + """ + # interpolate position embedding + embedding_size = pos_embed_old.shape[-1] + num_extra_tokens = pos_embed_new.shape[-2] - num_patches_new + # height (== width) for the checkpoint position embedding + orig_size = int((pos_embed_old.shape[-2] - num_extra_tokens) ** 0.5) + # height (== width) for the new position embedding + new_size = int(num_patches_new ** 0.5) + + if orig_size != new_size: + # class_token and dist_token are kept unchanged + # the extra tokens seems always at the beginning of the position embedding + extra_tokens = pos_embed_old[:, :num_extra_tokens] + # only the position tokens are interpolated + pos_tokens = pos_embed_old[:, num_extra_tokens:] + pos_tokens = pos_tokens.reshape( + -1, orig_size, orig_size, embedding_size + ).permute(0, 3, 1, 2) + pos_tokens = torch.nn.functional.interpolate( + pos_tokens, size=(new_size, new_size), mode="bicubic", align_corners=False + ) + pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2) + interpolated_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) + logger.info(f"reshape position embedding from {orig_size}**2 to {new_size}**2") + return interpolated_pos_embed + else: + return pos_embed_old + + +def interpolate_pos_relative_bias_beit(state_dict_old, state_dict_new, patch_shape_new): + """ + Args: + state_dict_old: loaded state dict + state_dict_new: state dict for model with new image size + patch_shape_new: new model patch_shape + ref: https://github.com/microsoft/unilm/blob/master/beit/run_class_finetuning.py + """ + all_keys = list(state_dict_old.keys()) + for key in all_keys: + if "relative_position_index" in key: + state_dict_old.pop(key) + + if "relative_position_bias_table" in key: + rel_pos_bias = state_dict_old[key] + src_num_pos, num_attn_heads = rel_pos_bias.size() + dst_num_pos, _ = state_dict_new[key].size() + dst_patch_shape = patch_shape_new + if dst_patch_shape[0] != dst_patch_shape[1]: + raise NotImplementedError() + num_extra_tokens = dst_num_pos - (dst_patch_shape[0] * 2 - 1) * ( + dst_patch_shape[1] * 2 - 1 + ) + src_size = int((src_num_pos - num_extra_tokens) ** 0.5) + dst_size = int((dst_num_pos - num_extra_tokens) ** 0.5) + if src_size != dst_size: + # logger.info("Position interpolate for %s from %dx%d to %dx%d" % ( + # key, src_size, src_size, dst_size, dst_size)) + extra_tokens = rel_pos_bias[-num_extra_tokens:, :] + rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :] + + def geometric_progression(a, r, n): + return a * (1.0 - r ** n) / (1.0 - r) + + left, right = 1.01, 1.5 + while right - left > 1e-6: + 
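+                    # Bisect the geometric ratio q so that src_size // 2 intervals
+                    # with lengths 1, q, q**2, ... sum to (roughly) dst_size // 2,
+                    # e.g. for src_size = 13, dst_size = 27: find q with
+                    # 1 + q + ... + q**5 ~= 13 (q ~= 1.3).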
q = (left + right) / 2.0 + gp = geometric_progression(1, q, src_size // 2) + if gp > dst_size // 2: + right = q + else: + left = q + + # if q > 1.090307: + # q = 1.090307 + + dis = [] + cur = 1 + for i in range(src_size // 2): + dis.append(cur) + cur += q ** (i + 1) + + r_ids = [-_ for _ in reversed(dis)] + + x = r_ids + [0] + dis + y = r_ids + [0] + dis + + t = dst_size // 2.0 + dx = np.arange(-t, t + 0.1, 1.0) + dy = np.arange(-t, t + 0.1, 1.0) + + # logger.info("Original positions = %s" % str(x)) + # logger.info("Target positions = %s" % str(dx)) + + all_rel_pos_bias = [] + + for i in range(num_attn_heads): + z = rel_pos_bias[:, i].view(src_size, src_size).float().numpy() + f = interpolate.interp2d(x, y, z, kind="cubic") + all_rel_pos_bias.append( + torch.Tensor(f(dx, dy)) + .contiguous() + .view(-1, 1) + .to(rel_pos_bias.device) + ) + + rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1) + + new_rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0) + state_dict_old[key] = new_rel_pos_bias + return state_dict_old + + +def tile(x, dim, n_tile): + init_dim = x.size(dim) + repeat_idx = [1] * x.dim() + repeat_idx[dim] = n_tile + x = x.repeat(*repeat_idx) + order_index = torch.LongTensor( + np.concatenate([init_dim * np.arange(n_tile) + i for i in range(init_dim)]) + ) + return torch.index_select(x, dim, order_index.to(x.device)) + + +def mask_logits(target, mask): + return target * mask + (1 - mask) * (-1e10) + + +class AllGather(torch.autograd.Function): + """An autograd function that performs allgather on a tensor.""" + + @staticmethod + def forward(ctx, tensor, args): + output = [torch.empty_like(tensor) for _ in range(args.world_size)] + torch.distributed.all_gather(output, tensor) + ctx.rank = args.rank + ctx.batch_size = tensor.shape[0] + return torch.cat(output, dim=0) + + @staticmethod + def backward(ctx, grad_output): + return ( + grad_output[ctx.batch_size * ctx.rank : ctx.batch_size * (ctx.rank + 1)], + None, + ) + + +allgather_wgrad = AllGather.apply diff --git a/models/v2dial.py b/models/v2dial.py new file mode 100644 index 0000000..be665c2 --- /dev/null +++ b/models/v2dial.py @@ -0,0 +1,2213 @@ +import json +import re +import glog as logging +import random +import os + +import torch +from torch.cuda.amp import autocast as autocast +import torch.nn as nn +import torch.nn.functional as F +import torch.distributed as dist +# from minigpt4.common.registry import registry +from .backbones.blip2 import Blip2Base, disabled_train +from transformers.models.t5.modeling_t5 import T5ForConditionalGeneration +from transformers.models.bart.modeling_bart import BartForConditionalGeneration +# from .backbones.encoder_decoder.xflan_t5 import T5ForConditionalGeneration +from .backbones.modeling_mistral import MistralForCausalLM +from .backbones.modeling_llama_v2 import LlamaForCausalLM +from .backbones.moes import MoELayer, Pooler +# from .backbones.moes_huggingface import MoEPooler +# from .backbones.moes_huggingface import MoELayer, MoEPooler +from .modules.temporal_modelling import SpatialAttention, TemporalAttention +from .common.dist_utils import concat_all_gather, all_gather_with_grad +from .utils import MLM +from utils.dist import is_main_process + +# from minigpt4.models.modeling_llama_v2 import LlamaForCausalLM as llm_model +# minigpt4.models.modeling_mistral import MistralForCausalLM as llm_model +# from minigpt4.conversation.conversation import Conversation, SeparatorStyle, StoppingCriteriaList, StoppingCriteriaSub + +from transformers import AutoTokenizer, 
DataCollatorForLanguageModeling +from transformers import BitsAndBytesConfig + +from transformers.models.bert.modeling_bert import BertConfig, BertEmbeddings + +from peft import ( + LoraConfig, + get_peft_model, + get_peft_model_state_dict, + prepare_model_for_kbit_training, + set_peft_model_state_dict, +) +import time +import numpy as np + +# from minigpt4.models import policies + +class V2DialAbstract(Blip2Base): + def __init__(self): + super(V2DialAbstract, self).__init__() + + def shift_right(self, input_ids): + decoder_start_token_id = self.llm.config.decoder_start_token_id + pad_token_id = self.llm.config.pad_token_id + + if decoder_start_token_id is None: + raise ValueError( + "self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. " + "See T5 docs for more information." + ) + + + shifted_input_ids = input_ids.new_zeros(input_ids.shape) + shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() + shifted_input_ids[..., 0] = decoder_start_token_id + + if pad_token_id is None: + raise ValueError("self.model.config.pad_token_id has to be defined.") + # replace possible -100 values in labels by `pad_token_id` + shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) + + return shifted_input_ids + + def encode_vis(self, image, device, is_vid=True): + num_frames = image.size(1) + bs_pre_reshape = image.size(0) + if len(image.shape) > 4: + image = image.view(-1, *image.shape[-3:]) # for video input flatten the batch and time dimension (4,50,3,224,224) -> (200,3,224,224) + # with self.maybe_autocast(): # inherited from Blip2Base + image_embeds = self.ln_vision(self.visual_encoder(image)).to(device) # (200,3,224,224) -> (200,257,1408) + image_embeds = image_embeds[:,1:,:] # remove the first token (CLS) (200,256,1408) + + bs, pn, hs = image_embeds.shape + if self.vit_token_pooling: # concat the each 4 tokens into one token (200,64,5632) + image_embeds = image_embeds.view(bs, int(pn/4), int(hs*4)) # (200,64,5632) + + vis_embed = self.vit_proj(image_embeds) # project to LLM input size (200,64,5632) -> (200,64, d_hidden) + + # reshape the video features + vis_embed = vis_embed.view(bs_pre_reshape, num_frames, -1, vis_embed.size(-1)) + + # Perfrom spatial temporal attention + vis_embed_spatial = self.spatial_att(vis_embed) + vis_feat_len = vis_embed_spatial.size(1) + + if not self.config.embed_from_llm: + vis_embed_spatial = vis_embed_spatial + self.token_type_embedding(torch.zeros(bs_pre_reshape, vis_feat_len).long().to(device)) + vis_spatial_mask = torch.ones((bs_pre_reshape, vis_feat_len)).to(device) + + vis_embed_temporal, vis_temporal_mask = None, None + + if is_vid: + vis_embed_temporal = self.temporal_att(vis_embed) + if not self.config.embed_from_llm: + vis_embed_temporal = vis_embed_temporal + self.token_type_embedding(torch.ones(bs_pre_reshape, vis_feat_len).long().to(device)) + vis_temporal_mask = torch.ones((bs_pre_reshape, vis_feat_len)).to(device) + + return vis_embed_spatial, vis_spatial_mask, vis_embed_temporal, vis_temporal_mask + + def tokenize_text(self, text, device, add_bos=False, add_eos=False, max_len=None): + if max_len: + text_tokenized = self.tokenizer( + text, + return_tensors='pt', + padding='max_length', + max_length=max_len, + truncation=True, + add_special_tokens=False, + return_special_tokens_mask=True + ).to(device) + else: + text_tokenized = self.tokenizer( + text, + return_tensors='pt', + padding='longest', + add_special_tokens=False, + return_special_tokens_mask=True + ).to(device) + + text_ids = 
text_tokenized.input_ids
+        text_attention_mask = text_tokenized.attention_mask
+
+        if add_bos:
+            bos_ids = torch.LongTensor(text_ids.size(0), 1).fill_(self.tokenizer.bos_token_id).to(device)
+            bos_att = torch.LongTensor(text_ids.size(0), 1).fill_(1).to(device)
+
+            text_ids = torch.cat([bos_ids, text_ids], dim=1)
+            text_attention_mask = torch.cat([bos_att, text_attention_mask], dim=1)
+
+        if add_eos:
+            eos_ids = torch.LongTensor(text_ids.size(0), 1).fill_(self.tokenizer.eos_token_id).to(device)
+            eos_att = torch.LongTensor(text_ids.size(0), 1).fill_(1).to(device)
+
+            text_ids = torch.cat([text_ids, eos_ids], dim=1)
+            text_attention_mask = torch.cat([text_attention_mask, eos_att], dim=1)
+
+
+        return text_ids, text_attention_mask
+
+    def get_extended_attention_mask(self, attention_mask=None):
+        if attention_mask.dim() == 2:
+            extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
+        elif attention_mask.dim() == 3:
+            extended_attention_mask = attention_mask.unsqueeze(1)
+        else:
+            raise NotImplementedError
+
+        return extended_attention_mask
+
+    @staticmethod
+    def init_weights(module):
+        if isinstance(module, (nn.Linear, nn.Embedding)):
+            module.weight.data.normal_(mean=0.0, std=0.02)
+        elif isinstance(module, nn.LayerNorm):
+            module.bias.data.zero_()
+            module.weight.data.fill_(1.0)
+
+        if isinstance(module, nn.Linear) and module.bias is not None:
+            module.bias.data.zero_()
+
+
+
+class V2DialBase(V2DialAbstract):
+    def __init__(self, config):
+        super(V2DialBase, self).__init__()
+        self.config = config
+
+        ################## 1. Select Tokenizer -- We use BERT tokenizer ##################
+        bert_config = BertConfig.from_pretrained('bert-{}-uncased'.format(config.expert_size))
+
+        tokenizer = AutoTokenizer.from_pretrained('bert-{}-uncased'.format(config.expert_size))
+
+        text_embedding = BertEmbeddings(bert_config)
+        text_embedding.apply(self.init_weights)
+
+        token_type_embedding = nn.Embedding(3, bert_config.hidden_size)  # Number of modality types (temp/spa/text)
+        token_type_embedding.apply(self.init_weights)
+
+        # Define the masking strategy
+        mlm_collactor = DataCollatorForLanguageModeling(
+            tokenizer, mlm=True, mlm_probability=config.masking_prob, return_tensors='pt')
+
+        ################## 2. Select the backbone ViT ##################
+        logging.info('[INFO] Loading ViT in progress')
+        if config.freeze_vit:
+            # vit_precision = 'fp16' if config.fp16 else 'fp32'
+            logging.info(f'[INFO] ViT precision: {config.vit_precision}')
+            visual_encoder, ln_vision = self.init_vision_encoder(
+                config.vit_model, config.image_res, drop_path_rate=0, use_grad_checkpoint=False, precision=config.vit_precision
+            )
+            for name, param in visual_encoder.named_parameters():
+                param.requires_grad = False
+            visual_encoder = visual_encoder.eval()
+            visual_encoder.train = disabled_train
+            for name, param in ln_vision.named_parameters():
+                param.requires_grad = False
+            ln_vision = ln_vision.eval()
+            ln_vision.train = disabled_train
+            logging.info('[INFO] ViT frozen')
+
+        else:
+            vit_precision = 'fp32'
+            visual_encoder, ln_vision = self.init_vision_encoder(
+                config.vit_model, config.image_res, drop_path_rate=0, use_grad_checkpoint=False, precision=vit_precision
+            )
+            logging.info('[INFO] ViT hot')
+        logging.info('[INFO] ViT successfully loaded')
+
+        ################## 3.
Define the ViT-Expert communication Interface ################## + self.system_prompt = False + self.vit_token_pooling = config.vit_token_pooling + if self.vit_token_pooling: + vit_proj = nn.Linear( + 1408*4, bert_config.hidden_size + ) + else: + vit_proj = nn.Linear( + 1408, bert_config.hidden_size + ) + vit_proj.apply(self.init_weights) + + spatial_att = SpatialAttention(input_dim=bert_config.hidden_size) + temporal_att = TemporalAttention(input_dim=bert_config.hidden_size) + + spatial_att.apply(self.init_weights) + temporal_att.apply(self.init_weights) + + ################## 4. Define the Expert layers ################## + moe_layers = [] + + for moe_layer_idx in range(config.num_moe_layers): + if moe_layer_idx < self.config.num_moe_modality_layers: + expert_flag = 'modalities' + else: + expert_flag = 'fusion' + moe_layer = MoELayer( + bert_config.hidden_size, + bert_config.num_attention_heads, + expert_flag, + use_sep_spatial_temp_experts=config.use_sep_spatial_temp_experts + ) + moe_layer.apply(self.init_weights) + moe_layers.append(moe_layer) + + logging.info(f'[INFO] {moe_layer_idx+1}/{config.num_moe_layers} MoE layers successfully loaded') + + moe_layers = nn.ModuleList(moe_layers) + moe_norm = nn.LayerNorm(bert_config.hidden_size) + + ################## 5. Define the projection layers for contrastive learning ################## + temp_proj = nn.Linear(bert_config.hidden_size, config.joint_dim) + spatial_proj = nn.Linear(bert_config.hidden_size, config.joint_dim) + vision_proj = nn.Linear(bert_config.hidden_size, config.joint_dim) + cap_proj = nn.Linear(bert_config.hidden_size, config.joint_dim) + + temp_proj.apply(self.init_weights) + spatial_proj.apply(self.init_weights) + vision_proj.apply(self.init_weights) + cap_proj.apply(self.init_weights) + + ################## 6. Define the pooler for matching loss ################## + pooler = Pooler(bert_config.hidden_size) + pooler.apply(self.init_weights) + + ################## 5. 
Attach the matching heads ##################
+        stm_head = nn.Linear(bert_config.hidden_size, 2)
+        vcm_head = nn.Linear(bert_config.hidden_size, 2)
+        lm_head = nn.Linear(bert_config.hidden_size, len(tokenizer))
+
+        stm_head.apply(self.init_weights)
+        vcm_head.apply(self.init_weights)
+        lm_head.apply(self.init_weights)
+
+        temp = nn.Parameter(0.07 * torch.ones([]))
+        # temp = 0.07
+
+        # Attach the components to self
+        self.tokenizer = tokenizer
+        self.mlm_collactor = mlm_collactor
+        self.text_embedding = text_embedding
+        self.token_type_embedding = token_type_embedding
+        self.visual_encoder = visual_encoder
+        self.ln_vision = ln_vision
+        self.vit_proj = vit_proj
+        self.moe_layers = moe_layers
+        self.moe_norm = moe_norm
+        self.spatial_att = spatial_att
+        self.temporal_att = temporal_att
+        self.temp_proj = temp_proj
+        self.spatial_proj = spatial_proj
+        self.vision_proj = vision_proj
+        self.cap_proj = cap_proj
+        self.pooler = pooler
+        self.stm_head = stm_head
+        self.vcm_head = vcm_head
+        self.lm_head = lm_head
+        self.temp = temp
+
+    @staticmethod
+    def init_weights(module):
+        if isinstance(module, (nn.Linear, nn.Embedding)):
+            module.weight.data.normal_(mean=0.0, std=0.02)
+        elif isinstance(module, nn.LayerNorm):
+            module.bias.data.zero_()
+            module.weight.data.fill_(1.0)
+
+        if isinstance(module, nn.Linear) and module.bias is not None:
+            module.bias.data.zero_()
+
+    def build_query_embeds(self, num_query_tokens, dim_query_tokens):
+        query_embeds = nn.Parameter(
+            torch.zeros(1, num_query_tokens, dim_query_tokens)
+        )
+        query_embeds.data.normal_(mean=0.0, std=0.02)
+        return query_embeds
+
+    def encode_caption(self, cap):
+        cap_output = self.cap_expert(
+            input_ids=cap.input_ids,
+            attention_mask=cap.attention_mask,
+            return_dict=True,
+        )
+        cap_embeds = cap_output.last_hidden_state
+        pooled_cap_embeds = cap_embeds[:, 0]
+        return cap_embeds, pooled_cap_embeds
+
+    def encode_vis_old(self, vis, media_type):
+        # if media_type == 'webvid':
+        #     bs, num_frames, c, h, w = vis.size()
+        #     # reshape
+        #     vis = vis.view(bs * num_frames, c, h, w)
+        vis_embed = self.beit(vis).last_hidden_state
+        # vis_embed = self.beit_layernorm(vis_output.last_hidden_state)
+        # remove cls token embedding
+        vis_embed = vis_embed[:, :, 1:, :]
+        vis_embed = self.beit_lin(vis_embed)
+        # perform spatial attention
+        vis_spatial_embed = self.spatial_att(vis_embed)
+        vis_temp_embed = self.temporal_att(vis_embed) if media_type in ['webvid', 'msrvtt', 'champagne', 'avsd'] else None
+
+        return vis_spatial_embed, vis_temp_embed
+
+    def encode_queries(self, query_embeds, vis_embeds, vis_mode):
+        if vis_mode == 'spatial':
+            expert = self.spatial_expert
+            layer_norm = self.spatial_layernorm
+        elif vis_mode == 'temporal':
+            expert = self.temporal_expert
+            layer_norm = self.temporal_layernorm
+        else:
+            raise ValueError(f'[ERROR] {vis_mode} not implemented!')
+
+        attention_mask = torch.ones(
+            query_embeds.size()[:-1], dtype=torch.long).to(vis_embeds.device)
+
+        vis_attention_mask = torch.ones(
+            vis_embeds.size()[:-1], dtype=torch.long).to(vis_embeds.device)
+
+        if self.config['expert_layer_type'] == 'bert':
+
+            output_dict = expert(
+                encoder_embeds=query_embeds,
+                encoder_hidden_states=vis_embeds,
+                encoder_attention_mask=vis_attention_mask,
+            )
+            query_embeds = layer_norm(output_dict.last_hidden_state)
+            pooled_query_embeds = output_dict.pooler_output
+
+        elif self.config['expert_layer_type'] == 'bart':
+            output_dict = expert(
+                inputs_embeds=query_embeds,
+                attention_mask=attention_mask,
+                cross_embeds=vis_embeds,
cross_attention_mask=vis_attention_mask, + ) + + query_embeds = layer_norm(output_dict.last_hidden_state) + pooled_query_embeds = query_embeds[:, 0] + + return query_embeds, pooled_query_embeds + + # def encode_vis(self, image, device, is_vid=True): + # num_frames = image.size(1) + # bs_pre_reshape = image.size(0) + # if len(image.shape) > 4: + # image = image.view(-1, *image.shape[-3:]) # for video input flatten the batch and time dimension (4,50,3,224,224) -> (200,3,224,224) + # # with self.maybe_autocast(): # inherited from Blip2Base + # image_embeds = self.ln_vision(self.visual_encoder(image)).to(device) # (200,3,224,224) -> (200,257,1408) + # image_embeds = image_embeds[:,1:,:] # remove the first token (CLS) (200,256,1408) + + # bs, pn, hs = image_embeds.shape + # if self.vit_token_pooling: # concat the each 4 tokens into one token (200,64,5632) + # image_embeds = image_embeds.view(bs, int(pn/4), int(hs*4)) # (200,64,5632) + + # vis_embed = self.vit_proj(image_embeds) # project to llama input size (200,64,5632) -> (200,64,4096) + + # # reshape the video features + # vis_embed = vis_embed.view(bs_pre_reshape, num_frames, -1, vis_embed.size(-1)) + + + # # Perfrom spatial temporal attention + # vis_embed_spatial = self.spatial_att(vis_embed) + # vis_feat_len = vis_embed_spatial.size(1) + + # vis_embed_spatial = vis_embed_spatial + self.token_type_embedding(torch.zeros(bs_pre_reshape, vis_feat_len).long().to(device)) + # vis_spatial_mask = torch.ones((bs_pre_reshape, vis_feat_len)).to(device) + + # vis_embed_temporal, vis_temporal_mask = None, None + + # if is_vid: + # vis_embed_temporal = self.temporal_att(vis_embed) + self.token_type_embedding(torch.ones(bs_pre_reshape, vis_feat_len).long().to(device)) + # vis_temporal_mask = torch.ones((bs_pre_reshape, vis_feat_len)).to(device) + + # return vis_embed_spatial, vis_spatial_mask, vis_embed_temporal, vis_temporal_mask + + def encode_vis_with_seq_spa_temp_att(self, image, device, is_vid=True): + num_frames = image.size(1) + bs_pre_reshape = image.size(0) + if len(image.shape) > 4: + image = image.view(-1, *image.shape[-3:]) # for video input flatten the batch and time dimension (4,50,3,224,224) -> (200,3,224,224) + # with self.maybe_autocast(): # inherited from Blip2Base + image_embeds = self.ln_vision(self.visual_encoder(image)).to(device) # (200,3,224,224) -> (200,257,1408) + image_embeds = image_embeds[:,1:,:] # remove the first token (CLS) (200,256,1408) + + bs, pn, hs = image_embeds.shape + if self.vit_token_pooling: # concat the each 4 tokens into one token (200,64,5632) + image_embeds = image_embeds.view(bs, int(pn/4), int(hs*4)) # (200,64,5632) + + vis_embed = self.vit_proj(image_embeds) # project to llama input size (200,64,5632) -> (200,64,4096) + + # reshape the video features + vis_embed = vis_embed.view(bs_pre_reshape, num_frames, -1, vis_embed.size(-1)) + size_orig = vis_embed.size() + + # Perfrom spatial temporal attention + vis_embed = self.spatial_att(vis_embed) + if is_vid: + vis_embed = vis_embed.view(size_orig) + vis_embed = self.temporal_att(vis_embed) + + vis_feat_len = vis_embed.size(1) + + vis_embed = vis_embed + self.token_type_embedding(torch.zeros(bs_pre_reshape, vis_feat_len).long().to(device)) + vis_mask = torch.ones((bs_pre_reshape, vis_feat_len)).to(device) + + return vis_embed, vis_mask + + def tokenize_text(self, text, device, add_bos=False, add_eos=False, max_len=None): + if max_len: + text_tokenized = self.tokenizer( + text, + return_tensors='pt', + padding='max_length', + max_length=max_len, + 
truncation=True, + add_special_tokens=False, + return_special_tokens_mask=True + ).to(device) + else: + text_tokenized = self.tokenizer( + text, + return_tensors='pt', + padding='longest', + add_special_tokens=False, + return_special_tokens_mask=True + ).to(device) + + text_ids = text_tokenized.input_ids + text_attention_mask = text_tokenized.attention_mask + + if add_bos: + bos_ids = torch.LongTensor(text_ids.size(0), 1).fill_(self.tokenizer.bos_token_id).to(device) + bos_att = torch.LongTensor(text_ids.size(0), 1).fill_(1).to(device) + + text_ids = torch.cat([bos_ids, text_ids], dim=1) + text_attention_mask = torch.cat([bos_att, text_attention_mask], dim=1) + + if add_eos: + eos_ids = torch.LongTensor(text_ids.size(0), 1).fill_(self.tokenizer.eos_token_id).to(device) + eos_att = torch.LongTensor(text_ids.size(0), 1).fill_(1).to(device) + + text_ids = torch.cat([text_ids, eos_ids], dim=1) + text_attention_mask = torch.cat([text_attention_mask, eos_att], dim=1) + + + return text_ids, text_attention_mask + + def encode_text(self, text, max_len, device): + text_tokenized = self.tokenizer( + text, + return_tensors='pt', + padding='max_length', + max_length=max_len, + truncation=True, + add_special_tokens=False + ).to(device) + text_ids = text_tokenized.input_ids + text_embeds = self.embed(text_ids) + text_attention_mask = text_tokenized.attention_mask + return text_embeds, text_ids, text_attention_mask + + def encode_spatial_toks(self, batch_size, device): + # ['', '', '', '', ''] + + special_toks_ids = self.tokenizer( + '', + return_tensors='pt', + padding='longest', + truncation=True, + add_special_tokens=False + ).to(device) + + special_toks_embeds = self.embed(special_toks_ids.input_ids) + special_toks_embeds = special_toks_embeds.repeat(batch_size, 1, 1) + return special_toks_embeds + + def construt_input_embeds_stage_1(self, vis_embed, cap_embed, special_toks_embeds, cap_attention_mask, media_type, device): + batch_size = vis_embed.size(0) + embed_dim = vis_embed.size(-1) + vis_embed = vis_embed.view(batch_size, -1, embed_dim) + + input_embeds = [] + input_attention_mask = [] + special_toks_indices = { + '': 0, + '': 1, + '': 2, + } + # special_toks_embeds = + # for video: [spatial_featurres][temporal_featurres][caption_features] + # for image: [spatial_featurres][caption_features] + + input_embeds.append(special_toks_embeds[:, 0:3, :]) # + input_attention_mask.append(torch.ones(input_embeds[-1].size()[:-1], dtype=torch.long).to(device)) + + input_embeds.append(vis_embed.clone()) # [spatial_features] + input_attention_mask.append(torch.ones(input_embeds[-1].size()[:-1], dtype=torch.long).to(device)) + + if media_type == 'webvid': + # here we copy the original vis_embeds twice and will apply spatial and temporal attention later + input_embeds.append(special_toks_embeds[:, 3:4, :]) # + input_attention_mask.append(torch.ones(input_embeds[-1].size()[:-1], dtype=torch.long).to(device)) + special_toks_indices[''] = special_toks_indices[''] + input_embeds[-2].size(1) + 1 + + input_embeds.append(vis_embed.clone()) # [temporal_features] + input_attention_mask.append(torch.ones(input_embeds[-1].size()[:-1], dtype=torch.long).to(device)) + + + input_embeds.append(special_toks_embeds[:, 4:5, :]) # + input_attention_mask.append(torch.ones(input_embeds[-1].size()[:-1], dtype=torch.long).to(device)) + + if media_type == 'webvid': + special_toks_indices[''] = special_toks_indices[''] + input_embeds[-2].size(1) + 1 + elif media_type == 'cc3m': + special_toks_indices[''] = special_toks_indices[''] + 
input_embeds[-2].size(1) + 1 + + input_embeds.append(cap_embed) # [caption_features] + input_attention_mask.append(cap_attention_mask) + + input_embeds.append(special_toks_embeds[:, 6:7, :]) # + input_attention_mask.append(torch.ones(input_embeds[-1].size()[:-1], dtype=torch.long).to(device)) + special_toks_indices[''] = special_toks_indices[''] + input_embeds[-2].size(1) + 1 + + input_embeds = torch.cat(input_embeds, dim=1) + input_attention_mask = torch.cat(input_attention_mask, dim=1) + assert input_embeds.size()[:-1] == input_attention_mask.size() + + return input_embeds, input_attention_mask, special_toks_indices + + def construct_global_input(self, cap_ids, cap_attention_mask, vid_feat_len, media_type, device): + # for video: [spatial_featurres][temporal_features][caption_features] + # for image: [spatial_featurres][caption_features] + batch_size = cap_ids.size(0) + special_toks_indices = { + '': 0, + '': 1, + '': 2, + } + + ids = [self.added_vocab['']] + [self.added_vocab['']] + [self.added_vocab['']] + ids += vid_feat_len * [self.added_vocab['']] + if media_type == 'webvid': + ids += [self.added_vocab['']] + special_toks_indices[''] = len(ids) - 1 + ids += vid_feat_len * [self.added_vocab['']] + + ids += [self.added_vocab['']] + special_toks_indices[''] = len(ids) - 1 + ids += cap_ids.size(1) * [self.added_vocab['']] + + ids += [self.added_vocab['']] + special_toks_indices[''] = len(ids) - 1 + total_len = len(ids) + + ids = torch.tensor(ids, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1) + + ids[:, special_toks_indices[''] + 1: special_toks_indices['']] = cap_ids + + mask = torch.ones((batch_size, total_len), device=device) + mask[:, special_toks_indices[''] + 1: special_toks_indices['']] = cap_attention_mask + + return ids, mask, special_toks_indices + + def compute_contrastive_loss(self, x, y_all, y, x_all): + sim_x2y = torch.mm(x, y_all.t()) # (bs, bs*ngpus) + sim_x2y = sim_x2y / self.temp + + sim_y2x = torch.mm(y, x_all.t()) # (bs, bs*ngpus) + sim_y2x = sim_y2x / self.temp + + rank = dist.get_rank() if self.config['distributed'] else 0 + + bs = x.size(0) + targets = torch.linspace(rank * bs, rank * bs + bs - 1, bs, dtype=int).to( + x.device + ) + loss_contrastive = ( + F.cross_entropy(sim_x2y, targets, label_smoothing=0.1) + + F.cross_entropy(sim_y2x, targets, label_smoothing=0.1) + ) / 2 + + return loss_contrastive, sim_x2y, sim_y2x + + def get_extended_attention_mask(self, attention_mask=None): + if attention_mask.dim() == 2: + extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) + elif attention_mask.dim() == 3: + extended_attention_mask = attention_mask.unsqueeze(1) + else: + raise NotImplementedError + + return extended_attention_mask + + def shared_forward( + self, + vis_spatial, vis_spatial_mask, vis_temporal, vis_temporal_mask, + cap_ids, cap_mask, is_vid, device): + + # is_vid = media_type == 'webvid' + # batch_size = len(cap) + vis_feat_len = vis_spatial.size(1) + input_embeds = [] + input_masks = [] + + input_embeds.append(vis_spatial) + input_masks.append(vis_spatial_mask) + + if is_vid: + input_embeds.append(vis_temporal) + input_masks.append(vis_temporal_mask) + + cap_embeds = self.text_embedding(cap_ids) + self.token_type_embedding(torch.ones_like(cap_ids).long().fill_(2)) + cap_feat_len = cap_embeds.size(1) + + input_embeds.append(cap_embeds) + input_masks.append(cap_mask) + + input_embeds = torch.cat(input_embeds, dim=1) + input_masks = torch.cat(input_masks, dim=1) + + # expand the mask + input_masks = 
self.get_extended_attention_mask(attention_mask=input_masks) + + # MoEs feed-forward + for moe_layer_idx, moe_layer in enumerate(self.moe_layers): + if moe_layer_idx < self.config.num_moe_modality_layers: + expert_flag = 'modalities' + else: + expert_flag = 'fusion' + + input_embeds = moe_layer(input_embeds, vis_feat_len, cap_feat_len, expert_flag, is_vid=is_vid, mask=input_masks) + + #TODO normalize the output () !!!!!! + input_embeds = self.moe_norm(input_embeds) + + # return the features + spatial_feats = input_embeds[:, :vis_feat_len] + temporal_feats = input_embeds[:, vis_feat_len:2*vis_feat_len] if is_vid else None + cap_feats = input_embeds[:, -cap_feat_len:] + cls_feats = self.pooler(cap_feats) + + moe_outputs = { + 'spatial_feats': spatial_feats, + 'temporal_feats': temporal_feats, + 'cap_feats': cap_feats, + 'cls_feats': cls_feats, + } + + return moe_outputs + + def shared_forward_no_sep_spatial_temporal_experts( + self, + vis, vis_mask, + cap_ids, cap_mask, is_vid, device): + + # is_vid = media_type == 'webvid' + # batch_size = len(cap) + vis_feat_len = vis.size(1) + input_embeds = [] + input_masks = [] + + input_embeds.append(vis) + input_masks.append(vis_mask) + + # if is_vid: + # input_embeds.append(vis_temporal) + # input_masks.append(vis_temporal_mask) + + cap_embeds = self.text_embedding(cap_ids) + self.token_type_embedding(torch.ones_like(cap_ids).long().fill_(2)) + cap_feat_len = cap_embeds.size(1) + + input_embeds.append(cap_embeds) + input_masks.append(cap_mask) + + input_embeds = torch.cat(input_embeds, dim=1) + input_masks = torch.cat(input_masks, dim=1) + + # expand the mask + input_masks = self.get_extended_attention_mask(attention_mask=input_masks) + + # MoEs feed-forward + for moe_layer_idx, moe_layer in enumerate(self.moe_layers): + if moe_layer_idx < self.config.num_moe_modality_layers: + expert_flag = 'modalities' + else: + expert_flag = 'fusion' + + input_embeds = moe_layer(input_embeds, vis_feat_len, cap_feat_len, expert_flag, is_vid=is_vid, mask=input_masks) + + #TODO normalize the output () !!!!!! 
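+        # The concatenated sequence here is [visual (vis_feat_len) | caption
+        # (cap_feat_len)]; after the MoE stack, the slices below split it back
+        # into per-modality features and the pooled caption token feeds the
+        # matching heads.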
+        input_embeds = self.moe_norm(input_embeds)
+
+        # return the features
+        vis_feats = input_embeds[:, :vis_feat_len]
+        cap_feats = input_embeds[:, -cap_feat_len:]
+        cls_feats = self.pooler(cap_feats)
+
+        moe_outputs = {
+            'vis_feats': vis_feats,
+            'cap_feats': cap_feats,
+            'cls_feats': cls_feats,
+        }
+
+        return moe_outputs
+
+    def vcm_iteration(self, vis, cap, neg_vis, is_vid, device):
+        # Prepare the vis data
+        # is_vid = media_type == 'webvid'
+        num_positive_samples = len(cap) // 2
+        num_negative_samples = len(cap) - num_positive_samples
+
+        vcm_labels = torch.cat([torch.ones(num_positive_samples), torch.zeros(num_negative_samples)]).to(device)
+        vcm_labels = vcm_labels[torch.randperm(vcm_labels.size(0))].long()
+
+        # now get the mixed vis data
+
+        vis_mixed = [p if vcm_labels[i] == 1 else n for i, (p, n) in enumerate(zip(vis, neg_vis))]
+        vis_mixed = torch.stack(vis_mixed, dim=0)
+
+        cap_ids, cap_mask = self.tokenize_text(cap, device, max_len=self.config.max_cap_len)
+
+        if self.config.use_sep_spatial_temp_experts:
+            vis_embed_spatial, vis_spatial_mask, vis_embed_temporal, vis_temporal_mask = self.encode_vis(vis_mixed, device, is_vid=is_vid)
+            moe_outputs = self.shared_forward(
+                vis_embed_spatial, vis_spatial_mask, vis_embed_temporal, vis_temporal_mask, cap_ids, cap_mask, is_vid, device)
+        else:
+            vis_embed, vis_mask = self.encode_vis_with_seq_spa_temp_att(vis_mixed, device, is_vid=is_vid)
+            moe_outputs = self.shared_forward_no_sep_spatial_temporal_experts(
+                vis_embed, vis_mask, cap_ids, cap_mask, is_vid, device)
+
+        vcm_logits = self.vcm_head(moe_outputs['cls_feats'])
+        loss_vcm = F.cross_entropy(vcm_logits, vcm_labels)
+        return loss_vcm
+
+    def stm_iteration(self, vis, cap, neg_vis, is_vid, device):
+        num_positive_samples = len(cap) // 2
+        num_negative_samples = len(cap) - num_positive_samples
+
+        stm_labels = torch.cat([torch.ones(num_positive_samples), torch.zeros(num_negative_samples)]).to(device)
+        stm_labels = stm_labels[torch.randperm(stm_labels.size(0))].long()
+
+        vis_embed_spatial, vis_spatial_mask, vis_embed_temporal, vis_temporal_mask = self.encode_vis(vis, device, is_vid=is_vid)
+        neg_vis_embed_spatial, _, neg_vis_embed_temporal, _ = self.encode_vis(neg_vis, device, is_vid=is_vid)
+
+        # now get the mixed vis data
+        vis_embed_spatial_mixed = []
+        vis_embed_temporal_mixed = []
+
+        for i, (pos_spatial, pos_temporal, neg_spatial, neg_temporal) in enumerate(
+                zip(vis_embed_spatial, vis_embed_temporal, neg_vis_embed_spatial, neg_vis_embed_temporal)):
+            if stm_labels[i] == 1:
+                vis_embed_spatial_mixed.append(pos_spatial)
+                vis_embed_temporal_mixed.append(pos_temporal)
+            else:
+                # 50% negative spatial / 50% negative temporal
+                if torch.rand(1).item() < 0.5:
+                    vis_embed_spatial_mixed.append(pos_spatial)
+                    vis_embed_temporal_mixed.append(neg_temporal)
+                else:
+                    vis_embed_spatial_mixed.append(neg_spatial)
+                    vis_embed_temporal_mixed.append(pos_temporal)
+
+        vis_embed_spatial_mixed = torch.stack(vis_embed_spatial_mixed, dim=0)
+        vis_embed_temporal_mixed = torch.stack(vis_embed_temporal_mixed, dim=0)
+
+        cap_ids, cap_mask = self.tokenize_text(cap, device, max_len=self.config.max_cap_len)
+
+        moe_outputs = self.shared_forward(
+            vis_embed_spatial_mixed, vis_spatial_mask, vis_embed_temporal_mixed, vis_temporal_mask, cap_ids, cap_mask, is_vid, device)
+
+        stm_logits = self.stm_head(moe_outputs['cls_feats'])
+        loss_stm = F.cross_entropy(stm_logits, stm_labels)
+        return loss_stm
+
+    def mlm_iteration(self, vis, cap, is_vid, device):
+        if self.config.use_sep_spatial_temp_experts:
+            vis_embed_spatial,
vis_spatial_mask, vis_embed_temporal, vis_temporal_mask = self.encode_vis(vis, device, is_vid=is_vid) + else: + vis_embed, vis_mask = self.encode_vis_with_seq_spa_temp_att(vis, device, is_vid=is_vid) + + cap_ids, cap_mask = self.tokenize_text(cap, device, max_len=self.config.max_cap_len) + cap_ids = cap_ids.tolist() + + # NOTE We make sure to mask some tokens here to avoid nan loss later + mlm_output = self.mlm_collactor(cap_ids) + cap_ids = mlm_output['input_ids'].to(device) + labels_cap = mlm_output['labels'].to(device) + + if self.config.use_sep_spatial_temp_experts: + moe_outputs = self.shared_forward( + vis_embed_spatial, vis_spatial_mask, vis_embed_temporal, vis_temporal_mask, cap_ids, cap_mask, is_vid, device) + else: + moe_outputs = self.shared_forward_no_sep_spatial_temporal_experts( + vis_embed, vis_mask, cap_ids, cap_mask, is_vid, device) + + mlm_logits = self.lm_head(moe_outputs['cap_feats']) + loss_mlm = F.cross_entropy(mlm_logits.view(-1, mlm_logits.size(-1)), labels_cap.view(-1)) + return loss_mlm + + def vcc_iteration(self, vis, cap, is_vid, device): + vis_embed_spatial, vis_spatial_mask, vis_embed_temporal, vis_temporal_mask = self.encode_vis(vis, device, is_vid=is_vid) + cap_ids, cap_mask = self.tokenize_text(cap, device, max_len=self.config.max_cap_len) + + if self.config.use_sep_spatial_temp_experts: + moe_outputs = self.shared_forward( + vis_embed_spatial, vis_spatial_mask, vis_embed_temporal, vis_temporal_mask, cap_ids, cap_mask, is_vid, device) + vis_feats = moe_outputs['spatial_feats'] + if is_vid: + vis_feats = torch.cat([moe_outputs['spatial_feats'], moe_outputs['temporal_feats']], dim=1) + else: + vis_embed, vis_mask = self.encode_vis_with_seq_spa_temp_att(vis, device, is_vid=is_vid) + moe_outputs = self.shared_forward_no_sep_spatial_temporal_experts( + vis_embed, vis_mask, cap_ids, cap_mask, is_vid, device) + vis_feats = moe_outputs['vis_feats'] + + cap_feats = F.normalize(self.cap_proj(moe_outputs['cls_feats']), dim=-1) + vis_feats = F.normalize(self.vision_proj(vis_feats), dim=-1) + + vis_feats_all = concat_all_gather(vis_feats) + cap_feats_all = concat_all_gather(cap_feats) + + sim_v2c = torch.matmul( + vis_feats.unsqueeze(1), cap_feats_all.unsqueeze(-1) + ).squeeze() + + sim_v2c, _ = sim_v2c.max(-1) + sim_v2c = sim_v2c / self.temp + + sim_c2v = torch.matmul( + cap_feats.unsqueeze(1).unsqueeze(1), vis_feats_all.permute(0, 2, 1) + ).squeeze() + + sim_c2v, _ = sim_c2v.max(-1) + sim_c2v = sim_c2v / self.temp + + rank = dist.get_rank() if self.config['distributed'] else 0 + + bs = vis_feats.size(0) + targets = torch.linspace(rank * bs, rank * bs + bs - 1, bs, dtype=int).to( + device + ) + loss_vcc = ( + F.cross_entropy(sim_v2c, targets, label_smoothing=0.1) + + F.cross_entropy(sim_c2v, targets, label_smoothing=0.1) + ) / 2 + return loss_vcc + + def stc_iteration(self, vis, cap, is_vid, device): + vis_embed_spatial, vis_spatial_mask, vis_embed_temporal, vis_temporal_mask = self.encode_vis(vis, device, is_vid=is_vid) + cap_ids, cap_mask = self.tokenize_text(cap, device, max_len=self.config.max_cap_len) + moe_outputs = self.shared_forward( + vis_embed_spatial, vis_spatial_mask, vis_embed_temporal, vis_temporal_mask, cap_ids, cap_mask, is_vid, device) + + spatial_feats = F.normalize(self.spatial_proj(moe_outputs['spatial_feats']), dim=-1) + temporal_feats = F.normalize(self.temp_proj(moe_outputs['temporal_feats']), dim=-1) + + spatial_feats_all = concat_all_gather(spatial_feats) + temporal_feats_all = concat_all_gather(temporal_feats) + + sim_s2t = torch.matmul( + 
spatial_feats.unsqueeze(1), temporal_feats_all.permute(0, 2, 1)
+        )
+
+        sim_s2t, _ = sim_s2t.max(-1)
+        sim_s2t, _ = sim_s2t.max(-1)
+        sim_s2t = sim_s2t / self.temp
+
+        sim_t2s = torch.matmul(
+            temporal_feats.unsqueeze(1), spatial_feats_all.permute(0, 2, 1)
+        )
+
+        sim_t2s, _ = sim_t2s.max(-1)
+        sim_t2s, _ = sim_t2s.max(-1)
+        sim_t2s = sim_t2s / self.temp
+
+        rank = dist.get_rank() if self.config['distributed'] else 0
+        bs = vis.size(0)
+        targets = torch.linspace(rank * bs, rank * bs + bs - 1, bs, dtype=int).to(
+            device
+        )
+        loss_stc = (
+            F.cross_entropy(sim_s2t, targets, label_smoothing=0.1)
+            + F.cross_entropy(sim_t2s, targets, label_smoothing=0.1)
+        ) / 2
+        return loss_stc
+
+
+    def forward(self, vis, cap, neg_vis, media_type):
+        device = vis.device
+        is_vid = media_type == 'webvid'
+        loss_stc = torch.tensor(0).to(device)
+        loss_stm = torch.tensor(0).to(device)
+        loss_vcc = torch.tensor(0).to(device)
+        loss_vcm = torch.tensor(0).to(device)
+        loss_mlm = torch.tensor(0).to(device)
+
+        if self.config.loss_dict['vcm'] != 0:
+            loss_vcm = self.vcm_iteration(vis, cap, neg_vis, is_vid, device)
+
+        if self.config.loss_dict['vcc'] != 0:
+            loss_vcc = self.vcc_iteration(vis, cap, is_vid, device)
+
+        if self.config.loss_dict['stm'] != 0 and is_vid:
+            loss_stm = self.stm_iteration(vis, cap, neg_vis, is_vid, device)
+
+        if self.config.loss_dict['stc'] != 0 and is_vid:
+            loss_stc = self.stc_iteration(vis, cap, is_vid, device)
+
+        if self.config.loss_dict['mlm'] != 0:
+            loss_mlm = self.mlm_iteration(vis, cap, is_vid, device)
+
+        return dict(
+            loss_stc = loss_stc * self.config.loss_dict['stc'],
+            loss_stm = loss_stm * self.config.loss_dict['stm'],
+            loss_vcc = loss_vcc * self.config.loss_dict['vcc'],
+            loss_vcm = loss_vcm * self.config.loss_dict['vcm'],
+            loss_mlm = loss_mlm * self.config.loss_dict['mlm'],
+        )
+
+    def forward__(self, vis, cap, neg_vis, media_type):
+
+        device = vis.device
+        self.vcm_matching(vis, cap, neg_vis, media_type, device)
+        self.shared_forward(vis, cap, media_type, device)
+
+
+        # First init all losses to zeros
+        loss_stc = torch.tensor(0).to(device)
+        loss_stm = torch.tensor(0).to(device)
+        loss_vcc = torch.tensor(0).to(device)
+        loss_vcm = torch.tensor(0).to(device)
+        loss_mlm = torch.tensor(0).to(device)
+
+        batch_size = len(cap)
+        # First get the visual features depending on the media type
+        vis_embed = self.encode_vis(vis)
+        neg_vis_embed = self.encode_vis(neg_vis)
+
+        embed_dim = vis_embed.size(-1)
+        num_frames = vis.size(1)
+        # reshape the video features
+        vis_embed = vis_embed.view(batch_size, num_frames, -1, embed_dim)
+        neg_vis_embed = neg_vis_embed.view(batch_size, num_frames, -1, embed_dim)
+
+        # Perform spatial temporal attention and reshape
+        vis_embed_spatial = self.spatial_att(vis_embed)
+        # vis_embed_spatial = vis_embed_spatial.view(batch_size, -1, embed_dim)
+
+        neg_vis_embed_spatial = self.spatial_att(neg_vis_embed)
+        # neg_vis_embed_spatial = neg_vis_embed_spatial.view(batch_size, -1, embed_dim)
+
+        if media_type == 'webvid':
+            vis_embed_temporal = self.temporal_att(vis_embed)
+            # vis_embed_temporal = vis_embed_temporal.view(batch_size, -1, embed_dim)
+
+            neg_vis_embed_temporal = self.temporal_att(neg_vis_embed)
+            # neg_vis_embed_temporal = neg_vis_embed_temporal.view(batch_size, -1, embed_dim)
+
+        spatial_feat_len = vis_embed_spatial.size(1)
+
+        # construct the global input tensor --> use place holder for vis features
+        cap_ids, cap_attention_mask = self.tokenize_text(cap, device, max_len=self.config.max_cap_len)
+        input_ids, input_mask, special_toks_indices =
self.construct_global_input(cap_ids, cap_attention_mask, spatial_feat_len, media_type, device) + + input_embeds = self.embed(input_ids) + + if media_type == 'webvid': + input_embeds[:, special_toks_indices[''] + 1: special_toks_indices[''], :] = vis_embed_spatial + input_embeds[:, special_toks_indices[''] + 1: special_toks_indices[''], :] = vis_embed_temporal + + elif media_type == 'cc3m': + input_embeds[:, special_toks_indices[''] + 1: special_toks_indices[''], :] = vis_embed_spatial + + # LLM --> MoEs + input_embeds = self.moe_llm_bottleneck(input_embeds) + input_embeds_orig = input_embeds.clone() + + neg_vis_embed_spatial = self.moe_llm_bottleneck(neg_vis_embed_spatial) + + if media_type == 'webvid': + neg_vis_embed_temporal = self.moe_llm_bottleneck(neg_vis_embed_temporal) + + for moe_layer_idx, moe_layer in enumerate(self.moe_layers): + if moe_layer_idx < self.config.num_moe_modality_layers: + expert_flag = 'modalities' + else: + expert_flag = 'fusion' + + input_embeds = moe_layer(input_embeds, special_toks_indices, expert_flag, mask=input_mask) + + #TODO normalize the output () !!!!!! + + #-------------------- Contrastive losses --------------------# + cap_proj_feats = F.normalize(self.cap_proj(input_embeds[:, special_toks_indices[''], :]), dim=-1) # (bs*gpus, H) + vis_proj_feats = F.normalize(self.vision_proj(input_embeds[:, special_toks_indices[''], :]), dim=-1) # (bs*gpus, H) + if media_type == 'webvid': + spatial_proj_feats = F.normalize(self.spatial_proj(input_embeds[:, special_toks_indices[''], :]), dim=-1) # (bs*gpus, H) + temp_proj_feats = F.normalize(self.temp_proj(input_embeds[:, special_toks_indices[''], :]), dim=-1) # (bs*gpus, H) + + if self.config.loss_dict['vcc'] != 0: + vis_proj_feats_all = concat_all_gather(vis_proj_feats) # (bs*gpus, H) + cap_proj_feats_all = concat_all_gather(cap_proj_feats) # (bs*gpus, H) + + loss_vcc, _, _ = self.compute_contrastive_loss(vis_proj_feats, cap_proj_feats_all, cap_proj_feats, vis_proj_feats_all) + + # 1- Spatial-Temporal + if media_type == 'webvid': + if self.config.loss_dict['stc'] != 0: + spatial_proj_feats_all = concat_all_gather(spatial_proj_feats) # (bs*gpus, H) + temp_proj_feats_all = concat_all_gather(temp_proj_feats) # (bs*gpus, H) + loss_stc, _, _ = self.compute_contrastive_loss(temp_proj_feats, spatial_proj_feats_all, spatial_proj_feats, temp_proj_feats_all) + + + #-------------------- Matching losses --------------------# + if self.config.loss_dict['vcm'] != 0: + # Negative caption with positive visual + neg_cap_ids, neg_cap_attention_mask, = self.tokenize_text(neg_cap, device, max_len=self.config.max_cap_len) + neg_cap_embed = self.moe_llm_bottleneck(self.embed(neg_cap_ids)) + input_embeds_neg_cap = input_embeds_orig.clone().detach() + input_embeds_neg_cap[:, special_toks_indices[''] + 1:special_toks_indices['']] = neg_cap_embed + input_mask_neg_cap = input_mask.clone().detach() + input_mask_neg_cap[:, special_toks_indices[''] + 1:special_toks_indices['']] = neg_cap_attention_mask + + # Negative visual with positive caption + input_embeds_neg_vis = input_embeds_orig.clone().detach() + input_mask_neg_vis = input_mask.clone().detach() + + # neg_vis_embed = self.encode_vis(neg_vis) + + # # reshape video features + # neg_vis_embed = neg_vis_embed.reshape(batch_size, num_frames, -1, embed_dim) + + # # Perfrom spatial temporal attention and reshape + # neg_vis_embed_spatial = self.spatial_att(neg_vis_embed) + # neg_vis_embed_spatial = neg_vis_embed_spatial.reshape(batch_size, -1, embed_dim) + if media_type == 'webvid': + # 
neg_vis_embed_temporal = self.temporal_att(neg_vis_embed) + # neg_vis_embed_temporal = neg_vis_embed_temporal.reshape(batch_size, -1, embed_dim) + + input_embeds_neg_vis[:, special_toks_indices[''] + 1: special_toks_indices[''], :] = neg_vis_embed_spatial + input_embeds_neg_vis[:, special_toks_indices[''] + 1: special_toks_indices[''], :] = neg_vis_embed_temporal + + elif media_type == 'cc3m': + # neg_vis_embed_spatial = self.moe_llm_bottleneck(neg_vis_embed_spatial) + input_embeds_neg_vis[:, special_toks_indices[''] + 1: special_toks_indices[''], :] = neg_vis_embed_spatial + + # Construct the input of VCM + final_input_embeds_vcm = torch.cat([input_embeds_orig, input_embeds_neg_cap, input_embeds_neg_vis], dim=0) + final_input_mask_vcm = torch.cat([input_mask, input_mask_neg_cap, input_mask_neg_vis], dim=0) + + for moe_layer_idx, moe_layer in enumerate(self.moe_layers): + if moe_layer_idx < self.config.num_moe_modality_layers: + expert_flag = 'modalities' + else: + expert_flag = 'fusion' + final_input_embeds_vcm = moe_layer(final_input_embeds_vcm, special_toks_indices, expert_flag, mask=final_input_mask_vcm) + + pooled_caption = self.caption_pooler(final_input_embeds_vcm, special_toks_indices['']) + pooled_vis = self.vis_pooler(final_input_embeds_vcm, special_toks_indices['']) + + vcm_feats = torch.mul(pooled_caption, pooled_vis) + vcm_logits = self.vcm_head(vcm_feats) + vcm_labels = torch.cat( + [torch.ones(batch_size, dtype=torch.long), torch.zeros(2 * batch_size, dtype=torch.long)], + dim=0, + ).to(device) + + # random permutation of the logits and labels --> make the task not trivial to learn + # perm_idx = torch.randperm(vcm_logits.size(0), device=device) + # perm_idx_extended = perm_idx.unsqueeze(-1).repeat(1, vcm_logits.size(-1)) + + # # Shuffle + # vcm_logits = vcm_logits.scatter(0, perm_idx_extended, vcm_logits) + # vcm_labels = vcm_labels.scatter(0, perm_idx, vcm_labels) + + # class_weight = torch.FloatTensor([1.0, 1.0/3]).to(device) + + loss_vcm = F.cross_entropy(vcm_logits, vcm_labels) # , weight=class_weight) + + if media_type == 'webvid': + if self.config.loss_dict['stm'] != 0: + # Negative spatial with positive temporal + input_embeds_neg_spatial = input_embeds_orig.clone().detach() + input_mask_neg_spatial = input_mask.clone().detach() + input_embeds_neg_spatial[:, special_toks_indices[''] + 1: special_toks_indices[''], :] = neg_vis_embed_spatial + + # Positive spatial with negative temporal + input_embeds_neg_temporal = input_embeds_orig.clone().detach() + input_mask_neg_temporal = input_mask.clone().detach() + input_embeds_neg_temporal[:, special_toks_indices[''] + 1: special_toks_indices[''], :] = neg_vis_embed_temporal + + # Construct the input of STM + final_input_embeds_stm = torch.cat([input_embeds_orig, input_embeds_neg_spatial, input_embeds_neg_temporal], dim=0) + final_input_mask_stm = torch.cat([input_mask, input_mask_neg_spatial, input_mask_neg_temporal], dim=0) + + for moe_layer_idx, moe_layer in enumerate(self.moe_layers): + if moe_layer_idx < self.config.num_moe_modality_layers: + expert_flag = 'modalities' + else: + expert_flag = 'fusion' + final_input_embeds_stm = moe_layer(final_input_embeds_stm, special_toks_indices, expert_flag, mask=final_input_mask_stm) + + pooled_spatial = self.spatial_pooler(final_input_embeds_stm, special_toks_indices['']) + pooled_temporal = self.temporal_pooler(final_input_embeds_stm, special_toks_indices['']) + + stm_feats = torch.mul(pooled_spatial, pooled_temporal) + stm_logits = self.stm_head(stm_feats) + stm_labels = 
torch.cat( + [torch.ones(batch_size, dtype=torch.long), torch.zeros(2 * batch_size, dtype=torch.long)], + dim=0, + ).to(device) + + # random permutation of the logits and labels --> make the task not trivial to learn + # perm_idx = torch.randperm(stm_logits.size(0), device=device) + # perm_idx_extended = perm_idx.unsqueeze(-1).repeat(1, stm_logits.size(-1)) + + # # Shuffle + # stm_logits = stm_logits.scatter(0, perm_idx_extended, stm_logits) + # stm_labels = stm_labels.scatter(0, perm_idx, stm_labels) + + # class_weight = torch.FloatTensor([1.0, 1.0/3]).to(device) + loss_stm = F.cross_entropy(stm_logits, stm_labels) # , weight=class_weight) + + if self.config.loss_dict['mlm'] != 0: + masked_cap_ids, labels = self.mlm(cap_ids.clone()) + masked_cap_embeds = self.moe_llm_bottleneck(self.embed(masked_cap_ids)) + # inject the masked embeddings instead of the original ones + # input_embeds_mlm[:, special_toks_indices['']+1 : special_toks_indices[''], :] = masked_cap_embeds + + for moe_layer_idx, moe_layer in enumerate(self.moe_layers): + if moe_layer_idx < self.config.num_moe_modality_layers: + expert_flag = 'modalities' + else: + expert_flag = 'fusion' + masked_cap_embeds = moe_layer(masked_cap_embeds, special_toks_indices, expert_flag, mask=cap_attention_mask, only_text=True) + + # extract the caption last hidden states + # masked_cap_embeds_last = input_embeds_mlm[:, special_toks_indices['']+1 : special_toks_indices[''], :] + lm_logits = self.lm_head(masked_cap_embeds) + loss_mlm = F.cross_entropy( + lm_logits.view(-1, len(self.tokenizer)), + labels.view(-1), + ignore_index=self.mlm.padding_token + ) + + return dict( + loss_stc = loss_stc * self.config.loss_dict['stc'], + loss_stm = loss_stm * self.config.loss_dict['stm'], + loss_vcc = loss_vcc * self.config.loss_dict['vcc'], + loss_vcm = loss_vcm * self.config.loss_dict['vcm'], + loss_mlm = loss_mlm * self.config.loss_dict['mlm'], + ) + + + def get_vis_enc_for_eval(self, vis, media_type): + # First get the visual features depending on the media type + vis_spatial_embed, vis_temporal_embed = self.encode_vis(vis, media_type) + + # Expand the query tokens + spatial_query_embeds = self.spatial_query_embeds.expand(vis_spatial_embed.size(0), -1, -1) + + # Run the spatial expert + spatial_query_embeds, pooled_spatial_query_embeds = self.encode_queries( + spatial_query_embeds, vis_spatial_embed, vis_mode='spatial') + + temporal_query_embeds = self.spatial_query_embeds.expand(vis_temporal_embed.size(0), -1, -1) + temporal_query_embeds, pooled_temporal_query_embeds = self.encode_queries( + temporal_query_embeds, vis_temporal_embed, vis_mode='temporal') + + vis_pooled = torch.cat((pooled_spatial_query_embeds, pooled_temporal_query_embeds), dim=1) + vis_embeds = torch.cat((spatial_query_embeds, temporal_query_embeds), dim=1) + + return vis_embeds, vis_pooled + + def get_expert_encoder(self, expert): + """get text encoder, used for text and cross-modal encoding""" + encoder = None + if expert == 'cap': + encoder = self.cap_expert + if expert == 'spatial': + encoder = self.spatial_expert + if expert == 'temporal': + encoder = self.temporal_expert + if expert == 'sap_att_grounding': + encoder = self.spa_temp_grounding_expert + if expert == 'vis_cap_grounding': + encoder = self.vis_cap_grounding_expert + assert encoder is not None + return encoder.bert if hasattr(encoder, "bert") else encoder + + + +class V2Dial(V2DialAbstract): + def __init__(self, config): + super(V2Dial, self).__init__() + self.config = config + + ################## 1. 
Select the tokenizer -- we use the BERT tokenizer ##################
+        bert_config = BertConfig.from_pretrained('bert-{}-uncased'.format(config.expert_size))
+        tokenizer = AutoTokenizer.from_pretrained('bert-{}-uncased'.format(config.expert_size))
+
+        text_embedding = BertEmbeddings(bert_config)
+        text_embedding.apply(self.init_weights)
+
+        token_type_embedding = nn.Embedding(3, bert_config.hidden_size)  # 3 modality type ids: spatial (0), temporal (1), text, i.e. cap/hist-ques-ans (2)
+        token_type_embedding.apply(self.init_weights)
+
+        ################## 1. Select the LLM backbone ##################
+        if config.llm_family == 'llama':
+            logging.info('[INFO] LLM: LLAMA v2')
+            llm_model = LlamaForCausalLM
+
+        elif config.llm_family == 'mistral':
+            logging.info('[INFO] LLM: Mistral')
+            llm_model = MistralForCausalLM
+
+        elif config.llm_family == 'flan_t5':
+            logging.info('[INFO] LLM: Flan T5')
+            llm_model = T5ForConditionalGeneration
+
+        elif config.llm_family == 'bart':
+            logging.info('[INFO] LLM: BART')
+            llm_model = BartForConditionalGeneration
+        else:
+            raise ValueError('Unknown llm_family: {}'.format(config.llm_family))
+
+        llm_tokenizer = AutoTokenizer.from_pretrained(
+            config.llm_name,
+            use_fast=False,
+            token='your_token'
+        )
+        # set the padding token to the eos token for llama
+        if config.llm_family == 'llama':
+            llm_tokenizer.pad_token = llm_tokenizer.eos_token
+
+        #________________________________ LLM Quantization ________________________________#
+        if config.llm_family in ['mistral', 'llama']:
+            dtype = None
+            quantization_config = BitsAndBytesConfig(
+                load_in_4bit=True,
+                bnb_4bit_quant_type='nf4',
+                bnb_4bit_use_double_quant=True,
+                bnb_4bit_compute_dtype=torch.bfloat16
+            )
+        else:
+            if config.fp16:
+                dtype = torch.float16
+                if config.llm_family == 'flan_t5':
+                    dtype = torch.bfloat16
+            else:
+                dtype = torch.float32
+            quantization_config = None
+
+        # llm_model.generate()
+        llm = llm_model.from_pretrained(
+            config.llm_name,
+            token='your_token',
+            torch_dtype=dtype,
+            quantization_config=quantization_config
+        )
+
+        if config.llm_family == 'llama':
+            llm_embed = llm.model.embed_tokens
+        elif config.llm_family == 'flan_t5':
+            llm_embed = llm.shared
+        elif config.llm_family == 'mistral':
+            llm_embed = llm.model.embed_tokens
+        elif config.llm_family == 'bart':
+            llm_embed = llm.model.shared
+        else:
+            raise ValueError('Unknown llm_family: {}'.format(config.llm_family))
+
+        # llm.resize_token_embeddings(len(self.tokenizer))
+        if quantization_config is not None:
+            # Gradient checkpointing is not compatible with DDP!!
+            llm = prepare_model_for_kbit_training(llm, use_gradient_checkpointing=True)
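For readers less familiar with this quantize-then-adapt recipe, the sketch below shows the same wiring in isolation: load a causal LM in 4-bit NF4, prepare it for k-bit training, then attach LoRA adapters to the attention projections. This is illustrative only; the checkpoint name and LoRA hyperparameters are placeholders, not values taken from this patch.

    # Minimal sketch, not part of the patch; model id and LoRA settings are placeholders.
    import torch
    from transformers import AutoModelForCausalLM, BitsAndBytesConfig
    from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True,                      # store weights in 4 bit
        bnb_4bit_quant_type='nf4',              # NormalFloat4 quantization
        bnb_4bit_use_double_quant=True,         # also quantize the quantization constants
        bnb_4bit_compute_dtype=torch.bfloat16,  # dequantize to bf16 for the matmuls
    )

    model = AutoModelForCausalLM.from_pretrained(
        'meta-llama/Llama-2-7b-hf',             # placeholder checkpoint
        quantization_config=bnb_config,
    )

    # Cast norms/embeddings to fp32 and enable input grads for k-bit training.
    model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=True)

    # Train only low-rank adapters on the query/value projections.
    lora = LoraConfig(r=16, lora_alpha=32, lora_dropout=0.05,
                      target_modules=['q_proj', 'v_proj'], task_type='CAUSAL_LM')
    model = get_peft_model(model, lora)
    model.print_trainable_parameters()

Double quantization saves roughly a further 0.4 bits per parameter, which is why it is commonly enabled together with NF4 when memory is the bottleneck.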
+
+        if config.freeze_llm:
+            for _, param in llm.named_parameters():
+                param.requires_grad = False
+            logging.info('[INFO] LLM frozen')
+        else:
+            if config.use_lora_llm:
+                # load the lora config
+                with open(config.lora_config, 'r') as f:
+                    lora_config = json.load(f)
+
+                if config.llm_family in ['llama', 'mistral']:
+                    lora_config['target_modules'] = ['q_proj', 'v_proj']
+
+                elif config.llm_family in ['flan_t5']:
+                    lora_config['target_modules'] = ['q', 'v']
+
+                lora_config = LoraConfig(**lora_config)
+                llm = get_peft_model(llm, lora_config)
+
+                logging.info('[INFO] LLM trainable with LoRA adapters')
+            else:
+                logging.info('[INFO] LLM fully trainable')
+
+        logging.info('[INFO] LLM successfully loaded')
+
+        # the input embeddings are always trained in full precision
+        for _, param in llm_embed.named_parameters():
+            param.data = param.data.float()
+            param.requires_grad = True
+
+        llm_to_moe = nn.Linear(llm.config.hidden_size, bert_config.hidden_size)
+        llm_to_moe.apply(self.init_weights)
+
+        moe_to_llm = nn.Linear(bert_config.hidden_size, llm.config.hidden_size)
+        moe_to_llm.apply(self.init_weights)
+
+        ################## 2. Select the backbone ViT ##################
+        logging.info('[INFO] Loading ViT in progress')
+        if config.freeze_vit:
+            # vit_precision = 'fp16' if config.fp16 else 'fp32'
+            logging.info(f'[INFO] ViT precision: {config.vit_precision}')
+            visual_encoder, ln_vision = self.init_vision_encoder(
+                config.vit_model, config.image_res, drop_path_rate=0, use_grad_checkpoint=False, precision=config.vit_precision
+            )
+            for name, param in visual_encoder.named_parameters():
+                param.requires_grad = False
+            visual_encoder = visual_encoder.eval()
+            visual_encoder.train = disabled_train
+            for name, param in ln_vision.named_parameters():
+                param.requires_grad = False
+            ln_vision = ln_vision.eval()
+            ln_vision.train = disabled_train
+            logging.info('[INFO] ViT frozen')
+
+        else:
+            vit_precision = 'fp32'
+            visual_encoder, ln_vision = self.init_vision_encoder(
+                config.vit_model, config.image_res, drop_path_rate=0, use_grad_checkpoint=False, vit_precision=vit_precision
+            )
+            logging.info('[INFO] ViT trainable')
+        logging.info('[INFO] ViT successfully loaded')
+
+        ################## 3. Define the ViT-Expert communication interface ##################
+        self.system_prompt = False
+        self.vit_token_pooling = config.vit_token_pooling
+        if self.vit_token_pooling:
+            vit_proj = nn.Linear(
+                1408*4, bert_config.hidden_size  # 4 concatenated ViT tokens of width 1408
+            )
+        else:
+            vit_proj = nn.Linear(
+                1408, bert_config.hidden_size
+            )
+        vit_proj.apply(self.init_weights)
+
+        spatial_att = SpatialAttention(input_dim=bert_config.hidden_size)
+        temporal_att = TemporalAttention(input_dim=bert_config.hidden_size)
+
+        spatial_att.apply(self.init_weights)
+        temporal_att.apply(self.init_weights)
+
+        ################## 4.
Define the Expert layers ################## + moe_layers = None + moe_norm = None + if config.use_moes: + moe_layers = [] + + for moe_layer_idx in range(config.num_moe_layers): + if moe_layer_idx < self.config.num_moe_modality_layers: + expert_flag = 'modalities' + else: + expert_flag = 'fusion' + moe_layer = MoELayer( + bert_config.hidden_size, + bert_config.num_attention_heads, + expert_flag, + has_hist=True, + use_sep_spatial_temp_experts=config.use_sep_spatial_temp_experts + ) + + moe_layer.apply(self.init_weights) + moe_layers.append(moe_layer) + + logging.info(f'[INFO] {moe_layer_idx+1}/{config.num_moe_layers} MoE layers successfully loaded') + + moe_layers = nn.ModuleList(moe_layers) + moe_norm = nn.LayerNorm(bert_config.hidden_size) + + ################## 5. Define the projection layers for contrastive learning ################## + # temp_proj = nn.Linear(bert_config.hidden_size, config.joint_dim) + # spatial_proj = nn.Linear(bert_config.hidden_size, config.joint_dim) + # vision_proj = nn.Linear(bert_config.hidden_size, config.joint_dim) + # cap_proj = nn.Linear(bert_config.hidden_size, config.joint_dim) + + # temp_proj.apply(self.init_weights) + # spatial_proj.apply(self.init_weights) + # vision_proj.apply(self.init_weights) + # cap_proj.apply(self.init_weights) + + ################## 6. Define the pooler for matching loss ################## + # pooler = Pooler(bert_config.hidden_size) + # pooler.apply(self.init_weights) + + ################## 5. Attach the matching heads ################## + # stm_head = nn.Linear(bert_config.hidden_size, 2) + # vcm_head = nn.Linear(bert_config.hidden_size, 2) + # lm_head = nn.Linear(bert_config.hidden_size, len(tokenizer)) + + # stm_head.apply(self.init_weights) + # vcm_head.apply(self.init_weights) + # lm_head.apply(self.init_weights) + + temp = nn.Parameter(0.07 * torch.ones([])) + # temp = 0.07 + + # Attach the components to self + if self.config.embed_from_llm: + self.tokenizer = llm_tokenizer + self.text_embedding = llm_embed + else: + self.tokenizer = tokenizer + self.text_embedding = text_embedding + self.token_type_embedding = token_type_embedding + + self.llm = llm + self.llm_to_moe = llm_to_moe + self.moe_to_llm = moe_to_llm + self.visual_encoder = visual_encoder + self.ln_vision = ln_vision + self.vit_proj = vit_proj + self.moe_layers = moe_layers + self.moe_norm = moe_norm + self.spatial_att = spatial_att + self.temporal_att = temporal_att + # self.temp_proj = temp_proj + # self.spatial_proj = spatial_proj + # self.vision_proj = vision_proj + # self.cap_proj = cap_proj + # self.pooler = pooler + # self.stm_head = stm_head + # self.vcm_head = vcm_head + # self.lm_head = lm_head + self.temp = temp + + def construct_global_input(self, cap_ids, cap_attention_mask, hist_ids, hist_attention_mask, vid_feat_len, device): + # for video: [spatial_feats][temp_feats][cap_feats][hist_feats] + + batch_size = cap_ids.size(0) + special_toks_indices = { + '': 0, + '': 1, + '': 2, + } + + ids = [self.added_vocab['']] + [self.added_vocab['']] + [self.added_vocab['']] + ids += vid_feat_len * [self.added_vocab['']] + + ids += [self.added_vocab['']] + special_toks_indices[''] = len(ids) - 1 + ids += vid_feat_len * [self.added_vocab['']] + + ids += [self.added_vocab['']] + special_toks_indices[''] = len(ids) - 1 + ids += cap_ids.size(1) * [self.added_vocab['']] + + ids += [self.added_vocab['']] + special_toks_indices[''] = len(ids) - 1 + ids += hist_ids.size(1) * [self.added_vocab['']] + + ids += [self.added_vocab['']] + special_toks_indices[''] = len(ids) 
- 1 + total_len = len(ids) + + ids = torch.tensor(ids, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1) + + ids[:, special_toks_indices[''] + 1: special_toks_indices['']] = cap_ids + ids[:, special_toks_indices[''] + 1: special_toks_indices['']] = hist_ids + + + mask = torch.ones((batch_size, total_len), device=device) + mask[:, special_toks_indices[''] + 1: special_toks_indices['']] = cap_attention_mask + mask[:, special_toks_indices[''] + 1: special_toks_indices['']] = hist_attention_mask + + return ids, mask, special_toks_indices + + def construct_reg_labels(self, regress_ids, start_regress_idx, full_embeds, device): + + full_labels = torch.LongTensor(full_embeds.size(0), full_embeds.size(1)).fill_(-100).to(device) + + for i in range(regress_ids.size(0)): + + full_labels[i, start_regress_idx[i]: start_regress_idx[i] + regress_ids[i].size(-1)] = regress_ids[i] + # Add to the labels -- just before the response starts + full_labels[i, start_regress_idx[i] - 1] = self.tokenizer.eos_token_id + + # labels = regress_ids.masked_fill( + # regress_ids == self.tokenizer.pad_token_id, -100 + # ).to(device) + + # eos_from_cond = torch.LongTensor(labels.size(0), 1).fill_(self.tokenizer.eos_token_id).to(device) + # labels = torch.concat([eos_from_cond, labels], dim=1) + + # full_labels = torch.LongTensor(labels.size(0), full_len).fill_(-100).to(device) + + # full_labels[:, len_cond-1:] = labels + + return full_labels + + def rearrange_llm_input_decoder_only(self, input_embeds, output_emebds, input_mask, cap_mask, hist_mask, output_mask, spatial_feat_len): + ''' + Push all pads to the right + ''' + # full_embeds = [...][...][...][pad][...][pad][ans ...][pad] + # ------------> [...][...][...][...][ans ...][-----pad-----] + + init_len = input_embeds.size(1) + output_emebds.size(1) + + # First, we compute the initial offset of the visual features + offset = 3 + spatial_feat_len + 1 + spatial_feat_len # --> input_embeds[offset] = h_ + + offset_embeds = input_embeds[:, :offset, :] + offset_mask = input_mask[:, :offset] + + rest_input_embdes = input_embeds[:, offset:, :] + rest_input_mask = input_mask[:, offset:] + + start_output_idx = [] + full_embeds = [] + full_masks = [] + + for i in range(input_embeds.size(0)): + output_emebd_i = output_emebds[i] + output_mask_i = output_mask[i] + + cap_mask_i = cap_mask[i] + len_cap_i = cap_mask_i.sum() + end_cap_i = len_cap_i + 1 # +1 for the token + + cap_embdes_i_to_keep = rest_input_embdes[i, :end_cap_i, :] + cap_mask_i_to_keep = rest_input_mask[i, :end_cap_i,] + cap_embeds_i_to_push = rest_input_embdes[i, end_cap_i:cap_mask_i.size(-1) + 1, :] # +1 for the token + cap_mask_i_to_push = rest_input_mask[i, end_cap_i: cap_mask_i.size(-1) + 1] # +1 for the token + + hist_mask_i = hist_mask[i] + len_hist_i = hist_mask_i.sum() + start_hist_i = cap_mask_i.size(-1) + 1 + end_hist_i = start_hist_i + len_hist_i + 1 # +1 for token + + # fianl token to keep is which is the last in input_embdes/rest_input_embdes + final_tok_embedding_i = rest_input_embdes[i, -1, :].unsqueeze(0) + final_tok_mask_i = rest_input_mask[i, -1].unsqueeze(0) + + hist_embdes_i_to_keep = rest_input_embdes[i, start_hist_i:end_hist_i, :] + hist_mask_i_to_keep = rest_input_mask[i, start_hist_i:end_hist_i] + + # these two do not consider the last token --> we don't need to extra remove it from them + hist_embdes_i_to_push = rest_input_embdes[i, end_hist_i: cap_mask_i.size(-1) + 1 + hist_mask_i.size(-1) + 1, :] + hist_mask_i_to_push = rest_input_mask[i, end_hist_i: cap_mask_i.size(-1) + 1 + 
hist_mask_i.size(-1) + 1] + + full_embed_i = torch.cat( + [cap_embdes_i_to_keep, hist_embdes_i_to_keep, final_tok_embedding_i, output_emebd_i, cap_embeds_i_to_push, hist_embdes_i_to_push], + dim=0 + ) + + full_mask_i = torch.cat( + [cap_mask_i_to_keep, hist_mask_i_to_keep, final_tok_mask_i, output_mask_i, cap_mask_i_to_push, hist_mask_i_to_push], + dim=0 + ) + + start_output_idx.append(offset + cap_embdes_i_to_keep.size(0) + hist_embdes_i_to_keep.size(0) + 1 - 1) + + full_embeds.append(full_embed_i) + full_masks.append(full_mask_i) + + # Now stack to get the batch + full_embeds = torch.stack(full_embeds, dim=0) + full_masks = torch.stack(full_masks, dim=0) + + # Add the offset visual features + full_embeds = torch.cat([offset_embeds, full_embeds], dim=1) + full_masks = torch.cat([offset_mask, full_masks], dim=1) + + final_len = full_embeds.size(1) + + # Sanity check + assert init_len == final_len, 'The reconstructed embeds have length ({}) which is not the same as the length of initial embeds ({})'.format( + final_len, init_len + ) + + return full_embeds, full_masks, start_output_idx + + def pad_to_right_enc_dec(self, cap_embeds, cap_masks, hist_embeds, hist_masks, device): + """ + pushes all in-between pad tokens to the right + """ + res_embeds = [] + res_mask = [] + for cap_embed, cap_mask, hist_embed, hist_mask in zip(cap_embeds, cap_masks, hist_embeds, hist_masks): + len_cap = sum(cap_mask) + len_hist = sum(hist_mask) + + batch_embed = torch.cat([cap_embed[:len_cap], hist_embed[:len_hist], cap_embed[len_cap:], hist_embed[len_hist:]], dim=0) + batch_mask = torch.zeros(batch_embed.size(0)).long().to(device) + batch_mask[:len_cap+len_hist] = 1 + + res_embeds.append(batch_embed) + res_mask.append(batch_mask) + + res_embeds = torch.stack(res_embeds, dim=0) + res_mask = torch.stack(res_mask, dim=0) + + return res_embeds, res_mask + + def pad_to_right_dec_only(self, cap_embeds, cap_masks, hist_embeds, hist_masks, regress_embeds, regress_masks, device): + """ + pushes all in-between pad tokens to the right + """ + res_embeds = [] + res_mask = [] + regress_limits_txt_input = [] + for cap_embed, cap_mask, hist_embed, hist_mask, regress_emebd, regress_mask in zip( + cap_embeds, cap_masks, hist_embeds, hist_masks, regress_embeds, regress_masks): + + len_cap = sum(cap_mask) + len_hist = sum(hist_mask) + len_ans = sum(regress_mask) + regress_limits_txt_input.append((len_cap+len_hist, len_cap+len_hist+len_ans)) + + batch_embed = torch.cat([cap_embed[:len_cap], hist_embed[:len_hist], regress_emebd, cap_embed[len_cap:], hist_embed[len_hist:]], dim=0) + batch_mask = torch.zeros(batch_embed.size(0)).long().to(device) + batch_mask[:len_cap+len_hist+len_ans] = 1 + + res_embeds.append(batch_embed) + res_mask.append(batch_mask) + + res_embeds = torch.stack(res_embeds, dim=0) + res_mask = torch.stack(res_mask, dim=0) + + return res_embeds, res_mask, regress_limits_txt_input + + def pad_to_right_dec_only_gen_mode(self, cap_embeds, cap_masks, hist_embeds, hist_masks, device): + """ + pushes all in-between pad tokens to the right + """ + res_embeds = [] + res_mask = [] + for cap_embed, cap_mask, hist_embed, hist_mask in zip(cap_embeds, cap_masks, hist_embeds, hist_masks): + + len_cap = sum(cap_mask) + len_hist = sum(hist_mask) + + batch_embed = torch.cat([cap_embed[:len_cap], hist_embed[:len_hist], cap_embed[len_cap:], hist_embed[len_hist:]], dim=0) + batch_mask = torch.zeros(batch_embed.size(0)).long().to(device) + batch_mask[:len_cap+len_hist] = 1 + + res_embeds.append(batch_embed) + 
res_mask.append(batch_mask) + + res_embeds = torch.stack(res_embeds, dim=0) + res_mask = torch.stack(res_mask, dim=0) + + return res_embeds, res_mask + + def encode_vis_with_seq_spa_temp_att(self, image, device, is_vid=True): + num_frames = image.size(1) + bs_pre_reshape = image.size(0) + if len(image.shape) > 4: + image = image.view(-1, *image.shape[-3:]) # for video input flatten the batch and time dimension (4,50,3,224,224) -> (200,3,224,224) + # with self.maybe_autocast(): # inherited from Blip2Base + image_embeds = self.ln_vision(self.visual_encoder(image)).to(device) # (200,3,224,224) -> (200,257,1408) + image_embeds = image_embeds[:,1:,:] # remove the first token (CLS) (200,256,1408) + + bs, pn, hs = image_embeds.shape + if self.vit_token_pooling: # concat the each 4 tokens into one token (200,64,5632) + image_embeds = image_embeds.view(bs, int(pn/4), int(hs*4)) # (200,64,5632) + + vis_embed = self.vit_proj(image_embeds) # project to llama input size (200,64,5632) -> (200,64,4096) + + # reshape the video features + vis_embed = vis_embed.view(bs_pre_reshape, num_frames, -1, vis_embed.size(-1)) + size_orig = vis_embed.size() + + # Perfrom spatial temporal attention + vis_embed = self.spatial_att(vis_embed) + if is_vid: + vis_embed = vis_embed.view(size_orig) + vis_embed = self.temporal_att(vis_embed) + + vis_feat_len = vis_embed.size(1) + + # vis_embed = vis_embed + self.token_type_embedding(torch.zeros(bs_pre_reshape, vis_feat_len).long().to(device)) + vis_mask = torch.ones((bs_pre_reshape, vis_feat_len)).to(device) + + return vis_embed, vis_mask + + def moe_forward_no_sep_spatial_temporal( + self, + vis, vis_mask, + cap_ids, cap_mask, hist_ids, hist_mask, + is_vid, device): + + # is_vid = media_type == 'webvid' + # batch_size = len(cap) + vis_feat_len = vis.size(1) + input_embeds = [] + input_masks = [] + + input_embeds.append(vis) + input_masks.append(vis_mask) + + # if is_vid: + # input_embeds.append(vis_temporal) + # input_masks.append(vis_temporal_mask) + + if self.config.embed_from_llm: + cap_embeds = self.llm_to_moe(self.text_embedding(cap_ids)) + else: + cap_embeds = self.text_embedding(cap_ids) + self.token_type_embedding(torch.ones_like(cap_ids).long().fill_(2)) + + cap_feat_len = cap_embeds.size(1) + + input_embeds.append(cap_embeds) + input_masks.append(cap_mask) + + if self.config.embed_from_llm: + hist_embeds = self.llm_to_moe(self.text_embedding(hist_ids)) + else: + hist_embeds = self.text_embedding(hist_ids) + self.token_type_embedding(torch.ones_like(hist_ids).long().fill_(2)) + + hist_feat_len = hist_embeds.size(1) + + input_embeds.append(hist_embeds) + input_masks.append(hist_mask) + + input_embeds = torch.cat(input_embeds, dim=1) + input_masks = torch.cat(input_masks, dim=1) + + # expand the mask + input_masks = self.get_extended_attention_mask(attention_mask=input_masks) + + # MoEs feed-forward + for moe_layer_idx, moe_layer in enumerate(self.moe_layers): + if moe_layer_idx < self.config.num_moe_modality_layers: + expert_flag = 'modalities' + else: + expert_flag = 'fusion' + + input_embeds = moe_layer(input_embeds, vis_feat_len, cap_feat_len, expert_flag, hist_feat_len, is_vid=is_vid, mask=input_masks) + + #TODO normalize the output () !!!!!! 
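To make the `expert_flag` routing in the loop above concrete, here is a minimal, self-contained sketch of the idea: early layers dispatch each segment of the packed [vis | cap | hist] sequence to its own modality expert, while later layers run one shared fusion expert over the whole sequence. The class name, dimensions, and the use of plain linear layers are illustrative assumptions; the real MoE layer (defined elsewhere in this patch) also contains shared attention blocks that are omitted here.

    # Toy sketch of 'modalities' vs. 'fusion' routing; not the actual MoELayer.
    import torch
    import torch.nn as nn

    class ToyMoELayer(nn.Module):
        def __init__(self, d_model: int):
            super().__init__()
            self.experts = nn.ModuleDict({
                'vis': nn.Linear(d_model, d_model),
                'cap': nn.Linear(d_model, d_model),
                'hist': nn.Linear(d_model, d_model),
                'fusion': nn.Linear(d_model, d_model),
            })

        def forward(self, x, vis_len, cap_len, hist_len, expert_flag):
            if expert_flag == 'fusion':
                # one shared expert over the whole packed sequence
                return self.experts['fusion'](x)
            # 'modalities': each segment gets its own expert
            vis, cap, hist = x.split([vis_len, cap_len, hist_len], dim=1)
            return torch.cat([
                self.experts['vis'](vis),
                self.experts['cap'](cap),
                self.experts['hist'](hist),
            ], dim=1)

    layer = ToyMoELayer(d_model=768)
    x = torch.randn(2, 64 + 20 + 30, 768)   # [vis | cap | hist] packed along dim 1
    y = layer(x, vis_len=64, cap_len=20, hist_len=30, expert_flag='modalities')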
+ input_embeds = self.moe_norm(input_embeds) + + # return the features + vis_embeds = input_embeds[:, :vis_feat_len] + # temporal_embeds = input_embeds[:, vis_feat_len:2*vis_feat_len] if is_vid else None + cap_embeds = input_embeds[:, -(cap_feat_len + hist_feat_len): -hist_feat_len] + hist_embeds = input_embeds[:, -hist_feat_len:] + # cls_feats = self.pooler(cap_feats) + + moe_outputs = { + 'vis_embeds': vis_embeds, + # 'temporal_embeds': temporal_embeds, + 'cap_embeds': cap_embeds, + 'hist_embeds': hist_embeds, + # 'cls_feats': cls_feats, + # 'last_hidden': input_embeds + } + + return moe_outputs + + def moe_forward( + self, + vis_spatial, vis_spatial_mask, vis_temporal, vis_temporal_mask, + cap_ids, cap_mask, hist_ids, hist_mask, + is_vid, device): + + # is_vid = media_type == 'webvid' + # batch_size = len(cap) + vis_feat_len = vis_spatial.size(1) + input_embeds = [] + input_masks = [] + + input_embeds.append(vis_spatial) + input_masks.append(vis_spatial_mask) + + if is_vid: + input_embeds.append(vis_temporal) + input_masks.append(vis_temporal_mask) + + if self.config.embed_from_llm: + cap_embeds = self.llm_to_moe(self.text_embedding(cap_ids)) + else: + cap_embeds = self.text_embedding(cap_ids) + self.token_type_embedding(torch.ones_like(cap_ids).long().fill_(2)) + + cap_feat_len = cap_embeds.size(1) + + input_embeds.append(cap_embeds) + input_masks.append(cap_mask) + + if self.config.embed_from_llm: + hist_embeds = self.llm_to_moe(self.text_embedding(hist_ids)) + else: + hist_embeds = self.text_embedding(hist_ids) + self.token_type_embedding(torch.ones_like(hist_ids).long().fill_(2)) + + hist_feat_len = hist_embeds.size(1) + + input_embeds.append(hist_embeds) + input_masks.append(hist_mask) + + input_embeds = torch.cat(input_embeds, dim=1) + input_masks = torch.cat(input_masks, dim=1) + + # expand the mask + input_masks = self.get_extended_attention_mask(attention_mask=input_masks) + + # MoEs feed-forward + for moe_layer_idx, moe_layer in enumerate(self.moe_layers): + if moe_layer_idx < self.config.num_moe_modality_layers: + expert_flag = 'modalities' + else: + expert_flag = 'fusion' + + input_embeds = moe_layer( + input_embeds, vis_feat_len, cap_feat_len, expert_flag, hist_feat_len, + is_vid=is_vid, + mask=input_masks, + expert_permutation=self.config.expert_permutation + ) + + #TODO normalize the output () !!!!!! 
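The `get_extended_attention_mask` call used when assembling `input_masks` above follows the usual BERT-style convention: a binary (batch, seq_len) padding mask is broadcast to (batch, 1, 1, seq_len) and converted into an additive bias, so padded positions receive a large negative value before the attention softmax. A minimal sketch of that conversion, under the assumption that the helper mirrors the standard Hugging Face implementation:

    import torch

    def get_extended_attention_mask(attention_mask: torch.Tensor) -> torch.Tensor:
        """(batch, seq_len) binary mask -> additive (batch, 1, 1, seq_len) mask."""
        extended = attention_mask[:, None, None, :].float()        # broadcast over heads and queries
        return (1.0 - extended) * torch.finfo(torch.float32).min   # 0 where attended, very negative where padded

    mask = torch.tensor([[1, 1, 1, 0, 0]])     # 3 real tokens, 2 pads
    print(get_extended_attention_mask(mask))   # pads get a large negative bias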
+ input_embeds = self.moe_norm(input_embeds) + + # return the features + spatial_embeds = input_embeds[:, :vis_feat_len] + temporal_embeds = input_embeds[:, vis_feat_len:2*vis_feat_len] if is_vid else None + cap_embeds = input_embeds[:, -(cap_feat_len + hist_feat_len): -hist_feat_len] + hist_embeds = input_embeds[:, -hist_feat_len:] + # cls_feats = self.pooler(cap_feats) + + moe_outputs = { + 'spatial_embeds': spatial_embeds, + 'temporal_embeds': temporal_embeds, + 'cap_embeds': cap_embeds, + 'hist_embeds': hist_embeds, + # 'cls_feats': cls_feats, + # 'last_hidden': input_embeds + } + + return moe_outputs + + def forward(self, vis, cap, hist, ans, media_type): + + device = vis.device + is_vid = media_type in ['webvid', 'champagne', 'avsd', 'nextqa'] + loss_stc = torch.tensor(0) + loss_stm = torch.tensor(0) + loss_vhc = torch.tensor(0) + loss_vhm = torch.tensor(0) + loss_gen = torch.tensor(0) + + # construct the global input tensor --> use place holder for vis features + cap_ids, cap_mask = self.tokenize_text(cap, device, max_len=None) + hist_ids, hist_mask = self.tokenize_text(hist, device, max_len=None) + if self.config.use_moes: + # First get the visual features depending on the media type + if self.config.use_sep_spatial_temp_experts: + vis_embed_spatial, vis_spatial_mask, vis_embed_temporal, vis_temporal_mask = self.encode_vis(vis, device, is_vid=is_vid) + spatial_feat_len = vis_embed_spatial.size(1) + + else: + vis_embed, vis_mask = self.encode_vis_with_seq_spa_temp_att(vis, device, is_vid=is_vid) + + + if self.config.use_sep_spatial_temp_experts: + moe_outputs = self.moe_forward( + vis_embed_spatial, vis_spatial_mask, + vis_embed_temporal, vis_temporal_mask, + cap_ids, cap_mask, + hist_ids, hist_mask, + is_vid, device + ) + spatial_embeds = self.moe_to_llm(moe_outputs['spatial_embeds']) + temporal_embeds = self.moe_to_llm(moe_outputs['temporal_embeds']) if is_vid else None + # cap_embeds = self.moe_to_llm(moe_outputs['cap_embeds']) + # hist_embeds = self.moe_to_llm(moe_outputs['hist_embeds']) + + else: + moe_outputs = self.moe_forward_no_sep_spatial_temporal( + vis_embed, vis_mask, + cap_ids, cap_mask, + hist_ids, hist_mask, + is_vid, device + ) + vis_embeds = self.moe_to_llm(moe_outputs['vis_embeds']) + # temporal_embeds = self.moe_to_llm(moe_outputs['temporal_embeds']) if is_vid else None + cap_embeds = self.moe_to_llm(moe_outputs['cap_embeds']) + hist_embeds = self.moe_to_llm(moe_outputs['hist_embeds']) + else: + cap_embeds = self.llm_to_moe(self.text_embedding(cap_ids)) + hist_embeds = self.llm_to_moe(self.text_embedding(hist_ids)) + + vis_embeds, vis_mask = self.encode_vis_with_seq_spa_temp_att(vis, device, is_vid=is_vid) + + ans = [a + self.tokenizer.eos_token for a in ans] + + if self.config.llm_family in ['llama', 'mistral']: + bos = torch.ones_like(cap_ids[:, :1]) * self.tokenizer.bos_token_id + bos_embeds = self.text_embedding(bos) + bos_mask = cap_mask[:, :1] + + # add corresponding eos + + regress_ids, regress_mask = self.tokenize_text(ans, device, max_len=None) # pad the longest + + regress_embeds = self.text_embedding(regress_ids) + + inputs_embeds, attention_mask, regress_limits_txt_input = self.pad_to_right_dec_only(cap_embeds, cap_mask, hist_embeds, hist_mask, regress_embeds, regress_mask, device) + + if is_vid: + inputs_embeds = torch.cat([bos_embeds, spatial_embeds, temporal_embeds, inputs_embeds], dim=1) + attention_mask = torch.cat([bos_mask, vis_spatial_mask, vis_temporal_mask, attention_mask], dim=1) + else: + inputs_embeds = torch.cat([bos_embeds, 
spatial_embeds, inputs_embeds], dim=1) + attention_mask = torch.cat([bos_mask, vis_spatial_mask, attention_mask], dim=1) + + labels = torch.zeros(inputs_embeds.size()[:-1]).fill_(-100).long().to(device) + + for i in range(labels.size(0)): + start_regress = regress_limits_txt_input[i][0] + 1 + spatial_feat_len + spatial_feat_len * int(is_vid) # offset (bos + spatial + temporal) + end_regress = regress_limits_txt_input[i][1] + 1 + spatial_feat_len + spatial_feat_len * int(is_vid) # offset (bos + spatial + temporal) + + labels[i, start_regress:end_regress] = regress_ids[i, :regress_mask[i].sum()] + + + # get causal attention mask + + + # Compute the regression embeds + + # Now we need to right-pad the input to LLM (at least for llama) to avoid nan loss values + # This means, all pad tokens have to be placed to the right + # full_embeds = [...][...][...][pad][...][pad][ans ...][pad] + # ------------> [...][...][...][...][ans ...][-----pad-----] + + # full_embeds, full_masks, start_output_idx = self.rearrange_llm_input_dec_only(cond_embeds, regress_embeds, cond_mask, cap_mask, hist_mask, regress_mask, spatial_feat_len) + + # labels = self.construct_reg_labels(regress_ids, start_output_idx, full_embeds, device) + + lm_outputs = self.llm( + inputs_embeds=inputs_embeds, + attention_mask=attention_mask, + labels=labels, + return_dict=True + ) + loss_gen = lm_outputs.loss + + # Encoder Decoder + else: + inputs_embeds, attention_mask = self.pad_to_right_enc_dec(cap_embeds, cap_mask, hist_embeds, hist_mask, device) + + # now merge the multi-modal inputs + if self.config.use_moes: + if self.config.use_sep_spatial_temp_experts: + if is_vid: + inputs_embeds = torch.cat([spatial_embeds, temporal_embeds, inputs_embeds], dim=1) + attention_mask = torch.cat([vis_spatial_mask, vis_temporal_mask, attention_mask], dim=1) + else: + inputs_embeds = torch.cat([spatial_embeds, inputs_embeds], dim=1) + attention_mask = torch.cat([vis_spatial_mask, attention_mask], dim=1) + else: + inputs_embeds = torch.cat([vis_embeds, inputs_embeds], dim=1) + attention_mask = torch.cat([vis_mask, attention_mask], dim=1) + else: + inputs_embeds = torch.cat([vis_embeds, inputs_embeds], dim=1) + attention_mask = torch.cat([vis_mask, attention_mask], dim=1) + + decoder_ids, decoder_mask = self.tokenize_text(ans, device, max_len=None) # pad the longest + + labels = decoder_ids.masked_fill(decoder_ids == self.tokenizer.pad_token_id, -100) + decoder_ids = self.shift_right(labels) + decoder_inputs_embeds = self.text_embedding(decoder_ids) + + lm_outputs = self.llm( + inputs_embeds=inputs_embeds, + attention_mask=attention_mask, + decoder_inputs_embeds=decoder_inputs_embeds, + decoder_attention_mask=decoder_mask, + labels=labels, + return_dict=True + ) + + loss_gen = lm_outputs.loss + + return dict( + loss_stc = loss_stc * self.config.loss_dict['stc'], + loss_stm = loss_stm * self.config.loss_dict['stm'], + loss_vhc = loss_vhc * self.config.loss_dict['vhc'], + loss_vhm = loss_vhm * self.config.loss_dict['vhm'], + loss_gen = loss_gen * self.config.loss_dict['gen'], + ) + + +class V2DialNoMoes(V2Dial): + def __init__(self, config): + super(V2DialNoMoes, self).__init__(config) + + def encode_vis(self, image, device, is_vid=True): + num_frames = image.size(1) + bs_pre_reshape = image.size(0) + if len(image.shape) > 4: + image = image.view(-1, *image.shape[-3:]) # for video input flatten the batch and time dimension (4,50,3,224,224) -> (200,3,224,224) + # with self.maybe_autocast(): # inherited from Blip2Base + image_embeds = 
self.ln_vision(self.visual_encoder(image)).to(device)  # (200,3,224,224) -> (200,257,1408)
+        image_embeds = image_embeds[:, 1:, :]  # remove the first (CLS) token: (200,256,1408)
+
+        bs, pn, hs = image_embeds.shape
+        if self.vit_token_pooling:  # concatenate each group of 4 tokens into one token
+            image_embeds = image_embeds.view(bs, int(pn/4), int(hs*4))  # (200,64,5632)
+
+        vis_embed = self.vit_proj(image_embeds)  # project to LLM input size (200,64,5632) -> (200,64,d_hidden)
+
+        # reshape the video features
+        vis_embed = vis_embed.view(bs_pre_reshape, num_frames, -1, vis_embed.size(-1))
+
+        # Perform spatial-temporal attention (shape handling mirrors
+        # encode_vis_with_seq_spa_temp_att; the attention modules are defined
+        # elsewhere in this patch)
+        if is_vid:
+            vis_embed = self.temporal_att(vis_embed)
+            if not self.config.embed_from_llm:
+                vis_embed = vis_embed + self.token_type_embedding(
+                    torch.ones(vis_embed.size(0), vis_embed.size(1)).long().to(device))
+            # vis_temporal_mask = torch.ones((bs_pre_reshape, vis_feat_len)).to(device)
+
+        vis_embed = self.spatial_att(vis_embed)
+        vis_feat_len = vis_embed.size(1)
+
+        if not self.config.embed_from_llm:
+            vis_embed = vis_embed + self.token_type_embedding(torch.zeros(bs_pre_reshape, vis_feat_len).long().to(device))
+        vis_mask = torch.ones((bs_pre_reshape, vis_feat_len)).to(device)
+
+        return vis_embed, vis_mask
+
+
+    def forward(self, vis, cap, hist, ans, media_type):
+
+        device = vis.device
+        is_vid = media_type in ['webvid', 'champagne', 'avsd', 'nextqa']
+        loss_stc = torch.tensor(0)
+        loss_stm = torch.tensor(0)
+        loss_vhc = torch.tensor(0)
+        loss_vhm = torch.tensor(0)
+        loss_gen = torch.tensor(0)
+
+        # First get the visual features depending on the media type
+        vis_embed, vis_mask = self.encode_vis(vis, device, is_vid=is_vid)
+
+        # spatial_feat_len = vis_embed_spatial.size(1)
+
+        # Without MoEs, caption and history are fed to the LLM as one flat text sequence
+        text = [c + h for c, h in zip(cap, hist)]
+        # cap_ids, cap_mask = self.tokenize_text(cap, device, max_len=None)
+        # hist_ids, hist_mask = self.tokenize_text(hist, device, max_len=None)
+        text_ids, text_mask = self.tokenize_text(text, device, max_len=None)
+
+        text_embeds = self.text_embedding(text_ids)
+        # moe_outputs = self.moe_forward(
+        #     vis_embed_spatial, vis_spatial_mask,
+        #     vis_embed_temporal, vis_temporal_mask,
+        #     cap_ids, cap_mask,
+        #     hist_ids, hist_mask,
+        #     is_vid, device
+        # )
+        # spatial_embeds = self.moe_to_llm(moe_outputs['spatial_embeds'])
+        # temporal_embeds = self.moe_to_llm(moe_outputs['temporal_embeds']) if is_vid else None
+        # cap_embeds = self.moe_to_llm(moe_outputs['cap_embeds'])
+        # hist_embeds = self.moe_to_llm(moe_outputs['hist_embeds'])
+
+        ans = [a + self.tokenizer.eos_token for a in ans]
+
+        if self.config.llm_family in ['llama', 'mistral']:
+            bos = torch.ones_like(text_ids[:, :1]) * self.tokenizer.bos_token_id
+            bos_embeds = self.text_embedding(bos)
+            bos_mask = text_mask[:, :1]
+
+            # add corresponding eos
+
+            regress_ids, regress_mask = self.tokenize_text(ans, device, max_len=None)  # pad the longest
+
+            regress_embeds = self.text_embedding(regress_ids)
+
+            # NOTE: the rest of this decoder-only branch still references the separate
+            # spatial/temporal streams of the MoE variant (cap_embeds, spatial_embeds,
+            # vis_spatial_mask, ...) and is not functional in this no-MoE ablation as written.
+            inputs_embeds, attention_mask, regress_limits_txt_input = self.pad_to_right_dec_only(cap_embeds, cap_mask, hist_embeds, hist_mask, regress_embeds, regress_mask, device)
+
+            if is_vid:
+                inputs_embeds = torch.cat([bos_embeds, spatial_embeds, temporal_embeds, inputs_embeds], dim=1)
+                attention_mask = torch.cat([bos_mask, vis_spatial_mask, vis_temporal_mask, attention_mask], dim=1)
+            else:
+                inputs_embeds = torch.cat([bos_embeds, spatial_embeds, inputs_embeds], dim=1)
+                attention_mask =
torch.cat([bos_mask, vis_spatial_mask, attention_mask], dim=1) + + labels = torch.zeros(inputs_embeds.size()[:-1]).fill_(-100).long().to(device) + + for i in range(labels.size(0)): + start_regress = regress_limits_txt_input[i][0] + 1 + spatial_feat_len + spatial_feat_len * int(is_vid) # offset (bos + spatial + temporal) + end_regress = regress_limits_txt_input[i][1] + 1 + spatial_feat_len + spatial_feat_len * int(is_vid) # offset (bos + spatial + temporal) + + labels[i, start_regress:end_regress] = regress_ids[i, :regress_mask[i].sum()] + + + # get causal attention mask + + + # Compute the regression embeds + + # Now we need to right-pad the input to LLM (at least for llama) to avoid nan loss values + # This means, all pad tokens have to be placed to the right + # full_embeds = [...][...][...][pad][...][pad][ans ...][pad] + # ------------> [...][...][...][...][ans ...][-----pad-----] + + # full_embeds, full_masks, start_output_idx = self.rearrange_llm_input_dec_only(cond_embeds, regress_embeds, cond_mask, cap_mask, hist_mask, regress_mask, spatial_feat_len) + + # labels = self.construct_reg_labels(regress_ids, start_output_idx, full_embeds, device) + + lm_outputs = self.llm( + inputs_embeds=inputs_embeds, + attention_mask=attention_mask, + labels=labels, + return_dict=True + ) + loss_gen = lm_outputs.loss + + # Encoder Decoder + else: + # inputs_embeds, attention_mask = self.pad_to_right_enc_dec(cap_embeds, cap_mask, hist_embeds, hist_mask, device) + + # now merge the multi-modal inputs + # if is_vid: + # inputs_embeds = torch.cat([spatial_embeds, temporal_embeds, inputs_embeds], dim=1) + # attention_mask = torch.cat([vis_spatial_mask, vis_temporal_mask, attention_mask], dim=1) + # else: + inputs_embeds = torch.cat([vis_embed, text_embeds], dim=1) + attention_mask = torch.cat([vis_mask, text_mask], dim=1) + + decoder_ids, decoder_mask = self.tokenize_text(ans, device, max_len=None) # pad the longest + + labels = decoder_ids.masked_fill(decoder_ids == self.tokenizer.pad_token_id, -100) + decoder_ids = self.shift_right(labels) + decoder_inputs_embeds = self.text_embedding(decoder_ids) + + lm_outputs = self.llm( + inputs_embeds=inputs_embeds, + attention_mask=attention_mask, + decoder_inputs_embeds=decoder_inputs_embeds, + decoder_attention_mask=decoder_mask, + labels=labels, + return_dict=True + ) + + loss_gen = lm_outputs.loss + + return dict( + loss_stc = loss_stc * self.config.loss_dict['stc'], + loss_stm = loss_stm * self.config.loss_dict['stm'], + loss_vhc = loss_vhc * self.config.loss_dict['vhc'], + loss_vhm = loss_vhm * self.config.loss_dict['vhm'], + loss_gen = loss_gen * self.config.loss_dict['gen'], + ) \ No newline at end of file diff --git a/processors/__init__.py b/processors/__init__.py new file mode 100755 index 0000000..8b13789 --- /dev/null +++ b/processors/__init__.py @@ -0,0 +1 @@ + diff --git a/processors/base_processor.py b/processors/base_processor.py new file mode 100755 index 0000000..39b33cd --- /dev/null +++ b/processors/base_processor.py @@ -0,0 +1,26 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. 
+ SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +from omegaconf import OmegaConf + + +class BaseProcessor: + def __init__(self): + self.transform = lambda x: x + return + + def __call__(self, item): + return self.transform(item) + + @classmethod + def from_config(cls, cfg=None): + return cls() + + def build(self, **kwargs): + cfg = OmegaConf.create(kwargs) + + return self.from_config(cfg) diff --git a/processors/blip_processors.py b/processors/blip_processors.py new file mode 100755 index 0000000..b6c3929 --- /dev/null +++ b/processors/blip_processors.py @@ -0,0 +1,214 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. + SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +import re +import torch +from processors.base_processor import BaseProcessor +from omegaconf import OmegaConf +from torchvision import transforms +from torchvision.transforms.functional import InterpolationMode + + +class BlipImageBaseProcessor(BaseProcessor): + def __init__(self, mean=None, std=None): + if mean is None: + mean = (0.48145466, 0.4578275, 0.40821073) + if std is None: + std = (0.26862954, 0.26130258, 0.27577711) + + + segment_mean = (0.485, 0.456, 0.406) + segment_std = (0.229, 0.224, 0.225) + + self.normalize = transforms.Normalize(segment_mean, segment_std) + + +class BlipCaptionProcessor(BaseProcessor): + def __init__(self, prompt="", max_words=50): + self.prompt = prompt + self.max_words = max_words + + def __call__(self, caption): + caption = self.prompt + self.pre_caption(caption) + + return caption + + @classmethod + def from_config(cls, cfg=None): + if cfg is None: + cfg = OmegaConf.create() + + prompt = cfg.get("prompt", "") + max_words = cfg.get("max_words", 50) + + return cls(prompt=prompt, max_words=max_words) + + def pre_caption(self, caption): + caption = re.sub( + r"([.!\"()*#|:;~])", + " ", + caption.lower(), + ) + caption = re.sub( + r"\s{2,}", + " ", + caption, + ) + caption = caption.rstrip("\n") + caption = caption.strip(" ") + + # truncate caption + caption_words = caption.split(" ") + if len(caption_words) > self.max_words: + caption = " ".join(caption_words[: self.max_words]) + + return caption + + +class BlipDialogProcessor(BlipCaptionProcessor): + def __init__(self, prompt="", max_words=50): + self.prompt = prompt + self.max_words = max_words + + def pre_caption_rm_period(self, text): + text = re.sub( + r"([.!\"()*#|:;~])", + " ", + text.lower(), + ) + text = re.sub( + r"\s{2,}", + " ", + text, + ) + text = text.rstrip("\n") + text = text.strip(" ") + + # truncate caption + text_words = text.split(" ") + if len(text_words) > self.max_words: + text = " ".join(text_words[: self.max_words]) + return text + + def pre_caption(self, text): + text = re.sub( + r"([\"()*#|:;~])", + " ", + text.lower(), + ) + text = re.sub( + r"\s{2,}", + " ", + text, + ) + text = text.rstrip("\n") + text = text.strip(" ") + + # truncate caption + text_words = text.split(" ") + if len(text_words) > self.max_words: + text = " ".join(text_words[: self.max_words]) + return text + + def __call__(self, caption, remove_period=False): + if remove_period: + caption = self.prompt + self.pre_caption_rm_period(caption) + else: + caption = self.prompt + self.pre_caption(caption) + return caption + + +class Blip2ImageTrainProcessor(BlipImageBaseProcessor): + def __init__(self, 
image_size=224, mean=None, std=None, min_scale=0.5, max_scale=1.0): + super().__init__(mean=mean, std=std) + + # self.transform = transforms.Compose( + # [ + # transforms.RandomResizedCrop( + # image_size, + # scale=(min_scale, max_scale), + # interpolation=InterpolationMode.BICUBIC, + # ), + # transforms.ToTensor(), + # self.normalize, + # ] + # ) + self.transform = transforms.Compose([ + transforms.Resize( + (image_size, image_size), interpolation=InterpolationMode.BICUBIC, antialias=True + ), + transforms.ToTensor(), + self.normalize, + ] + ) + + + + # ### segment anything + # ''' + # x = (x - self.pixel_mean) / self.pixel_std + + # # Pad + # h, w = x.shape[-2:] + # padh = self.image_encoder.img_size - h + # padw = self.image_encoder.img_size - w + # x = F.pad(x, (0, padw, 0, padh)) + # ''' + + def __call__(self, item): + return self.transform(item) + + @classmethod + def from_config(cls, cfg=None): + if cfg is None: + cfg = OmegaConf.create() + + image_size = cfg.get("image_size", 224) + + mean = cfg.get("mean", None) + std = cfg.get("std", None) + + min_scale = cfg.get("min_scale", 0.5) + max_scale = cfg.get("max_scale", 1.0) + + return cls( + image_size=image_size, + mean=mean, + std=std, + min_scale=min_scale, + max_scale=max_scale, + ) + + +class Blip2ImageEvalProcessor(BlipImageBaseProcessor): + def __init__(self, image_size=224, mean=None, std=None): + super().__init__(mean=mean, std=std) + + self.transform = transforms.Compose( + [ + transforms.Resize( + (image_size, image_size), interpolation=InterpolationMode.BICUBIC + ), + transforms.ToTensor(), + self.normalize, + ] + ) + + def __call__(self, item): + return self.transform(item) + + @classmethod + def from_config(cls, cfg=None): + if cfg is None: + cfg = OmegaConf.create() + + image_size = cfg.get("image_size", 224) + + mean = cfg.get("mean", None) + std = cfg.get("std", None) + + return cls(image_size=image_size, mean=mean, std=std) \ No newline at end of file diff --git a/processors/randaugment.py b/processors/randaugment.py new file mode 100755 index 0000000..7034a49 --- /dev/null +++ b/processors/randaugment.py @@ -0,0 +1,398 @@ +""" + Copyright (c) 2022, salesforce.com, inc. + All rights reserved. 
+ SPDX-License-Identifier: BSD-3-Clause + For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause +""" + +import cv2 +import numpy as np + +import torch + + +## aug functions +def identity_func(img): + return img + + +def autocontrast_func(img, cutoff=0): + """ + same output as PIL.ImageOps.autocontrast + """ + n_bins = 256 + + def tune_channel(ch): + n = ch.size + cut = cutoff * n // 100 + if cut == 0: + high, low = ch.max(), ch.min() + else: + hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins]) + low = np.argwhere(np.cumsum(hist) > cut) + low = 0 if low.shape[0] == 0 else low[0] + high = np.argwhere(np.cumsum(hist[::-1]) > cut) + high = n_bins - 1 if high.shape[0] == 0 else n_bins - 1 - high[0] + if high <= low: + table = np.arange(n_bins) + else: + scale = (n_bins - 1) / (high - low) + offset = -low * scale + table = np.arange(n_bins) * scale + offset + table[table < 0] = 0 + table[table > n_bins - 1] = n_bins - 1 + table = table.clip(0, 255).astype(np.uint8) + return table[ch] + + channels = [tune_channel(ch) for ch in cv2.split(img)] + out = cv2.merge(channels) + return out + + +def equalize_func(img): + """ + same output as PIL.ImageOps.equalize + PIL's implementation is different from cv2.equalize + """ + n_bins = 256 + + def tune_channel(ch): + hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins]) + non_zero_hist = hist[hist != 0].reshape(-1) + step = np.sum(non_zero_hist[:-1]) // (n_bins - 1) + if step == 0: + return ch + n = np.empty_like(hist) + n[0] = step // 2 + n[1:] = hist[:-1] + table = (np.cumsum(n) // step).clip(0, 255).astype(np.uint8) + return table[ch] + + channels = [tune_channel(ch) for ch in cv2.split(img)] + out = cv2.merge(channels) + return out + + +def rotate_func(img, degree, fill=(0, 0, 0)): + """ + like PIL, rotate by degree, not radians + """ + H, W = img.shape[0], img.shape[1] + center = W / 2, H / 2 + M = cv2.getRotationMatrix2D(center, degree, 1) + out = cv2.warpAffine(img, M, (W, H), borderValue=fill) + return out + + +def solarize_func(img, thresh=128): + """ + same output as PIL.ImageOps.posterize + """ + table = np.array([el if el < thresh else 255 - el for el in range(256)]) + table = table.clip(0, 255).astype(np.uint8) + out = table[img] + return out + + +def color_func(img, factor): + """ + same output as PIL.ImageEnhance.Color + """ + ## implementation according to PIL definition, quite slow + # degenerate = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)[:, :, np.newaxis] + # out = blend(degenerate, img, factor) + # M = ( + # np.eye(3) * factor + # + np.float32([0.114, 0.587, 0.299]).reshape(3, 1) * (1. 
- factor) + # )[np.newaxis, np.newaxis, :] + M = np.float32( + [[0.886, -0.114, -0.114], [-0.587, 0.413, -0.587], [-0.299, -0.299, 0.701]] + ) * factor + np.float32([[0.114], [0.587], [0.299]]) + out = np.matmul(img, M).clip(0, 255).astype(np.uint8) + return out + + +def contrast_func(img, factor): + """ + same output as PIL.ImageEnhance.Contrast + """ + mean = np.sum(np.mean(img, axis=(0, 1)) * np.array([0.114, 0.587, 0.299])) + table = ( + np.array([(el - mean) * factor + mean for el in range(256)]) + .clip(0, 255) + .astype(np.uint8) + ) + out = table[img] + return out + + +def brightness_func(img, factor): + """ + same output as PIL.ImageEnhance.Contrast + """ + table = (np.arange(256, dtype=np.float32) * factor).clip(0, 255).astype(np.uint8) + out = table[img] + return out + + +def sharpness_func(img, factor): + """ + The differences the this result and PIL are all on the 4 boundaries, the center + areas are same + """ + kernel = np.ones((3, 3), dtype=np.float32) + kernel[1][1] = 5 + kernel /= 13 + degenerate = cv2.filter2D(img, -1, kernel) + if factor == 0.0: + out = degenerate + elif factor == 1.0: + out = img + else: + out = img.astype(np.float32) + degenerate = degenerate.astype(np.float32)[1:-1, 1:-1, :] + out[1:-1, 1:-1, :] = degenerate + factor * (out[1:-1, 1:-1, :] - degenerate) + out = out.astype(np.uint8) + return out + + +def shear_x_func(img, factor, fill=(0, 0, 0)): + H, W = img.shape[0], img.shape[1] + M = np.float32([[1, factor, 0], [0, 1, 0]]) + out = cv2.warpAffine( + img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR + ).astype(np.uint8) + return out + + +def translate_x_func(img, offset, fill=(0, 0, 0)): + """ + same output as PIL.Image.transform + """ + H, W = img.shape[0], img.shape[1] + M = np.float32([[1, 0, -offset], [0, 1, 0]]) + out = cv2.warpAffine( + img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR + ).astype(np.uint8) + return out + + +def translate_y_func(img, offset, fill=(0, 0, 0)): + """ + same output as PIL.Image.transform + """ + H, W = img.shape[0], img.shape[1] + M = np.float32([[1, 0, 0], [0, 1, -offset]]) + out = cv2.warpAffine( + img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR + ).astype(np.uint8) + return out + + +def posterize_func(img, bits): + """ + same output as PIL.ImageOps.posterize + """ + out = np.bitwise_and(img, np.uint8(255 << (8 - bits))) + return out + + +def shear_y_func(img, factor, fill=(0, 0, 0)): + H, W = img.shape[0], img.shape[1] + M = np.float32([[1, 0, 0], [factor, 1, 0]]) + out = cv2.warpAffine( + img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR + ).astype(np.uint8) + return out + + +def cutout_func(img, pad_size, replace=(0, 0, 0)): + replace = np.array(replace, dtype=np.uint8) + H, W = img.shape[0], img.shape[1] + rh, rw = np.random.random(2) + pad_size = pad_size // 2 + ch, cw = int(rh * H), int(rw * W) + x1, x2 = max(ch - pad_size, 0), min(ch + pad_size, H) + y1, y2 = max(cw - pad_size, 0), min(cw + pad_size, W) + out = img.copy() + out[x1:x2, y1:y2, :] = replace + return out + + +### level to args +def enhance_level_to_args(MAX_LEVEL): + def level_to_args(level): + return ((level / MAX_LEVEL) * 1.8 + 0.1,) + + return level_to_args + + +def shear_level_to_args(MAX_LEVEL, replace_value): + def level_to_args(level): + level = (level / MAX_LEVEL) * 0.3 + if np.random.random() > 0.5: + level = -level + return (level, replace_value) + + return level_to_args + + +def translate_level_to_args(translate_const, MAX_LEVEL, replace_value): + def level_to_args(level): + level = (level / MAX_LEVEL) * 
float(translate_const) + if np.random.random() > 0.5: + level = -level + return (level, replace_value) + + return level_to_args + + +def cutout_level_to_args(cutout_const, MAX_LEVEL, replace_value): + def level_to_args(level): + level = int((level / MAX_LEVEL) * cutout_const) + return (level, replace_value) + + return level_to_args + + +def solarize_level_to_args(MAX_LEVEL): + def level_to_args(level): + level = int((level / MAX_LEVEL) * 256) + return (level,) + + return level_to_args + + +def none_level_to_args(level): + return () + + +def posterize_level_to_args(MAX_LEVEL): + def level_to_args(level): + level = int((level / MAX_LEVEL) * 4) + return (level,) + + return level_to_args + + +def rotate_level_to_args(MAX_LEVEL, replace_value): + def level_to_args(level): + level = (level / MAX_LEVEL) * 30 + if np.random.random() < 0.5: + level = -level + return (level, replace_value) + + return level_to_args + + +func_dict = { + "Identity": identity_func, + "AutoContrast": autocontrast_func, + "Equalize": equalize_func, + "Rotate": rotate_func, + "Solarize": solarize_func, + "Color": color_func, + "Contrast": contrast_func, + "Brightness": brightness_func, + "Sharpness": sharpness_func, + "ShearX": shear_x_func, + "TranslateX": translate_x_func, + "TranslateY": translate_y_func, + "Posterize": posterize_func, + "ShearY": shear_y_func, +} + +translate_const = 10 +MAX_LEVEL = 10 +replace_value = (128, 128, 128) +arg_dict = { + "Identity": none_level_to_args, + "AutoContrast": none_level_to_args, + "Equalize": none_level_to_args, + "Rotate": rotate_level_to_args(MAX_LEVEL, replace_value), + "Solarize": solarize_level_to_args(MAX_LEVEL), + "Color": enhance_level_to_args(MAX_LEVEL), + "Contrast": enhance_level_to_args(MAX_LEVEL), + "Brightness": enhance_level_to_args(MAX_LEVEL), + "Sharpness": enhance_level_to_args(MAX_LEVEL), + "ShearX": shear_level_to_args(MAX_LEVEL, replace_value), + "TranslateX": translate_level_to_args(translate_const, MAX_LEVEL, replace_value), + "TranslateY": translate_level_to_args(translate_const, MAX_LEVEL, replace_value), + "Posterize": posterize_level_to_args(MAX_LEVEL), + "ShearY": shear_level_to_args(MAX_LEVEL, replace_value), +} + + +class RandomAugment(object): + def __init__(self, N=2, M=10, isPIL=False, augs=[]): + self.N = N + self.M = M + self.isPIL = isPIL + if augs: + self.augs = augs + else: + self.augs = list(arg_dict.keys()) + + def get_random_ops(self): + sampled_ops = np.random.choice(self.augs, self.N) + return [(op, 0.5, self.M) for op in sampled_ops] + + def __call__(self, img): + if self.isPIL: + img = np.array(img) + ops = self.get_random_ops() + for name, prob, level in ops: + if np.random.random() > prob: + continue + args = arg_dict[name](level) + img = func_dict[name](img, *args) + return img + + +class VideoRandomAugment(object): + def __init__(self, N=2, M=10, p=0.0, tensor_in_tensor_out=True, augs=[]): + self.N = N + self.M = M + self.p = p + self.tensor_in_tensor_out = tensor_in_tensor_out + if augs: + self.augs = augs + else: + self.augs = list(arg_dict.keys()) + + def get_random_ops(self): + sampled_ops = np.random.choice(self.augs, self.N, replace=False) + return [(op, self.M) for op in sampled_ops] + + def __call__(self, frames): + assert ( + frames.shape[-1] == 3 + ), "Expecting last dimension for 3-channels RGB (b, h, w, c)." 
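Since `VideoRandomAugment` expects channels-last frames (as the assertion above enforces), here is a quick usage sketch; the shapes and the random tensor standing in for decoded video frames are illustrative only:

    import torch
    # from processors.randaugment import VideoRandomAugment  # when used from the repo

    aug = VideoRandomAugment(N=2, M=5, p=0.5)                  # 2 ops per frame, magnitude 5
    frames = torch.randint(0, 256, (8, 224, 224, 3)).float()   # (T, H, W, C), channels-last
    out = aug(frames)                                          # float tensor, same shape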
+ + if self.tensor_in_tensor_out: + frames = frames.numpy().astype(np.uint8) + + num_frames = frames.shape[0] + + ops = num_frames * [self.get_random_ops()] + apply_or_not = num_frames * [np.random.random(size=self.N) > self.p] + + frames = torch.stack( + list(map(self._aug, frames, ops, apply_or_not)), dim=0 + ).float() + + return frames + + def _aug(self, img, ops, apply_or_not): + for i, (name, level) in enumerate(ops): + if not apply_or_not[i]: + continue + args = arg_dict[name](level) + img = func_dict[name](img, *args) + return torch.from_numpy(img) + + +if __name__ == "__main__": + a = RandomAugment() + img = np.random.randn(32, 32, 3) + a(img) diff --git a/tasks/pre_train.py b/tasks/pre_train.py new file mode 100644 index 0000000..c39daa7 --- /dev/null +++ b/tasks/pre_train.py @@ -0,0 +1,413 @@ +import os +import datetime +import wandb +import torch +import pandas as pd +from time import time + +import torch.distributed as dist +from torch.distributed import ReduceOp + +from torch.nn.utils.clip_grad import clip_grad_value_ +from utils.basic import MetricLogger, SmoothedValue, setup_seed, average_dicts +from datasets.utils import get_datasets_media +from datasets.dataloader import MetaLoader +from utils.dist import is_main_process, get_rank, get_world_size +from utils.logger import setup_wandb, log_dict_to_wandb +from .retrieval_utils import evaluation_wrapper +import glog as logger + + +def run_epoch( + model, + train_dataloaders, + optimizer, + epoch, + global_step, + webvid_step, + cc3m_step, + device, + scheduler, + scaler, + config +): + model.train() + media_types = list(train_dataloaders.keys()) + + log_freq = config['log_freq'] + # metric_logger = MetricLogger(delimiter=' ') + # metric_logger.add_meter('lr', SmoothedValue(window=log_freq, fmt='{value:.6f}')) + # metric_logger.add_meter("temperature", SmoothedValue(window=log_freq, fmt="{value:.4f}")) + + loss_names = ['loss_' + k for k in config['loss_dict'].keys()] + # for l in loss_names: + # for m in media_types: + # metric_logger.add_meter( + # f'{m}/{l}', SmoothedValue(window=log_freq, fmt="{value:.4f}") + # ) + + + # header = '{} | Epoch = {}'.format(config['stage'], epoch) + + model_without_ddp = model + if config['distributed']: + model_without_ddp = model.module + for k in train_dataloaders: + train_dataloaders[k].sampler.set_epoch(epoch) + + train_dataloader = MetaLoader(name2loader=train_dataloaders) + + log_text_template = '\n' + '-' * 25 + '\n[Epoch {}/{}][Iter. 
{}/{}][Media-type {}]\n'
+    log_text_template += '[Losses] mlm (x{}) = {:.4f} | vcc (x{}) = {:.4f} | vcm (x{}) = {:.4f} | stc (x{}) = {:.4f} | stm (x{}) = {:.4f}\n'
+    log_text_template += '[Other] lr = {:.4f} | temp = {:.4f} | eta = {}\n'
+
+    # iterator = metric_logger.log_every(train_dataloader, log_freq, header)
+    local_step = 0
+    for media_type, (vis, caption, neg_vis) in train_dataloader:
+        start = time()
+        # loss_dict = {}
+        vis = vis.to(device)
+        neg_vis = neg_vis.to(device)
+        # idx = idx.to(device)
+
+        with torch.cuda.amp.autocast(enabled=config.fp16):
+            loss_dict = model(vis, caption, neg_vis, media_type)
+            # loss_dict.update(losses)
+            loss = sum(loss_dict.values())
+            loss_accum_grad = loss / config.accum_grad_every
+
+        scaler.scale(loss_accum_grad).backward()
+
+        # Perform gradient clipping: unscale --> clip
+        if config['clip_grad_value'] > 0:
+            # scaler.unscale_(optimizer)
+            clip_grad_value_(model.parameters(), config.clip_grad_value)
+
+        if local_step % config.accum_grad_every == 0:
+            scaler.step(optimizer)
+            scaler.update()
+            # scheduler.step(epoch, global_step)
+            scheduler.step()
+            optimizer.zero_grad()
+
+        time_iter = time() - start
+        eta = (len(train_dataloader) - local_step - 1) * time_iter
+        eta = str(datetime.timedelta(seconds=eta))
+        # log
+        log_dict_webvid = {}
+        log_dict_cc3m = {}
+        log_dict_rest = {}
+        for loss_name in loss_names:
+            value = loss_dict[loss_name]
+            value = value if isinstance(value, float) else value.item()
+            # metric_logger.update(**{f"{media_type}/{loss_name}": value})
+            if media_type == "cc3m":
+                log_dict_cc3m[f"train/{media_type}/{loss_name}"] = value
+            else:
+                log_dict_webvid[f"train/{media_type}/{loss_name}"] = value
+
+        # metric_logger.update(lr=optimizer.param_groups[0]["lr"])
+        # metric_logger.update(temperature=model_without_ddp.temp.item())
+        log_dict_rest['train/other/lr'] = optimizer.param_groups[0]["lr"]
+        log_dict_rest['train/other/temperature'] = model_without_ddp.temp.item()
+
+        if is_main_process() and global_step % log_freq == 0 and local_step % config.accum_grad_every == 0:
+            log_dict_rest['train/other/step'] = global_step
+            if media_type == 'cc3m':
+                log_dict_cc3m['train/cc3m/step'] = cc3m_step
+
+                log_text = log_text_template.format(
+                    epoch, config.epochs-1, local_step, len(train_dataloader), media_type,
+                    config.loss_dict['mlm'], log_dict_cc3m['train/cc3m/loss_mlm'],
+                    config.loss_dict['vcc'], log_dict_cc3m['train/cc3m/loss_vcc'],
+                    config.loss_dict['vcm'], log_dict_cc3m['train/cc3m/loss_vcm'],
+                    config.loss_dict['stc'], log_dict_cc3m['train/cc3m/loss_stc'],
+                    config.loss_dict['stm'], log_dict_cc3m['train/cc3m/loss_stm'],
+                    log_dict_rest['train/other/lr'], log_dict_rest['train/other/temperature'], eta
+                )
+                logger.info(log_text)
+
+                if config['wandb_enabled']:
+                    wandb.log(log_dict_rest)
+                    wandb.log(log_dict_cc3m)
+                # log_text_template = '[Epoch {}/{}][Iter. {}/{}][Media-type {}]\n'
+                # log_text_template += '[losses: mlm = {:.4f} | vcc = {:4f} | vcm = {:.4f} | stc = {:.4f} | stm = {:.4f}]\n'
+                # log_text_template += '[Other: lr = {:.4f} | temp = {:4f}]\n'
+
+            else:
+                log_dict_webvid['train/webvid/step'] = webvid_step
+                log_text = log_text_template.format(
+                    epoch, config.epochs-1, local_step, len(train_dataloader), media_type,
+                    config.loss_dict['mlm'], log_dict_webvid['train/webvid/loss_mlm'],
+                    config.loss_dict['vcc'], log_dict_webvid['train/webvid/loss_vcc'],
+                    config.loss_dict['vcm'], log_dict_webvid['train/webvid/loss_vcm'],
+                    config.loss_dict['stc'], log_dict_webvid['train/webvid/loss_stc'],
+                    config.loss_dict['stm'], log_dict_webvid['train/webvid/loss_stm'],
+                    log_dict_rest['train/other/lr'], log_dict_rest['train/other/temperature'], eta
+                )
+                logger.info(log_text)
+
+                if config['wandb_enabled']:
+                    wandb.log(log_dict_rest)
+                    wandb.log(log_dict_webvid)
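The training step in this loop combines three standard ingredients: mixed-precision autocast, loss scaling via GradScaler, and gradient accumulation (an optimizer step only every accum_grad_every iterations). A condensed, self-contained sketch of that pattern, with a toy model and data standing in for the real ones:

    # Minimal AMP + gradient-accumulation loop; model and data are placeholders.
    import torch
    import torch.nn as nn

    model = nn.Linear(16, 4).cuda()
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
    scaler = torch.cuda.amp.GradScaler()
    accum_every = 4

    for step in range(100):
        x = torch.randn(8, 16, device='cuda')
        with torch.cuda.amp.autocast():
            loss = model(x).pow(2).mean() / accum_every   # divide so accumulated grads average out
        scaler.scale(loss).backward()                     # backward on the scaled loss
        if (step + 1) % accum_every == 0:                 # step only every accum_every iterations
            scaler.step(optimizer)                        # unscales grads internally, then steps
            scaler.update()
            optimizer.zero_grad()

One general caveat: if gradients are clipped, `scaler.unscale_(optimizer)` should be called before clipping so the threshold applies to true gradient magnitudes rather than scaled ones.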
{}/{}][Media-type {}]\n' + # log_text_template += '[losses: mlm = {:.4f} | vcc = {:4f} | vcm = {:.4f} | stc = {:.4f} | stm = {:.4f}]\n' + # log_text_template += '[Other: lr = {:.4f} | temp = {:4f}]\n' + + else: + log_dict_webvid['train/webvid/step'] = webvid_step + log_text = log_text_template.format( + epoch, config.epochs-1, local_step, len(train_dataloader) , media_type, + config.loss_dict['mlm'], log_dict_webvid['train/webvid/loss_mlm'], + config.loss_dict['vcc'], log_dict_webvid['train/webvid/loss_vcc'], + config.loss_dict['vcm'], log_dict_webvid['train/webvid/loss_vcm'], + config.loss_dict['stc'], log_dict_webvid['train/webvid/loss_stc'], + config.loss_dict['stm'], log_dict_webvid['train/webvid/loss_stm'], + log_dict_rest['train/other/lr'], log_dict_rest['train/other/temperature'], eta + ) + logger.info(log_text) + + if config['wandb_enabled']: + wandb.log(log_dict_rest) + wandb.log(log_dict_webvid) + + + if media_type == "cc3m": + cc3m_step += 1 + else: + webvid_step += 1 + global_step += 1 + local_step += 1 + # gather the stats from all processes + # metric_logger.synchronize_between_processes() + # logger.info(f"Averaged stats: {metric_logger.global_avg()}") + + return global_step, webvid_step, cc3m_step + + +def eval(model, val_dataloader, device, epoch, config): + + model.eval() + + log_text_template = '\n' + '-' * 25 + '\n[Val Epoch {}][Iter. {}/{}][Media-type {}]\n' + log_text_template += '[Losses] mlm = {:.4f} | vcc = {:.4f} | vcm = {:.4f} | stc = {:.4f} | stm = {:.4f} \n' + + # log_text_template += '[Losses] vcc = {:.4f} | vcm = {:.4f} | stc = {:.4f} | stm = {:.4f} | mlm = {:.4f} \n' + # log_text_template += '[Losses] vhc = {:.4f} | vhm = {:.4f} | chc = {:.4f} | chm = {:.4f} | gen = {:.4f} \n' + + cum_loss_stc = 0 + cum_loss_stm = 0 + cum_loss_vcc = 0 + cum_loss_vcm = 0 + cum_loss_mlm = 0 + cum_loss_tot = 0 + val_step = 0 + + # val_dataloader = MetaLoader(name2loader=val_dataloaders) + media_type = val_dataloader.dataset.medium + + if is_main_process(): + start_time = time() + + # for vis, cap_ids, hist_ids, ques_ids, label_ids, enc_dec_input_ids, idx, _ in val_dataloader: + for vis, caption, neg_vis in val_dataloader: + # for vis, cap_ids, hist_ids, label_ids, enc_dec_input_ids, idx, _ in val_dataloader: + vis = vis.to(device) + neg_vis = neg_vis.to(device) + # idx = idx.to(device) + + with torch.cuda.amp.autocast(enabled=config['fp16']): + with torch.no_grad(): + # loss_dict, _ = model(vis, cap_ids, hist_ids, ques_ids, label_ids, enc_dec_input_ids, media_type) + # loss_dict = model(vis, caption, neg_vis, neg_caption, media_type, file, neg_file) + loss_dict = model(vis, caption, neg_vis, media_type) + + loss = sum(loss_dict.values()) + loss_stc = loss_dict['loss_stc'] + loss_stm = loss_dict['loss_stm'] + loss_vcc = loss_dict['loss_vcc'] + loss_vcm = loss_dict['loss_vcm'] + loss_mlm = loss_dict['loss_mlm'] + + if config['distributed']: + dist.all_reduce(loss, op=ReduceOp.AVG) + if config.loss_dict['stc'] != 0: + dist.all_reduce(loss_stc, op=ReduceOp.AVG) + if config.loss_dict['stm'] != 0: + dist.all_reduce(loss_stm, op=ReduceOp.AVG) + if config.loss_dict['vcc'] != 0: + dist.all_reduce(loss_vcc, op=ReduceOp.AVG) + if config.loss_dict['vcm'] != 0: + dist.all_reduce(loss_vcm, op=ReduceOp.AVG) + if config.loss_dict['mlm'] != 0: + dist.all_reduce(loss_mlm, op=ReduceOp.AVG) + + if is_main_process(): + cum_loss_tot += loss.item() + cum_loss_stc += loss_stc.item() + cum_loss_stm += loss_stm.item() + cum_loss_vcc += loss_vcc.item() + cum_loss_vcm += loss_vcm.item() + cum_loss_mlm += 
loss_mlm.item() + + if val_step % config.log_freq == 0: + log_text = log_text_template.format( + epoch, val_step, len(val_dataloader), media_type, + loss_mlm, loss_vcc, loss_vcm, loss_stc, loss_stm) + # log_text_template = '\n' + '-' * 25 + '\n[Val Eoch{}][Iter. {}/{}][Media-type {}]\n' + # log_text_template += '[Losses] vcc = {:.4f} | vcm = {:.4f} | stc = {:.4f} | stm = {:.4f} | mlm = {:.4f} \n' + # log_text_template += '[Losses] vhc = {:.4f} | vhm = {:.4f} | chc = {:.4f} | chm = {:.4f} | gen = {:.4f} \n' + # log_text = log_text_template.format( + # epoch, val_step, len(val_dataloader), media_type, + # loss_vcc, loss_vcm, loss_stc, loss_stm, 0, + # loss_vhc, loss_vhm, loss_chc, loss_chm, loss_gen + # ) + + logger.info(log_text) + # logger.info('[INFO] [Eval. Epoch {}][Iter. {}/{}][Losses] gen = {:.4f} | total = {:.4f}'.format( + # epoch, val_step, len(val_dataloader), gen_loss, loss + # )) + val_step += 1 + + if config['distributed']: + dist.barrier() + + if is_main_process(): + duration = time() - start_time + + cum_loss_tot /= len(val_dataloader) + cum_loss_stc /= len(val_dataloader) + cum_loss_stm /= len(val_dataloader) + cum_loss_vcc /= len(val_dataloader) + cum_loss_vcm /= len(val_dataloader) + cum_loss_mlm /= len(val_dataloader) + + # cum_loss_vhc /= len(val_dataloader) + # cum_loss_vhm /= len(val_dataloader) + # cum_loss_chc /= len(val_dataloader) + # cum_loss_chm /= len(val_dataloader) + # cum_loss_gen /= len(val_dataloader) + logger.info('\n' + '-' * 25 + '\n' + 'Eval. took {}\n[Losses] cum_total = {:.4f}'.format( + datetime.timedelta(seconds=int(duration)), cum_loss_tot + )) + + # logger.info('\n' + '-' * 25 + '\n' + 'Eval. took {}\n[Losses] cum_gen = {:.4f} | cum_total = {:.4f}'.format( + # datetime.timedelta(seconds=int(duration)), cum_loss_gen, cum_loss_tot + # )) + + loss_dict = { + 'stc': cum_loss_stc, + 'stm': cum_loss_stm, + 'vcc': cum_loss_vcc, + 'vcm': cum_loss_vcm, + # 'vhc': cum_loss_vhc, + # 'vhm': cum_loss_vhm, + # 'chc': cum_loss_chc, + # 'chm': cum_loss_chm, + 'mlm': cum_loss_mlm, + # 'gen': cum_loss_gen, + 'tot': cum_loss_tot + } + return loss_dict + + +def pre_train( + model, + model_without_ddp, + train_dataloaders, + val_dataloaders, + optimizer, + global_step, + webvid_step, + cc3m_step, + scheduler, + scaler, + start_epoch, + config +): + if is_main_process() and config['wandb_enabled']: + run = setup_wandb(config) + setup_seed(config['seed'] + get_rank()) + device = torch.device('cuda:{}'.format(config['gpu'])) + + if is_main_process() and config['wandb_enabled']: + wandb.watch(model) + + best = float('inf') + best_epoch = 0 + + logger.info('[INFO] Start training...') + start_time_all = time() + for epoch in range(start_epoch, config['epochs']): + if not config['evaluate']: + start_time_epoch = time() + global_step, webvid_step, cc3m_step = run_epoch( + model, + train_dataloaders, + optimizer, + epoch, + global_step, + webvid_step, + cc3m_step, + device, + scheduler, + scaler, + config + ) + end_time_epoch = time() + epoch_time = end_time_epoch - start_time_epoch + epoch_time_str = str(datetime.timedelta(seconds=int(epoch_time))) + logger.info(f'[INFO] Epoch took {epoch_time_str}') + + if not config['debugging']: + with torch.cuda.amp.autocast(enabled=config['fp16']): + # # TODO + # eval_res = {} + # for val_name, val_loader in val_dataloaders_dict.items(): + # res = evaluation_wrapper( + # model_without_ddp, val_loader, tokenizer, device, config, prefix=val_name + # ) + # eval_res.update(res) + val_res = {} + + for medium in val_dataloaders: + res = eval( 
+ model, + val_dataloaders[medium], + device, + epoch, + config + ) + val_res[medium] = res + + if is_main_process(): + # Average across all datasets + avg_val_res = average_dicts(val_res) + # log to wandb + if config.wandb_enabled: + for medium in val_res: + log_dict_val = {} + # log_dict_val[f'val/{medium}/step'] = epoch + for l in val_res[medium]: + log_dict_val[f'val/{medium}/{l}'] = val_res[medium][l] + wandb.log(log_dict_val) + # for p, v in eval_res.items(): + # log_dict_to_wandb(v, step=global_step, prefix=p) + if config.stop_key is not None and config.stop_key in avg_val_res: + cur_best = avg_val_res[config.stop_key] + else: # stop_key = None + cur_best = best - 1 # save the last as the best + + # Don't save vit weights as they are frozen + state_dict = model_without_ddp.state_dict() + state_dict = {k:v for k,v in state_dict.items() if 'visual_encoder' not in k} + + save_obj = { + "model": state_dict, + "optimizer": optimizer.state_dict(), + "scheduler": scheduler.state_dict(), + "scaler": scaler.state_dict(), + "config": config, + "epoch": epoch, + "global_step": global_step, + } + torch.save(save_obj, os.path.join(config.log_dir, f"ckpt_{epoch:02d}.pth")) + + if not config.evaluate and cur_best < best: + torch.save(save_obj, os.path.join(config.log_dir, "ckpt_best.pth")) + # eval_file = "eval_res_best.json" + # eval_res.to_json(os.path.join(config.log_dir, eval_file)) + best = cur_best + + if config.evaluate: + break + if config['distributed']: + dist.barrier() + + total_time = time() - start_time_all + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + logger.info(f'[INFO] Training took {total_time_str}') + + if is_main_process() and config['wandb_enabled']: + run.finish() + diff --git a/tasks/retrieval_utils.py b/tasks/retrieval_utils.py new file mode 100644 index 0000000..be9192b --- /dev/null +++ b/tasks/retrieval_utils.py @@ -0,0 +1,435 @@ +import datetime +import logging +import time + +import numpy as np +import torch +import torch.distributed as dist +from einops import rearrange + +from models.criteria import get_sim +from utils.basic import MetricLogger +from utils.dist import get_rank, get_world_size + +logger = logging.getLogger(__name__) + + +def extract_text_feats(texts, max_txt_l, tokenizer, model, device): + num_text = len(texts) + text_bs = 256 + text_feats = [] + text_atts = [] + + for i in range(0, num_text, text_bs): + text = texts[i : min(num_text, i + text_bs)] + text_input = tokenizer( + text, + padding="max_length", + truncation=True, + max_length=max_txt_l, + return_tensors="pt", + ).to(device) + + text_feat = model.encode_text(text_input)[0] + text_feats.append(text_feat) + text_atts.append(text_input.attention_mask) + + text_feats = torch.cat(text_feats, dim=0) + text_atts = torch.cat(text_atts, dim=0) + return text_feats, text_atts + + +def extract_vision_feats(data_loader, model, device, config): + image_feats_all = [] + pooled_image_feats_all = [] + metric_logger = MetricLogger(delimiter=" ") + header = "extracting image feats" + iterator = metric_logger.log_every(data_loader, 100, header) + media_type = data_loader.dataset.medium + for vis, _ in iterator: + vis = vis.to(device, non_blocking=True) + vis_feat, pooled_vis_feat = model.get_vis_enc_for_eval(vis, media_type) + # if config.evaluation.eval_frame_ensemble == "concat": # default + # image_feat = rearrange(image_feat, "b t l c -> b (t l) c").contiguous() + vis_feat = vis_feat.unsqueeze(1) # (bsz, 1, l, d) + # else: + # assert config.video_input.num_frames == 1, "only support 
single-frame" + # assert config.evaluation.eval_frame_ensemble in ["mean", "max", "lse"] + if not config.eval_offload: + image_feats_all.append(vis_feat.cpu()) + pooled_image_feats_all.append(pooled_vis_feat.cpu()) + else: + image_feats_all.append(vis_feat) + pooled_image_feats_all.append(pooled_vis_feat) + + image_feats_all = torch.cat(image_feats_all, dim=0) + + pooled_image_feats_all = torch.cat(pooled_image_feats_all, dim=0) + return image_feats_all, pooled_image_feats_all + + +@torch.no_grad() +def evaluation_wrapper(model, data_loader, tokenizer, device, config, prefix=""): + with torch.cuda.amp.autocast(enabled=config.fp16): + i2t_x, t2i_x, i2t_emb, t2i_emb = evaluation( + model, data_loader, tokenizer, device, config + ) + score_pairs = [ + (prefix + "/", i2t_x, t2i_x), + (prefix + "_emb/", i2t_emb, t2i_emb), + ] + res = dict() + for name, i2t, t2i in score_pairs: + if i2t is not None: + txt2img_ids = data_loader.dataset.txt2vis + img2txt_ids = data_loader.dataset.vis2txt + res[name] = itm_eval(i2t, t2i, txt2img_ids, img2txt_ids) + return res + + +@torch.no_grad() +def evaluation(model, data_loader, tokenizer, device, config): + model.eval() + + metric_logger = MetricLogger(delimiter=" ") + header = "Evaluation:" + dtype = torch.half if config.fp16 else torch.float + media_type = data_loader.dataset.medium + logger.info(f"Start evaluation for {media_type}") + + logger.info("Computing dual encoder features...") + start_time = time.time() + + # this computes all features in each GPU + texts = data_loader.dataset.text + max_txt_l = config.max_cap_len + + text_feats, text_atts = extract_text_feats( + texts, max_txt_l, tokenizer, model, device + ) # (bsz, Lt, d), (bsz, Lt) + + image_feats, pooled_image_feats = extract_vision_feats( + data_loader, model, device, config + ) # (bsz, 1, #frm*Li, d) or (bsz, #frm, Li, d), (bsz, #frm, d) + logger.info("Finished feature extraction") + logger.info("Computing ITC scores [dot-product]") + _pooled_image_feats = ( + pooled_image_feats.to(device, non_blocking=True) + if config.eval_offload + else pooled_image_feats + ) + i2t_scores, t2i_scores = get_sim( + model.vis_proj(_pooled_image_feats), model.cap_proj(text_feats[:, 0]) + ) + logger.info("Computing ITC scores [dot-product], done!") + + num_images = len(data_loader.dataset.vis) + i2t_scores_x = torch.full((num_images, len(texts)), -100.0).to( + device, torch.float, non_blocking=True + ) + + # computes only part of the scores at each GPU, gather at the end + logger.info("Rerank dual-encoder results with cross-encoder...") + num_tasks = get_world_size() + rank = get_rank() + # only uses the part associated with the raw eval set + # compute image2text # + step = num_images // num_tasks + 1 + start = rank * step + end = min(num_images, start + step) + + text_encoder = model.get_expert_encoder('vis_cap_grounding') + + iterator = metric_logger.log_every(i2t_scores[start:end], 100, header) + logger.info(f"i2t_scores.shape {i2t_scores[start:end].shape}") + + # generate score for each clip, and aggregate all clip scores for a video + n_clip_per_video = 1 + # ( + # image_feats.shape[1] if not False else image_feats[0].shape[1] + # ) + + # logger.info( + # f"n_clip_per_video={n_clip_per_video}, with eval_frame_ensemble={'concat'}" + # ) + for i, sims in enumerate(iterator): + k = min(len(sims), config.eval_k_test) + topk_sim, topk_idx = sims.topk(k=k, dim=0) + + clip_scores = [] + for clip_idx in range(n_clip_per_video): + # if config.deep_fusion: + # encoder_output = [ + # feat[start + i, 
clip_idx].to(device, non_blocking=True) + # for feat in image_feats + # ] + + # else: + encoder_output = ( + image_feats[start + i, clip_idx].to(device, non_blocking=True) + if config.eval_offload + else image_feats[start + i, clip_idx] + ) # (#frm*Li, d) + + """ original + encoder_output = encoder_output.repeat(k, 1, 1) # (k=128, #frm*Li, d) + encoder_att = torch.ones( + encoder_output.size()[:-1], dtype=torch.long + ).to(device, non_blocking=True) + output = text_encoder( + encoder_embeds=text_feats[topk_idx], + attention_mask=text_atts[topk_idx], + encoder_hidden_states=encoder_output, + encoder_attention_mask=encoder_att, + return_dict=True, + mode="fusion" + ) + + itm_embeds = output.last_hidden_state[:, 0] + """ + + # new + bs = 32 + # bs = config.batch_size_test.video + itm_embeds = [] + + # if config.deep_fusion: + # encoder_output = [feat.repeat(bs, 1, 1) for feat in encoder_output] + # encoder_att = [ + # torch.ones(feat.size()[:-1], dtype=torch.long).to( + # device, non_blocking=True + # ) + # for feat in encoder_output + # ] + # else: + encoder_output = encoder_output.repeat(bs, 1, 1) # (k=128, #frm*Li, d) + encoder_att = torch.ones(encoder_output.size()[:-1], dtype=torch.long).to( + device, non_blocking=True + ) + + for j in range(0, len(topk_idx), bs): + output = text_encoder( + encoder_embeds=text_feats[topk_idx[j : j + bs]], + attention_mask=text_atts[topk_idx[j : j + bs]], + encoder_hidden_states=encoder_output, + encoder_attention_mask=encoder_att, + return_dict=True, + ) + batch_itm_embeds = output.last_hidden_state[:, 0] + itm_embeds.append(batch_itm_embeds) + itm_embeds = torch.cat(itm_embeds, dim=0) + # end new + + score = model.vcm_head(itm_embeds)[:, 1] + clip_scores.append(score) + + # if len(clip_scores) == 1: + score = clip_scores[0] + # else: + # assert config.evaluation.eval_frame_ensemble in ["mean", "max", "lse"] + # clip_scores = torch.stack(clip_scores) # (#clips, k) + # if config.evaluation.eval_frame_ensemble == "mean": + # score = clip_scores.mean(0) + # elif config.evaluation.eval_frame_ensemble == "max": + # score = clip_scores.max(0)[0] + # elif config.evaluation.eval_frame_ensemble == "lse": # LogSumExp + # score = torch.logsumexp(clip_scores, dim=0) + # else: + # raise ValueError( + # "config.evaluation.eval_frame_ensemble must in [mean, max, lse] when #clip > 1." 
+ # ) + + i2t_scores_x[start + i, topk_idx] = score.to(i2t_scores_x.dtype) + + # compute text2image # + num_text = len(data_loader.dataset.text) + t2i_scores_x = torch.full((num_text, len(data_loader.dataset.vis)), -100.0).to( + device, torch.float, non_blocking=True + ) + + step = num_text // num_tasks + 1 + start = rank * step + end = min(num_text, start + step) + + iterator = metric_logger.log_every(t2i_scores[start:end], 100, header) + logger.info(f"t2i_scores.shape {t2i_scores[start:end].shape}") + # generate score for each clip, and aggregate all clip scores for a video + n_clip_per_video = 1 + # ( + # image_feats.shape[1] if not config.deep_fusion else image_feats[0].shape[1] + # ) + for i, sims in enumerate(iterator): + k = min(len(sims), config.eval_k_test) + topk_sim, topk_idx = sims.topk(k=k, dim=0) + # topk_idx = + clip_scores = [] + for clip_idx in range(n_clip_per_video): + + """old + encoder_output = image_feats[topk_idx, clip_idx].to(device, non_blocking=True) \ + if config.evaluation.eval_offload else image_feats[topk_idx, clip_idx] + encoder_att = torch.ones( + encoder_output.size()[:-1], dtype=torch.long + ).to(device, non_blocking=True) + output = text_encoder( + encoder_embeds=text_feats[start+i].repeat(k, 1, 1), + attention_mask=text_atts[start+i].repeat(k, 1), + encoder_hidden_states=encoder_output, + encoder_attention_mask=encoder_att, + return_dict=True, + mode="fusion" + ) + + itm_embeds = output.last_hidden_state[:, 0] + """ + + # new + bs = 32 + # bs = config.batch_size_test.video + itm_embeds = [] + for j in range(0, len(topk_idx), bs): + + # if config.deep_fusion: + # encoder_output = [ + # feat[topk_idx[j : j + bs], clip_idx].to(device, non_blocking=True) + # for feat in image_feats + # ] + # encoder_att = [ + # torch.ones(feat.size()[:-1], dtype=torch.long).to( + # device, non_blocking=True + # ) + # for feat in encoder_output + # ] + # else: + encoder_output = ( + image_feats[topk_idx[j : j + bs], clip_idx].to( + device, non_blocking=True + ) + if config.eval_offload + else image_feats[topk_idx[j : j + bs], clip_idx] + ) + encoder_att = torch.ones(encoder_output.size()[:-1], dtype=torch.long).to( + device, non_blocking=True + ) + + repeat_n = ( + encoder_output.shape[0] + # if not config.deep_fusion + # else encoder_output[0].shape[0] + ) + output = text_encoder( + encoder_embeds=text_feats[start + i].repeat(repeat_n, 1, 1), + attention_mask=text_atts[start + i].repeat(repeat_n, 1), + encoder_hidden_states=encoder_output, + encoder_attention_mask=encoder_att, + return_dict=True, + # mode="fusion", + ) + + batch_itm_embeds = output.last_hidden_state[:, 0] + itm_embeds.append(batch_itm_embeds) + + itm_embeds = torch.cat(itm_embeds, dim=0) + # end new + + score = model.vcm_head(itm_embeds)[:, 1] + clip_scores.append(score) + + # if len(clip_scores) == 1: + score = clip_scores[0] + # else: + # assert config.evaluation.eval_frame_ensemble in ["mean", "max", "lse"] + # clip_scores = torch.stack(clip_scores) # (#clips, k) + # if config.evaluation.eval_frame_ensemble == "mean": + # score = clip_scores.mean(0) + # elif config.evaluation.eval_frame_ensemble == "max": + # score = clip_scores.max(0)[0] + # elif config.evaluation.eval_frame_ensemble == "lse": # LogSumExp + # score = torch.logsumexp(clip_scores, dim=0) + # else: + # raise ValueError( + # "config.evaluation.eval_frame_ensemble must in [mean, max, lse] when #clip > 1." 
+ # ) + + t2i_scores_x[start + i, topk_idx] = score.to(t2i_scores_x.dtype) + + if config.distributed: + # gather across GPUs + dist.barrier() + dist.all_reduce(i2t_scores_x, op=dist.ReduceOp.SUM) + dist.all_reduce(t2i_scores_x, op=dist.ReduceOp.SUM) + + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + logger.info(f"Evaluation time {total_time_str}") + + return ( + i2t_scores_x.cpu().numpy(), + t2i_scores_x.cpu().numpy(), + i2t_scores.cpu().numpy(), + i2t_scores.T.cpu().numpy(), + ) + + +@torch.no_grad() +def itm_eval(scores_i2t, scores_t2i, txt2img, img2txt): + # Images->Text + ranks = np.zeros(scores_i2t.shape[0]) + for index, score in enumerate(scores_i2t): + inds = np.argsort(score)[::-1] + # Score + gt_txt_ids = img2txt[index] + if isinstance(gt_txt_ids, int): + ranks[index] = np.where(inds == gt_txt_ids)[0][0] + else: + rank = 1e20 + for i in gt_txt_ids: + tmp = np.where(inds == i)[0][0] + if tmp < rank: + rank = tmp + ranks[index] = rank + + # Compute metrics + tr1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks) + tr5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks) + tr10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks) + + # Text->Images + ranks = np.zeros(scores_t2i.shape[0]) + + for index, score in enumerate(scores_t2i): + inds = np.argsort(score)[::-1] + gt_img_ids = txt2img[index] + if isinstance(gt_img_ids, int): + ranks[index] = np.where(inds == gt_img_ids)[0][0] + else: # list, used in the case each caption has multiple GT images + # Score + rank = 1e20 + for i in gt_img_ids: + tmp = np.where(inds == i)[0][0] + if tmp < rank: + rank = tmp + ranks[index] = rank + + # Compute metrics + ir1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks) + ir5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks) + ir10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks) + + tr_mean = (tr1 + tr5 + tr10) / 3 + ir_mean = (ir1 + ir5 + ir10) / 3 + r_mean = (tr_mean + ir_mean) / 2 + + eval_result = { + "txt_r1": tr1, + "txt_r5": tr5, + "txt_r10": tr10, + "txt_r_mean": tr_mean, + "vis_r1": ir1, + "vis_r5": ir5, + "vis_r10": ir10, + "vis_r_mean": ir_mean, + "r_mean": r_mean, + } + eval_result = {k: round(v, 2) for k, v in eval_result.items()} + return eval_result diff --git a/tasks/stage_2.py b/tasks/stage_2.py new file mode 100644 index 0000000..e083d16 --- /dev/null +++ b/tasks/stage_2.py @@ -0,0 +1,373 @@ +import os +import datetime +import wandb +import torch +import pandas as pd +from time import time + +import torch.distributed as dist +from torch.distributed import ReduceOp + +from torch.nn.utils.clip_grad import clip_grad_value_ +from utils.basic import MetricLogger, SmoothedValue, setup_seed, average_dicts +from datasets.utils import get_datasets_media +from datasets.dataloader import MetaLoader +from utils.dist import is_main_process, get_rank, get_world_size +from utils.logger import setup_wandb, log_dict_to_wandb +from .retrieval_utils import evaluation_wrapper +import glog as logger + + +def run_epoch( + model, + train_dataloaders, + optimizer, + epoch, + global_step, + device, + scheduler, + scaler, + config +): + model.train() + media_types = list(train_dataloaders.keys()) + + log_freq = config['log_freq'] + # metric_logger = MetricLogger(delimiter=' ') + # metric_logger.add_meter('lr', SmoothedValue(window=log_freq, fmt='{value:.6f}')) + # metric_logger.add_meter("temperature", SmoothedValue(window=log_freq, fmt="{value:.4f}")) + + loss_names = ['loss_' + k for k in config['loss_dict'].keys()] + # for l in loss_names: + 
# for m in media_types: + # metric_logger.add_meter( + # f'{m}/{l}', SmoothedValue(window=log_freq, fmt="{value:.4f}") + # ) + + + # header = '{} | Epoch = {}'.format(config['stage'], epoch) + + model_without_ddp = model + if config['distributed']: + model_without_ddp = model.module + for k in train_dataloaders: + train_dataloaders[k].sampler.set_epoch(epoch) + + train_dataloader = MetaLoader(name2loader=train_dataloaders) + + log_text_template = '\n' + '-' * 25 + '\n[Epoch {}/{}][Iter. {}/{}][Media-type {}]\n' + log_text_template += '[Losses] gen = {:.4f} | vhc = {:.4f} | vhm = {:.4f} | stc = {:.4f} | stm = {:.4f}\n' + log_text_template += '[Other] lr = {:.4f} | temp = {:.4f} | iter_time = {:.2f} | eta = {}\n' + + # iterator = metric_logger.log_every(train_dataloader, log_freq, header) + local_step = 0 + for media_type, (vis, caption, history, answer) in train_dataloader: + # for media_type, (vis, caption, neg_vis, neg_caption, idx) in train_dataloader: + + start = time() + # loss_dict = {} + vis = vis.to(device) + # neg_vis = neg_vis.to(device) + # idx = idx.to(device) + + with torch.cuda.amp.autocast(enabled=config.fp16): + loss_dict = model(vis, caption, history, answer, media_type) + loss = sum(loss_dict.values()) + loss_accum_grad = loss / config.accum_grad_every + + scaler.scale(loss_accum_grad).backward() + + # Perform gradient clipping: unscale --> clip + if config['clip_grad_value'] > 0: + scaler.unscale_(optimizer) + # NOTE: GradScaler permits a single unscale_ per optimizer step, so unscaling on every iteration assumes accum_grad_every == 1 + clip_grad_value_(model.parameters(), config.clip_grad_value) + + if local_step % config.accum_grad_every == 0: + scaler.step(optimizer) + scaler.update() + scheduler.step() + optimizer.zero_grad() + + time_iter = time() - start + eta = (len(train_dataloader) - local_step - 1) * time_iter + eta = str(datetime.timedelta(seconds=eta)) + # log + log_dict = {} + log_dict_rest = {} + for loss_name in loss_names: + value = loss_dict[loss_name] + value = value if isinstance(value, float) else value.item() + log_dict[f"train/{media_type}/{loss_name}"] = value + + # metric_logger.update(lr=optimizer.param_groups[0]["lr"]) + # metric_logger.update(temperature=model_without_ddp.temp.item()) + log_dict_rest['train/other/lr'] = optimizer.param_groups[0]["lr"] + log_dict_rest['train/other/temperature'] = model_without_ddp.temp.item() + + if is_main_process() and global_step % log_freq == 0 and local_step % config.accum_grad_every == 0: + # log_dict['train/webvid/step'] = webvid_step + log_text = log_text_template.format( + epoch, config.epochs-1, local_step, len(train_dataloader) , media_type, + log_dict['train/champagne/loss_gen'], log_dict['train/champagne/loss_vhc'], log_dict['train/champagne/loss_vhm'], + log_dict['train/champagne/loss_stc'], log_dict['train/champagne/loss_stm'], + log_dict_rest['train/other/lr'], log_dict_rest['train/other/temperature'], time_iter, eta + ) + logger.info(log_text) + log_dict_rest['train/other/step'] = global_step + log_dict['train/champagne/step'] = global_step + + if config['wandb_enabled']: + wandb.log(log_dict) + wandb.log(log_dict_rest) + + global_step += 1 + local_step += 1 + # gather the stats from all processes + # metric_logger.synchronize_between_processes() + # logger.info(f"Averaged stats: {metric_logger.global_avg()}") + + return global_step + + +def eval(model, val_dataloader, device, epoch, config): + + model.eval() + + log_text_template = '\n' + '-' * 25 + '\n[Val Epoch {}][Iter. 
{}/{}][Media-type {}]\n' + log_text_template += '[Losses] gen = {:.4f} | vhc = {:.4f} | vhm = {:.4f} | stc = {:.4f} | stm = {:.4f} \n' + + # log_text_template += '[Losses] vcc = {:.4f} | vcm = {:.4f} | stc = {:.4f} | stm = {:.4f} | mlm = {:.4f} \n' + # log_text_template += '[Losses] vhc = {:.4f} | vhm = {:.4f} | chc = {:.4f} | chm = {:.4f} | gen = {:.4f} \n' + + cum_loss_stc = 0 + cum_loss_stm = 0 + cum_loss_vhc = 0 + cum_loss_vhm = 0 + cum_loss_gen = 0 + cum_loss_tot = 0 + val_step = 0 + + # val_dataloader = MetaLoader(name2loader=val_dataloaders) + media_type = val_dataloader.dataset.medium + + if is_main_process(): + start_time = time() + + # for vis, cap_ids, hist_ids, ques_ids, label_ids, enc_dec_input_ids, idx, _ in val_dataloader: + for vis, caption, history, answer in val_dataloader: + # for vis, cap_ids, hist_ids, label_ids, enc_dec_input_ids, idx, _ in val_dataloader: + vis = vis.to(device) + # neg_vis = neg_vis.to(device) + # idx = idx.to(device) + + with torch.cuda.amp.autocast(enabled=config['fp16']): + with torch.no_grad(): + # loss_dict, _ = model(vis, cap_ids, hist_ids, ques_ids, label_ids, enc_dec_input_ids, media_type) + loss_dict = model(vis, caption, history, answer, media_type) + + loss = sum(loss_dict.values()) + loss_stc = loss_dict['loss_stc'] + loss_stm = loss_dict['loss_stm'] + loss_vhc = loss_dict['loss_vhc'] + loss_vhm = loss_dict['loss_vhm'] + loss_gen = loss_dict['loss_gen'] + + if config['distributed']: + dist.all_reduce(loss, op=ReduceOp.AVG) + if config.loss_dict['stc'] != 0: + dist.all_reduce(loss_stc, op=ReduceOp.AVG) + if config.loss_dict['stm'] != 0: + dist.all_reduce(loss_stm, op=ReduceOp.AVG) + if config.loss_dict['vhc'] != 0: + dist.all_reduce(loss_vhc, op=ReduceOp.AVG) + if config.loss_dict['vhm'] != 0: + dist.all_reduce(loss_vhm, op=ReduceOp.AVG) + if config.loss_dict['gen'] != 0: + dist.all_reduce(loss_gen, op=ReduceOp.AVG) + + if is_main_process(): + cum_loss_tot += loss.item() + cum_loss_stc += loss_stc.item() + cum_loss_stm += loss_stm.item() + cum_loss_vhc += loss_vhc.item() + cum_loss_vhm += loss_vhm.item() + cum_loss_gen += loss_gen.item() + + if val_step % config.log_freq == 0: + log_text = log_text_template.format( + epoch, val_step, len(val_dataloader), media_type, + loss_gen, loss_vhc, loss_vhm, loss_stc, loss_stm) + # log_text_template = '\n' + '-' * 25 + '\n[Val Eoch{}][Iter. {}/{}][Media-type {}]\n' + # log_text_template += '[Losses] vcc = {:.4f} | vcm = {:.4f} | stc = {:.4f} | stm = {:.4f} | mlm = {:.4f} \n' + # log_text_template += '[Losses] vhc = {:.4f} | vhm = {:.4f} | chc = {:.4f} | chm = {:.4f} | gen = {:.4f} \n' + # log_text = log_text_template.format( + # epoch, val_step, len(val_dataloader), media_type, + # loss_vcc, loss_vcm, loss_stc, loss_stm, 0, + # loss_vhc, loss_vhm, loss_chc, loss_chm, loss_gen + # ) + + logger.info(log_text) + # logger.info('[INFO] [Eval. Epoch {}][Iter. 
{}/{}][Losses] gen = {:.4f} | total = {:.4f}'.format( + # epoch, val_step, len(val_dataloader), gen_loss, loss + # )) + val_step += 1 + + if config['distributed']: + dist.barrier() + + if is_main_process(): + duration = time() - start_time + + cum_loss_tot /= len(val_dataloader) + cum_loss_stc /= len(val_dataloader) + cum_loss_stm /= len(val_dataloader) + cum_loss_vhc /= len(val_dataloader) + cum_loss_vhm /= len(val_dataloader) + cum_loss_gen /= len(val_dataloader) + + # cum_loss_vhc /= len(val_dataloader) + # cum_loss_vhm /= len(val_dataloader) + # cum_loss_chc /= len(val_dataloader) + # cum_loss_chm /= len(val_dataloader) + # cum_loss_gen /= len(val_dataloader) + logger.info('\n' + '-' * 25 + '\n' + 'Eval. took {}\n[Losses] cum_total = {:.4f}'.format( + datetime.timedelta(seconds=int(duration)), cum_loss_tot + )) + + # logger.info('\n' + '-' * 25 + '\n' + 'Eval. took {}\n[Losses] cum_gen = {:.4f} | cum_total = {:.4f}'.format( + # datetime.timedelta(seconds=int(duration)), cum_loss_gen, cum_loss_tot + # )) + + # switch back to training mode + model.train() + + loss_dict = { + 'stc': cum_loss_stc, + 'stm': cum_loss_stm, + 'vhc': cum_loss_vhc, + 'vhm': cum_loss_vhm, + # 'vhc': cum_loss_vhc, + # 'vhm': cum_loss_vhm, + # 'chc': cum_loss_chc, + # 'chm': cum_loss_chm, + 'gen': cum_loss_gen, + # 'gen': cum_loss_gen, + 'tot': cum_loss_tot + } + return loss_dict + + +def train( + model, + model_without_ddp, + train_dataloaders, + val_dataloaders, + optimizer, + global_step, + scheduler, + scaler, + start_epoch, + config +): + if is_main_process() and config['wandb_enabled']: + run = setup_wandb(config) + setup_seed(config['seed'] + get_rank()) + device = torch.device('cuda:{}'.format(config['gpu'])) + + if is_main_process() and config['wandb_enabled']: + wandb.watch(model) + + best = float('inf') + best_epoch = 0 + + logger.info('[INFO] Start training...') + start_time_all = time() + for epoch in range(start_epoch, config['epochs']): + if not config['evaluate']: + start_time_epoch = time() + global_step = run_epoch( + model, + train_dataloaders, + optimizer, + epoch, + global_step, + device, + scheduler, + scaler, + config + ) + end_time_epoch = time() + epoch_time = end_time_epoch - start_time_epoch + epoch_time_str = str(datetime.timedelta(seconds=int(epoch_time))) + logger.info(f'[INFO] Epoch took {epoch_time_str}') + + if not config['debugging']: + with torch.cuda.amp.autocast(enabled=config['fp16']): + val_res = {} + + for medium in val_dataloaders: + res = eval( + model, + val_dataloaders[medium], + device, + epoch, + config + ) + val_res[medium] = res + + + if is_main_process(): + # Average across all datasets + avg_val_res = average_dicts(val_res) + # log to wandb + if config.wandb_enabled: + for medium in val_res: + log_dict_val = {} + # log_dict_val[f'val/{medium}/step'] = epoch + for l in val_res[medium]: + log_dict_val[f'val/{medium}/{l}'] = val_res[medium][l] + wandb.log(log_dict_val) + # for p, v in eval_res.items(): + # log_dict_to_wandb(v, step=global_step, prefix=p) + if config.stop_key is not None and config.stop_key in avg_val_res: + cur_best = avg_val_res[config.stop_key] + else: # stop_key = None + cur_best = best - 1 # save the last as the best + + # Don't save vit and llm weights as they are frozen + state_dict = model_without_ddp.state_dict() + if config.freeze_vit: + state_dict = {k:v for k,v in state_dict.items() if 'visual_encoder' not in k} + + if config.freeze_llm: + state_dict = {k:v for k,v in state_dict.items() if 'llm' not in k} + + save_obj = { + "model": 
state_dict, + "optimizer": optimizer.state_dict(), + "scheduler": scheduler.state_dict(), + "scaler": scaler.state_dict(), + "config": config, + "epoch": epoch, + "global_step": global_step, + } + torch.save(save_obj, os.path.join(config.log_dir, f"ckpt_{epoch:02d}.pth")) + + if not config.evaluate and cur_best < best: + torch.save(save_obj, os.path.join(config.log_dir, "ckpt_best.pth")) + # eval_file = "eval_res_best.json" + # eval_res.to_json(os.path.join(config.log_dir, eval_file)) + best = cur_best + + if config.evaluate: + break + if config['distributed']: + dist.barrier() + + total_time = time() - start_time_all + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + logger.info(f'[INFO] Training took {total_time_str}') + + if is_main_process() and config['wandb_enabled']: + run.finish() + diff --git a/tasks/stage_3.py b/tasks/stage_3.py new file mode 100644 index 0000000..10b6ee9 --- /dev/null +++ b/tasks/stage_3.py @@ -0,0 +1,1051 @@ +import os +import datetime +import wandb +import torch +import json +import numpy as np +from copy import deepcopy +from time import time +import torch.nn.functional as F + +import torch.distributed as dist +from torch.distributed import ReduceOp +from torch.nn.utils.clip_grad import clip_grad_value_ +from utils.basic import MetricLogger, SmoothedValue, setup_seed, average_dicts +from datasets.utils import get_datasets_media +from datasets.dataloader import MetaLoader +from utils.dist import is_main_process, get_rank, get_world_size +from utils.logger import setup_wandb, log_dict_to_wandb +from .retrieval_utils import evaluation_wrapper +import glog as logger + + +def run_epoch( + model, + train_dataloaders, + # expert_tokenizer, + # enc_dec_tokenizer, + optimizer, + epoch, + global_step, + visdial_step, + avsd_step, + nextqa_step, + device, + scheduler, + scaler, + config +): + model.train() + media_types = list(train_dataloaders.keys()) + + log_freq = config['log_freq'] + # metric_logger = MetricLogger(delimiter=' ') + # metric_logger.add_meter('lr', SmoothedValue(window=log_freq, fmt='{value:.6f}')) + # metric_logger.add_meter("temperature", SmoothedValue(window=log_freq, fmt="{value:.4f}")) + + loss_names = ['loss_' + k for k in config['loss_dict'].keys()] + # for l in loss_names: + # for m in media_types: + # metric_logger.add_meter( + # f'{m}/{l}', SmoothedValue(window=log_freq, fmt="{value:.4f}") + # ) + + # header = '{} | Epoch = {}'.format(config['stage'], epoch) + + model_without_ddp = model + if config['distributed']: + model_without_ddp = model.module + for k in train_dataloaders: + train_dataloaders[k].sampler.set_epoch(epoch) + + # if len(train_dataloaders) == 1: + # train_dataloader = list(train_dataloaders.values())[0] + # else: + train_dataloader = MetaLoader(name2loader=train_dataloaders) + + log_text_template = '\n' + '-' * 25 + '\n[Epoch {}/{}][Iter. 
{}/{}][Media-type {}]\n' + log_text_template += '[Loss] tot = {:.4f} | gen = {:.4f} \n' + log_text_template += '[Other] lr = {:.6f} | iter_time = {:.2f} | eta = {}\n' + + # iterator = metric_logger.log_every(train_dataloader, log_freq, header) + local_step = 0 + # vis, cap, hist, ques, ans, enc_dec_input, index, vid_id_list + for media_type, batch in train_dataloader: + vis, caption, history, answer = batch[0], batch[1], batch[2], batch[3] + + start = time() + vis = vis.to(device) + + with torch.cuda.amp.autocast(enabled=config.fp16): + loss_dict = model(vis, caption, history, answer, media_type) + loss = sum(loss_dict.values()) + loss = loss / config['accum_grad_every'] + + scaler.scale(loss).backward() + + # Perform gradient clipping: unscale --> clip + if config['clip_grad_value'] > 0: + # scaler.unscale_(optimizer) + # NOTE: unscale_ is skipped (it may only be called once per optimizer step when accumulating), so clipping acts on scaled gradients + clip_grad_value_(model.parameters(), config['clip_grad_value']) + + if local_step % config.accum_grad_every == 0: + scaler.step(optimizer) + scaler.update() + scheduler.step() + optimizer.zero_grad() + + time_iter = time() - start + eta = (len(train_dataloader) - local_step - 1) * time_iter + eta = str(datetime.timedelta(seconds=eta)) + # log + log_dict_visdial = {} + log_dict_avsd = {} + log_dict_nextqa = {} + + log_dict_rest = {} + + for loss_name in loss_names: + value = loss_dict[loss_name] + value = value if isinstance(value, float) else value.item() + # metric_logger.update(**{f"{media_type}/{loss_name}": value}) + if media_type == 'visdial': + log_dict_visdial[f"train/{media_type}/{loss_name}"] = value + elif media_type == 'avsd': + log_dict_avsd[f"train/{media_type}/{loss_name}"] = value + elif media_type == 'nextqa': + log_dict_nextqa[f"train/{media_type}/{loss_name}"] = value + + log_dict_rest['train/other/lr'] = optimizer.param_groups[0]["lr"] + + if is_main_process() and local_step % log_freq == 0 and local_step % config['accum_grad_every'] == 0: + log_dict_rest['train/other/step'] = global_step + + if media_type == 'visdial': + log_dict_visdial['train/visdial/step'] = visdial_step + log_dict = log_dict_visdial + + elif media_type == 'avsd': + log_dict_avsd['train/avsd/step'] = avsd_step + log_dict = log_dict_avsd + + elif media_type == 'nextqa': + log_dict_nextqa['train/nextqa/step'] = nextqa_step + log_dict = log_dict_nextqa + + log_text = log_text_template.format( + epoch, config.epochs-1, local_step, len(train_dataloader) , media_type, loss.item(), + log_dict[f'train/{media_type}/loss_gen'], log_dict_rest['train/other/lr'], + time_iter, eta + ) + logger.info(log_text) + + if config['wandb_enabled']: + wandb.log(log_dict_rest) + wandb.log(log_dict) + + if media_type == 'visdial': + visdial_step += 1 + elif media_type == 'avsd': + avsd_step += 1 + elif media_type == 'nextqa': + nextqa_step += 1 + + + local_step += 1 + global_step += 1 + + return global_step, visdial_step, avsd_step, nextqa_step + + + # if is_main_process() and local_step % config['log_model_outputs_every'] == 0 and config['log_model_outputs']: + # predictions = [] + # labels = [] + # probs = F.softmax(logits, dim=-1) + # preds = torch.topk(probs, 1)[1].squeeze(-1) + # preds = preds.tolist() + # lm_labels_list = label_ids['input_ids'].tolist() + # lm_labels_list = [[s for s in label if s != 1] for label in lm_labels_list] + # # reponses = '' + # # labels = '' + # model_pred_text = '' + # for pred, label in zip(preds, lm_labels_list): + # predictions.append('\n' + 'Pred: ' + tokenizer_enc_dec.decode(pred) + '\n') + # labels.append('\n' + 'GT: ' + tokenizer_enc_dec.decode(label) + '\n') + + # if 
len(predictions) < 4: + # predictions = predictions[:4] + # labels = labels[:4] + + + # for label, pred in zip(labels, predictions): + # model_pred_text += label + pred + # model_pred_text += "---------------------" + # logger.info(model_pred_text) + + # # output['reponses'] = reponses + # # output['gt'] = labels + + + + +def eval(model, val_dataloader, device, epoch, config): + + model.eval() + + log_text_template = '\n' + '-' * 25 + '\n[Val Epoch {}][Iter. {}/{}][Media-type {}]\n' + # log_text_template += '[Losses] vcc = {:.4f} | vcm = {:.4f} | stc = {:.4f} | stm = {:.4f} | mlm = {:.4f} \n' + # log_text_template += '[Losses] vhc = {:.4f} | vhm = {:.4f} | chc = {:.4f} | chm = {:.4f} | gen = {:.4f} \n' + + log_text_template += '[Losses] gen = {:.4f} \n' + + # cum_loss_stc = 0 + # cum_loss_stm = 0 + # cum_loss_vcc = 0 + # cum_loss_vcm = 0 + # cum_loss_vhc = 0 + # cum_loss_vhm = 0 + # cum_loss_chc = 0 + # cum_loss_chm = 0 + # cum_loss_mlm = 0 + cum_loss_gen = 0 + cum_loss_tot = 0 + val_step = 0 + media_type = val_dataloader.dataset.medium + if is_main_process(): + start_time = time() + + # for vis, cap_ids, hist_ids, ques_ids, label_ids, enc_dec_input_ids, idx, _ in val_dataloader: + for batch in val_dataloader: + + vis, caption, history, answer = batch[0], batch[1], batch[2], batch[3] + + vis = vis.to(device) + + with torch.cuda.amp.autocast(enabled=config.fp16): + with torch.no_grad(): + # loss_dict, _ = model(vis, cap_ids, hist_ids, ques_ids, label_ids, enc_dec_input_ids, media_type) + # loss_dict, _ = model(vis, cap_ids, hist_ids, label_ids, enc_dec_input_ids, media_type) + loss_dict = model(vis, caption, history, answer, media_type) + + # loss_dict = model(vis, cap_ids, hist_ids, ques_ids, label_ids, media_type) + loss = sum(loss_dict.values()) + # loss_stc = loss_dict['loss_stc'] + # loss_stm = loss_dict['loss_stm'] + # loss_vcc = loss_dict['loss_vcc'] + # loss_vcm = loss_dict['loss_vcm'] + # loss_vhc = loss_dict['loss_vhc'] + # loss_vhm = loss_dict['loss_vhm'] + # loss_chc = loss_dict['loss_chc'] + # loss_chm = loss_dict['loss_chm'] + # loss_mlm = loss_dict['loss_mlm'] + loss_gen = loss_dict['loss_gen'] + + if config['distributed']: + dist.all_reduce(loss, op=ReduceOp.AVG) + # if config.loss_dict['stc'] != 0: + # dist.all_reduce(loss_stc, op=ReduceOp.AVG) + # if config.loss_dict['stm'] != 0: + # dist.all_reduce(loss_stm, op=ReduceOp.AVG) + # if config.loss_dict['vcc'] != 0: + # dist.all_reduce(loss_vcc, op=ReduceOp.AVG) + # if config.loss_dict['vcm'] != 0: + # dist.all_reduce(loss_vcm, op=ReduceOp.AVG) + # if config.loss_dict['vhc'] != 0: + # dist.all_reduce(loss_vhc, op=ReduceOp.AVG) + # if config.loss_dict['vhm'] != 0: + # dist.all_reduce(loss_vhm, op=ReduceOp.AVG) + # if config.loss_dict['chc'] != 0: + # dist.all_reduce(loss_chc, op=ReduceOp.AVG) + # if config.loss_dict['chm'] != 0: + # dist.all_reduce(loss_chm, op=ReduceOp.AVG) + # if config.loss_dict['mlm'] != 0: + # dist.all_reduce(loss_mlm, op=ReduceOp.AVG) + if config.loss_dict['gen'] != 0: + dist.all_reduce(loss_gen, op=ReduceOp.AVG) + + if is_main_process(): + cum_loss_tot += loss.item() + # cum_loss_stc += loss_stc.item() + # cum_loss_stm += loss_stm.item() + # cum_loss_vcc += loss_vcc.item() + # cum_loss_vcm += loss_vcm.item() + # cum_loss_vhc += loss_vhc.item() + # cum_loss_vhm += loss_vhm.item() + # cum_loss_chc += loss_chc.item() + # cum_loss_chm += loss_chm.item() + # cum_loss_mlm += loss_mlm.item() + cum_loss_gen += loss_gen.item() + + if val_step % config.log_freq == 0: + # log_text_template = '\n' + '-' * 25 + 
'\n[Val Eoch{}][Iter. {}/{}][Media-type {}]\n' + # log_text_template += '[Losses] vcc = {:.4f} | vcm = {:.4f} | stc = {:.4f} | stm = {:.4f} | mlm = {:.4f} \n' + # log_text_template += '[Losses] vhc = {:.4f} | vhm = {:.4f} | chc = {:.4f} | chm = {:.4f} | gen = {:.4f} \n' + log_text = log_text_template.format( + epoch, val_step, len(val_dataloader), media_type, + # loss_vcc, loss_vcm, loss_stc, loss_stm, 0, + # loss_vhc, loss_vhm, loss_chc, loss_chm, + loss_gen + ) + + logger.info(log_text) + # logger.info('[INFO] [Eval. Epoch {}][Iter. {}/{}][Losses] gen = {:.4f} | total = {:.4f}'.format( + # epoch, val_step, len(val_dataloader), gen_loss, loss + # )) + val_step += 1 + + if config['distributed']: + dist.barrier() + + if is_main_process(): + duration = time() - start_time + + cum_loss_tot /= len(val_dataloader) + # cum_loss_stc /= len(val_dataloader) + # cum_loss_stm /= len(val_dataloader) + # cum_loss_vcc /= len(val_dataloader) + # cum_loss_vcm /= len(val_dataloader) + # cum_loss_vhc /= len(val_dataloader) + # cum_loss_vhm /= len(val_dataloader) + # cum_loss_chc /= len(val_dataloader) + # cum_loss_chm /= len(val_dataloader) + # cum_loss_mlm /= len(val_dataloader) + cum_loss_gen /= len(val_dataloader) + + logger.info('\n' + '-' * 25 + '\n' + 'Eval. took {}\n[Losses] cum_gen = {:.4f} | cum_total = {:.4f}'.format( + datetime.timedelta(seconds=int(duration)), cum_loss_gen, cum_loss_tot + )) + loss_dict = { + # 'stc': cum_loss_stc, + # 'stm': cum_loss_stm, + # 'vcc': cum_loss_vcc, + # 'vcm': cum_loss_vcm, + # 'vhc': cum_loss_vhc, + # 'vhm': cum_loss_vhm, + # 'chc': cum_loss_chc, + # 'chm': cum_loss_chm, + # 'mlm': cum_loss_mlm, + 'gen': cum_loss_gen, + 'tot': cum_loss_tot + } + return loss_dict + + +def ft_avsd( + model, + model_without_ddp, + train_dataloaders, + val_dataloaders, + optimizer, + global_step, + visdial_step, + avsd_step, + nextqa_step, + scheduler, + scaler, + start_epoch, + config +): + if is_main_process() and config['wandb_enabled']: + run = setup_wandb(config) + setup_seed(config['seed'] + get_rank()) + # device = torch.device('cuda:{}'.format(config['gpu'])) + device = config.device + # expert_tokenizer = model_without_ddp.expert_tokenizer + # enc_dec_tokenizer = model_without_ddp.enc_dec_tokenizer + + if is_main_process() and config['wandb_enabled']: + wandb.watch(model) + + best = float('inf') + + logger.info('[INFO] Start training...') + start_time_all = time() + for epoch in range(start_epoch, config['epochs']): + if not config['evaluate']: + if is_main_process(): + start_time_epoch = time() + + global_step, visdial_step, avsd_step, nextqa_step = run_epoch( + model, + train_dataloaders, + # expert_tokenizer, + # enc_dec_tokenizer, + optimizer, + epoch, + global_step, + visdial_step, + avsd_step, + nextqa_step, + device, + scheduler, + scaler, + config + ) + if is_main_process(): + end_time_epoch = time() + epoch_time = end_time_epoch - start_time_epoch + epoch_time_str = str(datetime.timedelta(seconds=int(epoch_time))) + logger.info(f'[INFO] Epoch took {epoch_time_str}') + + if not config['debugging']: + with torch.cuda.amp.autocast(enabled=config['fp16']): + val_res = {} + + for medium in val_dataloaders: + res = eval( + model, + val_dataloaders[medium], + # expert_tokenizer, + # enc_dec_tokenizer, + device, + epoch, + config + ) + val_res[medium] = res + + if is_main_process(): + # Average across all datasets + avg_val_res = average_dicts(val_res) + # log to wandb + if config.wandb_enabled: + for medium in val_res: + log_dict_val = {} + # 
log_dict_val[f'val/{medium}/step'] = epoch + for l in val_res[medium]: + log_dict_val[f'val/{medium}/{l}'] = val_res[medium][l] + wandb.log(log_dict_val) + # for p, v in eval_res.items(): + # log_dict_to_wandb(v, step=global_step, prefix=p) + if config.stop_key is not None and config.stop_key in avg_val_res: + cur_best = avg_val_res[config.stop_key] + else: # stop_key = None + cur_best = best - 1 # save the last as the best + + # Don't save vit and llm weights as they are frozen + state_dict = model_without_ddp.state_dict() + if config.freeze_vit: + state_dict = {k:v for k,v in state_dict.items() if 'visual_encoder' not in k} + + if config.freeze_llm: + state_dict = {k:v for k,v in state_dict.items() if 'llm' not in k} + + save_obj = { + "model": state_dict, + "optimizer": optimizer.state_dict(), + "scheduler": scheduler.state_dict(), + "scaler": scaler.state_dict(), + "config": config, + "epoch": epoch, + "global_step": global_step, + "visdial_step": visdial_step, + "avsd_step": avsd_step, + "nextqa_step": nextqa_step + } + torch.save(save_obj, os.path.join(config.log_dir, f"ckpt_{epoch:02d}.pth")) + + if not config.evaluate and cur_best < best: + torch.save(save_obj, os.path.join(config.log_dir, "ckpt_best.pth")) + # eval_file = "eval_res_best.json" + # eval_res.to_json(os.path.join(config.log_dir, eval_file)) + best = cur_best + + if config.evaluate: + break + if config['distributed']: + dist.barrier() + + total_time = time() - start_time_all + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + logger.info(f'[INFO] Training took {total_time_str}') + + if is_main_process() and config['wandb_enabled']: + run.finish() + + +def generate(model, dataloader, tag, config, gen_subset_num=None): + + model.eval() + responses = {} + # tokenizer_enc_dec = dataloader.dataset.tokenizer_enc_dec + device = next(model.parameters()).device # Assumes all model parameters are on the same device + # Generate the response for each round + logger.info('[INFO] Generating responses for {} samples'.format(len(dataloader))) + with torch.no_grad(): + # for counter, (vis, cap_ids, hist_ids, ques_ids, _, enc_dec_input_ids, _, vid_id) in enumerate(dataloader): + for counter, (vis, cap, hist, ans, vis_ids) in enumerate(dataloader): + + start_time = time() + vis = vis.to(device, non_blocking=True) + is_vid = config.media_test in ['webvid', 'champagne', 'avsd', 'nextqa'] + + # First get the visual features depending on the media type + with torch.cuda.amp.autocast(enabled=config.fp16): + cap_ids, cap_mask = model.tokenize_text(cap, device, max_len=None) + hist_ids, hist_mask = model.tokenize_text(hist, device, max_len=None) + + if config.use_moes: + if config.use_sep_spatial_temp_experts: + vis_embed_spatial, vis_spatial_mask, vis_embed_temporal, vis_temporal_mask = model.encode_vis(vis, device, is_vid=is_vid) + else: + vis_embed, vis_mask = model.encode_vis_with_seq_spa_temp_att(vis, device, is_vid=is_vid) + + # construct the global input tensor --> use place holder for vis features + + if config.use_sep_spatial_temp_experts: + moe_outputs = model.moe_forward( + vis_embed_spatial, vis_spatial_mask, + vis_embed_temporal, vis_temporal_mask, + cap_ids, cap_mask, + hist_ids, hist_mask, + is_vid, device + ) + spatial_embeds = model.moe_to_llm(moe_outputs['spatial_embeds']) + temporal_embeds = model.moe_to_llm(moe_outputs['temporal_embeds']) if is_vid else None + + else: + moe_outputs = model.moe_forward_no_sep_spatial_temporal( + vis_embed, vis_mask, + cap_ids, cap_mask, + hist_ids, hist_mask, + is_vid, 
device + ) + vis_embeds = model.moe_to_llm(moe_outputs['vis_embeds']) + + cap_embeds = model.moe_to_llm(moe_outputs['cap_embeds']) + hist_embeds = model.moe_to_llm(moe_outputs['hist_embeds']) + else: + cap_embeds = model.llm_to_moe(model.text_embedding(cap_ids)) + hist_embeds = model.llm_to_moe(model.text_embedding(hist_ids)) + vis_embeds, vis_mask = model.encode_vis_with_seq_spa_temp_att(vis, device, is_vid=is_vid) + + + if config.llm_family in ['llama', 'mistral']: + bos = torch.ones_like(cap_ids[:, :1]) * model.tokenizer.bos_token_id + bos_embeds = model.text_embedding(bos) + bos_mask = cap_mask[:, :1] + + inputs_embeds, attention_mask = model.pad_to_right_dec_only_gen_mode(cap_embeds, cap_mask, hist_embeds, hist_mask, device) + if is_vid: + inputs_embeds = torch.cat([bos_embeds, spatial_embeds, temporal_embeds, inputs_embeds], dim=1) + attention_mask = torch.cat([bos_mask, vis_spatial_mask, vis_temporal_mask, attention_mask], dim=1) + else: + inputs_embeds = torch.cat([bos_embeds, spatial_embeds, inputs_embeds], dim=1) + attention_mask = torch.cat([bos_mask, vis_spatial_mask, attention_mask], dim=1) + + else: + inputs_embeds, attention_mask = model.pad_to_right_enc_dec(cap_embeds, cap_mask, hist_embeds, hist_mask, device) + if config.use_moes: + if not config.drop_vis_features: + if config.use_sep_spatial_temp_experts: + if is_vid: + inputs_embeds = torch.cat([spatial_embeds, temporal_embeds, inputs_embeds], dim=1) + attention_mask = torch.cat([vis_spatial_mask, vis_temporal_mask, attention_mask], dim=1) + else: + inputs_embeds = torch.cat([spatial_embeds, inputs_embeds], dim=1) + attention_mask = torch.cat([vis_spatial_mask, attention_mask], dim=1) + else: + inputs_embeds = torch.cat([vis_embeds, inputs_embeds], dim=1) + attention_mask = torch.cat([vis_mask, attention_mask], dim=1) + else: + inputs_embeds = torch.cat([vis_embeds, inputs_embeds], dim=1) + attention_mask = torch.cat([vis_mask, attention_mask], dim=1) + + decoded_ids = model.llm.generate( + inputs_embeds=inputs_embeds, + do_sample=False, + top_p=config.top_p, + temperature=config.temperature, + num_beams=config.beam_depth, + length_penalty=config.length_penalty, + max_length=config.max_generation_length, + pad_token_id=model.tokenizer.pad_token_id, + eos_token_id=model.tokenizer.eos_token_id, + # use_cache=True + ) + + response_batch = [model.tokenizer.decode(decoded_id, skip_special_tokens=True) for decoded_id in decoded_ids] + + for vis_id, response in zip(vis_ids, response_batch): + responses[vis_id] = response + + time_elapsed = int(time() - start_time) + print('Generating response {} / {} -- took {}s'.format(counter + 1, len(dataloader), time_elapsed)) + + # Create a file with all responses + with open(config['anno_avsd_test_dstc_{}'.format(config['dstc'])], 'r') as f: + test_data = json.load(f) + test_dialogs = deepcopy(test_data['dialogs']) + # Filter the predicted dialogs + test_dialogs = list(filter(lambda diag: diag['image_id'] in responses, test_dialogs)) + + for i, dialog in enumerate(test_dialogs): + vid_id = dialog['image_id'] + gen_response = responses[vid_id] + round_num_to_answer = len(dialog['dialog'])-1 + assert dialog['dialog'][round_num_to_answer]['answer'] == '__UNDISCLOSED__' + dialog['dialog'][round_num_to_answer]['answer'] = gen_response + test_dialogs[i] = dialog + + # Log the file + file_name = '{}_results_dstc{}_beam_depth_{}_lenPen_{}'.format(config['llm_name'].replace('/', '-'), config['dstc'], config['beam_depth'], config['length_penalty']) + if gen_subset_num is not None: + file_name += 
f'-part_{gen_subset_num}' + file_name = f'{tag}_' + file_name + output_path = os.path.join(config['output_dir_avsd_{}'.format(config['dstc'])], file_name + '.json') + with open(output_path, 'w') as f: + json.dump({'dialogs': test_dialogs}, f, indent=4) + logger.info('Results logged to {}'.format(output_path)) + # Switch back to training mode + model.train() + + +def generate_visdial(model, dataloader, tag, config, gen_subset_num=None): + + model.eval() + responses = {} + # tokenizer_enc_dec = dataloader.dataset.tokenizer_enc_dec + device = next(model.parameters()).device # Assumes all model parameters are on the same device + # Generate the response for each round + logger.info('[INFO] Generating responses for {} samples'.format(len(dataloader))) + with torch.no_grad(): + # for counter, (vis, cap_ids, hist_ids, ques_ids, _, enc_dec_input_ids, _, vid_id) in enumerate(dataloader): + for counter, (vis, cap, hist, ans, vis_ids, d_rounds) in enumerate(dataloader): + + start_time = time() + vis = vis.to(device, non_blocking=True) + is_vid = config.media_test in ['webvid', 'champagne', 'avsd', 'nextqa'] + + # First get the visual features depending on the media type + with torch.cuda.amp.autocast(enabled=config.fp16): + # construct the global input tensor --> use place holder for vis features + cap_ids, cap_mask = model.tokenize_text(cap, device, max_len=None) + hist_ids, hist_mask = model.tokenize_text(hist, device, max_len=None) + + if config.use_moes: + if config.use_sep_spatial_temp_experts: + vis_embed_spatial, vis_spatial_mask, vis_embed_temporal, vis_temporal_mask = model.encode_vis(vis, device, is_vid=is_vid) + else: + vis_embed, vis_mask = model.encode_vis_with_seq_spa_temp_att(vis, device, is_vid=is_vid) + + + if config.use_sep_spatial_temp_experts: + moe_outputs = model.moe_forward( + vis_embed_spatial, vis_spatial_mask, + vis_embed_temporal, vis_temporal_mask, + cap_ids, cap_mask, + hist_ids, hist_mask, + is_vid, device + ) + spatial_embeds = model.moe_to_llm(moe_outputs['spatial_embeds']) + temporal_embeds = model.moe_to_llm(moe_outputs['temporal_embeds']) if is_vid else None + else: + moe_outputs = model.moe_forward_no_sep_spatial_temporal( + vis_embed, vis_mask, + cap_ids, cap_mask, + hist_ids, hist_mask, + is_vid, device + ) + vis_embeds = model.moe_to_llm(moe_outputs['vis_embeds']) + + cap_embeds = model.moe_to_llm(moe_outputs['cap_embeds']) + hist_embeds = model.moe_to_llm(moe_outputs['hist_embeds']) + else: + cap_embeds = model.llm_to_moe(model.text_embedding(cap_ids)) + hist_embeds = model.llm_to_moe(model.text_embedding(hist_ids)) + vis_embeds, vis_mask = model.encode_vis_with_seq_spa_temp_att(vis, device, is_vid=is_vid) + + if config.llm_family in ['llama', 'mistral']: + bos = torch.ones_like(cap_ids[:, :1]) * model.tokenizer.bos_token_id + bos_embeds = model.text_embedding(bos) + bos_mask = cap_mask[:, :1] + + inputs_embeds, attention_mask = model.pad_to_right_dec_only_gen_mode(cap_embeds, cap_mask, hist_embeds, hist_mask, device) + if is_vid: + inputs_embeds = torch.cat([bos_embeds, spatial_embeds, temporal_embeds, inputs_embeds], dim=1) + attention_mask = torch.cat([bos_mask, vis_spatial_mask, vis_temporal_mask, attention_mask], dim=1) + else: + inputs_embeds = torch.cat([bos_embeds, spatial_embeds, inputs_embeds], dim=1) + attention_mask = torch.cat([bos_mask, vis_spatial_mask, attention_mask], dim=1) + + else: + inputs_embeds, attention_mask = model.pad_to_right_enc_dec(cap_embeds, cap_mask, hist_embeds, hist_mask, device) + if config.use_moes: + if not 
+                if config.llm_family in ['llama', 'mistral']:
+                    bos = torch.ones_like(cap_ids[:, :1]) * model.tokenizer.bos_token_id
+                    bos_embeds = model.text_embedding(bos)
+                    bos_mask = cap_mask[:, :1]
+
+                    inputs_embeds, attention_mask = model.pad_to_right_dec_only_gen_mode(cap_embeds, cap_mask, hist_embeds, hist_mask, device)
+                    if is_vid:
+                        inputs_embeds = torch.cat([bos_embeds, spatial_embeds, temporal_embeds, inputs_embeds], dim=1)
+                        attention_mask = torch.cat([bos_mask, vis_spatial_mask, vis_temporal_mask, attention_mask], dim=1)
+                    else:
+                        inputs_embeds = torch.cat([bos_embeds, spatial_embeds, inputs_embeds], dim=1)
+                        attention_mask = torch.cat([bos_mask, vis_spatial_mask, attention_mask], dim=1)
+
+                else:
+                    inputs_embeds, attention_mask = model.pad_to_right_enc_dec(cap_embeds, cap_mask, hist_embeds, hist_mask, device)
+                    if config.use_moes:
+                        if not config.drop_vis_features:
+                            if config.use_sep_spatial_temp_experts:
+                                if is_vid:
+                                    inputs_embeds = torch.cat([spatial_embeds, temporal_embeds, inputs_embeds], dim=1)
+                                    attention_mask = torch.cat([vis_spatial_mask, vis_temporal_mask, attention_mask], dim=1)
+                                else:
+                                    inputs_embeds = torch.cat([spatial_embeds, inputs_embeds], dim=1)
+                                    attention_mask = torch.cat([vis_spatial_mask, attention_mask], dim=1)
+                            else:
+                                inputs_embeds = torch.cat([vis_embeds, inputs_embeds], dim=1)
+                                attention_mask = torch.cat([vis_mask, attention_mask], dim=1)
+                    else:
+                        inputs_embeds = torch.cat([vis_embeds, inputs_embeds], dim=1)
+                        attention_mask = torch.cat([vis_mask, attention_mask], dim=1)
+
+                decoded_ids = model.llm.generate(
+                    inputs_embeds=inputs_embeds,
+                    do_sample=False,
+                    top_p=config.top_p,
+                    temperature=config.temperature,
+                    num_beams=config.beam_depth,
+                    length_penalty=config.length_penalty,
+                    max_length=config.max_generation_length,
+                    pad_token_id=model.tokenizer.pad_token_id,
+                    eos_token_id=model.tokenizer.eos_token_id,
+                    # use_cache=True
+                )
+
+            response_batch = [model.tokenizer.decode(decoded_id, skip_special_tokens=True) for decoded_id in decoded_ids]
+
+            for vis_id, d_round, response in zip(vis_ids.tolist(), d_rounds.tolist(), response_batch):
+                responses[str(vis_id) + '_' + str(d_round)] = response
+
+            time_elapsed = time() - start_time
+            print('Generating response {} / {} -- eta = {}'.format(
+                counter + 1, len(dataloader),
+                str(datetime.timedelta(seconds=time_elapsed * (len(dataloader) - counter)))
+            ))
+
+    # # Create a file with all responses
+    # with open(config['anno_avsd_test_dstc_{}'.format(config['dstc'])], 'r') as f:
+    #     test_data = json.load(f)
+    # test_dialogs = deepcopy(test_data['dialogs'])
+    # # Filter the predicted dialogs
+    # test_dialogs = list(filter(lambda diag: diag['image_id'] in responses, test_dialogs))
+
+    # for i, dialog in enumerate(test_dialogs):
+    #     vid_id = dialog['image_id']
+    #     gen_response = responses[vid_id]
+    #     round_num_to_answer = len(dialog['dialog'])-1
+    #     assert dialog['dialog'][round_num_to_answer]['answer'] == '__UNDISCLOSED__'
+    #     dialog['dialog'][round_num_to_answer]['answer'] = gen_response
+    #     test_dialogs[i] = dialog
+
+    # Log the file
+    file_name = '{}_results_dstc{}_beam_depth_{}_lenPen_{}'.format(config['llm_name'].replace('/', '-'), config['dstc'], config['beam_depth'], config['length_penalty'])
+    if gen_subset_num is not None:
+        file_name += f'-part_{gen_subset_num}'
+    file_name = f'{tag}_' + file_name
+    output_path = os.path.join(config['output_dir_visdial'], file_name + '.json')
+    with open(output_path, 'w') as f:
+        json.dump(responses, f, indent=4)
+    logger.info('Results logged to {}'.format(output_path))
+    # Switch back to training mode
+    model.train()
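+
+# The dumped VisDial predictions form a flat mapping '<image_id>_<round>' ->
+# answer string, e.g. (illustrative) {"185565_4": "two people"}, one entry
+# per generated round.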
+
+def generate_nextqa(model, dataloader, tag, config, gen_subset_num=None):
+
+    model.eval()
+    responses = {}
+    # tokenizer_enc_dec = dataloader.dataset.tokenizer_enc_dec
+    device = next(model.parameters()).device  # Assumes all model parameters are on the same device
+    # Generate the response for each round
+    logger.info('[INFO] Generating responses for {} samples'.format(len(dataloader)))
+    with torch.no_grad():
+        # for counter, (vis, cap_ids, hist_ids, ques_ids, _, enc_dec_input_ids, _, vid_id) in enumerate(dataloader):
+        for counter, (vis, cap, hist, _, vid_ids, qid) in enumerate(dataloader):
+
+            start_time = time()
+            vis = vis.to(device, non_blocking=True)
+            is_vid = config.media_test in ['webvid', 'champagne', 'avsd', 'nextqa']
+
+            vid_id = vid_ids[0]
+            qid = qid[0]
+            if vid_id not in responses:
+                responses[vid_id] = {}
+
+            # First get the visual features depending on the media type
+            with torch.cuda.amp.autocast(enabled=config.fp16):
+                vis_embed_spatial, vis_spatial_mask, vis_embed_temporal, vis_temporal_mask = model.encode_vis(vis, device, is_vid=is_vid)
+
+                # construct the global input tensor --> use placeholder for vis features
+                cap_ids, cap_mask = model.tokenize_text(cap, device, max_len=None)
+                hist_ids, hist_mask = model.tokenize_text(hist, device, max_len=None)
+
+                moe_outputs = model.moe_forward(
+                    vis_embed_spatial, vis_spatial_mask,
+                    vis_embed_temporal, vis_temporal_mask,
+                    cap_ids, cap_mask,
+                    hist_ids, hist_mask,
+                    is_vid, device
+                )
+                spatial_embeds = model.moe_to_llm(moe_outputs['spatial_embeds'])
+                temporal_embeds = model.moe_to_llm(moe_outputs['temporal_embeds']) if is_vid else None
+                cap_embeds = model.moe_to_llm(moe_outputs['cap_embeds'])
+                hist_embeds = model.moe_to_llm(moe_outputs['hist_embeds'])
+
+                if config.llm_family in ['llama', 'mistral']:
+                    bos = torch.ones_like(cap_ids[:, :1]) * model.tokenizer.bos_token_id
+                    bos_embeds = model.text_embedding(bos)
+                    bos_mask = cap_mask[:, :1]
+
+                    inputs_embeds, attention_mask = model.pad_to_right_dec_only_gen_mode(cap_embeds, cap_mask, hist_embeds, hist_mask, device)
+                    if is_vid:
+                        inputs_embeds = torch.cat([bos_embeds, spatial_embeds, temporal_embeds, inputs_embeds], dim=1)
+                        attention_mask = torch.cat([bos_mask, vis_spatial_mask, vis_temporal_mask, attention_mask], dim=1)
+                    else:
+                        inputs_embeds = torch.cat([bos_embeds, spatial_embeds, inputs_embeds], dim=1)
+                        attention_mask = torch.cat([bos_mask, vis_spatial_mask, attention_mask], dim=1)
+
+                else:
+                    inputs_embeds, attention_mask = model.pad_to_right_enc_dec(cap_embeds, cap_mask, hist_embeds, hist_mask, device)
+
+                    if is_vid:
+                        inputs_embeds = torch.cat([spatial_embeds, temporal_embeds, inputs_embeds], dim=1)
+                        attention_mask = torch.cat([vis_spatial_mask, vis_temporal_mask, attention_mask], dim=1)
+                    else:
+                        inputs_embeds = torch.cat([spatial_embeds, inputs_embeds], dim=1)
+                        attention_mask = torch.cat([vis_spatial_mask, attention_mask], dim=1)
+
+                decoded_ids = model.llm.generate(
+                    inputs_embeds=inputs_embeds,
+                    do_sample=False,
+                    top_p=config.top_p,
+                    temperature=config.temperature,
+                    num_beams=config.beam_depth,
+                    length_penalty=config.length_penalty,
+                    max_length=config.max_generation_length,
+                    pad_token_id=model.tokenizer.pad_token_id,
+                    eos_token_id=model.tokenizer.eos_token_id,
+                    # use_cache=True
+                )
+
+            response = model.tokenizer.decode(decoded_ids[0], skip_special_tokens=True)
+            responses[vid_id][qid] = response
+
+            # for vis_id, response in zip(vis_ids, response_batch):
+            #     responses[vis_id] = response
+
+            time_elapsed = int(time() - start_time)
+            print('Generating response {} / {} -- took {}s'.format(counter + 1, len(dataloader), time_elapsed))
+
+    # Create a file with all responses
+    file_name = 'results_nextqa_beam_depth_{}'.format(config['beam_depth'])
+    if gen_subset_num is not None:
+        file_name += f'-part_{gen_subset_num}'
+    file_name = f'{tag}_' + file_name
+    output_path = os.path.join(config['output_dir_nextqa'], file_name + '.json')
+    with open(output_path, 'w') as f:
+        json.dump(responses, f, indent=4)
+    print('Results logged to {}'.format(output_path))
+    print(os.getcwd())
+    # Switch back to training mode
+    model.train()
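+
+# Unlike the functions above, generate_enc_dec decodes with the hand-rolled
+# beam search defined further below instead of model.llm.generate; its
+# dataloader already yields tokenized tensor dicts rather than raw strings.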
+
+def generate_enc_dec(model, dataloader, tag, config, gen_subset_num=None):
+
+    model.eval()
+    responses = {}
+    tokenizer_enc_dec = dataloader.dataset.tokenizer_enc_dec
+    device = next(model.parameters()).device  # Assumes all model parameters are on the same device
+    # Generate the response for each round
+    logger.info('[INFO] Generating responses for {} samples'.format(len(dataloader)))
+    with torch.no_grad():
+        # for counter, (vis, cap_ids, hist_ids, ques_ids, _, enc_dec_input_ids, _, vid_id) in enumerate(dataloader):
+        for counter, (vis, cap_ids, hist_ids, _, enc_dec_input_ids, _, vid_id) in enumerate(dataloader):
+
+            start_time = time()
+            vis = vis.to(device, non_blocking=True)
+
+            for k in cap_ids:
+                if isinstance(cap_ids[k], torch.Tensor):
+                    cap_ids[k] = cap_ids[k].to(device)
+
+            for k in hist_ids:
+                if isinstance(hist_ids[k], torch.Tensor):
+                    hist_ids[k] = hist_ids[k].to(device)
+
+            # for k in ques_ids:
+            #     if isinstance(ques_ids[k], torch.Tensor):
+            #         ques_ids[k] = ques_ids[k].to(device)
+
+            for k in enc_dec_input_ids:
+                if isinstance(enc_dec_input_ids[k], torch.Tensor):
+                    enc_dec_input_ids[k] = enc_dec_input_ids[k].to(device)
+
+            # response = beam_search_generation(
+            #     model, vis, cap_ids, hist_ids, ques_ids, enc_dec_input_ids, tokenizer_enc_dec, config
+            # )
+
+            response = beam_search_generation(
+                model, vis, cap_ids, hist_ids, enc_dec_input_ids, tokenizer_enc_dec, config
+            )
+
+            # Decode the response
+            response = tokenizer_enc_dec.decode(response)
+            responses[vid_id[0]] = response
+            # all_graphs[vid] = graphs
+            time_elapsed = int(time() - start_time)
+            print('Generating response {} / {} -- took {}s'.format(counter + 1, len(dataloader), time_elapsed))
+
+    # Create a file with all responses
+    with open(config['anno_avsd_test_{}'.format(config['dstc'])], 'r') as f:
+        test_data = json.load(f)
+    test_dialogs = deepcopy(test_data['dialogs'])
+    # Filter the predicted dialogs
+    test_dialogs = list(filter(lambda diag: diag['image_id'] in responses, test_dialogs))
+
+    for i, dialog in enumerate(test_dialogs):
+        vid_id = dialog['image_id']
+        gen_response = responses[vid_id]
+        round_num_to_answer = len(dialog['dialog'])-1
+        assert dialog['dialog'][round_num_to_answer]['answer'] == '__UNDISCLOSED__'
+        dialog['dialog'][round_num_to_answer]['answer'] = gen_response
+        test_dialogs[i] = dialog
+
+    # Log the file
+    file_name = 'results_dstc{}_beam_depth_{}'.format(config['dstc'], config['beam_depth'])
+    if gen_subset_num is not None:
+        file_name += f'-part_{gen_subset_num}'
+    file_name = f'{tag}_' + file_name
+    output_path = os.path.join(config['output_dir_avsd_{}'.format(config['dstc'])], file_name + '.json')
+    with open(output_path, 'w') as f:
+        json.dump({'dialogs': test_dialogs}, f, indent=4)
+    logger.info('Results logged to {}'.format(output_path))
+    # Switch back to training mode
+    model.train()
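+
+# Both beam-search variants below keep hypotheses as tuples
+# (generated_tokens, cumulative_log_prob, decoder_input_state). Each step
+# expands every hypothesis, keeps the config['beam_depth'] best by score,
+# and, once min_generation_length is reached, also scores the EOS
+# continuation with the additive length penalty and moves it to comp_hyplist.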
+
+def beam_search_generation_decoder_only(model, vis, caption, history, enc_dec_input, tokenizer_enc_dec, config):
+
+    # Special-token ids used by the search below (same setup as
+    # beam_search_generation)
+    bos_token = tokenizer_enc_dec.bos_token_id
+    eos_token = tokenizer_enc_dec.eos_token_id
+    unk_token = tokenizer_enc_dec.unk_token_id
+
+    # gen_ans = [bos_token]
+    hyplist = [([], 0.0, [])]
+    best_state = None
+    comp_hyplist = []
+
+    # drop_caption = self.config['dstc'] == 10
+    # instance = build_input_from_segments(caption, history, gen_ans, tokenizer, drop_caption=drop_caption)
+
+    encoder_outputs = None
+
+    for i in range(config['max_generation_length']):
+        new_hyplist = []
+        argmin = 0
+        for out, lp, st in hyplist:
+            decoder_input_ids = torch.tensor(st).long().cuda().unsqueeze(0)
+
+            # output = model.generate(vis, caption, history, ques, decoder_input_ids, enc_dec_input, encoder_outputs, 'avsd')
+            output = model.generate(vis, caption, history, decoder_input_ids, enc_dec_input, encoder_outputs, 'avsd')
+
+            if encoder_outputs is None:
+                encoder_outputs = output.encoder_outputs
+
+            logits = output['logits'][:, -1, :].squeeze()  # get the logits of the last token
+            logp = F.log_softmax(logits, dim=0)
+            lp_vec = logp.cpu().data.numpy() + lp
+            if i >= config['min_generation_length']:
+                new_lp = lp_vec[eos_token] + config['length_penalty'] * (len(out) + 1)
+                comp_hyplist.append((out, new_lp))
+                if best_state is None or best_state < new_lp:
+                    best_state = new_lp
+            count = 1
+            for o in np.argsort(lp_vec)[::-1]:  # reverse the order
+                if o in [eos_token, unk_token]:
+                    continue
+                new_lp = lp_vec[o]
+                if len(new_hyplist) == config['beam_depth']:
+                    if new_hyplist[argmin][1] < new_lp:
+                        new_st = deepcopy(st)
+                        new_st.append(int(o))
+                        new_hyplist[argmin] = (out + [o], new_lp, new_st)
+                        argmin = min(enumerate(new_hyplist), key=lambda h: h[1][1])[0]
+                    else:
+                        break
+                else:
+                    new_st = deepcopy(st)
+                    new_st.append(int(o))
+                    new_hyplist.append((out + [o], new_lp, new_st))
+                    if len(new_hyplist) == config['beam_depth']:
+                        argmin = min(enumerate(new_hyplist), key=lambda h: h[1][1])[0]
+                count += 1
+        hyplist = new_hyplist
+
+    if len(comp_hyplist) > 0:
+        maxhyps = sorted(comp_hyplist, key=lambda h: -h[1])[:1]
+        res = maxhyps[0][0]
+        if res[0] == bos_token:
+            res = res[1:]
+        if res[-1] == eos_token:
+            res = res[:-1]
+        return res
+    else:
+        return []
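+
+# flan_t5 has no dedicated BOS token, so decoding is seeded with the pad
+# token (T5's conventional decoder start token); other encoder-decoder
+# families start from their actual BOS token.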
+
+# def beam_search_generation(model, vis, caption, history, ques, enc_dec_input, tokenizer_enc_dec, config):
+def beam_search_generation(model, vis, caption, history, enc_dec_input, tokenizer_enc_dec, config):
+
+    if config['enc_dec_family'] == 'flan_t5':
+        bos_token = tokenizer_enc_dec.pad_token_id
+        eos_token = tokenizer_enc_dec.eos_token_id
+    else:
+        bos_token = tokenizer_enc_dec.bos_token_id
+        eos_token = tokenizer_enc_dec.eos_token_id
+
+    unk_token = tokenizer_enc_dec.unk_token_id
+
+    # gen_ans = [bos_token]
+    hyplist = [([], 0.0, [bos_token])]
+    best_state = None
+    comp_hyplist = []
+
+    # drop_caption = self.config['dstc'] == 10
+    # instance = build_input_from_segments(caption, history, gen_ans, tokenizer, drop_caption=drop_caption)
+
+    encoder_outputs = None
+
+    for i in range(config['max_generation_length']):
+        new_hyplist = []
+        argmin = 0
+        for out, lp, st in hyplist:
+            decoder_input_ids = torch.tensor(st).long().cuda().unsqueeze(0)
+
+            # output = model.generate(vis, caption, history, ques, decoder_input_ids, enc_dec_input, encoder_outputs, 'avsd')
+            output = model.generate(vis, caption, history, decoder_input_ids, enc_dec_input, encoder_outputs, 'avsd')
+
+            if encoder_outputs is None:
+                encoder_outputs = output.encoder_outputs
+
+            logits = output['logits'][:, -1, :].squeeze()  # get the logits of the last token
+            logp = F.log_softmax(logits, dim=0)
+            lp_vec = logp.cpu().data.numpy() + lp
+            if i >= config['min_generation_length']:
+                new_lp = lp_vec[eos_token] + config['length_penalty'] * (len(out) + 1)
+                comp_hyplist.append((out, new_lp))
+                if best_state is None or best_state < new_lp:
+                    best_state = new_lp
+            count = 1
+            for o in np.argsort(lp_vec)[::-1]:  # reverse the order
+                if o in [eos_token, unk_token]:
+                    continue
+                new_lp = lp_vec[o]
+                if len(new_hyplist) == config['beam_depth']:
+                    if new_hyplist[argmin][1] < new_lp:
+                        new_st = deepcopy(st)
+                        new_st.append(int(o))
+                        new_hyplist[argmin] = (out + [o], new_lp, new_st)
+                        argmin = min(enumerate(new_hyplist), key=lambda h: h[1][1])[0]
+                    else:
+                        break
+                else:
+                    new_st = deepcopy(st)
+                    new_st.append(int(o))
+                    new_hyplist.append((out + [o], new_lp, new_st))
+                    if len(new_hyplist) == config['beam_depth']:
+                        argmin = min(enumerate(new_hyplist), key=lambda h: h[1][1])[0]
+                count += 1
+        hyplist = new_hyplist
+
+    if len(comp_hyplist) > 0:
+        maxhyps = sorted(comp_hyplist, key=lambda h: -h[1])[:1]
+        res = maxhyps[0][0]
+        if res[0] == bos_token:
+            res = res[1:]
+        if res[-1] == eos_token:
+            res = res[:-1]
+        return res
+    else:
+        return []
\ No newline at end of file
diff --git a/tokenizers/flan_t5/added_tokens.json b/tokenizers/flan_t5/added_tokens.json
new file mode 100644
index 0000000..af404f1
--- /dev/null
+++ b/tokenizers/flan_t5/added_tokens.json
@@ -0,0 +1,109 @@
+{
+  "": 32105,
+  "<extra_id_0>": 32099,
+  "<extra_id_10>": 32089,
+  "<extra_id_11>": 32088,
+  "<extra_id_12>": 32087,
+  "<extra_id_13>": 32086,
+  "<extra_id_14>": 32085,
+  "<extra_id_15>": 32084,
+  "<extra_id_16>": 32083,
+  "<extra_id_17>": 32082,
+  "<extra_id_18>": 32081,
+  "<extra_id_19>": 32080,
+  "<extra_id_1>": 32098,
+  "<extra_id_20>": 32079,
+  "<extra_id_21>": 32078,
+  "<extra_id_22>": 32077,
+  "<extra_id_23>": 32076,
+  "<extra_id_24>": 32075,
+  "<extra_id_25>": 32074,
+  "<extra_id_26>": 32073,
+  "<extra_id_27>": 32072,
+  "<extra_id_28>": 32071,
+  "<extra_id_29>": 32070,
+  "<extra_id_2>": 32097,
+  "<extra_id_30>": 32069,
+  "<extra_id_31>": 32068,
+  "<extra_id_32>": 32067,
+  "<extra_id_33>": 32066,
+  "<extra_id_34>": 32065,
+  "<extra_id_35>": 32064,
+  "<extra_id_36>": 32063,
+  "<extra_id_37>": 32062,
+  "<extra_id_38>": 32061,
+  "<extra_id_39>": 32060,
+  "<extra_id_3>": 32096,
+  "<extra_id_40>": 32059,
+  "<extra_id_41>": 32058,
+  "<extra_id_42>": 32057,
+  "<extra_id_43>": 32056,
+  "<extra_id_44>": 32055,
+  "<extra_id_45>": 32054,
+  "<extra_id_46>": 32053,
+  "<extra_id_47>": 32052,
+  "<extra_id_48>": 32051,
+  "<extra_id_49>": 32050,
+  "<extra_id_4>": 32095,
+  "<extra_id_50>": 32049,
+  "<extra_id_51>": 32048,
+  "<extra_id_52>": 32047,
+  "<extra_id_53>": 32046,
+  "<extra_id_54>": 32045,
+  "<extra_id_55>": 32044,
+  "<extra_id_56>": 32043,
+  "<extra_id_57>": 32042,
+  "<extra_id_58>": 32041,
+  "<extra_id_59>": 32040,
+  "<extra_id_5>": 32094,
+  "<extra_id_60>": 32039,
+  "<extra_id_61>": 32038,
+  "<extra_id_62>": 32037,
+  "<extra_id_63>": 32036,
+  "<extra_id_64>": 32035,
+  "<extra_id_65>": 32034,
+  "<extra_id_66>": 32033,
+  "<extra_id_67>": 32032,
+  "<extra_id_68>": 32031,
+  "<extra_id_69>": 32030,
+  "<extra_id_6>": 32093,
+  "<extra_id_70>": 32029,
+  "<extra_id_71>": 32028,
+  "<extra_id_72>": 32027,
+  "<extra_id_73>": 32026,
+  "<extra_id_74>": 32025,
+  "<extra_id_75>": 32024,
+  "<extra_id_76>": 32023,
+  "<extra_id_77>": 32022,
+  "<extra_id_78>": 32021,
+  "<extra_id_79>": 32020,
+  "<extra_id_7>": 32092,
+  "<extra_id_80>": 32019,
+  "<extra_id_81>": 32018,
+  "<extra_id_82>": 32017,
+  "<extra_id_83>": 32016,
+  "<extra_id_84>": 32015,
+  "<extra_id_85>": 32014,
+  "<extra_id_86>": 32013,
+  "<extra_id_87>": 32012,
+  "<extra_id_88>": 32011,
+  "<extra_id_89>": 32010,
+  "<extra_id_8>": 32091,
+  "<extra_id_90>": 32009,
+  "<extra_id_91>": 32008,
+  "<extra_id_92>": 32007,
+  "<extra_id_93>": 32006,
+  "<extra_id_94>": 32005,
+  "<extra_id_95>": 32004,
+  "<extra_id_96>": 32003,
+  "<extra_id_97>": 32002,
+  "<extra_id_98>": 32001,
+  "<extra_id_99>": 32000,
+  "<extra_id_9>": 32090,
+  "": 32106,
+  "": 32101,
+  "": 32100,
+  "": 32103,
+  "": 32104,
+  "": 32102
+}
diff --git a/tokenizers/flan_t5/special_tokens_map.json b/tokenizers/flan_t5/special_tokens_map.json
new file mode 100644
index 0000000..2e4806a
--- /dev/null
+++ b/tokenizers/flan_t5/special_tokens_map.json
@@ -0,0 +1,74 @@
+{
+  "additional_special_tokens": [
+    {
+      "content": "",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    }
+  ],
+  "bos_token": {
+    "content": "",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "mask_token": {
+    "content": "",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<pad>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
diff --git a/tokenizers/flan_t5/spiece.model b/tokenizers/flan_t5/spiece.model
new file mode 100644
index 0000000000000000000000000000000000000000..4e28ff6ebdf584f5372d9de68867399142435d9a
GIT binary patch
literal 791656

1IoaV{Q10NSe7G1NL8jQNvKu?B6QNa1lGrQD2;bQke zSwIue=WD}W6ogZV3MC3=B@ajtksMJXIAo-HY`d0wh1 ze8(ej?sk2!?LD^@=Gp6qn6z+t7UN=ZP9O7b%qkuED1_73A*(kJK9+f?$3{_G{8xkv zS9@qYbXtrR9*1+nHT2R}-w;EEV3(3Uto8|z1EnFi#X?VkZ4QQWA9Xd1)(pgK3f-W; z9275g@H7zzV%f))YPp{YyZ(>siOrt@IRn!$@W@lMBB%uHDDpg`0Mg~UaGdk3|5*RS z!!C^DpYtE+V#IIH<4#M>=R>Ez1mk*z3RArHGLRUt&dCFU(V#jTpu-M`8Vvh z6(z(Gayur*w7z&y*y$_y^;Bk!Z^Jou-s4H`2o&CdvKXuC;uayNrFL3P{37TuXX@6} zqVIx9TMuV=2gvQVnrL$-r!GIe2TA`lY)s5}A14LCQPx1L@Na+v+EoV|`+Wc=R%&Fi zBc2oDU-dxh%=h}p|FZSVX7}+zL|VqE{<7STl*LJENacsr1BVV#`7s{ShNgi&dPKNL zSh{NypJceXl|JBJVTZ~t1!k+B{xl1Nmy(j+`y)~K zQO89kF2+uuL6Aq3k%{6rA;h7@d@S=hz}cvzo^&PF_|FyT0;^_clbh{WQ~VOn2}A9$ z5Vs1a%|dp$dCXLdB$>Qi^7~gHWQBR)SngZglttaNSm--DtEa>#ayo>%9J_4;@t^Nu z9EZ;GW8MD)6UJ07kUsk(Zr6^j$Hc2b-kT`<%iaV7!jeDnkL%6IEr&WM5mhl9ok7vSC1#m&w?zcy6yH0(4{t2H|v;m9aW+jUT6dD zasIFPazS;s&c^)ZPQO8r0Vxi&#)#iR6oP6_%n?YdN~=yZt`p$`@UoO7bf z)KnIFwNFJlE$qZFLGO)^{>=Pg`YV@bG~`tYkq%HB1&Mux;R}pTN@aR&OqSEZNVi@) zH?u^!bhsj_!}SBjIF+m+sa8k-<*ObHP?PIr7cBXa1(eu+p)8nYsgC8NSBwjY1B&=o z&}GgQh+@ny{oUHaHhl`&f711srbY0b=2x5jEX;aEQMsnn#>sLfHJi!PxWxdDr-9M; zi4tUX8@~z?5V_UCj{4~f7Kd^T8$CMemjJmklup!$ZddX4#oYH1p|r;8n03UGV5hHw zJ?CcDTi-2}eNZzw)>;~5AvWvwR9qs$UtriGF)X$7G8x4LyIp#1Wujaf7$xACCR)Te zX1&=OM+*;@a9=)|;*44r&aDKa4v7idZr?I~TmSg$CZ0f!1DDG%+fds?=L%V9+&rTO z(L6pAm*$oB8b_=Ic2+tP^L@vPz(W)B3N38*2(S~)R;(-)zExsc`q)e6okTGf=cZf7 zMXP`;jGlSdBsUm-Ts8ABeiBbAfGAlKem4-0tZNVBrp0S=x-@wxr!P@$-fBHKdc$&h z#tqlU?W|LG718TrkPLTGdSd4d04}NtI?~%;*nNkd%hFy;A^JrUUU$nttg<0)XKx%^ zziSCvj!ec`wfDr;qS6{@J3w>ASO#>KoC+kjwO;v)KMv#ga<7f>M?6}tz9=`TOl-lu zEZ`uEg;|`##;ZeM(!POd&N>?d5nfGwl*{RQG9B};LJEe(R163>v%5;+?FeClWz1&F zK(cAw#NJzrth~Muu=n;H+x*XM3MP4pxs=W9-k<%)lQzfgK>~Y*yu783F&`^#VK0o= zagf|3L+4BJs(@w2`i+sa$ZHl*j;exc6^{+Ie~e`g#FoPVE)xdoM6Wg$HJdR`QvbqYj)C; zM2vtSPzGF{vZWTLxS5L?dem(7k^Dbc)tO)9uy@Yw0I^Bs$;D4{yPNszAvsNL)lm*x z6MU@x#Ji)x&NCB!)_r=4v9JxK^IVwKg1P{A3dTT@XUyC#Dkl+jAnym|cJ&xf#&#GB zOiM`i)YOhh+7{bFTLMYjDaJ4{loJCC<3j20rrf-%X|@M#mj&Be!|Q*7PNZHL#E1-4 zsIWccVE3AA@9|cQID+leJ@7qt05})rQkErS7nCUaE8YO_m~xohj;)w%Lnd>@70Y2QV{BxX=Gh)?!lb{C05+Ly_qJSw znj<$c*Hy+^Q*c@&ZD^h{;n`1&vmx_u&4P|5dApIEwUsE=gh|^zLX_o*aeI=oeRURY zavh>Kt}8L<>1(IxRxy%L19~n?P6ebgE$Gq4MEzvT8i&ictpyvpz#^PpE!-*Or|Ys0 zrBl}_Ge3=S(DclojD8@14QIeOBlTUH&UeK&yFt;uwQW+2YlU10*}Nt05$TMf5;}6r z_)>%ttsQ_Hch5f2dcD{rgCpDR0cinNQgMzDOpBhRF$NamMsdzX8Chd-^}HC0q-K1~ zFMVI#SRU){$v;HMSx;}QFW?ep541hDP5;ycF%B1SS}{(=#QAs;X|-baKe6N(hwKIE z|qcWJ|q+d=`|1Qtc=eoqnWBu>$NmK)l^WL#&e@o*;vE~ouz z*D-$W0y_=UaxH2^=XArkUT`v=8uK1@0Ho^0VYl*&i+fUd*5UP>k1p1i1=6fnnVRGD znJ{ioxjw0E=i@d}j%io!P7u>4Vg~y??F{kZx8jhUHpLe1&jMj%Zl>Bs)hsC2;+o_k ztKCI-0yBIBhf<)y`jrkb=ZnL~qepUj7YiSl`Jfhg5L&s!<*_*4E34x_i6b4l0si$t zxDnsPs(9^SoX!`kjKti)N1`mzelCc_@Ix{$bUDc`>|{|CMz!qGUMb)#jHh0A#6u!1 z7;2ZQLFeHx4w4O$&iF>ynWQwgIs)VZVj!hA()q>WM`oY2^Hg)JE9_Y6*mF z2OH>DfAK^$D#q8PP&D_9>yP3iN6Zs~dma6kFzEo|pc0|&y^g3k1|RM|%IQ_FD#2+? 
z{iYt9B>_ztWKSP8M;vly9FMEzcD5_EkF(!#U<;dZLwX|djVQe5I31cWC-dIT+gEX% zu;o~HK%erQJ3eD3OJl8*hgD!4Bvp_D2%cI#0m^yYW^_C$avyN5d zrWiSW>WZN!0i4xoNUl}T@@9XPGP_y?AI?V6^bNTkXuX~#{^4Y>Q>uj#GrHYRfg$L| zfyVY|6mVE9q)_oVF8wz#Vs^5C1FK`17554cCZsXmP5rX>#}3u0*?;wgPW$*oe;_tJ17kf#xD75{ZYzs{XQJ{P%NlJLl*W_Uax)P82!fg(WVQ6{F`ou;b@qjbw9# z7=MIP8Iz6nIHTl=R*2Q(SpIxGkwNBM15p%k^_apfODvgOkOh%!R1;6XD3ZkvLcAxp z6}boD3a^*i1G=Q&a7G6_3BqDjL6eN6H50_tFh?9TKLjRx_8 zfD@z3Tgv~ZqS7j(^=OQ`F(Von$ck}|2&V#(v@*r!(ziGZLT?r_bJq}j%T2HjUYj1O z<&TSU@MYSLjFWGMaOOG3;F0EAvXGkTnbmR#f1_JrE%%wMJ4XsSVBXY99(r2sHmJd3 zA2iFo-R+Q0s`Mu3&85uFyEFSXxp~Y2;xSRK%9T^%Z6SZeOoKTJ?QEU9;HVW7s^SDW z(?Y>Mq^4W5@P1RN(bKnw@n2ezR4I{T94?#(5tfX!T$)1vp| z%hKgR^w{(PprcI<#}s4n2eW{ReHg!{O#Us(`C+>adni{u1TolSVd97rWlp*wbQj8 zgQi5WQ$UfwDuh%94~c#kc0y%x6;;gE$6*{j?-KHnCYZN~v@q**KE2EnU?iQ>Ks`gi zU+4<Mj=~D{S2(@bzA_PbT#|p(fxss@NQD ziF-vj4O37JNFbkLmvx6?I<36!Nz`%EiQF!pkK>o4T>D2$vp{5H0WKl)Mx> z^n_^nD|m4^uug7?wf_YsLRN^o7ouLs|BlC^W%QIVTkoUevJ!5f*NA_-ivL8Kvt!BE z9TNQ+C*d0lxp}c2lkDzYB#Nwd7Lp~>zXXw>o*s^I*qHYms+00-VT;@%n zbJy6_t9`Ar#kh)Rn|n0BP2Yxgxi|3wHS1jV!^HVxwgWWY?)wf5X|JXL|NXAr++~Zc z-^cA%h*_edUkr@}KY&F}MP?7pah4Eir$5WbXCDJBG`7FnW5g$5mlL;g>E6md%?Or7 zME#ry3x}C&vMXNy8I-f4jlCSBgf@Ae6`drKm7d%rfyWJ5$*=u6$d#U5CFT%~o{1jt z1-xUI24OKy{tM9}ov&ZM#D`sU5pyI< zf0KPYgE3E=zzctyQEHD0?^ID13D?lDCRg%;tyV0nV7K$T%*%MK6@L&$qz$qiF#kUQ zr=f-}gh;0TV1~JzS{ZS4{U408!R4;jJPGaj5$Z2pUaEk%;!n_|zmsTpl$$!$#Qh`W zn@?ezS2kmQ=3iPox1+w6)4?-=(PSwupAY4>Qb+F~G}z4*60^($vH33<=^Ge2JK5pC zkc3)YAJ@p~P#X%`4&=yN6d7+PPFb=;XHw#6iM@9IPiJA>8 zw=h`sJei5^E95e*==Z=>u7j?vfqAwHM1gv2vvOe=oxIfML8n&(3Q;7G50E zoruQg^xou_Vx1S-63bFgX8s|X4Ci}f`skn~K`xHT%#sgX3g~vuydk;gkr1?YNGnP{!Fi*O=! zuBWT{o39F$PWxpGfl5HsZ?BepiIGXxJnO6u;dHl1fA!-v>>)X(RxdZOCWOnPtg}-` z4o8Ve1Vx{!~D$bhNQBEg-=LpzC`c;I}%rr`yP~UEl zF*)Xh%>UNj5XOZ_Q;<1pHnzpNC26Kh@6B3as7T9xe^j>rE=auNtK%#=iB}5l9U3(j z*(mc`*VY=B2+C`1R1U#unW5Rx9j6KN*T`Y9`^Gq3P@KSWoShu~e6dOP0ix@Q?}VLP z9f)#UxfpwI>OksdM8BMb&&el8^6l}uFkm3nLm^05<+ONKPQJoKXLGzIU@0`CJNt(q#eybryf_TED<3wnlZF0+TV@pI4V$&t z9<>#eQk0C(I|>mT=eW|IcxAXDodWk{e=L}Tp5kC6baiUz*>$c*#W*P)bRAk9UN#QOLQk&x=+W%vxKFG-7L!}P z`FOTOr6X&+B#Popp4C32957gY+v6NTCr5fk6vJENVchh2HkpVsZT(UowOZ|_ zVx7LafxHa!ej&yZW7;eohQiW8oFbn8h}&KD6>mrOL(fZgd2pZX){@YA6<*vshfeu zkPZoAhh5+X8<5@!Oy4YLOnaR0Ee_L2s z_Eov{)WJAc6*Tny>VbnPXM8$Fn2#~w8K22b{9IHiU^A&B{v*QQj-y^IF&*f@Wm}qo zb$W@RchN%bOEIoHtPZqk!GdzD8Cf`0^wlQ|I$IUo2v()JT9lJDMXK7D?h2-doOccC zUGce)|Lv5?x7=>vG+kXhK`H3)3LKN@-q*FFEJ_UAvZH+79vOi~7(?0nA}G(UyjHw> z0-S2Ducn56|1FX<&=GGmDi+ualtwa317cs)igL1~njD?N1jz|yd;C|x#m)wh_G&ug zf_<_NbQv~&C}CFgqoR9X{`ZuW#hy3h$;?}#E%#(oi_vaB7^gRTaXzjQ>EzIX7UP{9 z*eT%H(>I5wHo#eOoW#d5d7#C_Y=v*OD`elW&ua57ed6q?8`a8=xU1wTv0bItKTdAK z6ZNQp@+fb{ZBey$?#~W+K29!C{fTiTY$mj1zM2yAA;q|&6$$~eBTdJ!KOpUeG|NqF zG(Xp#k%RB*@nD2VXR4kx)AxcF7caP~4Q$o{;~YrQwHUKS@P*a_H1)d#ExAgWd4Hl4 zY{@CdTzPGbfi5^VS!U4eq9^xEPZsN_Q4x$yx`EC)#;AG7kcYKI*+*s9tk~%Qpa-#J z?vk3C!NbD2h{2^*KWP9I!2&W#C`RMYYawRJ3s2FYgw13kp)_IzU_I$eQ@uUEG9FH;U*7!R8 z3r*0*SnY8BqSVorh|zL8h>^pG#J+--YI6@qTr-88H}>gOUNuL+5H5=>P6y(s?0=d6 zWi7wH7!vX1`3ys~rN5I`H;YkyboPN1gBby@7vm^d-{fM|W5BA1Gh!b(Ek}+Tbh|eG zJQj-Jlikkcj>|sf9CjdX5GFI}g{Y}>fKI#?zYGE+j)zH`O6NeLAanl-Q0eO071!#n zZ=DG5WR2J1@#ZIGpLP~7&ql@AL6pNkpbum1#du5%X`fOT@1L6aFT<`Db1v+8LLoYK zq0oeHO3}P)+G${O&fvea-=uU=+04f(XTqhO5mRqA)|kjW_ZJeaFWZ#b>MVS^c44(i z=Sj&ZEY{!GGez~)>Tg6^1itKqQoAT*nj9YAcQzpHhI9u*I-0AW17}%qVK5hK2$K=E z{}99PLKcJW8Itf#&V_M{nt`eNV$3NKyb!~@@(dBKcHm7SAi&NHCMdAsTS z%p=yaVu&CflV$E`0q1WV&kM52!l}+>A{{q}9_+8*Cc=s5J~1nX3&56@jGAz$C|Tvi z4z{qSCZ+&1Wv-`+_Qy;Nxt%YpgzCZD6EDvGWZl&h=UoC$1);++)ZN5LvN}w-N5FaJ 
z#%{cL84w}5Prvc<>{}^J6WVKH9QA~bhKcc~Fk#Vx88WW`I4sTF4-ur^)nH@xN}Mh~ z)}pFu#h7-L{~k9k?!FqQ)96<+<1sN7*o0)oKYK2O!>(hm)5ZS@;e0gp#Yzv_FSpn`W7UTNZn5m5>qY}MqfqlL(J0A~Ta^f=j6}PN zq6C$AZjWCdfpksB9DAHL&%r-=E=j*v!2h-DHJB7)(n<=v8$O!-E8BmY3Ob*Zx$Mlv z$P(4S387lV)+MUgJHU;;mY81(r-MJm*J%Ljnhec}I z35nI703akktFoI=S~*x+7vrBIvYm;+_ak|Q_(_M67-(N!fH0cS)5ynAAxld_M3)cA z9{m)g1D3i~cU(~-${94<;|>v|ij(p7m|yz0Gf~w)=4t-5kZRe-rr(RpL^;T*yz_)* zpc26@VIQy1JcAFXMC%_tZ9=as(jiTvKhUeK=ZYE}wz_DTtk`;=&Af5HAvtrpM3h^8 zbpfMx`FLH7lOPjM`FKMFnMjP6eOvNBmB%vaYFB%nza?x==<+e}LKYsImD)*L>?IiL z4dVw2HnEwYlZu@-rZ7B5A<8YFAX}BQUje%!kLMozVE?-DtI%o~n$7)Pz~8V(REVcu z16wTg6%4~VA^WaG)?pGty)-&cujA7R;F>s_eY|`w%2}ps=EF5aC{KyO#Z84ASB-Lf zjxaqy8-3K(a=TSb;fDXSf({%jWsF2v+Ij=Z67E1>mG8HEGy7)5RwHBYx3X_oDd=Kp zEd34?F{)v4hE=(f*0z*;qG;BEgX z^HfbPZxwW$95{CM5XJHzXB0P8(FM!L`JxD!%A$>>hXoyRxia^Iu(H>`b1;Rd{nB_}fEvblG-_CE@&(AHkev0? z!$iF}yffb{#t8yWUOgAw;yqzUHk;EWmXTj(Auw+6MhjW|+gEU^#Zsq>)xHLiVU3w= zS|6W662QLQ%s0?3W2}|v_QCSs!8qoLg-&|G2Sm7tFtib?eGha> z+0Jc?3Bn|znC#SEE#&Z04e)EkxU?Aad10>*TmO)G%H_HL!JI;jJtZqZ8vIWG2jz4$ zGV90miU|9$;nhO?^dl5CtC?Y==BMm$X2vp}7u7#Qxi%%$wOyx=@v>M=Tv#fTi>#KK*atx`#(9YQTH#mnnV#Jm) z^0NFwxE&>9pKSU!$6ez51ydyLm~Fo>jD^8FRqb)2a4KXb zHmvWT7ekB_nRhR{NalOm0Ofh1utyzDJhV%NW`{`2vnj_@S#j8+5KakG>1z@@Xorg< z-o8{J zan$nmOi^jC^NtmOPFsJW6=nA)M5M<#U97Vkw0>6I{dgt*Aln?3#d<5_woo|k(4f9b zz>uOU9uMt|V}%?l{cQs$^s7LS#HJjlCcL*Tmm#?N;}r71zSVGq*xw_qB1S6(QMHh*$o?^s!aYz_~Df9C!_@u8jo!Nl#ycW(uE_Jg~Zp zUj<25?S$BRJ)9N^Z%$z-QrH=mrTe1hQcsCeV(tcf;FL_$TIiOIz|MtUV4|yCXh;@IT`v|Z zDbamJIcdy%>E=gl3_;e$Pmf#WbfRS8vWXEoze(o3dERAYtwn?*K_yTR6V-2;eZ>H* z*P&w%QTD`rVjIG1l$@k1)N{hakm{BSBOZZrk>?qVV9rI4mo@uh^NIN2$~Gr zC#_g~imkTH!ogfNGrbE$49;lKAM9+_p3OyKsfZ0!niaOPyJl)dtT#OSyt%!LNBJg- zaL=%7Sv)4>Y2e^ABAFm- zx#ynj7c96fq@|0hIpP;~qU&=5Ivi-(4u(Qy{@xu6Zx65#CoubEwJAf{7m9W39LN2W zRd&e2NbG=+aWNKNWqBMRoGtPDgs_sqKjL$v-!Ej1UBd37KR}P!Fr^xMK>R99#;&lG_DQbCF`=Un7&iq%Zs3VUOMc> zn`2D+FLMB98c24!2Dq@i@iuw656ev^XdEuI%gTpb1V z)(%=vC2e%Q-JuabN9oM1u-vhy!=XM*V=mme7tnI9)}C@4 zE&_?siYU;%0ZuR4t2z>DjIrYU1%iY)a9;=)FsB)PasPf`#+ZuwxFDDPg>qqAd{e?4 zVqgzWcu=Z-`zRk*HbA<)j~N;h^B_kzHQA0iK?Idn_qKnK+v)2-A(UsS+v6v3WT2Ci zjkvr3a0xY_eRPGOrIt&~_{6nhiXdXVW$iOeg6<5pQE;n zpXE%;ReJ+^gQLF%&I#ANwh&wFZz8pf)EPU8ASc6kk>I)XPl^Sl2o??7wE0g?KVq)j zB(kV?PM#J>^T7LNjGW(yA(NWL6VD;-Ku1i&pcF3>VIkBqYJ4o{04k+oRfxknV91vi z-97QO&|rhah5<&WsW%ID!IB0Y=fvg$4x*RUBdvVK0WgjqBRu7vSisd3uk6U%JnKW2841_Xs$_?9BA0 zjq`w+X$UMM<2L~}vKfpSYP{?9LpcRVoGm=zG=bdZUwszPWj%aoY?`97`l_0#F-yqB zog5msQFpEq=iqq!vJhJifF0vRCW=zutPn+cH)6f*!`XJD!W54kh}$t&Aj-850=iKo z55dwe6{Ad~=A;wj`qKaG<#C}(=Mgs@%KuK1CYWqMwZ=!{oFsOs2V$|q!NlLhmgm`W zTcoUs+2D_wqcYzd^R!2cuv0}_jc<BlA(Fge@Wv%Z!?s zB_}neU;FP@l{}*VMYr~g2uD#?AEQnH5vDAV?jSecwdj$Qvjr@&7ODSVk^YJ8nbuh7 zMEgXUtQc#Y1R&-KwJ}0YC%BfG`VA)ook-^45c{11LEKhCYZY=yF-2=Z&9N9~i*pFo z%v?S#!EsD2=L?cT?S916Qw^w4#kfBOoPV{%o~QATh#3&xLQujGFAAei+n<4Dg>4rYP!FOa^IV1!C2RE(W^3# z-F>LcQafjgvUsPk!aq*Xk<))+vM9a~L*nVv=mW2|Z#og(hwPjSq-wBeVCQ(FYe1GR z|F&aJ1B3M?Zt$IiSmD~tU!rfSDeCItEEQcd<5foy&S`&1}HVB(?qyf8rb*e6z;HFVH`!WXq$IM6E$yz+hEBPw~w1+=M=Ew;psmC zwKr7dKa$(u(s%XB04yWPuD8QFj=Z)I7+XpV2Zfwk#1?nr2a)G|**hi)6CWc}SKKP# zU|E$6UI2YkESa2I9wYDa*Yq8^1_r7L!WJd_Ysu3u`-*bTn@Lw8P7*O#sgg(Adt=N! 
zaO9yPBie!tX3ISQ zkq!j2y4gvh5B%goIP#eo8rV~Hzb=^kC@e9U?{^Ov4A=HnD`1cc$+ zp7=_@+X7O?ORlsluvH#`Cx3Yir|m0uDpb~OUBxb0u*-=}J2Lc05>s#Ih;wt8q_+Bz zFoK^_6<^6|L9?ZlDE&>F2W6R5SH+b=mSR<1P5dZGQ0V?Ay~*m2W`ECQraDD%unAC6 zSZv%}@-dEm;B6nnhwBm-IMmhhSuW0n?wr%?&jJ=d6YBTG-lzGvWZEEu7Vo2hP53pQs5O5w6 z%Vz4n-zPDkE6VVY*PvYWC-ubbLW2TH zk}~vl7#Dpto$*eBgS9F#R>qv|oHrmHY&~6Rnx|{>C z{F~W_Tx3nA*=?en0$szVV?0BQDwp;lWBs?X|2aHhIUNIHoJ%y#F_$~yZGS)yH)2?{ z2|2(P?4}UJ{1U@1wZ?F~&51^d!Djjbm#@auGa|)Sa(vL6rQT83jX0$mb^`abU*_0{qg&;*cqbw>u#_13% zW~s9viI<4OH;q#|ffs!O^wg`ibc-?mEn5An9@zd?z;bAyRnplH{S?Nvl6wq=*hX01 zi6eFtaGKeK;A(sKXa094uhYdHp92sAYxy|l3%kpv)yFwRTYSiH?dlwXPL%9GHVn zWPdm6#>DZ$PHA1Co!hN(ju@vjeFa(T*x$sediHn4XFuY$EI3YQr#I$9I8I*tn;pZ1 zDRCaC%f~+j93Dq;UDz^Hcn|ynYnkizD7$HNaN_t(in;OgZvdy8GoIM>_Y!1JW9<{JQR{?*M?hm*uE?f}avFk#mZg%{>kiSbbRyJPPfDO|^j1jP$ zvWHTOals<6PN4cJ=Dk}j3PVk^j>xG(mSY|E6%qxfGsLBt=3))&@K|UuD3Xh?nqm_H zic1>*DvL4cpKKeqL_YnKUJeWL?Qx5+B`rOtV$4_^!jeX>J$WWufqWu5?ZK1x5j6@N zwge>6&StQQPX(ORY1+uefKz-X&Y8|NvxCa-K&E3z!XTw`I{~W zljWCmH*kiYkMl%3jp+EHHZ1HkN?oTX9uVP>bRU~fhAa<*G<5V9TM6*}bZa)oSYhXY zqhq4zjKx;WDB4G|jj^sMg3um&Y#@L@lX*;f7inv8&JN>jJRz8-is^8B-2JzV=uPJS z7p`Oi-?U81>?sjMk=N3H+m#Ji9~EMnfODkyDfH=LT%>4-%bEiEfdOa1G{}<06T(hH zHAVqjDKc$+=wl7AZN0@>P)=}SUy3JB#Skvj_MZ4t$Yun<-SE2n%`r zUkJa?dFSbEe*4#)v-76h=`(lk#K_Gd97BE5ILo+E)_iUeb|8fwCIwOx__FMWhM9}Q zx4;jBUiE}ncT1d>vh=>#-D7;-K%{HTII6^pzsrJT;nK|B1SXiaf=km?+JhV))K?E0V0INeAE@&0-WuVY^e;yZDJ5hkL-lT<3cWBCJVZL8Xd!+2zr3m z(e9UkE4eBb2{K=-qVS5h53{9E|p7=%_c|^6Y6zhxz3=QQ&J13=hOtf1-UZ(PVo$6Du&b}TaSA9Hjhsk7j&L-DhqQz1bzH7dpVJHnBPIqbZjyOZ5$K*arWTb@%HwEE&B5tbCT2v!>l zBJs1;eXJ|s%+J)m*6zao7F`A=B0Am|-mhRkcDemDLl)g5%1g}7WO{hmWqj>7U7 z`G&JFHTX%GUy2%P3mmxc{G=G?5JSdY@n7Lol6uxu=cMgbaJ1ga@o~1CWFyCGVeuyc zOQR*Z=<~V=Qmpr_5$sRZhE&0N_MO(R1v>I%U$7W&h#?R3n^dLWj?4b%s<|%UY$C>e z1#c{N4bD-gxS2>7S9M?fW<1E5qchMvOBXqp8l$YF@dK(+z=(S!~BAdH6-FQ^iFqrRun;FwQ7?%e+6_71h(SPq~nY ziYbSx`Dki@_4HM9%w+Z~np3cN7tbxQBItljGeKXHn7G_# zR`z9UM=OhzO}ik{GGp1svUmS7(>uV)MNf?F!4K&e-~i@exrw^1kZS3jof1^Yaa2(* z4;1l1O7#S2JSX6mlS|h~pNkqAlrnYuSg{w{B`}UF)+Y-(i#2SF55{$eKoD%QJ-c=v zzzx@1C9ID*jTXx{GIhuh(iZ;~L6KCkjN*nyyk9aVtyOyFV~c*U>o<=ejuxUiG)#{{ zIf?A?tJ(29V>ZA^sh!JIKkgS{!Pe_ELR&M>h;!*rDzvu78)ZKz1#!B764i@y@I!Ro zgMU}ZUyP?H;!9zo@9d0UQs<^y$BuJhT|Sk1HK!-~2VqECKaUH?rvjEQ7i_3XygNBB zqnHvTdwsQ{kgaY|(dbVRBpqDaWO97+p&(~ZdZw6u6){vm$@PF_3;w3Vpk2qbD)cqp zMIwhLn60D6v4$ zQmSq*w8m?~PHiP`SaGfFC}WB^>^d5X7;AgtdO4j1a=@F5dZT@_ zV>6P~t;}c8*cTm_g~JK~wLGq59uMV+##P2cWi)zpEOi3^S!7&y^Icr(?E9i!QFw>j z&k3J)BAn~_Og07nC};uJv6^Mzy+Mq#;MXThQGHV8se6DqW>@ShhDIJ6plI0iZ?V+r_R9-sJpXkoIVD zr6HC-C;LIfD$%7mc9t56Eg&nQQ$>i~u@vmhuQ+HZ#z9Pen z&-3EeE5VjcrS42?b^Eb63!|NvG6!SYRS@KYwu@b&y@k?x>W!6eulE1aL?4Ksg#7tr zHsyL^uWKM&tFTCxRJ8e`97bYpqCegjOJV%^)YAKdbzhrK|`*m>0jdeCJW(cHl6x*?oE$oD$n35by>@pvU=E`P49GSXV zN5_qFJHZ?VvUL<6iE)yW>6PYUYhDjWY|WaCydmIn9<{D(xB+&WllTQ}%T+pO1qDEtlzC8cSP6I3X-N zsAC$!PQDq^vS=A#2)Rf&^F&3Er!8)QbgrwDt&eCIgUFbCCb!_IF3o>}a^h#P1Mq;L zqgQFS#um3ikQ;8nw8nsdJvVZMa-X0*S0>uKUy4ZU3X3jj26x{Mg#bnl8-W$b3saO> z=8ETq2%Cr9`{J-W0S>!{rQcVA&LIZ*^4wUrU)joue+&3eo_8$8PwAhsHWnin@Fxv; zT3wteC-tF*?t9VQxE=mZq-5khK=)k#vyUEs5}oFd4s*?WfzD7p69w%stu0D*OmD@O z|JKo&w#g=oB2Rf4$$z~3KP7QM;fO6;IHS1aup%78z8!S!XInoh`c zU+i_iL#HFzA=Z2VU@3Izt>N{Bo%R`OxDNtJXjeyPtoD%IdgWjfx%sS7^_qCD6ZU6S zSOwqa&p_82j{2q6Fhz`$oF`?;Zs7Og)WDOAW`{fsvY2{{ezSAmN1&WI4trYTKw*z7 z)lBA-Y)W1yJnDcrAz=ufx)4N~_h4+Um)p^zuGu2#$I*|$IjwcfI4%?<6--yFo6uPR zrK!+7h1&h)<4|c6kQ!2f_m*Ltu5rBj5`#}>A2Ao?6ao{yPeBoCU(!)+FT@8L4VB<0 zg#FlZW>OlNXg6uZ(^*)aKy*jHsG;`M!3o=gf60hsbcm-!IMvLzbWbGieg?`p?<~;s z==6Mr=Q57n3oa|MZ8J+8(&|KV|9u`eiPm+Sd9m?dGvCQ|P&JaV;+*rz=mo^5!tOKJ 
zdDM9b0$ch8NDFKlhlk{MIWf)wvnP%MHWxwahBV}~tbe}jsSW)r77`+eYzo^R+VER* z5m?=b>>nk-L2A4n7K_q^U95$1#6{NZSfJR|Ovgyd&0m!$w>& znqLB|k#hU)^_R0C&@7<^pZ^MkC7#Uxk@yj>LZ!WZVvL-^j(>=BWot#>e)rch*ubG- zlb}BtdMkr{O_3Jg6qIfW`Hk0M$atGhKtB_3!D17RnGlWtb8$|(HojxQ8<`*85g`Z` zSZ|A?OY3ZlWB-ZM?T8^r+puYa6z~3wHH&H{YXxoz{woWO-HaR;>H1@NajGil9^_*q zA$3^AzSh_;1*~)W<46IgtWJgqo)C7ip=rv5_SCmBqPa`gw=Nc;kdv~zOvu7uGhLVe zmwp?@A@[-|zqhBr(43DLpJ^{F=cclsf(4z-Q9fSP_aK zZ3UUOJzs#@Rbpw?9{T1&O#UPb zqKfl>DovvpJg^ThC8{<-;;XN1h(Wn67fzXz7aQoZt`wVged*QUSJIEVFqu^KZ~k_@ zW2P#T;Dz}5Q#jY|skJks@-v`wOx5A&>{t>02IG_zCJJA^HR)-ICBASllvKsoF(aZ!*m=@69&(&rq%@ShA6VVJXJ^4>x3edD4IJ=Tg{e^SD+hjZ_!t9ul_9E77H0{uiJN ziH#|=0Whr?<3zfmq?h3|A}_JT*dYPqRd9;uxX!fKSAWpbej7X0;D3x)>R=^ zSQ6y4r~1tK*hHLD)|=zl>sDdDAT9>l6_y4e@3F&U6FHrs(G@XI$kK0O)2lN+5aBE^ z`D5#eAvWeO1L+9SbK+o|mh*}@XT6kbi4~Uxy25ZPR(3&m5<>wL*pvN-++>D>$Nu;w z1?Y?x(6!(n7Y!St?C0fUk|>8=bj*HPp|{c2If zJn;DCF}Pe7K9VFmeQ}K_=XpwUo$Bu8Ge3#33hMu_SAeoOCt$=dwidQnCa@`q{cl9@ zsz{YevcP;($T{ibRdH#SzEuu|=S4ZcSP?JIStF%#b{W?3I@x1_YGGVlil+sf0InkU z#J_~e(`-iMUw(rdStBj_0d4bau#y=%CuXENh~9c-7*dlg@>!Qs<%fxKkZo)_#t31{ zfn7P$lNUh(TH1-^S^*;LWxN=>O6IdJS)Pv;T!zV`IJ|AY5T}p@ke@Eye}NR6<4+;+e9iw1a<73~_KL zB1Wy9g_*n>5_^{kR&Q;^=qwX-N?Z>(NrY>F)Godibd=oGOy-%D>p-b%oE~een}y#~ zK>siLML1K5X|mIV)5gkh#{ZvqpR`35K64W{UAi~ z=lbZ42G;j?TvgR1=t)6Kt5#a#{qd?8&)pdRn1c+UJ;nj!2GGRL;TikoC;k@XcdC

kM9h*Ie1)a4tw zEkm2l;ReFSPz2Yf_3S?cT=IOv8S%25M7HpHEG)#@ zn}Zxd9S2Nk=*0?KK)E2s>4Lz?!Y)?yOyU|r*Lv-}GtuLYfH+GKtxwJU_S_OCt&$jq z%g1XXh?XmLXhFR3JCJ13#eUfGTLH*0uf)VuxgFW`hADB9AYVyNJ@LJeqoIvxJ9xuw z{3+%-W2)SQ$y|!wDFt&dc^TgcI5F(^vrcQ+HVdd$+D&~iPmJTOqPO^qpud=~Z~=Q; zpNgTlyZq=<@0d$d-deJ^6^&5zCcsfdFX)s$1-z1t$ zbO@Dc*RkH}eoQnuAV(NKCsx;We<|XT#zo*~P;#n`mL8I{IsC(IZkpzwu zWx-G;strjKcv!T1K5r7US;E6>qhZPA(BgJgVbYU4br`T3&+b zYKtl{#HA$?yK3_V-I|(oheflaTg17KurWXNDc9_lg~a?@Pf~p-hG=IqL-|H-r;(`+ zo44PJa4u(b6#9FZlP|S9RGMtvr{1xQtE7i>k+7veO5zPc2g#&|5#i_Z|CJot?70X3 zyM)=uPSi^Q9@iXiA)*+k$;zOol4#r*~D9FD5RixYzpTh!0v znleKZ&Cv>X9*3WF{y;rS4hwD)=kJo|W=iph7!rd56<0~d8w~U@u+&WeIOV!Wq$OIn zC`7gJx;RTtheX9ImOA1lF(jLtgUK@R!SeSkACqP~W+GmOT0$Y-6tni}>CX(JyO4p@m6LLti3Ye(p0=<~D9sA0vy4-il{?f<)-Y@%W$@LByvR-2U zEIfLFZW_UHe$MOflb2E@HeX3PEA3C4O@N?ldp6Q#+V3S7cffIYW3s&^4ho z(SNQ#5CWl3X^K1KOnWL>Wr$-Bf^a4$atOfQ%JpKh&WZ}aWSwWC56=80_r952x>#S-oOVc}vwHkt*=HI+U(Y}7+%US z>Z&7SpMJ2z*HV*DxL7HU(2@ztxdIk-`GsQAc#U{x6ytu$#=>=?kZLm;rz;HLw!j-O ze6g~ilgv|pbfD2tW;$&&;At_I%Itv{I~(NiXBPaZ>Zmzcn3I`>j1i<26kB+=K{JNK z=0Z5H99op(ph0_JYEon$vm<^K;Z*bv%$r9^tUeEhtfCMyH r;565^6l2rFfKJa2 zBV)Lb+bj>p%#+r2drTJTL`tt#y8_1_o>5%MPNv#R908@;#)DsL%k4CdA0Ukf9tpPm ziL)m$vva&S`=Q_AK4BTp>p*eg&*g8dQ$U^gR-DsLj}R*#1$3!tlFBBV>JFD3#k=QV z_@r@fFT!!5rq&w+!i1v-87bw91(5-R5tF-Tg~&+P!Uge$+|<#j^)d5koDQ^#6aLP4 zrc5v=L@3)FgO8!)XfCX+4qDq{@y)?^6&_z0#nzO`@!o)sq zTr6=sP8Vv4Nd=zU#n|)AA_sqe5>87gHOlv#DDw6A)oPC~p8z5QeiDhb+t(*SILu_r z1ue;w{efN+?TP(_hyi;xvY8u8o|5@bZkF&6w;0Q$p(sh7dTK_n1&z+rxguO*lL+$L z(|}Giry_%~(&^dXn^%m8R|TDiYFSrh?`-umppe1%fmkG`<;eOk-_^lWc-x$racC{+ zeI+zg#W@tTH#+jn2KPM+$_Yi&ot5B4=RhEu4z7mx#Z^N7n?)V5#<>uViU+t^06^?#|=C zB3(Rf>?bjw{?+ecki`yL$9i%)0nO^tl3Bnp>Hm!r;tV;{`l4wX21UhC67-}pt@mT< zcZcCI{yhG1x~EYOrNEdI$z)8! zzVUjOX8st2Vq-yOg#}!n^e4X;c1}4+tiKH8xN|Z(7Asy3VZk-B zR-~Oz65|43pGr@E#_^(D0jufKs5-3X%O6Vl8@be6k@;e7&{L8{%a6n%t0ISxvEh{f z=W}10-@}5GN9EL}c=RgV>4b+EP%~3sRk|8B?W>ZlPm;I9HE_-W_Y|-!J6Qx7l)*w2 zxr7`C*MH+OLD$T*-zGcvicG5*mzbrG^Svl!-ZU;oUyIWbG@@C;&Z!7{r#U1V`X~Gm zidZ<2?`QdTr64(BZ#P~~-P=`EU}y!N+|QAg!)yUdx=HFh%#-Ik6b8%ML997n4|H0G zOypwRQY6JWbYgY2@Nz3Gd06~aF4c|XX~|boe}vUsKxyZj^xxJFU8z{=2K+j1Rs$@R z;yf`HEel5uEZMSL_C`3%ZWIS}!vyVxEwnsFgrjj`NTkJS8R4O zz!SpS^1Wet!gl?EFBCre;*SEZme>@zLkIz~UWq54?FY*3zw79hRY5Pm6UyR9r=#lfSG@~Xm0lfng@A=xjoiD_|JWkzk5d=$AE8VC zb@)}3>WC${~GthAd{M^_~QfqlsB!E;vWS`u?)3s`5D-CvhEt|xOW0QZ+i z@#i|2Oo%Y3#ZxRI$$KZ_?_P)-VRIjhb)$fN7kXwD2a|(>iH|{&0a7PpLT9It0%@W% zxL?o}Cs`HhZc!;d6>HJe=pyTk$H5j)9ep4Yp?37)B}m2ImetQGY^wwq`DN(i?I;7Qk-0N+yAM|llFOe{;ORKa?pAF zpX7E5$Fp!{A58>;p%TuG6`uw;xMVhq;MNeu|H+Z~rUGdvRFE}AWKj7`_6?6GaAwKw zS()PKNdsM6^{j(o+SMORJqK_i8k6q%S0Y>>l`N>~A?c*f5ozJ}OI7q?VMpFesLA`~ zD?Ok2k+MiUF6@Rg4mE(a|C)V*a%ys*6-9BDa&J3V&ah1>2KiK?o%V&?o&!jM+Am0v z{%v1?cfb{xxI9zP-5GoFOE1b0FV4RuWUtuljn!TRIzG-IsI4&|hL3H$@p>Tyj8YXl zECQBHX+Y)G{Wl1gNV3~Rausvj~+v*JhD|5Y9R$*Z*Mh)U(cz5e6{nDfLD zU0vnWxIu0bDFrhQ+&%_5n;1jZX3N86MwW?~tE}@$=BE)O@AN#={|!UwF@7X>y1G+L zXIH6RuET{LYH|dEx}q3mvLTlI6sH?m3x%@2pe5;PqYAgV>}RWrouWg~a_KKh|KwN^ z2toG#Vx`XjLk$hR3>wkrqS6m!0Vpo|9Kx{_<`z)tR(ahk(xS3?Vs3$aBhG;(*UQN5 zGG9QYxj{#n4-XRKI>V`mwvv>7m@Z6N>QSr%<#w*JYt*sJSCGh8*I^mmj}>(4lasri z_`MjS;Pk_D%4-%wIEq>x#*E*54NjX*a#OM=TFO)_!zDS4Uqm@zOcYC=#cv!i)u0+{ z1iT&m9|t@>wU4^Ius_LjTb|lBQ;egUSnA6$GR3Q9%FUX2!~c9o(ed4R-}0S1dsa?p z`N(`Cx&2DK#mAW%g}3f^5Ec=m8HZCjE(mTf(y?}Ow5#UXFUm5&AXzS+7Ir{1MHWN< zE_*-~b1+W%9uHK(q|w|h;EcI?#lR04!zsHCs*V-o5Ua5~mXEW_1bU+QcvFPaz@A?& zCj1C?GP%ouN(Ap+iK6r?url60yn22oAMo z8^SKd3nDEYtznp4?Y$%nvGD{pvOiDA|F;&Bo&0yjIQ~h}i`ZtV3^&rB#=gR74W;2| z@9*xVp&WCLWlKAwP)mF*(o(46f;bQBEd$}Gn%QWgCM^(yMCYLgy7scT$!2nA|8)U> 
zOXcFg^am0Cw!>TZ7yJsw$-pXDqDHmUuc4gsMrjaiE$lYVePEwC6_uO|e^SQ82zU(4+| zu#R!*pEMm`eg#;k74v^s5H8~e)uClpJLYZSqm+%MUdQ{h1ayS zYh;Z%WiJ>2<37?asTVqjh62+~-op8fKZ2=4w~ZCsTrp{fh$dTyw193Dhg^Kh_pwlt zLsa&EyHXZFqilQd+S|(ZY%7dE30Y(E?Cz__0{a zf2Q_^S6K~AiM26lI#+I|otOENJ4v^$4&{__DNkE_4~ua*G)jpe>ehfjJm}Amz%>DB zUI)}DC+Q0(TaVMn1OW{M;Yy*!4;dT?`)#haR~)1W;Knk`OlO|PnOts2eZfm$9L1ib{>x*?`;MkTX^SlV=wA=3#Yi@&6m7Kn> z6uStKT4wh$6?RejCpQb}WOvz?Kb^Z8mUMa*`yNpg0ctn#SGg^6ZhFQ3!+@5W4HZa@ z;sKHPXL%Hlr|x!isa})YDXcDPWB!2cAdp0d7FAaWIHy^`y(o?^I96v%iT>fkl%Up* zON_Di4aWzWq^VVHkJBl~Y9jWg`eL^c{yw>?gwhF50%d;CfJw^uU!t6&wn8!96?73u zcaKhGtqK@YsV9A7vOpq>-Z)Zj=Lf5nj1NbNa8Tp0B7UT{1tIfy&2XVKSJU=Wn-b_5q3}8anfF zi8xoKt^rIqQSkTd21QZP;H6l*%kG&ETO}yPiK0}SwGnbo`Tyi`2?A}|ga0in-IpoF zt$Suf9d)Y^--{rGCJZXaW|aVEm9r-8bX_FIsiK2StcJfL3OTlM>8CBWtMd2L&{c@D z1RNc+R=S$^MGVOjozKbDnTI5av8X2VFhTDI466lOl%9+aTj)6ej_l!Fq+j zt*2K+S#aEXYK|o)XF;g`7NSXnzrh0~*%yBW#ScaETRdp<1&wnmN-h@t-WvfLh@hIiXUI5D_ zF=_{TlI|4&=cF6!9n9O}^}V1hPVQqC;{SwQ$TE_}05_rm#seIcj8oy{XsK4LQZ4P# zXv{v!7g}}W;(RfViC!B$_fkAtrZ~To=3uPa1lYon!w}_)b&Gm!Ar*WeIRj*>Zk+H`N+*#w|&v!+TINAvhEzbm`Ng>bDYF{kM#ehAYLSl{Qq`fn5 z?G&kGcZn!ugMJIus5vV4$-Jn+tJj#auf0%0EDu`Z7-46kntrNcKOhN_W@wDqAK+wG z4&*u9oG-#zOok*Dzhy5C$??S22jIn}+{#`RZGz+xloB|L2*kQj9tX{|K3fQ{v2naZ@vUlkKTr z=RiZV9<(8>LHe{<=f0Ks!MJ81xu-AUJ(v{mW?pC9CO{6@bn)@<8Z8h*t(r=Awy?wJ zgG8~M0cW?o1EfuN#B~CWtclU|c0miXhRrNyJCBJW^?h8qU#@`L;bW1Bm&%1zLu3)- zMgctb_728(t+*}QW*KmfVQtx$EwWeBFYI_|KP@a)Z)?v!nb6UNx=B+u5uK9G|)D>o-z5On&xXeKCwd@hFaWHH(iTlWK!taitlaywnsT?4b?FT!c@ z<$ASli2)clF|G31p{KT9E|TbJ*E4Hl((Eh{M4pd*go!{#xxP=`Z7hkFos)evdQ8lj zYhRtLeeWIgKi0BYoFcYn=;il6l>bOOO_!ZR0ZZGpAQzvODc*SGxU@euKMY@<*vZ6v zawCOQH;PU(S4hnK&nq*`X}KR3w~9&oKD0rSQ@bz3S-j)x8{?3}fn<+oJY$~RE;LF% zUyR2^xMfa6t+dpi3Q5&Kvw9yr5ycY9KI&Jxwun= zEjnmqO^qI**wl_OE&d{1m>Q4IhC5FFQ4e%uG835Pw%Jx_wlopTn z@;Ng)6UM=-tC4n0hbZT{3X`QVSjOxy%#PE};(x+tjmS&IX9Fw{bZ0pH?mGt}m8{Mj zdSZbXS15EonUt_?!IkB6VM!R(hGkQY5MeHy&jjyC0TNJ&)lH6&<7HvT!9f9fY1^M? 
z4>aD|xmPUcPM1$goKMGFB9ju+Y;V?; zAeU-&qRB05rSY;@f5;vfmpU}fntBzaV`k-5h`WV}35DO7bq#Kdnt?`2^hb%YZy7g< zi-n!PZXb&!9n$94LL(^h(igJ@5VB@jl;?BT0i8B3#A)>>iQGIN*20rHHSKD~bK+b& zGufYiM=3G@EkH`3h=J8}=RO4Op)7(9>*p?o1k;9SCM#mLxfr$%2OTji_lQjW4fE%=h*(PEC_7F$C?WqghrcA znSWXSZV005WcQ;&ZilZb!y!!iXRma{aO=xoedD92% zIeB^dWC2%6RD~9w*(XeXnFv-t z4B=w6idBi{yP%vt?zP2DWmw15bMr^|*MeftTIaCZIw**BOjv@Ft&N$FIutg9_592+ zf-XK+3- z2i#3h28%~Txo;S+?Hg9F@w><2h^um9)X6!Nh3uu1+r%Q-W;AP_l-pr8Fg;e!ao!V9 zmdbd>iOU4>-OAecak-tMeTqD|{deJ^K3rER*nLAavEoT+i-5h6jY`-pNHv|5c>!md?Ql5>ew{{rC>VsAFb zZL=CNmJ&7qDHSGVFN<@BkXblS^zgQQhCkBmGtc~1(BINrbbYNqR(=+aQo~>YonwWN z6V7u~$qKy`cRdH`RP{5xpl=s}7-#du*@1t}{+yI+DK^K+A{h*8e{urZ(Tw#AF(jdKYE7*3B5tP-4H4#J zhn5NE`&8>|MJU0nSfBkHZi20u7XOjc;*lQxPlB!ibPFt}{1(aTFZts>rm$Pf?XpHS zhY14jcZwnqHUzZL7$ZoglBarK5^(ueX-S@7f38K8jvP;X881%!9G1SNxL1S)K^dWP z5=*@TLyE^uizYcKElh3GW#xs)Rv-6cru`ka!>Q-(yskK1gyZg&t&}^2T?R}G*u6+L ziXSQann;$T>Q#J^{2m?hA0Xi5P8;aY7imF9igE=>m9rpE73rY$+$5(wta##du}%>> z->m4ac@2WFxnI~4FA0!#YD28@I&LQwDLlw+;6 z@*Ky*ap)Vc6h{j-y2--N{{Rvv2YWll$bV*Ea4S!vA!apV>wnn`Og_e>fOp~^idq4u zOXor{LD)r;X#DaXlo+Q}yBwOsE&mpj>pufCOAyL*{M&Hp{3$U+<2|O6MLYa@G*09E zcfe^gDe|TUTjcDbZt*T`Dh+z&S@fRuqNoN;f2ZZ+5Fy9dq`L?7?{U632i>YY-xJ=; zz8E_!+TYLqoueUm_6OiJ8+v#IL9F>96y?*)VE3Wi&Nqj#QsUX*BL~3v#gkP1@!J%W zAU#?YV}+e0Pl)JA4-%J*54z~{w2;F@mqDkfFP9lk^i^d)5aSxu*ee?sUyGn6Os7Vq4 zy%+TeTE9Rlfv!YXXE6O+>L77O3R=4e`g#G^jM)R6eWP2Bd>#_(Tqibo5bJwok8F7* zFP75^R{Ro=WOWV|O08mLVRw8@By#hw{5P^})^`7;!j^kJ*H84*jbdzw2tvI#+UJ|+`M^lkTB`wEw=bv?x7li zf#$B*;6E^As#jOW(5(=3UzgKcLreoX8vYj2k#bK$H)3M*@1Pthi}SebvY7tN{K z%YSpk*%hyrF;`+M;x%D+4jj1adRLOswZDgUA=C^=J7eV^AjtVvTgU2hT4Y#aMyyG7 zSW_$+(ju5`H+IAK6GQEz%qiyTA3@F!0;MYGk-P;Wop?+ow#7nWB!u0Qcv5cXb>D%` zfgaS&Ulv6onx@ANKjCyDqydbEi;e+L6`P*<=zN8V26y#2BY#YPSlqQTdBqD{|INJO zOLsKWtRJR7T=v>u>F7(GWED({V6}?0zM|-XaoNaw!`XZl zXy-%^j3Op~aY5>LzNsHLt&wI!L)igeP|C8PJ&6@L}y#ywT5syJ>n2&aKQMC zsp6@P9|RqjY+{qZs&t7FFeIxw0Tv}=z1fe?*IuhT!dF77luG`dP`E+>?8=y;jP4#w;9=;YTk#9X;P z&~eP-5Rn9w>5RSeM8!w{BJ3}vbg`fH@h-5WRju{)_ zwA|2vA_t#{AORdS&52nwc@#ZM+nzurVca5f=FU3$MW%e0f`O^_20?uT#rY$E_Mc{;~#b>hbFO;MDsaH z?3e=(@F&g8wfEfxXhE^a=!%1d`5Wm=LE|Sumo>vVHsTovw%Qer;Al1ZSZX(b1*RQQ zTcX-WBtCQ!5&Ncqy#+Q{1swYXj+6%kiJkW@lSR_byMvKKV#!ex`rA^14y(I3NP?E# z6CZAXtOj%4+7T;L>I(bVCfceJU&yFpg*HQb=bATyJ4XLokoC!-p0ML+D{whNHZ6V>=a00P#@28x z1kG}KRg9L?f~@31CxbGD)hLo`&@!mUC+o+7oT3@5dfyauXxuBO&wfXQbIQ(Ovh*sB zcK|d=v{N3L0CG*mDxzxY>Jwqy&!q;eJgb>LDGN%P^;~U;`pFK8qL^A4R|q*MHceU> zZAA3##xRGKnZ0PcOi&%eDH-KG??)a<@#!{1tUVJU=dOy7@dZzl{^jbl@ zE$kd4KKjCnX%LpkR-?9#T0zUavY(Dgud^Id=ISt@!rUPjec~KzN%yE0l!M{HqnJ^j zg<8cN;5ot6w|e)!y}%Z8wNI7eC{ZfMnmA8RC#jC!{Z>If?qFdn)8>VS>|+e-uzOpI zTbiKARdUBi5j-y1<>g>eLz(XGkHhu`lP%0oXt(VaLH}JRP2=J| z;IzN0kCTPcUX;a+%>HuRUM3qjUX%fhQY;qfaO$bDar(Ys*N`Sz#K1W4elSDRZmt;7 zG3laFLZ^#1H28Y8h}HJmVtFT>Z3S-k^vCu41MQ8W3(I|}k%PO%yC$(*V4UzO zJ&ZX)m^A5JI2uzx4}&Ro0c97Hi4V$6V%ZL6#d~)vD3wk6jMIpAD2uq6osDL}R2)O5 z)C=OA?wPWm#GzkLEZLC-Rn0y#_WoiyQHU)Oj1-*KEc@xg*10q#>Sy66Em5{o&lM!8 zQVu>}ZZgD1?&w&uGxOI)6OPDP%y5_J#-HmC9X68RMua8bplV-=BYGTeVtDFpfwaD%3DOh$6d_zLMcpho zO4!w(NNs~kJ+%@ICdqe#R2Mb{k>Lg<5T!GZ zq4`O(rK4X$a#^`Z%fYpQH?6ynN3F!g|!ndil7^8nkWBbuo;U=_$kQ+_E*G+xX69H| z+#swrQ|OFu<#upYi5AGYM?yGJ$vGOO_>?FTEiLY-Kgs}Ix30KKz;QDZ7}V{N__H_% zQ16EkGi% zaox!vC!v}Z%bh}giQN1h|U|>hk8Kr)9#2ICb_*&uy zVT;ojhp0rKi6a$>?aMgsG!R9H(pr2Vw`!^EY(089z(I2rurpo|=1)xo<9h+8y+Pf^ z895>giwDX+Fu{j_SbLv|4@V%IjHsW1-IQdxL;3XphW~yydh5W@iCTSY73qu7&llv!;X}cyT^b~W?g(2kG zoEV%cx3fNF8m}ilE$pIBj!6?miM`Itf@o#IMb7pXy0Fa5VLO5;!5w0p3M}lg$ISUHngJJOUv|)6F&cg@ zhO*V2;&}LC11wW^#ZH$1TsmZvLm3w?#4^u$%*dMcE)o; 
ze6r{$4qrbNK*C8)qeXm?{;492QJ3?lMN*qndw%5#2t-D&KCtlm`1wkpbChau>#U>q zDp;p-D)v>hoPAZ4ONj*-&*w5LS}e{2?c(h{D)ld7NT%+pVB_X$5aPv(Nt`aX6T)N0 zoC%z8jR~f^EcX%PSt8Q5C5^Oigb9l}+B6tTT?=xOa!loS6@cD@dG8VPeq8htz?%2lksy(XR&MDiS* zcEutArx;5=UC}t-e_~Xx7=ILS8Z;cSl5i5RbDVl(1EnKYydH0sOAC7F?NKknX=rES zgiZdqs!Yvb_g-s@xi{d;<%a&xQ0`{k2uoge8nH#R-GtkTo1&aQA?%FkUA4cJ|DIXs z>x`9e=I>N6NS`RiiE)WzJCkirs^L-M80e==iS=*6i6lBDrB?-rY@ij*a~j@j>7TMo z9EbmjKhrv$J%>tVIqOz%F6lP>CN{)F4IZ}jMm zT5XdkWT2M?Ke-(@F>t6P(>|Nu0dyXD35KpPwh-eunUN>0zU$7+6W4I{An5U8?1>i_ zIH_RI`YtGUewxJCM)*YwAT4n$jqsZD-I=FkyH7Ljs1)g1UE3RH3R%Y7=u|QCc!)TH z;dH*Ju5YohGBme8HoeFH(I%1|)iJ^rVYL+h1Su1yK)vw?0lphGW{0T1H}ls>g?dZy ze;y36wGzERebs#qp{0m^@9y^loVuB4=jl<%=oOc?V`iZLAJMGq_i@<+_^^~U+Rz-& zdJrZpS(N-$O3#)lMmwE<#;+d2m#Z{JMbcBMSo>jE3pIISa9A00g1J-Vg~_Gf_B};z z$E;C})UNqR_BF;@N5sv-E;lV_`WD_vo{xGo`;3uHGnUiEAl6jr^LataOtv{O{;&|_ z6tP2}k57f2?y?p{Z2wrs%2sf4F##1`v6kM*5hFMnT;}nN&^i`Fz-6L2c>;X62n(+( zN86%%ZV=^sR`dP~$54%8T&t>Q#3@29H&r||K=bk36L3@wY=n2mg--&U3{FXq{bM35 z7nU6zvv`3p7hj50ZK7hvHBSMY>wUF++UsdBF`|P;r?ui=Kn}3BZ4d(uTZwSRoH}kw z99V`GBZoy%(9v;&tIIZxE-sTZuz-*ZuCZ7o9nQl0W7IQQ96iZgO`tNIv2+VGAyKol^bg;PY?}v=z-Z`hyq5AQxuH zJfkh(AT>>ojs9vZ8~Uo~e@HPccB(9u_rJjZPEix{wO9mnjOpHKUrZB6_=}ESbW98g zAb{%0ak88ivW8o3)4vEY)C<{Q7T4-U(jc^@)bcWJht4tvolFvR>?>I)v)M1`h{Yo8 zNr#?Vc1Go^4y2K5v1iClNmCu!D}7eTzS*xv|B!L-6>%2Pc&a)gdNcig;hj{4*ZsYf z*n5QBTFOcsG5if!XDzwv8rzGoHy(`|y3%*9SR}*v&>OD`IM&LZL75F%z@PaiFhz z?7J{dBdYeS(uAoKmHWmbIVl`&`En%E{65HqgDyp)nj6b}V9!l8@mo0^GWSgMqRd<| zRF+=ecm9jq7FRb)y*kUS{}9R%PteBKEMX#UWSi+DoTR_KzZ=8%0uGo{UrmMX6GOV5 zx>$D{z7(QhxSpXZIRE3!dllN>7uknA7?W7_ z9Pw55A@?}D5^DpogGh^|O7&z%;dHrKD32W$fro^)SdrR&*6qOF~IQKYUsf^*^N{XT#V$v2}%rBge zNN*EG4rc1f+~)-xFZ~!x)s>cpka% zn`Ba5O6m$n3nMrtLp^b%0ClUDM^fZWYauhMv(wKgxqj_#uC|gtE3u~YVi%2V!KKAow$hN{S$AeBGemmH#!9Rc8>D)CxD@lmJJ6iZR##aK z#`&j?#KwZoqBNi}P*WzFI2Pl@k=5~mkVmssT`PB5uZz5JOK*LED2F$m{gO)sow~#b zR$@Z&s`6*HjMafYSN2&Q+ZwHF7;;*oeXOTam`*Cc7j!Pr7}JQSV6pq%R3Ds@ca7tEu?lAv-P|hz zjBC`#@s-iAp8ukTU=n}*>}ze{F)PtZzCocgI{YC!`)o5f2r{tAY-#QK9`J?D)|NCfjRo2;r)jOoue2 zs?(e&o@lYxNz4C9h+tSqCPVWkzxBVwvcu@uT*!fHjn6~%A{=WIeRpgjjLaB4i*c-g zWzWTyV4!l6fLF~LXTR$ zE{44IYhJeJW&kSOHrs3yKcwzX_R`kf0zU0w(d*ZySt*)CI(F3E>5<|rF{G}$*vv`c zW?O;?SvxN=U%-7AHYPhz&P+PpA4OZ3vUa0s#jk(o&oCy=!+AqsFy9w12)LX$E$0yY z6A_m5Jk~S4TV?-O6?>R(Jg_wc<;Vd_auH{>-4>iq>o`eB zSNC(prX3)PJ|5VX7zRmWuNfC1CnDpHN^B}OIUX^b&4gV9Tq#EE7{{c4A&gv{C*X8f zj2gz{oQ8cYcB#5w2s*}&o>^&(t8E8KG>9%Xk(+4dB*USACD_uAogVgFw%s1e`!{U$ zv}$FiH!2wfj@3VwV3YLc;zJScnR=vuAR~T@#Zj*L!a%=tB6{Mw5im|s*TCHL#?{D* zEG&+cSo=*9L9)~%#ohwWVm4Yz5f<>% z38+;%J;fajm~U{aOMvq1B!S!S-A!#|6T~&A7{O_-y(w z{n&)2x+yVvEdM$!OyOve2a2$$nWl7Us*ch8*m7r(<*UO~nzmIV4;AlZG`1Au z;az~Km{OzUTlcYjO9p-09d`%1$?@iJF7_8j3Z>3m z%-GZJc6Qnik=yy;iXPLDf0v13FI!e9zv083Pdg8EF>>4@#$mHjfhlrf!k)&`|NXM> zHg%Le@u?UmtYU{@v0b&H>N`v7CP9Z)CvBTq!p^=-tg&*V3f@wa1%VZx*sT`mG-J}9 zf#S$AQN<*wFOD0R`CuSo@AgI!B!Z0uy*u-!ASHw8SxzT+7!Ps|=XhN>UQF6qqMOl0 z<=ekZNlU1gQ0@`soQ)c>MLZ%%pt|7P8?Otwj4-jG8!n|ddO{W~!!l9jVo{l*H8bz- zkAI0uv&UsaibMp}yq+s86LC69v=pLB&^2uHF>z7(&vA(98$k~Q_0j-ff~9SZ6(&JD zwdkH#> zoNOS_d7LTA@l4ip_VJ|%%cxG)TC|7rl{i<3QW0I`E$hI}F=jgAX~C=&c|oB%xyMv{ zlVK2w=0HJ*!kI%U4o?3jaon?vm8aQva{IX_P8C!?G9@O~<8+dEzg@;W`{Oim7Nmyb zq~g*FPZ#YtCaAxgFH9(VY!YiV;3UzFD$(vZPS8HbGpmg!gc_nGUuCAZNIe*g zTN?3W`ElgJj4U5_i=v$sc`p0q)GZ@UJ2k-*n~u-viH;Zamsl7});E`l;>$K#>y+uZ z9eFk9W1NqiA%=e^a+-9%-2S*xZ&H!{HD*A$*f>DrxNW=$%ZP!(Hw{~6!a0lF&*+WU z%D6j+&csoBXW=$;q@l&=)P10w5cFSl)A@ceZv2?{Dh)D8>5lhAQpae~Makr-@w zcIm!p%-$Em5jXPaQ9Lc|#MMZ!Ty)CJ?>+~TtSE3Dz&nn9M~CQ%E9K-nIi0=Mpspq zZUHk&V%shk#|v26nB-uao+W;~DiTpK^^EsZx2(!sorfoxx-L!KUm)m=r?cokryewc 
zCLeznB&J?AKVzo?z{!wOQ|v9QQC~IV&{hC3ZZ&%AI7)73ptDDB^t~a%GexZjll=oO zX|=&xW_hyxPeFo{A^upk9pJbc&SXi?d z%SQh2MyGe9NP?Cch)VzXSzxCYJ5MyEOGWTi`V@pG-ph1Ck^pVU7GfnKSHBt_rQx1# zOczD4= zTq2wXL0d|mA$w7rgGlBtRI^XZR6Bi)dhojs!B?6|n+hdEZ;7;sTRYg6ipsun_#A~N zJ69))a~e-3=%_49x(K(CJ(P|K9;ZfGa>Ls1rLfhjYKv_t(_ zYIi}4j6+0`Vb0WZ@v4A7V9SztMjr-2+KGfZvbT_n6@{+uSoUzR^P&w(TeP1g(qSYM zkvO?bAaM@oh97|si-_YCt$QbxnHenONY?RU+`mt>sR_&fbMclGRuOa*j&_!~wC=FC z!uWV31j1@xcqfLWVk<$l4o(%nO5NL4jESnF;GGU))$Q%uQifx9N?dl(Ha;3ZY4c>L zjpk#FRn|G|Q1%`RMts=P<7La^0B+h1nC^a4&}n7sg~=2m{6U--d)ilKIi!aNi2C&7 z`D3WDvAU8S?RZh7Lr7+#s`NjJb0kg7WR^PtNCGqqij4%2L^CftCIjgfA}mJC$`tjc zO;VlS7Vm(olcqrB_~;}!2Rw!GW%S90J#442O5IJ+!fM6#Fh^=%h)KoH{F_I)PJuwq zT^N`eDL2VVW|hYYxb^C^B2B|pVl7%uNOEzPu;b^w*f`--AfZ(Z9~sxloz|%qy5zX^ zGzc&ASxqEwTcS~{inIRd4h8iJbpaa)x}oq`Xi)>|U1iE9fOGM0QN-1$jf^uKKw`|c zKMoQkBf450{Q~%73o0H=-8-RbvDBIH7LM;8#YSQ*;Bly!f5pa_zSN*{0J z;&#B&d|=e4EiKL@^l9oy`{N8DWHi7b&eN&8gBo`3AK($q zv}y5`oK7w?aJ{PbjTl$xCZ--eg?7DxUVj0sMWDSWnlO)Li%WIg*zBQW9y+=>{6hY5 zMwqC^nSw67$^rW4xKV^d!+4=A=ak|JagLW7L@&WZ5nb_YIV3h8;=S^RO}yirj7lGi zBR3P8Vy}yE60ht|P`9p4A@f-++#eNmGFU2cZ1cJZmke8OSgZDO^O}n@e+?Wb7h>`y z5bB4dCAp7))5&0*nn8-2#afQ6;^xtFKP84rDRVr1><=f~*MAdf!AxdH@Bai z*==GSeFG}}4+~OTwAkU4_HvN3&|MhN@?y^`U@S27mUbE*bs}6N$4lLOe__YMMfskE z*KrA*!+r^v0I=QIb#f=Gt zI-~3rJO0dnuet`WSxb{1>d#`FyyU=cmuta9wD62LL{9(J)W>ewJHr0BNwy5x_Yh;* z)T8WHmI@g>S(Ix8st!G!r8ZgxJ=?FYhew(yI&gzvTOsF!vgf{F zoRt1gwXd&@Gs^!ru*b#r#J6Hd7Wpd0GB+Bi0mLc-PDEu74?^&0_3EP1>bLNE9Um5A z_)Ty~Lh72yQ2e`_A)L25%r`tGXi4!z1U>(cBAi*K^_{WBE#R~_VKXDS8oB46;G7R? ztUBd=#5kClMdp%mvk0PT)e%Qeye5bod~@{FTS0Ejeh1KF=4qlGNU2lrnJg?5O&Ukz zeGyK4#g4JASfN-v` zWU0Py`8#1qbyuQmvyG4??st;xtn>dnrN{FS!Tr$cpmoWn67gM_Z zMLOdFan3y-#dc2t$&H`1z--%JvQOvMkmI#o8+53O{j?n<+L<80B+?3EPfW{!kovU7Q!0-98B7epfOWKX)soPKJzt0 zdH_Aq{?{xh8FOJ-66c6=vWQt`m^8#ZB-Z(@Z>)(uUI02T%`y@Z z3hjOIdts*z)7lzQMlFJIQ8JP5U@T(@o-7if_i6k2_W~3WhF;=Axrtmghpb&D=-^13 z4tTB;V~KQYvy!>Uo`3WA*Z_=2dGzXWyzt)nD(!d8U8Po6_!&UK|IN5Y2J z0Q+Nah_PT2>mR;E*8g(m52Z_8PIyj?{gpbp*&9t3D9gQ)`NOtOD`U+Te}{2N|Bq?p zQtTt%velzNv40t}tCEj?VZ?;$AdjKG3UFlg$!1Lw*)6Zby6~&HsF3Iw&Uyom3dGUR zeR4Xx$)$0|^qv0!6-t-kEPzsWn@-2cZs29C0lv1&YK{|U$57v zV=oa7G_O~cxj=H4sI2B=d6n*K!?!XHOS;V5xvRgsIDe`ks)Tm*v7#KkY$;JlTfPH> zT zsF@N6lkTlPf_5aRiei^7=8AD7l_)KtAPaUEQrQs8eC&TY|8AF_@?*kILSOQBLMeU_ zMXX(1f9?GQH_2hK$5E;P^>s>J-1=`jr}I#ToDKo=w?%a9CjG11sqD8gUrh=v<2^!?wIeN~~H z+7sJ+fiH4z!(S%|rCrDXhrg(h(+h;`@jA{HK^c20 zsOk88F^-YJjJ-9wZyN1)qFv$hbZ|9`fkSPG(EyXsgHaIa;+n;wl&n%s_!^1=uN+_d z|EN0ez`U#K{{w>TDFXe0h>Q8Dx?tX`3>XB|}8^5ZNG9 zw#cpn!d?QhrwE9EfPid)-|KzfpL`yE$v^kJpZmGv+h~htKiQ!7$HQMjyJ$&Dgo2u6U+)X{*kYF_UC7VA0&z)D{E@W&Yk+J{L{}w! zHeZ-Oqda;C`EemmAECIqD#@c)D%e#o;A`3*z>ipNuhFu%=z&7|*S&(^tBxQj`exLG z^rHMTqFf@$4ba$G9w3{EkQ5|+hE8Ewi8@d^#DTOBbD&XH_bacE$7p8+GjMJe;uzR` z!*kIvkVUT$pOmBP`HN(VAoJqYXfnU07}Mv01s-l$2;W+gY5T8am>p37ggLZ72*;BE|8>e zJWr0r@hV`N7?M`sW1_NNm|90BYOAdZn9XdhT(h!W`=kspBD!TKc*AOWELqmc@ydIG zvR=XFmw{J`F{^`RZNMC-ZP^+i)WcKpAPTF)OlbXtrf zX=A;WrN#2uII;}&btJ8$(eF zgn)&S3($WPO=HN6F&-4*4ul&uu1DoB<)lsg1qoK6S{b)}&tK?GrC78Xge%Srj=1A} z0h($S%E{)In?tz@dZ%?*yX38f`IAP?TJe7Z%$Nl%?{R85!pt;tJevXXi;H$wYdkN& zth1hBd~XX7&jhvA^|56}>S-j7z&nVflIddp_HtB=bK*IXE)L~qcG!H&JaJS>wc;OAZ;bc!zcakav3_hxM>Xy@g(^4Do$IZ%XqZ=c?=k)5VG-Xh^< zR5c3)u4;I5Ntj#1#}K9V3xQ;&J^8y^6NcB`6MZO(U#e>wqHbINLYr+pDgGeJ?5fb~ z6YX)E5Gt)rO)`SISAfgS?m9_%*4-Y+vuTz6OC_7%%>+BK9yUmDq*n;V!1ETZ_q+1% z{m94|;tvXO+KsZHMGkQO7$8^IngB}*vGWg{3<~D#eRmb*7$``{%DX*&zav;SVtnnt z;!ggCdd?e;l|;D{G$O5BPh`exMkP)My3-BweZt-1)Sro9?#=*aw5<&^7zcI%bOAKw zW2AyQo3N~Pxl9mLF&4;qj@xZ)>?_h!4<%;rk=3=W;jPZiZYaV35Cswk! 
zDn*xY7Xei&4mN2;Ub1VB%IsbzaSApv5!402AqZ3Ye+8Jg)44O4xErLi)&7U}zCeh3Hci_iVTw-HmkMT>Ykw8T?+%d_ypQ+V z7!(ZCeI|p_9CF%>vtm&$UmGqPKPZz;uj)L&+|CfV!8V zwpx-L1n-bwZda_3in02h(C(heJwsBq{GmYSjafm=5otz9*oKys04Gw9-{zRP7c{d& zQ$s~utFSrJ)S<;Ot{0L`HIkmB_Sj^fJX#AfC^oCV65>q9?Yd)ZTmhN&FLo6uYw8Sf zk5D4%IK`p=WszjYC;&baY6f0Ca6$X0p zT8ofI2u*a1|;8dnaf$5TIc`EJ6 zIwDgN{oZu&tT>D#`uwS7L~|wkhEIezaVB~;A$!fpfvPkutTPk93`Nv&P?3)w5af^0 zhoV0;)B&ixNa~_p1RUvN=iqUZFpQ1q*m4D zS5sl!AjCa6IpCEn_C;aJRC-;z)kBPSp0P1sq!Z;zlp)t0j|!qFea*?zpeyu>s!=^mx8x5ivG-+y3&0AmRxzq_eHrNT#D%JrM~?@+-itf8C2)~B#(k| z(n78jHryY?rJE|9)SX1TC-*YON1ePxt8g~XO%3rE1)YyP&LCRd;{cFsY2;eeo0UJ8 zL789sl=bufBPNxvs+bjP9Z1-$Mb#Sx>fty^kmrgXYpg=H4&~I%ZJvESi7|hX*r?q?<9!Ur90wox=s|8 zn9X@_c9HO?eF^(7HYMik8T!G@PHH&_3VD~R4baWlD?RP$qTvs&Bh%R?ZV^CnaJrJ@ zIOOmjL9T0Q2zSPV@nty`R3`90)lLlRUP4UB9^mI0$@JNTjr%PkU69<6Ee`GQC&bZA zdX`AHlMFzMu~dKyIz=~N50)b!vSkH)uN+~K3`SP&vow^l^bGnQO=m7 z7i0N^GQd6_CF{opxO$CTA;+tt-Fu_}RoR`BjR=?yER#(LK2Pf5SDGIWkMX|3E*Up0 z>7BY`dV!qw{9>UaP8Ch`T@a96Q2vc)Z+W|3G_|1IGlpZ0zC1CeW>mRxjwp!6w?{gy zDQ}k}HL>H<`tXq;=g6LKAkH`#I;#!3>9~*{&1CGZvAHOZG&VkZl~~)Qg!&U*OL3R*_kQ_YL6)0b@u(?y0rB+{i{{r;@*+et3|I|B7T7GL3Y^ zDMJu$CA3(P%RDN;DN6rFu3+|92&7hKB5jT5M7dO`o9P~{H8xrV=H6W+X<(A2%oIpI z?QB}&RS`~B+B$NGzV2cmnxqlG>bENFVsxi^z9E2*+2EAoa}nQ|`4Le><8b~3?%tT+ zuUO(QxOUhr{w0DUcXAb2jJHHNb?St}uq_S&a9btQ_d(I-SySJzo)GCL=o7Mn_#1 ztBpt24p%svZu-&K=P98wvH%lHI56g?PN}Pa+R)ELWZ7B>R*J`jxnIwk9^XGAk1>@K zYBDfuIugv;)HA73qIp6nYWGk}9HFqkpSWABd=wOm+T8lsM?o{AWoV)FlVYJD=U;(W z)U<6qL72JR!h0TZVmS`_ljtFk-?m2+2X^2Vt|JZ=;SZ3D@N(NR{&7%Fiidt~0tM{B z_|35p=KbiS?Cg0T6iVR-rF5>aHHeVh^~fDZS`tdJ?{SV*QB@P|3Y!7ii*iaHdpwBy zN+afsrTF#<0F)2~F5dGN;Sx?^QjDL9&W6uH5_}#fz^uUsanf7(r6A|g)!8cF-glQH zc|XYPQHExB591fQC2CL1)8;W@M%kt(0WkLa7$L|iuMz3ID_Xgr!#`!7QF>RT6or#P zXhBZ57-rlq%4JLI3-j0$Sp+n{SYN&psoF}s`s2vd8|Ab3GxPf!!a?ijC% zbj^|r2F{n|2vd2*bk3C5NX0!fH>|h$=Litl$eGwS*DNokarSQj%$2H9)y23~pg)>TvNh*HQt+L| z?HaWT`=@#~FZ0ib%z7JIYdVuYCWtC<22UFBUzMXs5XhxsY<>Yz+@)spCc*s)WrX7! z=jd4HLgIL?u`dJ|zNIj8hpD&}f6QpVep8BPMN^uz6_JRlJzmU`u@65>wBHtH=Cki< zgRe*_SgTgNC{JFda&f$9R}OO;*1&-{r3~Q+03{SUryyDY?@U8+uZU6p>Ur8p7Xx?> z(+f=L9LzPg&`g_F8Wn7eiYP-f5bxG6&eJ|6v6B%*23Tmf#-}1imjEvF`YFMlmw?eG zNELZsI<69FE@9k^KD=lT$86)2L?}})9sLTW>)vP<>5{Xgad&p10OvGF4`v{v`lDWV&_AcQK)*LZaP z7Q$U}5rX6n7=!hgKzEtxQY%K$>2eSkcwSN(T_C{qULap#YhD3uR&p_#?rFCXdV!|6tNcsq9xq@f=^|>Pwf|T|zxFW^xD~66uOp zv1Ma3#LLyfoLN0H0(%|-E;5gv_rwangLZx`=(_PI(%}Wm;Ae^#3o`Ft9}t;U=5qo;dDW2uB&YrU!-&^>WE@w$QAHpo!es z!YH^_n7e?)jPyR3HTF8dEK^>FkOj^yf?Q{WA*}xKkPyw#SOcQma(zzmEXtrL3WGqc z5cg*UTclD<`+c5Ha+8FhK!{7wlY6>2wHyt@V;y&15aJePGtUZf+YJD2s~TMm-7Z=T zK>H>R`vU|MDZ0~fp~B99QOOf3nyvmV(8-NUx6JPea0U0p_MiUw#El^CW^)-|Gj7Vs z2C_R)q!VcD=b|bW3UGOt-;uv%H04Rpa4vTy3J*im?U>Gd<&qfQaf^~WNXI?AszIb(rI2CxJ#J( zAMc!Jx@J6RaVs#zWU# z(oXSfp_H3b*~D?N-JeV)sY~o8YILw;LdPzNx?L&M1;VPgHSQPf#j232sxU;B|1-GR ziSUvUA8!iD293cTWjb7mm^QAVaE*EezZ2ysSNT^mVIU)~-S?)+zB3lV4D z1?>`N?jz%XyA3t@IvLHAI8%@r+fa&|L^+MA%(YQ}543xcmT5o3#coQl3okXB0l!@I zict8$Bi7n+Pq-J-`TOYG8K(#uEjoLJ-XSm6e-du))Fk&AE8hp;CV*Wi{-{Ow66gfc z*+?onCkisZSk)zHx~Lqf5)CvdlsqYjhTud#5G&jd!NF|um>931GeJX=J-iGg&JyUV zp`)4AI1YQjU)5E|Ru2+%S#>ARp-ccA^e(Ng{4-9m*>j&OARCC89w6Ijw042bX=L2} zvE4(^8MjcKkO-+L$T{havSJ+eFaW~{mpbu97Vf|b{0EQZ-?Gq4Juyxe6wmE_LE70WO?ITauA%{W#c0tF6aJ68C6ti_ICe!?~A! 
za`YZHM`)R}#N@w$QQf{~DP+VYBF#BT8;9c00{l$_!lC5QVdp2n=oai=O3^OD%xLW6 z{P5T8M;(P@npxcxREP6D&WVr=(( zo&*Xs@wP~p3#;+Iu6R#?JKRF88!W3|3Cc!0r%4?-buF^;3&1q)xXELqOJOGiYq+Pp zUx16o;ZUZ??+I~1=Aw}r=e?N6(4muCjLw%poHZ9Cn8&onUM~ZU4){K{vvSPvBY|X$ zzqP1Q*cr0}!??6ofHS5es?Q(!cMv!76f}#M6k+7i!u*-uYQj&C07^Hy2Ypwxld0`a zvgnV5nD_O_6(_s`=~QLbgmwBroGi?xMf)nVfZP5bfN*bQsg`_Mi>Bkj9fHk{dcE24 zsAyLnUDJhWw^ELBkVLGLUd=y2Owtwme1k3`-1&oOSITa=?=Axv6|;mrEx@&^NY4Vt zrLTd&%SFtAlmAIrqs6ZRHHma`O|p1FIoQ85_$q0J)3e{}ItSb7|E+Ph09PT2=jkKY z3Nn}KikOB=MehS)46x+gvG)xVbv3h^;$aa^K>IoDUgCg%0lCC-4$boyXP1!*e0gI# zc99UzJI=BPLKO2%tF&J zKaNn)%%LXQ>Gj5$!d%~KW&l|DmJrybyGLGA&wtjl5;V#V@8^lsqxP$PdpseGL|k>P zk&{0FaowcEg@Tj-=gryMn#k7*bKW3#9W@Y72r<)Ua)gf8MSBcX@+Q>O4J4(J4sNCOWG-lH(-qZA^wwyi&gr0RIu9Eh1B z@=XGF4suS$8g{a9XI#;{h?8rQAbtYqg3r(qul`eLGNEB(NMZkmxUwxy6iqJ8`tTYN zG%sv~xu1yg%x8@f-X8*3OhD8wJs^1N{@_a(HOl(P(-k9L3Z zD==3;+X-3D3U2>8|Fl~^RzDFo7zGD;=bSAt#I2d~_Z(4WqV#QIS zT@U0{?6TwW6+oyavgzjdOoTaw{T(M>L>e*iTY&HwPd7BpYTt(P=#{}79F`%%M4`@C z{!n5M(N0KKPVD8${NQq=TI9rWX*tqRFK&3!nQPL@d`Wp0q23aWA}koH8@Y*TtEHn0 z2`7WLSHEQQZ%YRkxu~aSUk-F4Z#ZHVCxi9qV9Z_>!kt*o68fS;fQwh1J%)|?y+G=T z>M$AnTa?>#CYmGfiKK#hm=VEo9J(5`yEkI5q;Vsk*|Fv7;MvBi^&pvsi#@BZ;jfv6 z8qvFn?M2i1*ulqv3cDuF=(%ylg$6DVJm}Oiak&ULnNBvG*{asezhJk*u^PuAWoTA$ z5FbU0e=S4RauUZX@J~VJoeYlO6-n*|rYRaPVjXB0t*6v@%vF>*wy1NkLk|@( zjecugP!|UiH4b0uK${ArINWM*3IO;-fX9&hI5i7SzeEMit}GGL$annV_E)LM{Oj!Jo&vi7_1O)+=g&1SkoxS+^3=v*8hWfq7nyg{ zO0e+kECPEvB#H4-IX(~S#&(+%A8w4_b!>d7unXFy@wN9B(C(zQy4d%;e3Z~^Rc5=y zA%?4HVXjgQ$DB2`gmjf!3T;df8wj8$ba3ku^F=tDdDEJv#_aFsv5|)&a(P97>md0V z>*|_Y

HQ(g?2>;w;e2=PiM21&~GZId`WBH_Q|kK6HZP)UER*ko9XTxJ-yg+gt=j zI@9AistoG6X|${0@%U}>Z9H-^Zm@x+0 z>w)yHlU|gzJ8i<kx_mXqI%#$NrJLA1R~U{dM;Z zr&HXMLMc(HMVdq%JJ%T1FR(n3K?yxQC~1rXxEA07Ntn} zV8F^=X?#r5Pt?jUvkr=KlFh6=h)upV3B)DlSz*r9w8mP(q^>tVj?RMhQ(~EdH2EUL zQFC@DOdr#AY+NN`w7WB^pDWXKY;t1x$^4%6>U4i7iNhKKJ)GH;Fhl#6qFS(vV~HZ8 z`Xpgy7Y5&~CzlCuZy7&s$9PX91?%kUi|IckJUSkAa+MeL*8~y)OOJRv3lE`Cwc;M& zE;y@TAtsBaR?Vnu#7{+-RdogK#arW_<>=G;78US≦`29}wc4xEBb_W}V~$=~7Wn zo)u{@o)JwkX3d@#|IA`?=tZTXcJDl;Mr+F*FNC5X{X&0u9}{V@ogu<~Wg1ua(w9Wm z*eulS)GNfvjU-tnIO~s{_%hSfeWjvONCxuQ-Q!O`f^s#H+(^tH6(F?!jEZtG+6Q8>@dlSqO*PD=R373iYX%ZtwAqTNy`0iRA zAhKCB_tP=yKjmocOlN@Tm1ms5A1Xqi6){SgSqF%K5 zSkZ>-ZVs$-W&=^9k!8%+ai}O~P*qtSr;Bt33`aH^v1fgbu)*V@+c;c^)5P{7z0q>K zAafEkd|0fqoFUNFW*?!Oq+)EfZyu3J8#{FwvF;+!g>I7B`F5xr<20uSaZ%g48%60mn%?;6ZI50T z%S7b65%~sA!*E^`Fnr^&0nRpm6=f!LX1B0=%>l~X()48~LuycnTlE8z!&u(dIA|VW zGo_jpI%!jUD9rV*l_2*c(eADE4k;b$EI&GnSKSoPDVQ}kx(z%?7tafF z%9Yv^OCq!Oet9%HUaNe&tzU-Z1V#C8Ajp}{n--JG5vFtwCg%-P1i57!D(YfEnQX$} z%ug~BeJ#D>bF4_0gXLG!hJ|sSFlvYgpPPR|(7h4^l-|}f@J0mE7>n5V+^_IxJrL_j zmUPYj{=H)6%$R%tVOJeBq$Ef=U69j6pBgsH#^THa{Z0SCP&}-#BP4&BY1g<;Ay0_2 zZ?_DRHy7kiI&w{~US`^*9I3ciy^aZ2kb6c;VQ5MGpcxwWr6=8DwH7GPM!0FG>>CQ; zXRW#DponrwkkjKrBdL-Kq`%0oKs+EK>u4iO(FkM3F6Q(gb^&o)8#Mf{o)sIlJ27wML^TR}9Grn4EgGl|lB`%v3rI-zw1AAf}a0@z|~$!CNFVP;Ai+;vP4* zepXBq={_;LtGmFVybvD#=$J5f9epsA8P!<8GU>OXU01DKnAFk+ZwqviXj7@< ze-j#6swc~eu}+CtE@gGvimWfdY2(`-rE}4)DzX;Nl<>Ay7&Sq&czm2M!ufFbqb*nb zRuHAN0?aytc?5Y{mcU?g8N&7&N#a19I+Vwfot6BjK4a202wkP+33{k313i+YiK&73!Ra;f`c!46*ZG`H1`*S}tAU9w5l8sHP*%T`Z$5@oT|m1a4F~GQ>JZf@Do5iPHI^ zDf-CLG+*z36r`JM;)ECwWp=ZbvZ~04Fej<~FKDuq-p}UOM9wAsMT2biKyUA!)@vz1qA$MAnMD zX07Yz1i3zq^J4X5iI9zhP70sqw`ze}q#NQ+1AMI{t6Qp&7!1N+9KKoJu;sB<{&b;qZo%&18`yEXMZNVkgKtwZLMIuBmQ^n*FpOrw_KWC0Xn3bz#7 zoKH9#F&qlyWirMH%Nh-=6s-ctsfQJTK6|4`Si|HY$ACz>IdDHCklG-RMssIL^j!e$ z+TfQ!TFpa3oK@NfNtgG0VHv-MQ1n>)!u%_$pabYKAudc^Qoi5$A^-~0gkKbz|9Fu! 
zi42(IN)e8XO#!pr?=J=*eeFi1dOGtG$6h3p|G$WE=FJ5r+o4OL;mb^{N*89~Htxu- zR5+8`%G_0(IIU@GUIy+8HQ=ByozC_VrU_jyAFTFU2zSl;jy{Cwi(|GR*n*Glq?(F` zLWa=D7T^;t*T)5vA{*GAWCDo`lAuj#&!jdI%lxWvoUXP1$j(}W7nEWdw zP3*^oyJuJ6C`g*ZI1gS5=;mS_l>GlRAx^KNmCb(4{v7~S{^sy-Cfx8_xe|1*@xKedmY)Ycbq8Pg;ife@O z85iVmf;nTE09tPB_?_b|h0Sy(CIn;nTfW-yVczs1Su{5YP%EJn6DNzHNcLrLrYLg< z@h>-uy05+O8=%$|loWg76@lt5Y>VE%j<7SWN*o=IuLYSy=~+Ojo9}Rao_OvHKd$~g zpzAzm5QEIuL^~}+FjUdR|qp~|twC8UCrsO@HtxVouiEF*Ij;U&so0)JVgrW;+z4BZCd-bpVUU9CSB$`|yuzW#yO>UK!V)Hbrr9x(d_WIe18 z32l-W-6W87OHxRDMTAorH+jdXT9%XJciky|_(w?RieVz3%dt*02zALh`W3N7`K1u& zBlk((O57>XTuX`>oQ7k;pYl&xA9V}!x)4f(gzHbS;$QNgIY&v(xQ7szZ?4`4zQeHx zaBLOph%iTS-iB9i(QXnp`@KW4K!B$u#xHJiM(@joyJYi5tRzf^Zhu^DEqL3v+psRl~2#aU_RK zg7U?8=W#G$;Qmsy>r_`5X4?9@0CGXGp+A0j4+ILrD0Uv z^y1eB!d!(4E*KaxGlZDKHFFx`mhwk+s7T4ODE7aPC@y%Fgtyz??@#p14^!e&k?tkF z1md=)7*7eM_tQ6b+bQ19DBPmZ4GItNn<^nar0YdE;*{C(FHz1*T`vA7+Qpm7fpMJ& z{Us;DLUf5Rb6Bb9OurN`+TVLQ;cDpofI<-OHlg8q--b|1L7C zD$4ZDj2p83{*#V`o;xc+REVT1ZGHF;IYERIOfsqAr=VSPv;ml2;xr*n6JhG;D;Bp4 z^>_8SsE#9_&gmZM^qwl(wQogKkL$%aU67en!QCm3&lThM!rVb5t7hxK61~PVfG)#y z?he`9?fWc<8D8qp2It$)+NDf7tJvDdEyB#@N+y;+izGQ5hQ%ui!_0!V_@}&N0y$+@d?LcNKsTa| zUGNbv`)7Gyj1~S4;WVp|22Xn>kBuaeg*6@)LIk}Tj!@|tk!0F}&gB~aAe=E@OWpTH z!ybtV)2(BTfKl$Fr_9d&pfZ%++GL)1T@Wnuh1^E3nxYj(E{!HpF5ZN(JH=BX$x$LF zjp6fNgLVcO9i`d%DM7ApBQq|2VBOb2+_*X^EK0-TfHKIP7eWLbJ}(#O_HAM{FBxzR zlMdMW|0IHGZ7*Y6(OHK`cNh}{c$?IkdEzbL4rUL*RD}$UNFEASCAeNwZg`LQpUTt5r&Q`n$ z=*f$I?m;9uN2hQX+Y)Nlbl7{TV0smf1H0ntw;)C*l0Kvl#kft7^B7fWXpJM@2F!TY z$AhM~iZ+kva16G!{tZB7kuP9*x1A{eOy}k2@!8Mmf$!M&o&0B%-k3)37vMbQ78hAE ztKYis0=m)B(A3q^Swfs_MRz|gV16gS#hN3be4;&{WP%O}qW-E_<|?-wB}^=OWBd2= zq>?5~R13(aB4!ID7h=XkLY;x^0phavp=peY#-?~nVV7p1PFV3__T!zm?-&a{;77R3 zMRIGrA!3w!=;x045ZYYleuMiPylV&|J_W=tjfkuppjVih?ZGSp;}9hS5oFEFu0#0D zZjIai3*aGD%hdgpNK$KKNslkfzx5(WtNV!Gs6uCPNgS*&t%ksubLAZ$n`+0MYH$4R z6DU^?-w-TZw+nE8!cl`1%B4~fn|%uG%JE8gYfKmI!Y3~^+Wr4nn46MEY9&b;`3!{J z%&gjYSV8|-sbdsAJU<6Vo4O*6Vywc~_2O<5k`1xx^p8d)kg!0(E3`I^zn z*x)P3(F~BrB4QNcG`ot!=u=`JyWZDe;yc6c0EOKFuo9P_g6I`AS}{G%5=RLkmmb-? z_Qsi4J zO=z=P4=Sjn^V<-nX;e9ggb-n9GPpqG@eNik}Rb zZV|yO%c{t7Nm3e`w+S>`Iq@Yg?^~`1^6g$QAh7`wk=( z;fR5%5thWF%}|s~Ulipgl5(<~-)il=a6^AY-z=d!9nXuVPPm4M6TVB>CBoHZnuhn> z2*}LQQJyzFWLZ34s4Fikv0_|X2Bhz}b37)1noLF28f5?);vFICU7TEF#f|+lrKo2v zqg9%OWaA*+t9u+Q5bT_|ey05Gn*dPWx@k>uro#SlMzMF87w*dd$~>rFZL}#d+*79L z7;=wjGg$Yj%uzr79*FDAFiZDcmkXk#cVB|s<7N>sm6BF0iRCtf8tr+SUPZf?v`Xzo zs=((9yCd3^Nyj=Er(Zw9e6JlNJ;KG>b`Sh%+?TQ5?2DlF;{fPVm({J z6nMEH=al>a$ISxV&v4jNSnSrX*#?xx=B6!H+mkoz_l;3>GYap&scv+5n0L8 zz+HV@4h`baPznQ23ZmXfr*Tyz!c+9*vD?H0<4q!Smg&z^B3(Gt)pW1D!-PEk3=Ug^ zA{~Jfnao)Ky(@^>mgdD{Cqk1Ovf07dXSe+87ACH`GKurpD2eo^m3p)O1<`IqREhC~ zQH*0I(yF0X}qQ{PiJw&-QoTl~S%lksey>BUy0pcPzO`X;lUx?PMHNXNCtL~6;6Xx${>Q--=Xm_C+of*}JNkCGu7eBa|N!_kB76>6L9zvxXog*q= zlr#nD)HQx@P$xfb^6tCEiK5+)+VHN`9lP(7egecG-E6&#r0S6yW6yiqbQPzI9w2IG;%Qv_>Bm+5p#W! 
zS8gzlxuYuoh>M$oE^0Qb269cfv7>A4Ab?bwrS&^m1gWrIGqX!AkjZJ9KxbW#ZGjeu zMnSIVRH^e{Cfcd)JEZ00{u%%ek1Cd-2c|%}bTr{Y7Jnv+Hw3zAWJ1mf^08VFnx-}F zVE!x0AK^!%o7Q4v?KTyVob|Z9lm(}Y_9Q1gS8XaZ2`v`v0&%6y>ishTuztquxJJRO z=c09rc=355u1-aflkbYtpxr6bEGR3Hq1a8Rv$8TMFQ(I1_7?1q@fbfG)uP>9r(m?5 zdf6|ggSj}mO2E=k0L5vV8Glxgfiezf4xeY?8dL_qHN$u#tLc3T`}<1n!Y}}v3E)h* z%;=3hMY~R=mXRxH$|@nWs}y)ry?Tev}qXGJ)feopps=lPr zBQy@8-HFurIzd@|@WS|L-%`;`FmBKaiaUV|Iq1&yFinv-?9K`IG?8$7Gdk+}&X>*FjU zY~k>Hf385c6IwR|u`Hvps$iNd#eQ>~0(wG)I8OvzY3Urfp*OnbLOH);H!Bq*ah>@< z77``b!K1fIL&CF34EqeYqp9sep>Dj2WOxt@09-=(Xmw?$2^^hWk^f4+VTmwj-N2&& z-8Ag;Wr(%;uA?xFsNXM7efNY8F1i*fbcL!ongwvbBuH!EvouR#e$D=}= zL-Nrq1ED<*0HehH9sTjN!v39&99uVb6f-}_4*j-Uvn@To1UERaI@un~j3XH2e zW7}3}KER*JK+Frx&5jnqn^+?;WvC z8;Hvw!(K7w90cTl%aFNF2;uUb4tm z{^y9Mekk{`)W<_2&2AEE>x}J60Imb~8VIdvg1-uLs+HVm;tTrAa;$U~QTxvuBo;O8 zMS*!RRvyaJ;n`v5Jzf}j_e&{gAf|{US?L0$cb#I)LL&^)#+D8#0iRMvIGD=|^UFe9 zKrPA1bHoS2%ss@g-BGm&+T3G@wnWm38G_(^9g}ap!f+lXqt0l`eyFaPAI~V9H9jv| zt+p7_$u{?~S3FNNKVt%qP&PIn&SQ_8v}4Q>X)e(%QD~2wg?N&IxygZ(UI%(yut!fi zaI%<|;>~hmh?1jo32`XyxUm!CT!r06JS)LbW2Hj?Ts_1b(glxcg3Og>bbmS8aa%9U z1#0YLCLN3mgkHF$K^G|Dw%_Enh8y;^9^F&s!&9#g?2IE7aX3q@nDW)9; z2^*Or3$ajy%T&>W-*q~~*@9qSdK{TVe<#|X&&Jw*mBS&aV$I_In53|ak4Tc2$eH)w z5JvI?ooxSWehOh8v<=B-?Wbh`FHT(EoFRbdTk;p@lz&AjiE(^~0Ovh^{EqRYNQ#E) zdvY=QjA*k;zS>z(-WB5dR3W%l^%wjM$Ynqkff2m*kpN~VyI598)w)d}Ni(r>HsBUM zqv_L?@{hRvQT*<#7Ir54^ZqizaZAr5oFK$G)uw0J9}@tt#!lWjx{fC7*}Z-^#{N8y zs%fGW*9gcvZD%YK?WP>{Q$*kW{8&)9C7qi1#qkg>68kjm;lC6@eghaPuYUqzC&)!% zS3D-#g=2MOU$xdR0J2ptxdp^(ur<1cl0-8d_M#IBYZk4I+Z2Sax-LYkP3N91e+iUj zKh&y)qFRWv@Wtq10!VD+qTcv}!tN{Z(>?Q1VKi^Dizvq4Cz*uj)tvw5i{$G{9^e{O znA)HLmFBjWMZ*Yo>p05#6-3sFd*K9rO|B3`(Ps_e_}Wp^hWtShq*+x_9S4sRJ-THg*dCmOmXc7K@A|R(32bK}{*J-iM!^PRFJZ1qQNGmYi}rwLf6E(+YRUb}V4=NT zYL?8$M+-E67a|bRrsU%IH*eRSmnT9RLoxk)XmbL;ENYF`GJ=Rh!qmSAF((F! za=OJBeY+eBs~3FE#3~o$DK|=gw@>tF$Kx&tCOj`Fn-~y>;?+{yvz0FdrFe1?xi||C z40p%&7lFG~8@U8+jRP+RFt-uSaKWd$>to7blll{qPE1AxGICfQyJO2sAe<{3yfkp= z@Ouf(8YGEFOR?u=KxEi5%>GsTxkChuRspkqYI&j{3R`Gyjt{c<%qr8-|767cwj5t? 
zY?4j0pu88!ehjyW!Wb|;dK)rUxE#v0lGi`m>HbV0+0;{ta};LA8MjNUbOk|Li%yk2 z#kN3*C;w_5RTGu1Y7=&hWg=liGtQY~#ifqPV#z_QRg{@q)zQi3>kk54P(0pB?9NNC zzXNmUz+tzfOKV&SLRqSZVoe28S$N_h^@)!&1b+DV9F14yiP~71M4uJrG1Q=uvBTAl zqsxK8&h}1ZrE!EX*JIo+yT(Hzy>Fsh({Fr!e_ybR$K0dIaD!{Uf#7W$4}8~xxCyoJ z<4Yo56=a6%P=*+fcG~Wgm{4M3FixtfWx!h-gC8aZl|{qNq-sEmZC)jb=hg$gg8!Nch>46M$GYN0ZtyXadcDfgm&`n z+BtJgiyURbNd6h`jB#cEq=0-WXu&~&R;mw#x}N@+{ZY1!NJ zZXg$`QBQHiy7vH>5lQbUS%zOJkUF(+BO4Ek@FxwOvT9iOUH}-tb)%##r)4BVUOf`W~_ zjaCbRp^|>+Dq=vi%f*R79^*>!D`8~IEyfJs<`RdweHyVF+jLp$NvAU#~opoo(@9mD5)~+ z_&31O-Z?NZ9uk=bd|hnt1Yyq+vQERsFG-Et1-rP}IX_Mi zL?VmWTfmg3MVcw-OBFf2y(q+uQ%KGZiS-|0X4DkjPp|nTG+ED?RUMZq?9Ru?MhMhu zC;ndvcixzcrfd4Gp8{fqtC=0UWWk;Gjy1fe>v#ba5u>ZL+kYAoCZu<*W0ok# z#y*6Faeo1H92UjasC+j6)i*?~0$iI+C6Kp!gt>(>AII$KE-pvxWfn=kOWrT1#z>=s ztoQEa2ITcZsY%@yfH+qf;~7)^J1Rba+lJgu=3?*BZ9IXRDU8p}kxeC+up zuq(vJFXVA5m~CSEi_q>Hn4HrM(=7XuP|DJQ4#sB+lTTl>Zdo-4AMlb9W)r3`%`eBk z4C0)tq||etXxJ*RHN|*96t$5rA;ich{~a=$@>mcg<6Z9+Ao#qb*um^1f{#fXhxz{y zrH?O7o7i>#k$+sx#SogI@f|_LM>7JSDI16ytx$pU7iv?(5wC)}fXp?>Ek%=IZxI!g zCK2$7yUvc@^tJ$xs0brbv@O{EA1lfoqr@A-#kl@8|2#Fx)$bPN=Gt}aHu0WFXLe9= z5qrYdp3sRl6v#rW)9uxxH)JC6HM0j^L_=RxQJ$7e!FM0YV5V!xOF z&La(4gAxJ@Q6?CmmP{JS|D1zYpei+akT8u}_Kx20ie*%YmObk~LPZdCD@DbD!D?xh%2fH)A1~Dgf{z~qpw_ff?qZMQGD{bJ| zOOT5qXt=O_wJO zlcWR_i$u`%xj2#XQ|k(l?zOzj!2?1ZZje#94|{egBx%+<_t z+ph@d3h3^N^7IIy`B-TrO4M27k#eLZo!wFxdrXLHQR1_%_}fYV&a9R_a(ph@5vHZ- zZ}o5I5%AR56J4Sm0Xr!+9v2CKap|q8xK)%3%a*RK$VE_ZJTDM-U>i}0AFK>PRa+>| z1clw@xRObYb)Xu#<(TWfp2Y_pM^ecY2qIasm~tsf)&ySp-xN`1p;CLaxNI zp<>lM@yuWM!$RC&bkmZ=N8?w^6J6NHyO0dVxoZH?h6Uzn^iRLHCL~;2L<8b9BeoYk z+NSNg#Gr{533S=GlVaeP;%q^#91dNS2A+17dXlaUfj|x zU*$SrG}b`plGr~BkDah{^egOQPh~g_igbC>Jc@90SdddwFGhsNWAnn?cV>-TiZ;&4 z0!aQKU9@CBwiml3eeI3&*X2jAOf(J8HdvVRm|%Fw(Wu#*rq$L1v1f98Hc(%DIu0(; zVf6_8Li&A63h|i2=6wm7J;P(}cXC7{2zf-IjBwqj4cRe5VC#^iO&5xA`MIQF{(D#e zY^dn&k#gbNA~oL*#l`Cr^vpNAC%y>(P&XxJZ9tGBb;Hpng`J)ZZ%eU2!02&8n$XFP zYk#3`P23{%#;u~w{J9k3TN^@VoeF<7%upgiw^9B9R)&MIo@gr6&9;-Xbw;J7BTg3O5=`alii%t*#H}ws-syRi zy*38RxQFIMXFMr@BvG+rC;#Fm`6u%8$aWS#V4LQjlsXqJaZ3SICdBo=hJOSiY%jL*X zo993F_6^ZfP7~ndxR6q6%pS4)7N8o0a>B5O2p53sQIx$Q-6#wQEU&QzE<-jp=!`Z& zc`zj&`#{6lIM^;1^T0T?9F1CXdhYA2F2$Igb3S$(}=U@#A^!j z3D+XHFDu5Z?Vw#zsgiJJkK?un%30FF;hFKlnNP6$Fk2ID{$r~#Al~l|m8TUZrb+t2 zOu^cWMP`lwGX*YVWyU&nmN0T2owxaJM3A#!=ZwTPz7%54&0@oI_YQf4TKqkr4Dquc zfS3i%LzMDO(f**KXE-+85z^Juv&51yZ6$~<$ZB7V9Yr|DO4QGo^O}XY>=?!~qRuY^ z=-=R#_qm;%K&5;!4egvKAXO#0&YeP>03K;z)87QZ{RK6#`7Zfy2qoxLr)Gc*SCUrY z8e@quI&4(lq&0qkP$q)pz&1`0#kk{!HeOTM^&K~UrznkcMD8b9u&wK0DJkU3uqUt-%`p(zOu zMdSM|_7a&jOdmseYMDmZJsb_9sS---yrA=-DDuKUF&>i5PI<&Pz5k4-1$ zi8so5^EA;O9onSuY6gqZp#q%_QVx{m2jXHuG{ua@*n77;367!oHWBUmmwXNPo{&)i zkbK#&HP+m)#w2h`uJ$U$nxbGLY9KtmAR^0>y&u<@vArO3y&~C4Y_U6l$3pf9Rw-^5 zMx=Qx`Y1$TBsn=xs0TTqyqmTOasg3^DxhEUQz2$C=F1qc|IoA)b$B!(J`@d8J1{Vd zJqVMlJecWnRR_lL!YDeLm;rVEO+s8XIPP~-Z!TkXBg@d4hj{Mf*ugL?)4_Cqq0V3^S^Ix1fbyjU zbMqfTz;%i=r7r44Ij-K#D8<17oHYjTctmGzze8Bw0g{XKj|IUp4C7Kssw(nySWxNo z2MTac!u?P(SSrLJ0y8;urfb>Ji6O<|!s+ttd9@cjUZnHGLof%@Vr*6kL=`xCb;Uzf z`S18UNG`%ARRejhrDOM&q>ReI-BYkz7QuIaY)}K8v4&TJF{BaT&M=E!Nx}bKj#4iv za8yspqhJbNLYh`5g#IudCD?+(t|!Wq196dJIj zn5D2eUWMK+tm=*yVJ=p2F-vm``tCWR=SP?h_nrolafy`;xn3M7$Vtf+Qc`EWQkXe| zJrf;lAPJ)G6-*8&VlC!=i;3$wA?8)BOa<4U4xMihbjhwkyN6KciUDEr+ z*AJ`9=49b{&rC21G^+MWtA8cP(Wc7rq7G!0bzl^dT`HDtA{@br>6~g4Mx!plWAxNn zPLnah6wb~TSFD04%goUQ=57IQ(Mq)8HlOX^`J}BAZH?GXfc6SA;@a|WdatGsYuEFe z^VhS8$-Lk@!pvA)2kY)6ie-rY%rseyUkY-^ki%8H5}q%_V+vbxO|4AiNJ}5t7u@BW zHNPr0X~@Y2Uc0$eq|@OM=w8&`?{5S#v+@3bNOsdE017!JWzANeL?;V#A8D6cmi@~# 
zI&yvSvgo|ev}JBtHl73O=GA3+(sy8(Hwz|5drrVN(;)&}sP177rKMOZ#QEVqqa%JZ z7up9fUUI`5*pk%47%@xri`mDzD(%f!Z61(0(1!GKSftZJX0wxD z?^TeQTAe@_kC~TE`Xyz+Qtr#jK=M%=O9hxYyaj=AR{Zn;5VsA2+`jm|XeWw)3HEoZ z9|%Balww04PHj|yhlIh=4t(Oo+alal(8l_PNdc zUy8@GNHmJH+@2EY@r9j6a*mfIBjcOF$(=0)+us%lc*!IMbx=l8aAux^MVV_&TFsIo z!4iS)$PMzjsAmAY{+z8}+`A;XSLMGDWNy@>KQ$P4wgND>>^>$wQqa6a3a@L2J&GXa zWuqRY8xT$J*k#=0C@JjNjcK9H^D^{WQJY|`;gvPzLy`(nyEOHzLzX z#{qxcUDP(RPn2uU#$9(>rMOxcZIQdyiU)+i*treyse)w34Lted+#Ax_wGB|oSZmN9 z!vI!L>9&j!;w+}4ebp%kx4pu0PNeqkFGoWsvBZp5ieCsLGd6UZN=_H)svtXWiRnYo zFc(FLLR=-nKh_R)x5ZyXdkCh@9RdfcHylq1r*_jgA!u3QF^PpB8H;gLp}}*oAbM5B zv{|w5qC6pdnfY2to0_6_&Uy{+Zw|)3hXR-+Mi3<{p-R zSwe@k5StyIe~D||?qP=4-vxPaqf92pUfcgPPs{26X?{CMsN>NxdO7YJNM&(Y4r{Jg z92X4FC~4F3h9DQd9UE-?z6qeBFmybxyAA#fl8QDJ7sqJ|6IaiEP>GiKT!5G|sJ9nl z`y(889&=!g!kRF^<9419 zCo4>%Oc*_Jo(Quv^WLP6b+1s0geX%wM~{j0QoxRbdQ+GrO$?0%!!xZQ=*V z6LtmAxU)ZvDZ)rw?(QF9X8->L`n#F(kchd!FF+{3APZh0J{3hLK-p3%$E%-ck}h3`md=_YnR}+r zj2|kh6@wky^IAQrQz;avGGGB`-^QUwSxZo&lIG>{3wBJjV(DyJrGZC(f9Km?y$<$W?EfrkjxO zo(1fz>pErd`17*?oBp9!R!PRmepN5BWySn zD=Rz#vElf88Q@}p%kRnyh(U2Wxs}}P!aPmcvLF>>$rR=^xmVI0SrX!=Z{vw1wic<> z*OVhNGYs)w-wQ%u2N!`dW{b%er!CzTpIs8-_{%NUIs>1I4ePpyvuK)T%mq>O6Jf}D~N*0hhqFV zBP2YC8$`JEQP5KhYTq&Ea!{9cE~lF~;|gdRsFs2Jqow&5GJw!Ym2?uC)dvMD91tlQ zpHxk)iKV|IOjmD1w9D{+BO|pj;0f$auH;uf^&9Y7Gz7T0xe$=A*%?BpAop5ScSw{Q z#Hz1qrTJF@Di5i=^~Q@LsWDSMPrQmycSq*F!qtRbVY_F$`vQ94nnKN0thIT_fnp?# zuNrEq<4^kCMe1QSVQ>155VNR#h#|W88tBmmWvz>M1n_xdQ$u{9us>&E8|01CZLbBP zM)m1jwYMnG1>?q!i<3lVJ+5zvDQpnGgPUFl$QZAk6`x-3k6V+i*G|8Oa?VI^sm##= zT$v;>kj>vOg}K@V>9Qkff|GX&btSmU=B*0R8Rw}K=fBNv$dRg!alRHp zO)Ki_}tbZG%(`@X+H_w)$DHnd`nWmS!-4v=KO9rD;lv$s;McgGIW4)gOYmFy`xiyi; zA#>Lhv)-~iBGYtwC_VNO=HjC2DQS9z5c8@7|4GPXbcIuj5#esl{*GY`hLg4`yZinImTF@w^M5O@=Ip}W6QdMP)J z`FZ-qm7Ox`QGH4I>wY;z{*7q*#YG5jaOt{+lrhG}f|XApRc8Y462k3}(N5h~yb zU4$d^coy61IJ}IYUd^3ZJS@Z&>gK$Jv?(6^3y?Vjr%V6{}JMBWD>#Sn}|$)_Ap?!KVg>P zzBXBJfBO+&M#I?Mc8*OIb{$K}Y4Pp?UZ7kb9QC5yyXJLH>5N}xv;=CJ zCeIf=+TBr!=J@-%Aa_8$0g$PuulBf;l~kuW))(bMG@y`!?xz6HY2VP8;1Wl;+21JZ z+}=2$jG!PWySj6PxED(Bnt1x0F!Nuh0A$-#{j9$MQkitK4*h^=7++00zpSwNj3cc0 zXZBNFMMF${!k^IuC7A-Zh^Du&kS&Z;o`e`J;1cb1Z+|LBD&&d15MKz%=6`1EG)>Nb z8q6F(4}`0UvCrf{Ki6z{B@w{VKD{DNRnWCrgt-idDJI5?1Ugljjdtjb&JToXvXJ;C z7Cj5)vQ-Z7$Rr*ng}{MyWk9XZD90$YB1?+uG zK=FpV=92}$nP%?A#1f&i;G;YZBAP z`2wi2oYwO7XCi4g<_;$L6<^6?7ug&Y;=2OqsN=@$9M>qE&F{QOj`DLe3*+5cjA_{C zZ1@jixbF4R0$wQEt>-sW>{slqax|pj#kl2FqEXza=A;ROjNhX}k6BoY@WVXX| zw6i1t8xiPg&qm3Ymo@}Y6WRfZ^u7OtGC!?FL^@9k%Q{n50V!AQ_6D$92n$l_wmc`q zX=~$Fik1Ea0Mn*%6yiKGNhDuT772Xle-{g(=4_|Z^F{v??OIf2N4>f?fn1AZi=K`u?57!vCWpfvs1CbMf9D-!nc)aJN&MT9F;%Q286 z<|pOP+mk!Vv2Qy*Mht^-nFyCk(?0GS`q0=C=yv2zsyo*EH?&KDy&xJk+X|or@Om7N z?R{AOy`B={J4FC9$du~%uY&X-Tq?wQ?+|vnGxd7s??gLYtjHE&sQfN~TMggvuolt$ zHt&JCM)DlMa$F^Z+{ceimMkxnBaAb0?SJx5l3FGn@rAhWqT9*tE*)N97V64ZNGi=q z8jo@B=O5Q$qkw3nAjGA@H6o7U88_>EkSB&SC2id=t;woXz z9k~OS>Du03EzpJL&WTR*djYN$BGJBhL$t@pU{YD9{XYhB5oSqOh0gs6h#M|?y?Ds~%=|KS%VaELacfXv9bq`tvNJuJfG&HNIOVT|r+JC~lfWn{{SOP;Ut{MC86-euNQ@7?8VesFWZa5n5R zGGKAsTev&x6z;|mYH~bYu41%O<0fOmHCeP7QO$$Q9Fb069>l#AZ7GU+SsCc$abv8z zS0IM@iG${(OEOoGdBr|{Fgm^k?E!_bxwCI^Cq7Sj4^=QzUk9D&@hn_ZUlFk)c$O!J zkJi}m|50_`@p+cj|7UMx9t1>LWmKSau*x(^)23~bhNOdnj3;@XjAoK2X_~^9Rb+@D zL-r6EG6mVl5=oIE3dj)IviFv$@_T>o&z<|`mskF{&*!=3x%RovIhO#Z$A)Ym>O{NK zbE&}1zuqv5HCF<4LD;}3qo@+(dAMfeve;K-+J?H_*iSSW@wlUh$o^^g1<6~<@kAcI zfoXrYm9ywW64E#2G*>m0_H;b7Oxmn1f|nux2|~LYR{@*?f}yz zmFFlp6w9s#NO5^-Q!jl*zi9IXF(G@YqXjr6{7p#hHqv$eC&ZO#gqvC`0QN_ci8in0AneJy z;uaB36gw%MjqVcSDki2hvE$kRX)CaA&;qqkkc(WU2f)u1o%T^!*_McJ%ki*aCsoz2 
(git base85 binary-patch payload omitted: encoded contents of a binary file added by this commit; not human-readable)
zG1$$}G3EnLg@ECMm?`O=O(JQn#T|9=BR#u|jWE*j>)(X9{W{SriT3;@fXVj3K99{Z z4i)Cte04^=Cd6e}#8!i1g#9Z|qiClaHRx1LD;$}iKKIsl!jVMP5b02Qz zWB`LtN}6CNy0}5lE`0+cN>TSe zt%l4iFiKWpp%8y5nR2zV`bVA$2akb?Z^Z^*A(d;*o&mmVL$Qk>GUr&5b~Y)?jMHn0 z6x?4Cds+oing*I+sh$x#ucG=yWdg!Id1Sy!>MH_WVBJiI$v7cSh6MnDQA;JzrC}uI z^v?f*P<_(vT4yOU*G&36-iyUJPnc}sopHAw%{k^;IPpsXly6m`2dn)Dz6#}{bZ~f; zeWrOrvQs&XFt{ZVm|w+dK7$`z9F767eal!N$UUh=X9qVt(@ByF#iUA8Oc4ZYrq7)d z9of(Bq<9StM6V!!UuG@FkF~xA;_q{1dQpKv`4k0TUq6^f#K#@QRfREadx6x3EO(j} z9ip5Ywyu$}atxjY;_4G|3r*pQuh(+vV5Pu_#rXZYKyu+uIOc-JZ`9tQuQI>?PypEy z?gRx_Q6laWLak;`pAl=F&9i1p3Q2`866rEDC}`$j{8EU^fQ=ZY?|BYD#z}M)*^=9G zNIS+dgT}d}a((IOoMx6k&kh17dkhK3;6cK&2{P@e6I`60KSxR3JCP}D0Q8%lI8dk) z(}n3@Oj&SXVU{n;rAlFNFxj$C5=JFwO`jI`>)CZ+P^Xy=IKP&UMrAT*eqwnc*o7X} zMNLc!*h2*}dbMau+vEZWc&#NQ1E{F1KBVs^qHeSc^S3z4MtG490cPnix;Z}D143L2 z>EH@_Bvs*`f{{n&N`{h=cuO=*BOi6@o=9ihpyEd3g|&=F9JcaUA(ReNFE*yn<-b=3 zvEhxx1wvduT35kU0bdvBN_3$>r5(YKe6yA{Ry8@|e^m&DVfsKuuCHdZJz=#o(kL9fWfmTreAl33M?~L{rPgwK-Oifu)O? z=zAB}(%~yas0#GVUB3lJTQ{|L#)*1%uWr*xlIKOcFbi0Jjm7TY2B0vCLq5r-1T-_{QbgD-0yM zo%N77>C##vi5RQY(Kt^SW#L4kHjet@@;uQ3X6lmq?Piye$n2P3#4t4)e-wiJYr5cQ zaqZ_i4tflfBQ6KAOrgh4+>vY?y9=Zdusb5j+?XvIo-Y|n+R1Su{Snzt5U*2nM8h*@ z46nX|G#&sr-iYSQ!fqg56Y4h?C{o90^j-*)j-HQVtjKzNic2f&gei~pD~ z%22>C!-Q~>NatVZ(Xv1r330Mew<;85>*!RqIr4 z(P$7zJ~D)67l*Wc2grh1Kd_36??k%*ERW@Tt^wxHLMn^;wEQ@i@k^z1q}X6NuJ%zZhE?MgccvNqiNL(*nTUbF;EomOxk6>hG~ z$D;&?$o~o9YqO`!i0y9US-l`_m+m0SX*uvCSDr0y2l1jb&9Os@gpverNWZvjELN+; zEtH1#1)-ThGvy4JR9hp-!%FEyM-$Gr-`pGK<*S@RvGNXmOL!v5(dQ6|SZ@i^v zGd4Ms48&FwAg;(fVp6d|!02{@KwjzZXNynIeuZr;1)=(-9RoND^Wx# zd8U6)C>3k1ju+{v4~X=83#-KXLI}6L#}EfhSW^^b*Pkbdm*hanKJIt26m&@0V0FId z20=6ic0rj-d*V?62u~X=U5O_|nJwfwN}uS7CxwwI?lXAI#fu_c+*K3TSgEhX-uLD& z%BfqCVP^`1J=jJFGoz?pjCBVJ^x%ted~O^p8g`9wE5})Sb|vL0*@qH+TMjM6tz`rQ z-cJQlg+Fa)nFRi}j-B-()$%H5}Shep5a@z^2YPT;I=3qmA zaTTFWsqm)-X5B#^Ny9|ZJ588nnxo4sVZJATGUy&_4K{CP39P2T=aHY0z;$bo9)JdN zwxBHIe$FY&!}MRm5N=&f9Vh?XnQ|5by>c+_5KT+870bG}=+7BBV|Lm_-0cB=r6tSV zV`yTBiZT;gw1--bbA`AB3mE<|V#S?;VE$~jgnydE)P(wL zgnsB@t1JtlmVVG!m)vjP*2`|hN-y{55g~Fk9)8GR9hV(Qr%h_jBLXvC7x1o^X(nNw z{iqGX?W6e#HlGN6O>3 zc|Ij}=t{BHqa<`WdkYg+Y9iS5m(F?#H^Ie@BAhjYu*|5fLMU;2xht0CzwXX`cC+?~ zBZW|I29rb4{=X^G4a?Q4WAU13iruF3O|8GG{kTM%{&$IXS>~4p6#gL|$&)O@;WT#p zHA!5Y;X;*5!(xdLzobiJweNS5Aj&{YXR-Klk#1DHiSUe|Q#~ro~A%Je3hdQ&<@x|Fp*~KjA z4iVM|mAG7>TcJ=*htl`{9SF;z`OD(-dNeB=m`-3>nyG%C!}P4QKgduY33eA|R~_fu zJGj|Ig?L`+T|1WQJzP6A9A`cS274wh!OXk$?;*@$_FM;a_rh_4vT_l55{=^oA+)p1 zH?n+u@abBLh1x#heqtdWSv(Br6i(bG)STlu2MekF{s2Jh;Tu%u0DYH8|IQqy3t$D9 zPhQtZsra8j7qK}-0vL!7{L%TWE~L0wyNUE`KFqDL@(zK1t%KMcOv>>}_F5k#&HRaH zNI?sA9MTc}dUmDf>0)vGPcwu840m`j^FKnIBU*pz74Hi2mkBdLE8(dgi=M5e!#K@) zp%N{E;NUQGrULyRB3f;5OaDMzo&AWKo+1@KD%xdf=Pqwb`hpMyau{B4UV?OZ}TMW*q#Pry+4t}5~q_kTDPn7o+;Rs zMzf&WJ+a9PKoowi4ye5%0wy*Pz;^Q&{k$4aqh0k(Q)6G_nwL0BBsJ)(YB2fgOHgFm zG&tN#I7twN!pMhO7q5tP>#?vYazhifTl`AxwFR7aVUH`$7UXoqU*n*O zG=j}v1$5U!A(rY>jID&ZPzwuvZ0p5dLSPS0_3;fod$=Ul*l-;78nm-T%}?6$KzvRh zMeHphVTHKhbx5~lT^gI76yTSd);RinF*^SO=v7CXO?cePmfV)oFP_4Z(M0p0R!|S> z4VMMga7KLOuh3>z0|(ZmR((nkmEjU@F2a(7@>lXiw2?Mf;|)PBAGeltaYpBF05miW zNRO+CBHMm0{gU4d(BKG!-0$s-8GNzpSM7u7ScUav> z%|)92AY8pe3!GT>CKL?h)UwKa$v+?|fvlvN!t;AU&ZkasVDWjmOtzAQyJYnW;artm zCeVy25XDlO>$U&leGhi6NERk|)PmUY-=J>$mb7UQ`wQ`_+GP|^zbeR8>fo4EJSEyv zk9Lo5h&JR)@v2~?rmkT@eB?hoyHqPCF6+l)!BBL*Kv#i7p_8GXxI;s@i^eQ)b+)y6 zf1ZU*CM8|K5Dy4+>E>w*h7J7gw`-qD16WD>Cdq`WrOJYwkd=~@kXJrV1Nj_xVTA0{JtQ* zG{h|)Sk*^k!*>BFN^&}&Li0qM!}Hh%#bZNK@;Ra8lt#|-n40<$L77_Jjd2efnZJ6S zexR&4>&O5nr?%)`Yw=`b;3m)B$*S1xeL&{HJi;1BooH8)?P7wiGBC9ZL)Pafm!&vX zBurd_GS0bJQBN{gJlCZeRTU3v2hL& 
z=&Mir5}(bXc^<#wPC@35a?FCp|5&J{yc4}k*E(w?cu6p&XZ?fAMXbLrq)Q_U;0X6& z*oWnO{eV<)qEps1zP#Q0YZ+oMWIQ990$e<3Jz#%*8D1R3*<%wW77DfSw3NEIT=e$> zTsw+`u z!d#B#%vOxvFjXimDYFeO9S3g?>CbW$JG(ylRe_d=cKT;I_S+&)Rb&dlGKu4~Kr^wf zN=yqj*AD(r4XUsOsyK&5K`t;WsVuHVCB_6>f)-XehJ1!-e{wl`33H?XxP!?DRc7}s zq1;9NO{71Rw$KPfEdAC-7Um@^}of$n7K+Xlq^sNG^1?vvw0S7+6v%g(DNrXWu~f86wn)B`E92KfnDRL0LcqFh zA+lyF(7B>Z0KIi7p6D;@8G%dF9NT5;IAh#I3rf*-Qoa-|qFnSwmXm`D<(4pilRl&a zgj;@$RGDa%xR#wzIG+|oqP*x&3!yBSv-{Hm>@5M(BN`iHv+e!d*vw56dZq*`lPm6= zjA~8#)3F(a*@|VtvK>h2l5ne+BFmhYBL#wTAWoZqoHVpda@u7q-4W6qD7$f#PWH8( z04W;U3f^ESbAQSJniGTD>Y@0n0Qf+Ke$ViRV~>Ld z?6^lQrxjcVNN6ho76^RExeSbEIB`!fk6gV-S#~Tlp)N!n>%_4*P09RfLCqzvzpo0T zS_|qH$K!hD1(eoaJfu07mi;(==6Ui=7m*xHtswuMO2Vpm~{6n)|#0WLo`-lj5W z7XOi81|s?$K@2Op-Piu9yX?&m<{QyfIRnY8AA1UvM0R$@r?cl7)Ao%^^z5qO?S~5Z zssPuRjZsXC5{X!Q3Mgfi8q55*kx1lYyvhzeF|~%`79sxVB1R$f3WBWtN}$=I8&}2n z_?6n>tDM(hw)lepOLH$~Qq98epI(C&$Xkm>+d79D z(DY<~*)@k4gVHr0hY8~Qtyq2sVpNnHkKUmB+)`FA2=+RHl|8;&jL@>&ye{0ut?ffc zaR#7E$GzT3@BOVHXWKBjvCoL`nqa=2S|xU$Sxba6q9CwI{X0=GT{#_TV!eJ&o-C~{ zJYL4Rg3WqGPEZ>Qap8O)y2NnRto$R`CYcYD6*IxST}Q2mpp6=~S_v%-orR(CoE~GaIxPnq2%JciExxwLzNDm* zT4MAuOzbb(Zz90*b7WjPRiM8|lq$}l_r#Y4S^nlPZRQ{+qx0B7)zfPoh6#M8a+mu-Y)wC zx&R8wjd4MK69Yp1K~5fMRr#kp9b$(oZEEU$4yb+N(GwoIGd?Mr_oU8OQp~tkA?^yT zI!PY~3t&8*K4;%)an>gwT%;bh8f}@nR3NqIhOpJ@7LSRBi&|(C)mkQ?;e!m(ix-rK zu@zS0WI-;njO{b_i0=!5N379wwru}TLe>b^^Ynv@xVS7E{ue(5;AX2Y62%;y zb-Owsvxy}Z)>6c7C=L;7k;6czpz=epLKp*KV_id>q-SS@p&-SpxG~SDPN9axZw|lY z2`OxfN${8;vxXZgrI*8s7X`YKxc^9`cc}+KemN&bXW3O0AD_;NnS=H0YT>oPZCmGL zKi1(ElvkHUAQH4`rPTW;yKagKx>uG zF~}~&N1DL04|C~8G5#XJRlyickI{gB_dG!JrqG4`59l62nXDAXu_J$QK7hZ5*H}4r z5KRK6>$IYc(6c&o3G4UUvu1Hz2HffZhg+S(t{1A0KE6xzcC&WvR z2E=)y=?8tB@Q6R=FA!kJPFm+La=w^H@m$1YGZuFXa%H%w8iW1%EdYo(b{?JTK0#z^ z{y8zd)vwb<$e`52dHdhy5QcgNsqR>7F{#YqLy@s~N;KkB&pnns@o!Pt;Gkn#$UM^W z(Iuef;A*nQhK^(=(40q);gD$Tu4I(019Kv4v^gRvTW685V0yN6ElNj5vy4<8wmHX{L9rK`ZgPFzTRPo_%BH!L<}DU0A}i#`?T4 z^D)ITj}vpC8%ZCDGX+rEc3dW()wAcq_gGWn6~S)zZk?g0${p>7umB;CsmUsW%q*pm zF|ZPAauD|z_$LltERf2#7WxX~tcy17g!XtvOpjsKhFR&i*ALDTm#PtYWsk((0?nF^ zVNOlq{+S~R^@`2;cp->fKOq<6!%HFDw%SY2bioI6R90W^k}R?`NGKe|jY_NSWxQ#b z+5Oq$FN(dF*XR^cXtwBV)GPF){I^%V7vgGHM~^<1dH{5&Bf zD=CgYj@k(s9wJ0Av4ahEJ^j4Sl)jKn(BwBHJGM)mXv^`xy22g)3V z2EFFYoVUNAU({)FWaNvdi*)5NT5(8$mv1`)OhxBRl+?DZkA!w(*|{%8Fl-b`dvcPw z5~ zvsQn5N;KadCzuQeCqLEgpBZb|IGG+_7j1EB9N}h(IIjo5l7Tx#(twl6{yE{U7|Soz zDoqs$bc3nWGv>>0XQW6{b85IWfcbSW&g=zpXF@1f#yc5J;AL(r8H$bj_+2fDucjQ7 z;$s;=IUC1lnP?F}Jsv&b(a%3}V+@OgU5l5r$Jeu;U;)7wwq|^+ObYXfGN73j(5^QX+h$Ju&vVN{G>e?~JdGeb? 
z&|IwO=`F8_f{DK1wkOsfhIWqHZpAzk^90d0SRP<$Tq4r4O+Tb_QQ9#Mb=FXdiUBw6 zGG8Oq{gZJ=x)f_}VSKTZ{jDB&^gq#VHqX!+@Vk$xy{I`eEo$!)MmaEvR~YfeMoh}F z1yR0JBy~&{+j~T~TX9&eH;%4AyByrS%f14-&-$Za{w{XBYy#XsATMIGA7M&}jYXRs z17*9V?Ip+qLB2;v&%8TJ$_7(+zm7%^#owy6Pqe1Hi&0u)tyQ%w)2<2bR)^$Ji}tLT z-A)ojE2Vp+<91OlKli@jQHO}WEQ|{4%7NHwbu9t+U|Bf!5N7ddzrd1lKBEL?>BInS)U+SfG}FAHFtXyvAb&DQX2c4MXJj^jjA zG{RfVh+kwsPUe2vSS|6P^2mEkG&RM(%L?Pq*{>YXN}Ev|j`OQKqQ*c*lVATorp`OC zvZDIqNJj(|ID!QgL}Y=5?FDH`HoKeMWH%()U5b>K;#SLnXz7 z)P_8{11)2FRWARN}vt;P; zhO=n+qj|FB+y=?mO!P2esTHNcd_8-KvAdz<1P>iT0yx!)?P5Ige-M<9vrJ}adT*MI z#~ljl5@BPcD6BC>5d3b%G_%Zw^uNn!GF~R*j+uw?yK7#B%~x+6D8MpHt*Cp^Aqx)| z?uM(E^Y$_Mi;Xgvj9&}LXQ^>_4?)>`>Yzh$i4b!w z1*l?NiD&W`7Y?-dX;QlRM3TAa%&)%kkTLU4!7f@8^NoU^-z&%&E-7dxP7NhI& zLd>%HSSOFfTUipMkM2X<&zSJzN~WlA(>D0*p8&Z(5g2z$JS*DFVaG0i&@Fxn;<_}- zi&fqA(3AZZTQ0opP(X>cC=-^6b4j^{9Vd_iW%GP{d>|lWv*QMHY2GPLL?MwyrsYQk zz@~}V0==MTXUoE)w~GQ1GW^s^E_3NK>4LX+bBqfOT3Lp-#p*wE&g^2fnk`1P0IJGT za;U^v^xs6gIL)?HAB^*V4o3AW*Z0Ocr-Qg5IBn$grYFV;^7;qOBI6zcX?xr*{o(?n z1Cz3h<-!R9&C){qQr5BY!z@qMD>(P1G4o78G`_CZihDC+aqWVZ_>Bm+D@xMALcCUa zdzceS1saHXXOPB{xv*jjiesi=PahmJ%TiVoV||XIC}P!xU*C{X7rja97rOpnG$`p< zyRsC;#}~hPffThy2RToPpx-tpAJO=`NLp`EOKo(W$#XVTV|y26(dGkj!F3+|vJkSD zF^rrplkLPO0^KaQ&LG{7y)dl*gbTmta{*lP+`a*~0uqPLwmx1}RnJu`C02CmLUL zRPwAKK9EAheeoO!s?GJ6NS&e@?IHvwRn;}e(RyaSCHxJu+0CNhCcUAluq@U%7jmp~ z;*4^zXquTrFQz1^^%KWV7EB(v$TAK6OqAKoDv@p;>z!9Yl5!81aUo-Zp=A^iG`P~_ z{7SmzIudR-;&jQZLYrlLoR!Wz=GeU45u029;Bxlkoe>9$HUrD8DC66(R& z(R?A#E+Ut&A(nBx5VsQA64vO&I4Ms=2tmz_p9q1i#9^BomtItPL#A39AfFaQG3wf> zHQL7)M7x}QY>3+{C(ZX1d7=BZjrE0&;N) ziO4!YLHK41bi1K$TN<~C_D2eBnpfiE92qF4vFj^WlZHZ1C&YB0p0jo@aFOmAqAA4? ztI&a%{0k^o38ywj?pJaEwKMkX2VO%87phJx4H?qJ@dBw=AEDObRuO!ysjh`uGv?Rgg1FzDS(z;jk*uHA3~P0E`o^2N`Q~?ywo}p*zPUL<@fchIUDrv*^6=hB^W768c6+j&) z5QHgKiEz%n_zE#zRJH=q!Cem|>3|Ke}NWjIswsz(PjBRG3j(iOSZucJe{o$Cd<tTDpS*x5WaupbZN5z}z|# z*WC_iHgk5|F~rpdeMyhB?2bxm8Eff+vY|L!pqZk{lv}r@DNU#=%WTS_Qh&UrgvdD! 
z0B`5r3FYFl)Tc?}Fd<}=<_nIonU)cj|K#ZkC~nXUef}=L$rP5Huuu&Np}>x5#CWO0 zEh!~-)(>Mcr0~f@u`q**+%J-?8(IXp8}%w8fW+rYVKjX@kfh)3dN;H=2TxcNwhM5z zvBW}!%NlPmE)eQkRdW#mt@(-&I@%0^qQ)loR9+fAoBqIH#+mo`gu)m!HEcus?}c(N zVv%J}9IA4IU`C9IQzpkBvgfHgPK&?m*#(n|sSq372jJRBAIlIo9OH$V4?6mm?s9`) z0nr`MAV@tOA}K=?F0+JB5#><>^N^NBDX3C zfD!COaOS>Al-rXF2s=7P;!h7$5Q34*2Xo6`gJ>wdbJfl92N5*Wq6Kr}Yro-{I&))5 zse>c42c52E1f#FxMTq_M`^$!7_lF=Um&^?brQH*=1^B}{T{Nrc=#YBt36zKi#z+|FEm34<*BNh75~njC+|2tW_zAKQbV-D9DyF3QLrzTCF8t3cTLM*FvX}V=4@`7M@nkCE) zR8}4qe-&y0T8@63g-2hk{WzG%AaoTlx-;7-?W-SL&JsFFdg56^U<}^I93c(G;BTQ3 zB?bV^IBMnR1^Fcm&j-rr_g7Vr+7)P3hhni1m!eydCnQei=BaAR!*ZFqREYV;;t^%W zhXO3vEi8n-^E*hGFmvjJn677jB6yozrx%FytG%TaaHCHE1-yYYbo5F3{#I2M9ENnun3A5Mbgq8 z81E6WLX^u{Gf-HG8Sj$-&XNOfHsx`N5GPrdR5~3`0U!u?Lg30O0v@1HjhpoBf^(Fh zQzNFK*-r!VJ=PGcB5oJu@--+k^u{j>qDdto3S#pw(Jp~pw5j_h&w#iD3vs|A?vMZp zdQ)@!Dtn%Spg*o>awi@dXBTN3y87=c?@AB0EY1<&>|0?E7c2fIM`jHV=nmPx386^o z0z;GpOa1^&9hS&wJ-+xIdl}!EVMie;qU1Ju{cSz`BU32MawS)xJR-D z)alDX!|W(0{27={#lAa5iykvc=5CodQVwhY@UKhzo0|k#W)(19 z_XuM>iK}9k0YmGUB9^1-H-u8^+5%PhR0O3&i^&NwTzU6-|B3+_H}d=if^RWV4tMAP zl8t~Mi_;?FMy3OV*!%@RN{aBXzrp-NEgdfirGaoW;ll8wUNRoITG!mV$zESs4cE63`^4ln4NoL!+M-Z z=S;Y%WJsu^TbG?a~>#<5V>b0N=gUEmX8oaNY5_WBHJBo@K67` z0N>`V^z2`=cqM1N24WV~5G_giFFMtOLR}n5EHRj^kSjH$ zy)K%HvHFF(ABdvXoaF01_|;yA^ye@g#AqsQNH!O00ZkSN+HCw(7)9kW38aE)H69V- zLV5Xxg-gtT!>`Kiii1&&qj8il_)*(X7Z+yFQzlG_OS9*?n)z{A_8tw4x3iT*`@G3} z5_PJ~&|8ev{$+rmjVRg3xmti#Tvb^}rflLpLBF_g_+VzSTh0{d+KvqjgcLtlC@kR< zXj$UY|3JF(`mok(@u@I)$e7KfaNt`|u)=5EI^ypWLJfyGC%RA1{s?=;F~_xEW$(~S zIcUdHA=Y`DbQT=O1-T%_=LPxGEUO4wLe~HOud|*uetLZF9iEZ-m8g{?M0lWW#GbSq zUlZWBYwQO=*t-9H7VE%Ne<13GS#HvuznOjHVS-&AuG&r#w!yoVw=v~KbIY=$Fpq_2zaWUH?w4`p*~bIZVe-?3&yKUl(pJlyLpb+#mm)zdd&rz3G5Y zoq2N+oj6-okroMx!(zT}wS{BlM6FK=bdv>pOu^gw(v|{9l*T{eS%ang_UuWK&r;Y%LB5+*THxcF@9}m*MZSH z#mvFK5zI|WlPQe*c; z)P=RXoZx;Z$ijgDYlkksj(9>ab)PzkOQAmRFSRPjPkR(ZniazX>_Uf_jyW}2AJnzT zY(R>!tuV?g8@99u-cEqZ%wYzm6gy_WGt4yWQjNGmfLn^Y`i8h#4)A|D$jHso-V+J#Ldw*B-O&4mEw%akLgN5 z7BsBhb&mdGmJ^M!r?jB)QJy3n{gq?wjs245ER>)eCwy0kb8bjsI2nKh!L(U*O|>;G zF+L+%>PhCOh|I*L2g4D@w$3OCWbEtUHXQ;xZvy2SG&)|wNSq+d-)Y89j4m2i34)XO zAjG43CIgOk2u<*kD1RfnvV>jED*`PdjDsuy(h5JJW9ze;vRD?kEw`;-HK!uAo&>b_|Pl|CUSon8N!V!F*?KRa0!S1PwbBMCBLi&&|?kh#&kn{W+urpN#3zAlhXd?(az9L7N1*Y3c|PLt6v#M?lb zk%PE$_r$lp>lbvSXaBtIgt%E~&Q@8yNR_yYV0H8XRQ7{WBa)BSH7<-fdNzM)M*Qy8 z)8`4K`p6|K1SY=rxHEfaR&7;$sAo$c`)PWWWH9#G7SzSZdq@KQUm<20ha$R9VmLk# z=I^1(LkFp2k=4cln%xU^3sD;WL}3Qcw&ahzysW*d*F+YS;I z$)|#%5teOx02itbLs-^<oFYNni3}>le3gbBABgb)WQzJuria^zbTx+idJ4}HPGU#LIFopLx!5k6xAkZV@55C2plE+>AXSmUrWnJ3II>Rdu9fqFsa>>Sn??P!LG z0LYb~PU+ZW_;wShPLO(ubTH8)N~~rugDba-M2IF$of>;g;pqtVXTm9ILqk>EtY?3|pEG0$`|4BtGkqf-)w5aHjN2@lzs}fuDwrjf z8%%IxXyZ&jf$C8@1V_`YG|pCQGztIe(<)G-ydG(cbp#r#W$&Xft8B4~Ca0jaGG63Wo{u%-L@S z&#uuNbmn*^F+05|kOcLF&-qx-Zjn|Fa$?(=(Ee;A-U-P6b3)80wypFE6x}=Q45n^_ zGc2ozO}|%3(pi+si_+dCjE_`f)3nwuPEN1~so58agoRk$&@*61d-Mo$Ggl9zJn{PY zkt`wenshjkM3^RicbX=~a)7{RLLwak&6kMqkxGN!t6h&RB}Z1S7vJ+c`??YnIJGki`cKB0uUgCzHz1ic+6pwPTYPY686#2`p`Ag1-4%R;yR4Q z!qC*4V3!$v04f|+B!Od-SrKQN|*b|BN zhq!g9M{SZIzt&ht&J~5IS2CBK?XoX?AbgNe*n&cqRRHGm-~UW*WrFl@9<@?XmK!3E z0VFn=!YmBMK6Zyloo0-GXW20sEiZJ!_9GTMe`Ta9x17$mkqXB;oo!YHF5yL~;JCtakKxYoM(b?!q_`b~S0Q;Wg) z=Dv`Y@%eoyz9)!=!Gqjtj5(8Ag}SAh+$I?A?FUB2bq$R%V}G709EOCn#ZcTPnzGYeTN9$< zf&(D4F36@*CL1^#2{eBzhlS$r+6RKW1X?p-e|;_ih8Kj9F)YA@0YN&WV1Tk?pT=VAb^$bNEjr@3 zQ-rfvpr{5KOnxoQ)obDy#iyPLw4I66ty0)ou2v~^CgEc)O00MPnP-epvP)$EnAl2E z_dt$=74$Eb2r_qOl?Is;k}>)|c|tUix-={95M+k2(t)pAwgJGJA^dBpY%CS+nlQGo zMqz&WX_kh%56Q&o6w$PC&D@3Ym3F_+Z@6Vlj4edF!*OC>z}~bh#Kk3`5yQ?%oGJ)G 
zfct4zUW7DL+K3;Qq9QS@55>v)!R@y7L@Zp+5$(e1FrL5^1MzSN7=@`~xAn1}UAubX zY^{uKIx8}JM7N*xd=+^g>}sAg5;wu zbq#p(R^F^dS&6aLb|nxOv$;sfY)w&r6^6)RaZG#FF0G`X36l)OkTA;&e8VD5x{uog zj#@BiPUL3&V0^pA({P+4+VWDRK$>ixMg@^?GS2>E{yU}>Fsc+kK8WAV zrJ8hj+ojDRt@@IF$XcSS$RQZ>OTx0Awu1Ou>Vm95J_C<=25BNsu;qeXk+y-=1LO&B z_?1=O>lroJBQN5&MOgrwain9ZJP3fG^j3AnGCiARi*$S{y=g|5j4q?;@Qb61(Al3XL&8EF8cq*b*IY;($MyRAlC$!0NNJ2uK3ajpc{cro{nqq zhR_6lH*sS|{08H-JjukFlL@)9k|glFw=mMm%^jj( zYHdqZ?5bzKEO!hP8S!`y?K65OMeB;^1W|XI5Hkw+{H22dkSNTwx?}5%SatWRyW)Ey z$Pi0hEhDudY8J%X=#3ou^C$skZjG+PEXOO^TXdM_e%W7+#FdATNM#=3v@C8EmCb9k z7`kff72E$0+~rril73cLBZmS}y#@ka_Z`MF;?b@EINSjsk`FUX;%%t1{ac77ZmtwL z(R;W7tVL~yu~NWTe{W@TjP|GZ2Vfou@m4|S^` zN>`6h&|CS>i|ODp0{_wx{7iYO(DBd%v1AtD?%gK`n%SZ)J8TV5o3wJwRVUEjslob< z_*AjKAlS$D1?&8$@&xI!X~xinN0Pv$UBtm4xwHy#P1(%ITxKK=JgSnamq15sj*b!H zR3-eob-6&?CCuOF;Efm>3>|_bGVDe$_F%Vkbmcv{^I?zF9sA^%RW``(jAJT!v35w+ z+woW+*BOl%W3$GlTLilN1g4QC{x^>Up|OeM#BdsYBAtNHJ{(iTNkSwURgH1@@qV7p zF^q58J;!x9EZ1l*`DBmNY1rjTw`CHJj9W>R)*#v~qPNPo z@oj--W}lWih4?5(8Ve&thKO(cxKfxZg|UrY1Y|9sYq|#GRUz<&HNu3M<6@VeR1!30 zwn;)rK%r5K#BYD<1f2TPflwvJ=T8Q4jae-Wbi{Z8$YZ|u=!)$BH4d%0!O#8c#2M3LXFam>!0v$(fsxPv1pz2+b0FX ztfso!7=Nzc8Yp6IWw)I(1-Y=z?6C`RnE-!IeHUK=`tFC>iwf`AgFy=Y>Z0@fBFCnh zcY5NU^8sATHhL;5!DocHkTMHFl@R|CMB~sG*$u?I`8(Api18Wv*##tUr`bn#GwA=~ zC1F_^S7HkguL|&|B&STvKe({+8lGG@6jA82gz+t1m8FHeNXFw;*RjS}_o7M;RaBDk zcb*WXMtQ}oeyhl={||Hd=-r}SF1$O?I55opC(9&R_atM4-(KuYFq@PY;{}&MQchi; zD&56U{7Z@S4cAYeC`oU|O zMjM~FKTgsME^RYb+uf|uqb!UXwaP%0cTW=S?{Gy>3gCE2mPATsMHZNMWhEQwSP(r7 zL4^6W7VOQ~JI7Umlyka9TL^X?O)N{4p>cHMbGu&~a5!CwH zrnq0vbUqpR^~D;$fZ~(M4TZUb<~d(b)+uxbDNlp=oxt=B4yVx948_cA{3c@oZhE?c zHl|+-WY*AY>sD@@y}G6blxja@a?d+p-4c55>;~ zSZ3U&{rB<<1s-HyrRLiCSJ3bh zdlZE*8;ny%0bE+Dj)pGI$dQ5K=w;Uq$eW?aV7DF5Vrh;O)lM2sPWK*FQGPT-BbO~_5 zwE$<+n3gVQWa+p)2djD5b;X0EqbaoQWw-jg$gDqPQAJo9{8Okohyp%oCRck1$n{#T zaNO;&mJo6pET;en+vf?Gr*yeqjS$L+`ia?q+55sgi5AlI-A{!e38?v){w{tP%F|W~ zMOq%}8oivbDBM%Y4}4r}2Ss}vj372oO?O*H{$`9_5)&+f0IiZ$AM} z&jkmAUD)-XBFJCk5V5PwNY02$-e>9uNh|&&l8iWY&q*-bzj-aJ^OWeJN2McqF_2%^?3BY~&#pLKpjFWUHde$IMFDGg7> zxL%km=b*^N_(%|+K?fr(!1%vZ-m2-++L&$jLSgE&c+d5eiB7$L1~62yfM&h+wHHW1 zQ3gs}aP$okmWw&8HDhk+$hj8f{85Oyi0)JB!)vzx)SRJVXk~_^0MOwhnFgu(Hj*(oZ<5u z0$q-|iba4&s1Rq*4jAzg?iGRx?S%Y|fuDf1iV++Pc#7!!#E`M-{8%Z>rC-8639rA`09=2c zwMNJ5Ak-g&Hq<7`{_HYAu2CI^q4A?PoQrP2NJrLp39zu}m>iz5u6kIY8A;#_ZguR7 zPXxip1>8<^=bO$1;g&-n<^5MBJu=uX6N}vhjm_!V89F=lYSzo#6> zi8K>yWw;Ti=Kw((Dd3j>sbtDwCoG9mgz!}@Y8Z4L6zMExr}fn)Zy8YRlH`qb<=cKi z-^69<*8f7f$}*3UT@C!);++aQ$VIPGfFRSKLDRmWl;T5grL=AiMQ>*hA1#91u5-Do zEYAH5U2biSKmPACudsh&`g>mxBM?5>z0ve8gn36u7ge()#2=}`JWvu=&J(eklUL%6 zLXd)KOlBLt$Fuu_ZoEkIpvL&`3U@}VLDTv64T8*qG3y$}fhUBz)j7nFati%N0-ftJ z9a-rxNv4n=RMsXdE`=|oW(S5!tid6#Lahqk*EW6*& zB*YdI4YA(7N^jGC3>E{j$@sl!SC6gOAVyuW|Hn?UysNDK90!Yb5;S>?h=eE-q|L}u z=?ShE<$^CQkLc|F&FpugSh4Rp=Tm;C@rctE8?V+lHp0}EITO}t+-}0m&xsSXTsbCB zG@n{)Mu<~{nHM_Z)gF5wt`_Jo&mt@oTAbVSSJ^dijLT-?gU^wQIun9g8|pZ1b!aLy zc4W)M^0A;wU!g}OQ`jkM7}co8{(!Mwde?V^`lJ2B?6`*FD{F!v;T*Ge#=#<7P^M{x zBBU*z5C{tg z0J#`d(jpJWwL<)&oQx6Tk+@!%dAoGDge}Q!0$e<;E7D*Xcja$#XvcWO9Q3R(+PH?b z7t`^&P!zh@#uf>Ubw>7}7or`8M8aXt$I&KYMzS8X%O-nFj4Kv@9wci%g|*TKBvuGC z*O}3}u-w(bMnbcsIz4A}#LPrVog-%DG8=nX2uzpeC=Ob`l0BX`$d&F=9uP>Gv8yHL zI8h{p)K*WYVP|DDaeGqC)pJCf$1EktrGzhG1?nX-9=@RQ?Jqb}^z@3Dv6o1fs|K?! 
z!p+|$#2m*1nX3OL2Uv7*&i|(@3p9e=wC#=?@PfpX2x&U~Y!nUSv2-FN!j>EPS9TXD z$U380w7J6#E=)Eu8vf*qfbgPIR*4h81Yt3tG1^mHqGyHqtrQ(EognpY4Cv)vZjd6* z{82x^xVphYiQC(s73mD6*U?p=@qCV-eKIx`Sk=S~&ud_iUgR(!m7wSa6n zf>M6WDgQ~q{={OWlC^1TJSPmkFrCq{M&iXhuNFGi@;H1`5>QM8Rywg`L^_Xje}*=} z4+(RTFji$cFUHe?=!%nfoDq8j&u%?*7ShH(EyOQ0;q<}H=&|z4KrTm17dqafM4N3g zdXZK#UEmi#*AKY>b+c4BT@dxJ#tb4|`*fxNjhriFjd86gcK}&@M1Mz1e@DDF$%($KxMS5&$lj>{TuR|w^NNz=2 zigW>)c%{2k4?0()EwwkXJ$4bZqhsKnj5$ zBX`&DB+B_IgK6WUD8x16vT6Dj*2IFmywD+nTN??uMY!|lwiGPAG>LsAkZQEs7if=f zLQ+eHOj#hs|A}@(^rwcnAWJ|`;SO42xqXWSE|K;Z2>s$5;V@?zlf^b$lE%DQz}_9k zC?3x$p?wS0T1uamz0cDE-JHpSRwKfngi%T~!P3LWnp-(5tCPg|=Y;sJ z77V|!x#P<0Z-coQOms}JETKyAq);>-2%v`EeDrZ_~TYmON9TZ|D7SEyhM zAq>e|xM)sXu4i*5MYYp~V?*(RQ0m=>;|T^`+7$nETOij{w#$RD)i|d~`+%g^iEcr} zK-jY;4%ed_l(jYkC(a1*jzA_5T;H%{+P;!Pb{qtnV_z!SbQVaiUE z;*=erDVfaR80-`^{Ot@P$Xj`Z0)rG|qw%CNCzc@HEGM~@NmwSX(*F@nTZqk!=`9?z zkA>`u$@%*YtRE>}d;DidQd2bCWHj;j#oGe1NromIvj+a*PQY~A)*_0V`1}MYH__}x z3t~HwRA)I$5HqAh~`ELbohiJuE{IjcJ8E^H2RWG+fy#_p$cWN||>O@1~*iYriq zrY+by6UoOs;PjU{oa6S7CxOxO3s8P;IN6^@kkrH)#HR`}vl|td>i?!xvf!>PdEF5w zSD0(9M&k+zBnGXFV3)d|&(X(SZ?4CtOj2o&_aQKW`DdQno>@jIw`i^z48K4zq&C)V%Xj#2iYVNY(xfH*9de8 z=1?8v{5Bz;s}LYGyU7@4B>pPg73JKHy3==G6y_>1J?rk*csYldeNs@N$MztV+0h`o zDhu@|RiGB8#T>Qt#IDr~sjl}`qdvKN@s#d?SIWE()OPtPOq_l}2>t-RHo_L=x{fl#n)%1#UY?SwgRjv;hXv9Ay}13E((&bZze?b*9XvE%$QXS^@W zmBRCi795?I1Ned$<6!~LeUThO24mcO5RYQq;mXwSAo(xnFvfE2C>hdu^WpWRrdllS zhp;kGcRW!LoT_QAi!V3$dD4U_vEc%q&2_E~Anp)V`Ba!c-?W_C72=gf5c5g*K1eCL za8IDI4yl+|+6MlysgjO;4L<8b@fShPY;TkiU4{5oGYGS42gWRMlZZ^(xw4ATasvD* zCTaNC9$#sxKsY9+t@KucD0bS)X`p7QwaX$feZi@ocDY#Me;*0~nX<#wn6GCSs9Gy! zgtlG~-{vxV?%-;V3q`wTtT(g{xKD^%vITuQMszu_x0dYDwx9E00H~i+-4fS`b_3zW z%osbw`C;s^Kd6gaWyOiy*9Ky$@Jt4YuPe`_8bRcvt+RbBLm zGzSD@AN+GfrliQOEdJl4YwkIY(qgJytNy)W57P zwrb;<21Id#{$!jeeyQdhcVhNji!*wu9X#vd=xEvmJ z$ZRm9|Ep~fnBNF>3)HH~*{wb#$QjY*9i>>e0{~W99VO-#g;1_Z6Q;+}dM1ZK26J@L z8kk-YWFDof4btqpdMBW>rNa{-v|Rw%>i#!;7-*qGg-~KvBsz0GRU{2Y5Ll3JqA8VHSM-*b6oLrpQ4M|NpiAO3)-M<&w#hIfZVe;(i z5avc^`&ewlY7M^e=HE1@9`qjHNrR1mz&T+1{__kmD4rao!&>NB#6$jkit zql{!Lq66^PMY=fLT*0Y1VS^Ty!CdQp8nusGWiMxIlwh|~BXK|(2J8AsNUvXtyKwAy zP$eg-pddU0Fn(TBuJq3|x*I&yaUQ_<>J z*wdb?XTGh|sklgli$>TRjZweIUR_?)CW8QL1EjDV&qYtp%r3yJ>?=Qb1}eSSauA5p zYKc&p*!LI6H)k!ZiOcouOt7S9O8ry-+|y`Y=;Z_i2LEixFI1yz{iU8=E$%dtk*u0~ zyJ+40RqFRShI$GHC@M0!aOuaE850B(3NXybymDZKh;Cx}(>yIuYv9>joHHY6Q4= z(ubs+-xo&Cv$atqB7dgCuhjli1aw2V|$Kkk}kFXFy}$@D=sXSD+0^{d44g-C|z zI9i|!50^1$O@O!lXmPt`xMA<`pDLxfqjMfY94N~E#X@Ps=DPZLQO~YplZ+vlDPI)? 
zqiWgc#WqJk@GaeomiF8W1h`VDOvtsTM>EErf-P!o3d8-ZXjm~gf{R33Y;Ytbd2+m3 zim!^WRB*ydwPM>GnWs~jzL+A!`7OqhCFY6tXK_+t9Wj*7^7c6j*sMTz#`JOM(UrWb zwC*j&j>mwwhI29gQDJ9iP;X)MycFfIFB$|ne|5TaeIRr9LcuNu!T<5G%jjk36?HRe z;aGmRpfN0A3dHbrvM{RNQDCJ#5~Fz%tcax(8i|R=k;Lu7TAY*Wqz(T>sCcG}wua)O z6Do<&U@_zMqBw|O2(?7ik}w5Uj+ajaG#g|bTZs1rxYE7Ef>!X#b$?jNjs;$FhGdPm zwNSsQiKRWNKLgOATBAb%(lT-Cj5t`&{&+HQ)Cwlf70C3?tiTrWz3hFiL9xYW=_GzO z3)u^FN21MX6gDgobcOj~+^A&cBym-}j(A5Hg=;N#r5#9o{zm{vj<(po+G2wYp))y# zeC#iT;;dw-MbjE$2dfU3)UzT zK_z%WSk^6df=uAF0fWw`vA(vdxihv!|ANx^;_()rJ85Mw!M%{R?~{P9Fqm5o`CjvDDD>NYLw*2QvEY%F=6`5n5Ad4 zjYB$0AD0Mme)I8Blt%2bJkcD2{9%pyst^~ham*2DB;FJ1cI8Me3mtsvX@Fugx5TlZ z-0J6$F3h54Rt|@W=5sZy5KyT}qD~hCt5QgjA~HYsbZ3ENYfwE|Bz&qb40m?L1KE$v z*?M`#8O~3u1J-6*X7>v+>t~k~CA)YQfOlp(t-iS%|GXwZ+A41*}7YoQ_MQUdSaJVJMmM{U+6-HwJb4X`)H>9wpc#8|7 z7&^2gBo%|t<>!LHG;}fy%7h0Koj61|?{4EcD2F(@e5^@tq`*O5%b%Ojk*fOSZX~=Wp z90&z^?0y01khG~gPLA1nc43JfdeliH-3x85ysAbtQwj1lm-8`MjVk0|b%+Y?LP&ED<1x zF`+Ss7NaGSX+xhNiivJn`@XGy;hY;$n;^Ez`4U0SS({*PY8ZD7h}oihQnd>!Mq3WD z%qL16<%%1{D8nQsbd#SpaI@bsM}r}+u>NHWad&0BdK<=FXs>!?BEX$ zn+3Ua#6TD%AaLWgAX!Ew?U2tCK$|wRV;qVvURQZx5tbYDlLjGv0Sh*g)Lai>i7sLX zo79kT1DH!gShIA`=w^bLA6MP_yC1~)`R}a+?4qu5X%02AsF0=d6*(lME15~$Cdk~7 z8l9b}ZXy20ji8jt;h|`l*92s9EpZ$*3lbzeOT}n~u@~Y!`6g1ia)+jLm1-9lqvmN+@Ggmbin3?`n;H91-7M_^A5%8zvMkPL^Ul}ioFE5Xqfj7b*Tui z3v)xT@@5l16z>W`hQ0NP@81eZ_0(PQ;Jpnh%QV>w?Igf;tCgUlB2n!4m?hjFn^R=Q zlp5v$Vg48qSkom0-C2Ig6oVyN>~=e8+<^0C$jH5+qLJ-~aE~i1#I50%?))HW;s>dBbbTQb@*LI`rr6uVAsW(S(9XW0BbNN5j-BHqVhJ1!e7sZwPVbH3ZM;j_Cs6g$|(O3K8y<+T}^JnU>%yegmEf z5vQBU42$XVV8QSi$1TmCFNmbJW>HsM{2(-qIb+(i_}4=`yAI7d<6@^9p9sqa(+=@k zSGcbJFd*fTGBef}L5|3KBf7#^Dw-T8O`8&HKf*KP%cLD8aGQzZH9R}H)j|Zxs#+T3 z)9eSukBO;|@`Lm1RCJ9cqR9jE7Y59IvtMc5(K`vuzhCw%J_d07q3qY?Xz)10jvXGW z*8G z7Q&|nY|Pj5J16ipBU*`_F2n__CX^vI=q!966KIQ?rlQV&W#yX&vGj|(G+38x;h$Dw z_=2(=-vUOD&p!b`>r9+JEq2$l(_^x$<#=rLBoIa7ib=&t{p2Y~*S?n9%u`dW_cR#I z=1cp|5k-E@%{X=Fnfy4TWDHJ4TJ0G_$WR?#@^UQRLXhW*DyG7c&I)>BnowAUcRSZ% zh{!r2(+eYyfK=+~aoq2na&c`_bDSl@RcOLaMFu9X2r|=Ha!8L(_H+LTNRFIlb)->r zZvlJ{58hkjFcFr_4#mwqUbM4F=FLb#Uz{$`-E&boPA2k`X1N=LyImL1?@>74mLbgj z*ve>De@2MwEdMSm!DHT^DsMJ$F(dQG6+&E)?!IzsT$iOlY1mU9(G0!Sv!rl^8NyiH zbj6;6cxgF9sQN~SXqZ)BRULO`&l4F1PWyA^Rd#hrsraTKw=Zf`hAmk2!{_|^l<^bd z1wFeVs?rfr;rR+MFRiwo5CE$jW(oFK%-u4+vaf2{KYkiSi- zy@A_MnA&;Lcj8NWHZQPcE60yTJB#_{UX~t1amZgQSrlZ=&ZT|vf5IrTb?DsRa)AK9 z$4)g(tRH1*kV?v@tLk6$O90&H$XHSLQst%o!4ay~7V`vIDA=B04I{JPvOt7_Gk(ms zb;|}K!7k%R!l_7I!@_tf%d6avev4U-R&0LRukk%a2Uhq z1IXsL_&3sIor4}-j-Lsza8WQteAQCqfjk+3I|qj2VF6>)3+hl*OB_OS^V8oe`L&nZ znaQ3Iz!*T`8rm?Ydj-<<&Uz~^-7U}s#TsO&j7_%?zqR$mDY4e8kXb1Qq>i1M1AWBH z#wAyP^Id{}V^UK@^FJ#2Hp}~bBszqUk?hH&%#Jq%Se5pfV*<7;xBinKXz1+j0FAKS zuleQ58PzEnQ0iN%T{(LS0b`=Na+qV~>Nh|vmrDzntK&;!%wu8(xPs||M8~T&dj>J+=V_aws;rNY)2!)*tM4c z%D|!sUuW60oG1jwH*v`N^n3mlHq0sv5c@FxB?LaSD|!Np?|+NV_Y>oB5*z3(jg zh;=$Hd!EK^U(-ME%bbs6WKxXfqTQ~9f;7N$O{A{+Pn-?hL@&VsL0N#4 zl=_xo(I4jsA)nS_MHKH5L_w=;rdu3658H)^PeJL2Xm+?ss!tSLn~Fo+!FtY^r0ATz zaclPL^yw2~$JO>sAK+A*rDhV80|c2nDbgP|^hSp;_eQSynGk=^UZd!>NUAhIALXwp zDncKdIAfjxI7r9RSQTexOKB}7Y+My%gSK1ab9{r*tC77Es;SLHx){1rg|_UEDq*e* zMnddx;wM5Z6Azw2KZ>&inDayjP1~{!R|j(8SJJF;qG(oP3kYr;mx*wC)MvxGuKH?0 z{xVY{^DJZM|AfKOi8H6g`fF5vw?wcocxz2CttO@X`B(%sA;7Mz4u;pNq;I89bfZ*> zE7k^cO>iWUp5WI)_(E&BEoQFEbJi_6pBsu10Zu;0Yp&yjxZKMyEt4|ul{^tgNd&%# zw{t}9myB#{tyjs2kPn=9M~9Hgh{AkMe;AJAgd$HJI9d$G19=)|R#M1DKi2*{X((}Z zRZVQJXY*|9i4&|9xLX)(n4PQuvmS{;fBLMdXg zjnSqM_hfG()-a+yZG7W7fv#HGd$W0rzvKyFM~Zqn6n_;+BQe%1Fd8eTSADlGgi2$o^l;}+Y{iW(cS6^4TBOVc?Ot|zR zap7B=0r=C+Sf-)fyg`WNpqWM*l!+w4t2eKpDUc}pUMy%H6q*fF!>u@8X-fF!R{;42 
zS5Y%Xp&LCk0}vWT(ZMP{1LRYZntk1`@`_8!HW`I{Pkcv^S%;W$zQnewAdH-wJ2(JR zq`AZM_i%XUo>sI2f?T1!N`-Q7DSn;jr09;Lm(w%HeT`HI5k;zRnH779^!N7}cD(4m zECtGE=Cn9cq*o*?4wCJrcA8J;FLM5<-9YT}by8}e)37Zvse^xLvhh&n{1gF3lN;CNOf}y=$vV zCQRSMD`V$xR{$)hsmIL%U@{R!SxLw4TSMw2Qzu7b-0hq6@$ecLN2&Ro%iXgO? z3h}H&WSoweydAU)*vt`rvO2m|m{~!yq4ieCm$*%+n}dtrbW{?*7DSI>yy%Jlo= z(O^*%V1HIqsO~$9hlIKKbC~3j)Q5%8U?|kJ7dUY|B%kB! z?XhA<2p5tS9>1qi@xmO%0h=93vOKv}s9TYq(TigkhtLUh5m*Z`p3_{nDIpz>{>AP` zb~ggdu3GIWM|XmT2@0Kz>R%yYKhB_z0)dd_RMj566Pyhrx-<`kxIlovwG?YzZh#}q zXO7`~j)wt?&$on`1%zoRW8=U6M378o>+0eJkuKo;k^&j6I|)GZUy3?MEa%D4@CdEc zvY0akA{+a(>c}oxzg4hXvJrOxj4R?rL42mWyd0me_}WyH@L|%<6Jtw}yny8es!d#5 zuwWD9k`6OJGCZ~jp(LnWb>l}AvX{t%PK}0uIB8iDppHVRH=dg2e4z@eNA zaLAR$o&};^R)wvRR;pW&KfQ#J6PNc5W`I~0a3n|_g1y-ZLfz_pSTZXTerMbxkcy(j z9qt*7RU+NTh)dh8K1*k!lm1RW_#<3uh{;*}bS8*JfqA?Xw~Kbk@TBUN{loi$+&WFt zLB!;p0n$uAGk%yoPh*quw4R+iv&l-9!FTOa$!!kDAn1YOZb4c3iB5@v38{TXAa7Q& zxntYMRQi$-%7x!qO7YK(u95#E^YQ<(H`ETExW2I~Z&(^Q z`&~jLNnEoO+wKP8%61ersrSWXL9T3v;@n1q09Q%@W*Nbc7Xq{UyI79k0G5$OT>RKj zpDfbSNnf?O2xI)If?Ybwn=G~CK4Fvv7uu0{dMq1tnZD@Vo!|JZR(7M;pcROZGlUu! z21j(<^2a@#2863Z3TP?c7n1dKDl`}yS5<&{V5ar^bl~~sZaC@+}NSEKD z8Wd>8G?z364aPY&6^M6P{`ST*g8a35Io`(kvnqLMkWj25hEsu-FlK6wp+u86ZIWFH z8Q-q0yoaKSb0%r3wx8{E)NZg1l@f*$2MU7o9UaN@;3$#skF%2r@naF@I~|i_ewHpY z)`Nmw#x{J(i}70l%#R%TAZFXnfpW8SFuW)GkA7k1NE5>)=0zwcKNUzVa1u=xZhOqF zyvPwWP0B9$QDN?|M29a9rm-pB7iwN&Plib+Hwfm?J`_^x%XRBWWigx0Cm2aL%@OQ$ zkj^+;hzmwURQBN7f4nY`3J#T5Fm@wIA7lW0lVMUJmd&eVy{K1S)3GW?YB32Tmp=xm6ntFccQ<>+O=KLQDa!so_+Q)*md*LwiZtRo)&J_o8 zqS{x8*DXa^Bcxihmt1#&-)ohBM{Fg^491SJtsHyhz+CA><0=8vV(emD*l?X7vq6EI zIhvr?-Is%dM2%@h3fg0xg`~9f%PwBdb>hr;!OpqgKLAdm_xWRHu=O2Vn`r&?*rP{7DA-pckRX2}i!Gn;YlyVg_z477aYOH+Is`JK8@)zs9Ob?X$(ziN{H?DsbHD26dAc!7_~$zQI7jW_-i=8^p@jK0vIG|-iF1E(eiyrb3w)+%;!sUWSH2ruVD5g}pWj`j21Rd2_Z+JqP{{=a3k&o3z7V9Lh?=ek zo$a{?R^DO^NjJB~e{)PKFSK_(@t!c3xmjmsD`L$8NY=&KGPbcYTVIexE9){csV_xy zD>(IMhSwFLJ( zcq~%9jQFQ$+F9DU_^*hu0fZdl#E$$MLk#qtv3aKv`=+xhitUBC^0HheaE`ozCJA)~ zbi2fZXVFx3!pz_r?8mjA-%}7WfEy;KKwES{Q9}$D*dfFtBFEAr)eN20B_Mx{Ygn>i zeX&p|KIsmsQ~i^Yf_!@+#$pgI~hOScSTaXiU|%ppSaW*LokLXc32$W4Mq{z ztxET(X4pxPnS_-K0Rv7JKpp0wmFS{&Jg5a&;EHbr}c& z!+)}@djO`2hC5AlbLO_haDOFhtcy?#6qDBYOu;TgjaJ%RJs`w&O13xTbf+NKsk+$H zsZJHU9t1|S>d22K+;9Mrj}v?&mG6Dgex<_$1kRg-V0?=hPxNwe;65RA%qmv5M-4d# zViRCbH4xt!hW6~HB_E|lkarO1!ZnN(`mkA!MnUFT?F!vxZ!JcXU{`El^m6H@f1YPJ z+^cZpS|Gn7kP=Lq$W?&LDz9KxSs*6nsY2+ZRcLzSnB@=_KTZyb-gKb=SU?-8z*ma& z7r9D}87tQWzvPtup?g1(x9%8&P^Y^?x;!{OnSM|?A{!}G@YgM{Q~@b?e?@#-R$TJgrK6= zKt(LbHEb#q5t(srWt=V09iwX2+*$FcXp2e%_S2ZTKQ6>|X2ok$VJg#i46sX1%ta=y z3k5hQRBFrPPdPnt=A@Z%-?98o!`8OUjTiKcsPeU zOuq8=l_kkPL{r&REl)=~9PbR&C8;SZ*?WaBT;Qs-GS)cJ#8S@o<7XoHblt)QRrPVc zC^r))FEAN;?58}ta5|Vx!_A+Bku9en zQiWMz`l7&0@CHgNS29#3DB~!XL*61s_LuVfIBjB4hHmt4!d#P9*4wNR-WKATu&ZSr zdPe|FHKdrwvDV2@&X`u!9g#z^NscizFq5)j?h)pUIp~(={%Rpe5a$A#Oyb7;tyvtH zr7iU@vos8Mi^~Mked9yk;i@f8S_n;z0|MP(^_t@1>*oNtbgA%c0pc=YmEl8|CMH3ibS`kESJ0N^T8dkR zy6Dx#c2=A1@z!S&%Hs(&CL{j3=T%ssrYt)8vVYxRT$*7t zvc^fBGsItnA!K$+{Nnjg*#NIi4B8J29b$LEuGm89Jf#^Oe*qX}r5DldSaBDkN)X?# zsji8m^lauZH!>mLDZn+Bm6+@W2pzoOLQv<-wt=w$6YG^ZmcMaNg}Q@a1jQ`po|j+?Qc>1}XzY`b`_)SBQqfOjG!t(M zk|5R9#Jkt>3^!R{4Wzxy*RKPh$f#Jb5h8XF9p^Y<6uE9eP5q)-Ts;QqEK}%bX1A@c z2O`UfQ>VwadbTLwErbP794*8zXt$vWX^k5y5E7}_9r3LjfoKuN2JQ`>9&6uhy2$04 zooMs+6m53cvupHnM!aQ0GZyo$q=>)x7BGL0E8<}LP&_Bd$!4>OKpljROIXHV0s?Bi z3i_r{7r&vcMpO5mzpNnA^^xXcArzJIzoZ@CdwB{nmYR9DTS)<5drN2!HM<`zz|E$> zp^QK2h~*Q({+hHA>~4y;foKgvKaNL5kd2H{Q**wQ{aRnQV1Dd;JHNV~H5}$^sd}Iw zN>)`}S6|n%cWiVAwCh+yIk*&FYx0EpMM9ya9dd%qpc;cGlM5n{Uq`9%VrNYize5DLuxm_e)aJ}8fU 
z1DN>Gl?5Oy4FuGW1AhhK`WJeV9d+z83gi~Vl$D)%+L3(*SzOWG6YD+Tk6^@#&wbjcHvbxkmTuvGr|G}p*&oDQ2hUe+;ttC}djo1s zm_V1dl>l+9$l@nTXGlRWWmHiAUCD*dQD_P| zwm)Pzs1`&n@EpxJ3+dY*1F&c}vI>R<&r2WiIeoo@@~l)j+2so_zOl-bT6x zu|a2@zP(7xj)J^MD-w%@QSZhs1!25Tl*OW2mP2vSZvpHVg<4rbz`P$PO1ge=W^G&} zNleU=F}GO?E1Yf!NeL+g?LN19VXVsEYebRGTC*pf7Ut%{N<7W*tFNkLpUzHI)-MQi z_BsIID3ry?w*>m#jxs|pd*}r@RMjf&!ALxkXQ%a@oE~D2-|?pVNENQt=$;k`q536u z8>}IEVvzu6E+Z}(9dzapMhECpdgHx3BNq8u=db;QGosF|7@w?+Y5}mhs;8e0dzvVJ z89pH&aj_6{bCCF2$su^$lTO9pD18D~7knTjn`e^86PxwHSo5h$Zd{F}VPqR2W)!1x zQ7&d(aYGJbBq%{C0tI?Jxg*B|Ya&@+tgKh|@QWH1GNUT)lT-WmWb54T6AxR2Nm0UQ}QP z7-mMKWRfXil9`ZXW&jnqNp8xd+>o10CPP=Gh#*ahh)9#(q$% z_w(%MK-|X?<|U*B!+EK5nFqy=MOp+= zt)lI*zc6=^~qbb}DprGUjx8w6b&Qjf@7(ZgfDS|z`M~p5PLIPz5WT%6Q!qP3~PK`rex+cI@S)E zUblM@$gNPMsg&C>!d!&xY&~#+$P+ICGCbzZo?nf3MK}ej3D4wM;blOoj%3agV#9H} zXu4jZIN(>yE0E@Rdi+*)(V@6qsH@b9jac3=j}*{smPA=RUKd1D>gf}$rd(Ti6~y&U za;pT_tAshP8C=Ifc8eDTQ3pMC&3(|*U(3JJIZ+PfG5Sw1*NfvLG5+aaUWI-g)XalV zGJriJBx5|6a0%_n6kX{JKzAcTur9VG_X*=!^|*P}E{~&|mDbfuIyzRmyE8}+fHDST z_RsH`4N<)~M>N4yX-T|Om;}^*;R~BWya^0HeGqe;QFOX&v zTPVq>Pl`qunmlz{JgeU>qKjD}js5QL0>L!iJeQ^GFCty}#x!0Szdi5e7&lkbSdW8+ znYptTG&ICdMAPZq_{^TtzP0$u-aM>V?+f$F*(|rVxchwr<)umeHykH_0HE6LIyD~C zZ_1D+SvskYL{ml!@f$eX(l2v91aW(C^Bp4g2-m%U+fSLawm& z3H0FYLn!O(>E;n}qh50~q#0RBIg}DR*FiiS(qj1wW#FXESk8trecAoYwF?ELc;#+@o{wQz9t<4HcZAxfo{um0^r8WL_N;hvNGmlg=FE48-y3Yyq^5 zn?gh5m-lB?69`{mJX=XRcitq(Eg&-u_HxYpB*zHKj&d25*S^B2J`oh;b{j8I(d(6Hq(!GrR{xI!A@^u0*T|V6L=|Ux4Z(HW$Lz(|8H!x7qJ0 z+p|h!zo+vS@%k%~#)G_tVp%cq5i7;+g56dt4q4-Bgt5lq#oZHsUm1ehboJ@hWt_PR zq??mu!K5T!~tqbwH`M-bV7;veCLAuv=ETq>8E$K#g}I()My+ z@}C@8(GhXfeno(JEiFGQ?@&w&FtY*+DI0t=2%!~d?O6MwOQfsJ0RkepomxHqkKyl;7qT|b?@JCwP z5gX%#uK>|~s5djZP0gLudVm*Y1hWj@sqUFA*_w+eNQ@VHCS z8I3U%i2Bjf5icr*d$N11e<1$8MP6dJOm#_+ct)W40HByL@sc3g-BcNj|LAvCHqGRX zEyhi(zNJ&t)YZqP`psaNU8%??=j$Tfzcn0}xUGAf5LbiV8H#$N(k`>`)LV_qjms* z--9w)b;Z|r^!FIQ5df!6a%wKr3?b=t#5n>yN@`_7U+F3$+s#~(pX%o z52+W&ZTj#bQQ0uSi-1ugqvMW~L9-!@M9*wwahO_wNiCtpG(Hvx)9Y$lYhs<9AY3QR zHJVH9afdKx#XVAC6P+@P=cxo^@lB^T=BV4 zw3*m~B#LLb09T>8Jcd&qZ%IBa%wtI=0WST&DTK^ZJ95o>OaY}9UCBGing>D74vz+= z7DbE-^9YdrH#fK5A>1`)6JnC5JB1tW>P%TVBr#}H|FnSSAw$aXce$yg%H)}0`)6vx zt}NKiYgf0&?HPcM68!}1ojr{dW(exvUE?0nu5c^QHNx_acvu)cuD7o`;uTTukOu0* zaR6I;|FHl!sjY+v#9;5*J%?m%&`AHDAaY5EXB2pQKxTc&BrIbgU}jF) z6Vxn2O2!&3ZR`mG;XIz(>5bT%MY{sLb3|)z5nu-DNXynVf3F;|&qxdkA?Iw_MkPzm zzYqwUP_Gk2{kq~KbgM)#kE?6)Y#J-esCnEV#QeeJXHV#Rid0JoVhzU<;$|#F<{!UF z3bL1#%xr4yE^F(E zx$#XS1B-bbYl~VCHlJOI%KNx}PYgE%FvOc@1(|(vXX=Xw>Hw&P-Z#~@SUYp#`(5H- z{dRlN>T#a|tbxI;8G#jDM>`TGS>8{Nhtsz{P}hw7b30t3Nd2jfQq$OV8Gh`Zx- z(JY)h>+S6aMY!PPz;fR?ScPKuy>pbHV|sf;gK-a`v;?mX)_2*zYlL|5z$*gx&m^?O zm*xUf&oZioP?kAwDk0;m(txb4H3o5~ITf&>d$Oqa!?c>v}!Hz-ikNk$ZA z!3Vb$G=s>`*l+0b^pA%Hfs}}nt(D~@C8rizX9n?yjdKK;@5IGUY0ekq4_T5r2)K>L z`vZaQu{7nmHaZ_kJ|0WgUm3Hk)exC)Dh(p|fN4eV{$u z2xtK>4le+iQ`n9Lurlx_2#t&{HbXLeCr_CkKhbYjgy$WG@C@QolrXn}T_rhBb{KTR z>KK=GhY4Quuo6;o_GD*YJSHF;xOkg#bK;*u>>8s8IB50I)>%D%L<@g8#}W%*)t<_%t1R?yqSeL*i}($f(2Nr7?4ooTXdu3lp$vd} zdzr{Cn9!_DR4Y84L#SD$baUquJ=7}3O9f6$YkWC4bk5p0htkQ6b@2m1pBd~JiW%&6 z`+;U$z`n~<3iU$hGB%bWtvbKkAKKMGs8)zIt_^-0jG>=4a%`fk1d=cM1iN=}0B-Ax ziwgkv21d!w-ywyIn}Pot<9ddWY+R;zM+5OwL1G^69_JSy;2J9x{cS?Tb}lF&;r~-4 z)$ORrST`K|eK*f<{}C=-FviXjSDQ@o%#jS#}sg^zc%9UY(WY2pc$uvAJI_tL-gqhv{U&35M(@l_d0KAbdg#4FDnTlM|BHCPT!PB4=4+}608JnDEh^r|GCZ%y!if2lY zY!XBjrHD1#^7M@oCe;~Z#qp+8OK+9 zAhODHK4LN?#JiS8p^TVV-J(GW4X5j@+4{XuFd4Vww)lj8yZYSjkV|CjvpCPbn^#h( z`_n?q-5KMz%eSOIad#+|_U8X^j^(`wNuB8hqQmqLHJ-nqp)OYFgP`$T@KvO+SX;C^ zh4^h9Xf1T(;fexH;*osx06mb8U9pk^P(Qzh)5B zeci2Vh5cMEjQ4Q`_rzj 
zSbh}1LvEg|1w@rOT@Zt%rGgK|SC>M#%6_`nw}B56Z2rtF49J&`fqF`|mhich+3_-A zPKgmDg>%On_i~|b1(T=)U+(LRZ|5qWwd|NT33F{5dzQk?LzV&1)Dhn@|BXoJ-NK@Z zo1K&{D<1+%{cGpcG{^cP80_*gl$YBi(QdD7h=fg!-33uf8)k(RGyYituG~ycM99zW zhZ>O~lbJa>5N(HpxM)@*sS%X>;X++uTx8fuZWZ9-nsql8p?2C4In;m!m*aGN?MR@k zPy3L4`;0jIv}&>XvxKa7l(XmZF}=$sbf*9_93^*!Wu_bpzXz5x+#c@(vAH%xmK|=g3lh0O$Q-v{+ z@c64>JQhV8G>;jWi;p+iqOepQvEYZ048i{K^Q&>W2!9naO*tHHI}yk&Vo#pT z(sRBLw+L+nQH_wsFBV4DJMT0tRz8W}YEL&^UX34#Bu{w*B6#YUTK+K*&6qFO6|BRZ zqP;-1+G6COzwZfmNem!@PxlbW@nkTsBAh?(JDb6VgGpQ^m}(|ZNLIrtPed6(WFgQh z@>+cF6cBHBJZp=Iw-=wm)WN6fA7_e_kK8ZP0X4xMg}mA!gGW4~l%AHg6~&j??or~~h( z!dxIv0ZOa=rJsP94f~I1(;SGm1-VEa`A1||eNULnkP(4M@@e;fU$9%H4n()hR-Zs; z+0u>QGo`G42I*XG4H_*9*-;2>@JS{f5Xm>JVv1#?bM_i%0%Z#n@fxTbQc6q_N|(s} zQx&U-X1XbCa&H`2e8USQNLhs1sm7*fk;b)bsB~#NB_P>m0?9}Z7s~;BC(&;8z2|A* zag(C)r}>B2Zi#s*U%rY!K4e+K4U0>?=j1eDRYu}&(X>^EQM7EF8>apY#4Twi8!E;O zkHdtTCwq@yR%H$!A;eY4VuY9DsN&lhS_Z0e3X3a*!o3DeD+9}7mvbRmM`kvl={fK` z2spBYWw{)8h$1Bp>I$m&kVqGbszgeQ$-l^v{YzZ4BYeX7Anw0e+_lhpx~Cv>kjdKK z+a`a4y9BBW>5ED=o-97og(D7X@h?G)W&*w;#!%RW7XVN^4m~M6GEdzJVR5OSH7mY) zA-~Nh#=$U>p=|YM3iXtj$*WP-m~>GN%uraaI8%Tdg=&ipATG)fEguS(i97Sfr11b& z^o8*YumUrcPWUCT+scZgD9G`iAo!=N354aoBu`MoF_~~3Q4z!dm|Gd(1-*gzo@n?` zhu)9XYSyKYbjJ8i@MuJYyVRE=SP@Ssu+^q##0r6sqT$6^SCJS-5s@ir zh`(`lo+EZo?V!je-xTN^i(?Rszd^Xh4tqm#W4~QMHet1we&rfy`UxX+wGt}Q+ZkhI_nV>35+b8}xx7di z^&?p2?d-!{@wr<7XhXC7)@5ic$BG%mI7pGRuw*C56+$7G^l_zFwMgeF)f3ZhbN0M^ zGDak4qAUxblTc~mkhIqAP&AF#*8JSciyaRj(iL<2<<#b7t1s4tHFHKYd?LV^zaX{OA*8`#yC2QzEKKs+MMY*f%k z?J+&^RFM#;C`TVy;Ws3tQ8+eYjDl4g3vey^ODXE}lj9&RRxdg!a=-4Yt#VIZM02@) zOqVoxNkyP}REHj*6q^fxAM{F!12#WPfTdCXvej7aUVp(l%;JJrcZ~p79gBm^<@DIE z3veDfh5^{!6?^=al+2K+c*;Dc-x(9oaExC{4G;WXp3|V7;z)MJW%mJQQ)#4gRA-_? z1i3^xYc5GPv$gKe(<$iqFs^qS3UcG-&E%z(xIut9!)!*^VCC!|1)GI!3^8i*ln~dB zE8AVkzN>w5toT{RkHbd-#PI&f#g8o(2iuDjmQ5C;th8ZSO2s70K5GKlx35b zW-IZa2)CVsNHTQ2E(o@19B_6|nxicr$deIqzSNCoVlP3O>`YZ=%_5PMj(FN;lf)rH z%zj1gO_B7De-O+Q6P+b~KLml&YWH*DFs%bgA`O`H1dv_!LLC=2-V@?E&dG%_{t&bq z$7K*=$Dv;<$1Ot5{U%0PDSj=$4WytU&BzxX26Dv~qM=C_B7gSR9OE)6m(@zk;sIg) z6urBAA!Fyi0h#f2(&;h$#^N@Ct_g=Y1z^#nOekN=ev&Rb<6mAROs90Kt3DD+l{rqd zB_ZzXj{v|~v^|A}C69ilmG4_e@H$ z#tZ&{)kL<9jYN77^i|sEnW5N4kS7K&j>EU33ShQc9A{?$Dj`l5RQC86`NX`Jn=X?0 ziZA7v@LDF%`TbCUxGb$3=QpW3mwOpZ!xzON1s^HW9m@@YYEQO093^-{-NQ8EPCsV| zhD-Xx1A6xS`E_x+5_-1c0LUqn5&f(%*R0A@MmYPuSEOp>04p&sCcQ$cY=!VM*HW|A z2sI;W6$3PW`6_^!)vM(nzu?;hx$lT{OV}^%2~Dp7y0c0hnblsZnTLe4o=)chx>)C* z5Dcg$VyoP(-~OPUxXm=uz5Bj!=RP;R*&wMSfo>*Op72y<$4Qu*iIceO03R2hV${br zpnFaK73gFQ3=j-etG}Kj+@_Kod}fCDi6kj<)_j9BbW>|ZgC?F52^VxSBc`^dOM+ab z63epYX)OOH$&5*PkRIL|E->voT%950h%gWIhIzB&|NaGOrq`iyE5)3*TpSaD65I8t zWv3vkc-VuJlX+$oeoO!GbqnilnR>+6-UdMIY#Ed%UbiTBATPTimZLKfWESHGf*eV; z;x2*a(>%6xxbkL!wBy4JgS-Dt8b&MOm?U*EkkWQ^m0$+tedkE7>xnhqfu^nt(PiI+ z1)C>?kXF?hFTBe?)D<;nxlNk-1h^`-r6E+VniL82q^O@aYkqTMO)K$u3{wozT{t)J zn&x}X3@%|pLw|In0B6QLV5TDZ#|;AA{u0+@G)`_RK5Quy4ldSspEPd!_(h5c5u8p+Z!F_@O9Q5r<(m zQI1P2LC*>1>zOl~;ywL_H4K9#B?Z&|%6|ahu%Gpaqd$c-OB~OL!yvcPgi=gxLw)Vs znEPKS*RPwUh!(d9@mGv|lnNHWbegUVZxTyFPFQF>B1HF>G z>HkOrr*sQZ#!lE3J1y6eYKj$7qSwDg(&?I|(pO!$JTxt<<6=44 zBEXf`yT*#5nAY~Mt(+%hmEolRLjkZxfholCXNm+Z-P|UMO;;g-Ti23e;Ke~gJdEWo zqJZ{81=NJ6V^4WBZZEz|wk@vueu3{8KK=3(>xd&nQZiDsF0sUVt3rDSh;@5k{1_wtxr)oXi!s3NnY_kF*D5{AYp6n1h)*D!vG9?#yTZ(vUx1keOI% zV}>a{>JNqKv-Uok+7XAYk$Mt zi2!qGHewP5VcBd+ud%ns zbAl*~obVB8Ul)p+qqe{qQ5=zB@kpUAkQD>#h{oxTUjih5?n4q{jpGGuo|9p8v_38^ zX01t>%Vguja`XR#SZQ$1;C6_<^n|OU(`Z5}WU*jshkgJr)$fTi>v>~B;o)8uqM3nz zK9OtI4@j!dv>@bu9}jo4QpIZPfif6a%&`#+$70cZ)H#&in3=RbB-_mNou9Y-~&v8t{oQ_jO|m;(Xjj026W}`Z>L8pw6qt 
zdK>c(ja$&z6dPr~dlnPYRKHyo%1#cn(yYBzfCQDq?NN*VPMhQ@35bATu6?ILpq9_^ zn+Ti|^F_L2ni%cuy!!|-3lL8_WrjXAON0-fHY;*D=qt`eYaf$gNF+^_?wVr~vI;>; zGYlrpr&rTz{;RRi%4A?_2I zY~TvY8C;46<6ZsZ`lC)|0oM_#c5^^RI`L{)&R-Kny&G&P?})bqP;b3s#0Yz9D@f{@ z#uBbosC`!08pw5G*Q=1p=Y?d6B#)73UqNJp)&M0(oGj7xDRmgPW{cYOzjKzio7YD_BNv@ z>0=}P1|%|Qxx9=gCPlb|w5mzn!lI+|@mII!A1_8VBh~!HuWbdpQA|&papTVgs6Y{f zBH--t!49N!D`qm5(Lwh|`;Iw=MrnW>)Lq6baZB~6Qy|wESX@(LkLHEtp+RhsYE4;L zcyyw0FOT5-dsLBYeR8p5?j0x0{6gu{i=2C6_8}%%h2l|X{GrH;(><;n<#SDjPD0fiQkqy3i3vOwNm&!wSV!(X)lP`CZrxnaRVk<4(?t51Y_? z65J*Z6hty?YznlOmY`=djAR{UJiyxUnRru}Cn?I({@8hEXw8L@P9BMmgG6RyZKSF| zZU_L!W@$Px*E3(P73uMTHUkM)2OAc*H?m}Oh!j@yc5%*F!v+XPeU?ZHCOr?hb;SHB zrZHk9@7TjdyFW@Di*+wFChZF3rZ>uURo%9?F!L2Tiw)`9(+nt<1v=HfSR%j!ZZ6Ru z$z6V&hMhl`-O+0NKM)?jtfGhMW7yHFC@JNrpKh|9MF8eqBhqL^cge3SC>67VNw25l*?A-?FgvDYe=tbu z@HFVoXXu{#!_7dzqUTxThJ-O2i0x9vwSXG`tVq>JI8ju@sJn#0#Kmkr+=_U-NTO$t z5zL3;YkQDHvPg2k8xY0D+S^@;@9DQ|uQgyvj~*?by(ge&VIBJ$9ks6zTCJ6sot5Xr z1-QCs>eV^T+?fRm?WSQbz?g#tQL7!`&Gdyn594;SPBdiz6W@UIK!?O)yX zs;xDWG~A3a^aVm_L1TSOjMh0fMDs4XfhAwofw;>)wZJdn84JAfj@kV~1@ z)2nwWwo)PwWFoc5q?KHQrU+(G=nWJtK2H|OCKgQ%rLWB<88vFfUGZf7_6o>fJ^gLc zI>qk_6ay4eCjx3;w9j$&B$Vzx7K^5*nvwwXm?$r@4nsuvSXPO?*sh*Gq><=A&R=eC z=b@`c6?`PVEZUPc38Znj0Jnc8a+qE^y+V-LgxS=ure0fMxDV*nB_3rf@*T z=9y2PvOO2Aj}%}|5}2orjc~7q9O1N3CK6*$fy}_R&G>mwh!@7j`g!$nT_d!snC{bX ze%feWo(*aWJ$tvu{5+BLr@E%MbrTT$YON?n-7Oi_%}pq_Xi@G5iL1vBYQ*c6oDQpY`;hqT;t^+aB-0q?VC;z%%$H6 zG@}u1nJCQI3!3wyOL^~H2mmb_yVq6eIcGvAE#g`ZGoAD%bLu|6}vuVdKrEZG^ zWkXu~hX>4wi$IykLw@Auwv5z1+0hy6?aRN^QudTl#yhud0O+elS{$NTlxxIbPZI8c zATt}j4yV_9f#PDQyUGqRNj|tXd%#8a8Ff%1;V|&yigaWBzjf^|Va&U$a_pFvZ zWX&?N+0U|cbmd(9VmM2jS7e9K#q^N(*Hr@PimL9%NCai?gAp?haw&YRs9OxjIYP{R z9LMDK6-Nki{S-W$ONFHSQ7KQyDLn06337L%zqTEQd6nRd2e=<5Kn_@?oggCpowv-5LX)AJ?2@h z;a}(k^eU}*^HuKP%0;m&w$LB0Iw~M0IUJgR-Bq>SLA1Y){MrL54(Ive0{M~vyQV$wumCH`G~?uF|kav3&f*c z8VhE~Z34~F79CgHv6TEN`;cO0<4_lW7VW0C^Mo2cobnw+a8G|w0tpw?Hq^v7dr3%b zQQu+he@2x5oy&lYH$|JVgXs5=i*D}&ap9OomZa#!tMr3;(CFy`w7s#KASct&IT`=g zEt28C#if~n7!yQ!y6vU~cZ~sP&+geeV|~WMd_jf3%nAgV5m z-iyZydhPc{;Ak+Y*VseCzFrvyf(ublrd0L3GYBf0V}y=ad_tPb)L zP15N51)Sm(5w=#!l(nm*X04e&Z$b3vw`(~>Ku$!0m4`vxlolcw;QkhC3NjOsobS(7 zMGqCFRg*1=?Z18$($(O=k?G-nyc7_oa(F_7W(G3aE)(V@U6PKCrYy2I3wP6+den$& z{9ce*i~B7WpMMCT5onlEAAN0E{&znw_zlHP0^I6amI5TE)eZq6Q_gOL_KLLG)9-c?nJljwy&E;0)9(IDQg_sdF-TjyhrwU-LnN!mk_ZNTS z-_2?0T>;dQjaGJ#JAMQShiHPLK*!7zp4( z#5F(mSLD*912p&6J_GGt9zZ`|jejW>Wua~?5j{5!Jvq;*)Q+h}agpB==1%NK>CH|U z`<)78Hfn>ym41lx>S03NYE}f|8*O+R0M$g?(`}G;krdXH1)@JE`+HtZOI^)A(Rw<6 zyA^t#i;If=#ysqr5tO>p#jljm>jD9GQbbvc9O)0IPQyX(u{#S_f3Rq$cu1jsg=j5qmGSeU=`0Aw&6FuqqD{YDYt}j3e3SI` zu~2i3vmYT`5Cm2{JI9*2y*(tO^}52Ss5}(<_4a62OfQnnz!^ZXHsWew9(-Hwg0IkX zqRmpBCX)0UhyN6i7P15q_bI-54x|~!;|jdC6^%m76|X z_IUZ{`G=NNc{uAlAgaZ-g@j18R{BMrhSS*q8ec=~g}fTenz~d8$XEf3VF?WHi24ii zk2=arRLy8CAX!Rv%6>_Zc}wuYUE3zNRF9NazBX?kYf|4%y=Yrh}=$7Oo zN=3H5*x7NmlJ++q7tL7Nb%$MJ(_ix2t#9PifIM7^cZ9(s%%)Oa($=#s0m-IDvgTG} zlS_d#N43q90vljUX09mhX6Vkdl#pJ2St(BNELVaZrm^9Jt3|D znmNQF=GTY9$P?|c{28{r8ZxUkQLj0R#Ib_>6?+E7HvxfTr8b`U;EmdZsLog7o3 zM}Nit?)xOrA_sBej*A34t7hWIryb`FVJ;Ex2Ss*@pIr}RF3cg64$JLhLTK5n9x{rT zMNwyB4(a4nb3>jSqZy{1c%wiT;)w`rcDT`>^m0f={GTtg?KTVIfO==V>d@ zaY&YI$@t}&U%~Z4=%fbb79-+)k*;x)N0;b+=5Q>3JFwT0(o#+fKM>7upWoUV>)qiT zM%hi!QS@fyR6UzJDJ~G{YM@nDEHcLY;{x4Mda+tr68ql?;{KUMObp*rhJy*uS}|P8 z{2ESCVmchXJQqD+=s5zieJZ~O8`u9DbYd8@I)OrcC%eH_sBPtRD>D)z2{NVr>CKfqg2W3jGM zx~|*;t(FJV>E<}0u4^lrPw9K&1Yuq{M$uzd# zW;ZyTRO95t!E&i!*sbdwymWUzl-t-!(6QmTNC50n=8C89 zZhKxR<87>he~f;+>Wdn1P~Z6Xc^0+YdFzQS1(@e^crJ`n*Bu3dQ@1X`{I>XLR+%$! 
z?Ei=SBc?(dL*;rQ*)$vJ!!-))#_%7(+$Ms6p{MahwgfvV9!W##tbeGIdPFQ3p!aFWAu+3!$n&8m;OeGs-)S7__p<5^AzpeuA!&@Ss?X_>%h}5 zej+5=4Y^EqL%{8r--;=6P8^?h=S_U(-6jIkx|Mo}w8$3a@#y0G=Z-;|Kn25?ikpM78Lr zc{69$#gj!6iI$A}__rXpyNyc@aB(0$dNTj4xm@myU7mtA$Cxe%GwJ+&iO_7e;2WSz z67hql!8`~*8&@|z67D8-@o*jk9VL3lGk|H_rSr(KB7HcM2^AeWp-U9;<^%oVhNY`% zoOIWD7ARY9h!H##=R5~shWB!(tAG4bHcvE$f?X~ntIcxJEdo7CW^pqERol}ULfAVk ztIVbypU*!eIuf2TTndeK{sEYc975(w5MbzkNvNyWNEC^7_SF#NVsVM*xQxcnp!qdR z#$TMiV?8Cz4y5T}0Te47o-ttR4>z!$5E@n983Axt4k}o{S}#Jg{t`19HQQuS6U#$C z5r^UeA!NYZ>FJh_)`dk9jDw15e3y`H$fKubjN&){64*q|WyU}>j}ryb!o>*kxV-lD zRpTr{^c^Eu+L3ETx_rb+JsH3L%OEWE4FnSXs($l5ceiPE5`H#&1<1|8J8N1@73~b> zVZx?KbwX$$+bD7OmWj#+o1@r`#RJ03_*UX>%cE=ESAo3d_=z7G9MV0ldHRO}c&s^7 z0`+7FlVf4>fr;ycP${nZpt|CA_iHX71)7q+q-uy-Vf$sc|3nF(2vNo;n)FYoY%;Rsn((oFUL3*JjUCq8B^q4N!WKU3-ZAWH_$O02URF z2FU80zeNhxg>E?ke!TC%H68N^;tsH3rU%Uc)7xRB=j7Z!}Bg(v6BGUjq-}c?c1Vg8mGOUa!}7=5nnQ-O+@m6=&toLXD+9 zHuwHGOQ~Gj{uFFbGT1o?@?fx5M+4a?dPHYwkZ?F1;>j+| zqXlI|zq(zL$e#pzv>=(GEbfcf1ewn)k15I_r=eI|;@27;Rf_ z1tVB&Bq^}E9wtOnlq<`0^cz_McqtER1hOeBlED3k0}1{)$(y3HKv78{)2B844xw(@ zOzHc@_3wN>$JmNEfHQnMgt=KYnF(${kf&RpE)vB-E9GhARH_SPB|%QZbS6sgo+|^e zOf=4)*BW15#sAVWHcU9Os{l8@34pW6^%-C#(Zx^O(JWG3|U*O>b!`& ziVHIbuB`+(FTU(#eV8P~ZNS!7!sjC92y*lHo_NgR;364^bX0m{ai<_}yLeHu5W+j^ z$1@G9^M@;gnSnrVDbR~yXN&LfWu4qyj(;jCV@mPj^>Ws|Uo_1aOK%-)zlLcP7YK13 zTuOk3b&A}Az5|oAfGpD#5RJ9*0b%ZHPElG~WSxAyNQv;DVwT6TYdZV#Qq9V8{6Vyt zA>Jw=)nC>Ia%T~ec}Y*aEP#>LCs)OexOkoX6T}F9avy?S%~RNfrIUdXZ<&9T1cfLornlYewJrO(+1rDT*v=@Eq$dBe86< z9)PEKjl%1uD^=43Qgj*9m{+qMHvn-%@t%T3c*+VgCsk`^csYJ3%)_OwtcMD4$}dt4 zu^&qFdBBEwW{o_ZDmB3?8|BFuRfMkNB5=aYmhw_XgG~-emv5Y>oQF3~8-}8|?#p1b zirJ1sIGlP{fLk?#>MA7n5Ki@gwzq3MNxd%-Q}0i-=r1>~73+s$HkAaqbe!FBROd$6 zSllkujYc$KZK9(t`)dAihgK5~cW(-U>FgQU>{i_jit6oy?=0@n)%Z#V;H0a8T#6ks zfaM2~mnmNr;BushNb!iivN@RR&tPLq!nrKj+{-04-WHGp34^&@((*il2yZ1v%l5;8o8JWnpdx z+5z1&JY{PTI7$vn#xKAsbBzFZU~@^fvaYyZP{tDnl1`3OA7!6%Xe2^ELvQzONawMV zSx^alQD1?qM%RN2?RVTfC7#7Wb$$?^2|KyGzgNkR4FZ6VG`ezs$A(2fA`erK+UPl~Qd zP_V9^wOf0f;^mx@VKAx_AD$$`oGZ*3&%!WEM_(xf77&&pU71YU{a*=mFU=Vpi@QZq zIDRk{%*;dat^l_JZ(ONEV)w~Ftj-*LQvBDqcY<~oNI+qYPSGbm5j??hw$W7Fb~^)7 zd*zO7#u&yaGCNF(TR-r`MhCJue>4l1SgIE&A;u39_Az4Lk;se}z*Jt2=JU z1=K8S4#Lj^pI(q_nvLevoz=&YAt}rKS0vYBRnxkrfJZ?F;aO}E3fGaZttr!EJj5+<7OU?>S48~-mePvDu_X_ErkM1LicO>V`6Q^-O0*=NHE1{SSyke z%lb-nqy)0QE=7{uR}j@jEakoGp*Sr|LiII(6^Y})ED2i_xv)*mk&u#%BPot98BDl%gzmRSV1-w3g|Or&<1*>rFnjzzP;=?j)RUCbSdg|nS; zAAE;gZjN&zH5w!7#D6HnUDLtZB+srZ1!)duL9Xw~K2oc=XOFCx5H}(nXpjM6(C>GIm?c`i1^6ueDMrn&&SC zxmX=8IWzsQK!^{;3)OMp{QNU60!gHZvjy=P9=^EZYCT!03B=WCVTb99s|9%Waw`i{ zRaZ5-_Q^5C2-GbYxql>#+0(#XT%Oz$(B-0O^Nt6t^ccb=1tO;a*3J8bpz2tHOW&5?=C^x5ffnWFhYEAI?|{M> zy0c#)O9*B72{;FLdG)AFh zxG@lGh^Bve5~wxq6p^(;kKD(SZU8Dw{qa{}#+BnaL7wNj@Pt$%pP#upr)hVH_^ZJ<^Mqio%;SOl;bh zr-Y5t+;HPonCp&th{bwoKY(X`O&`%65N(bSwEb_TWD=+)sz4}rO&WI*JyG-ZD+)cn-ZW5SNReihhN!e5@{;D})Z+H;BD zT8--jOca6trXmR^%th2IR8B`y^d!_wYb_6AGVgXmLpZGCgFE)w{FX3`)9K*}mEmLx zOU&UYzBvK}V+R!{8}aV6CD7T-=UxRbkT4p)F`Czn1qWLeM%n4YTsO3aa#mA6t-Tb~ z%}4Olee`r^>2kqtTr*EGC5MBbj^!DkP-;Usk&KhjtZ_IjCnpCb^vARDOy*ezt}Par z+y$lLjSGa)6#B2ZWnoiXESfRWtLnv5qTDcsczTqu92+ci)-pMEBirjd|4(7=s{RV< z%UJOc0FM-`CfM_0kr0wFK&A2=iv6-AoCnbNYIIz9C`nwI2ECZ@rf9Qk5$CNeBKW++ zK;7CIJbp$L^EmqO9GcV9Pk7PJc(#DBho`o#c0`{2QWW7qL z*~1gfy{K_xND19LC~+A2hYE119Gsbjakh|&o|dK(KfbuUNXCN3bFXoy5cklWF+yLi zeYA7dooJ5wL$Q$n=RAx3jvGcd3t=IyM*xqWA3(U6dZMPXcoV7n+s6P>W0uA?8m#ej zydan;=do(scPtd8wrAlnJ{9eTvp*8RH63~mJPy=(GvV5N8?GeK+^E(5i@eh=$lSp4 zKsbdi0cP(k*(bGX!^j@NE@i;)c_^6rfKXPrUS6cVK);&3iY2V!Gq6OcQ_jF*nIa#(Dh%%FiC#SxK+x+Sf@G#!RJ?L(8_Ze`)v4po(N(N@i54+jstxLU5_Tbz0+#hq=e3#!NI_9 
zi%$hPHx?A)6vi$mSG1Qf7u+LFJMR(v#=YoD7J zGf!_FaBkV4fCv)I8&&<$DaiFlkBk~EE-8?Oj6<04DJ;JA_#>k4t}7BRu~fNVLwERYhquutf~x0z`7H{mq7 z9p89~0W*YcvQGe2l)Z((Y`a{Rf6$7xWh`b1AV)^rVucfZLiEHm)orU%{Pyx3;(fjj zEZ(uo6+mn@t#VG;L4?`M=0$AO)TC;Zg*z)Rv_tV{K}^rK{?T|#zujc~I(Ydaw!9LA zx{O~o%w}1QJ4KUXw5L~AbE zi(o9TbZ)yY84od;zF0slI#r^u*;$aM9qT`mHhs!$YPt^CUC)+<%18d+hyN-E(;Enj znY(d>K=W|6{q%mHC9~5NHZ=)0w_cwo!}5z{7)>{Tz-kG?m}TNX(Js=lzcrUWDZqK< zQJmf-!R&q`DH%{&@1|@Q3vV*XwOrY|?-c3I$YMA!7~)}}&Yfd-I(HCkzDUZ^4$T)L z?r{^%A!TE&DzdD0u}-pn ziFE+gc_frwVR274hn-KA#$$jLK!R36T$}m*B@_bxRT7W0_6kuA#a71Ic6R|gT@wN9 zhND#g746_MCXD!H_P@>_x|9^ncjq58%9s)r0qzCX?|zinq#70I(){$ekI4ykTWF)! z<{=(XUZ%t_1A_qP<0QwORAfqUSrST%b&0?|N2C>pTeC zI*7HZ8hZ-x2-PhnxnK1Nay_|ptpm_bzx7wo6Be(c&3sP*lsA4EOk`oy)E&(*C8Bu< zpaZeAz?!9Y)6J=)ip*Nro_pf7`^d~KBOsIO-V52V;}}t7FJEpJ?3YFI zg;rM!NtF4az--NH!=Khq=6>?09Mu@@L*ixKO=#9}BiPXrlRp1v7gE99m@<1svPn+e zetKM?->x=SNZ1JIibMVa1{b&s!dy<`!(k5?g3bGnmeHaTMfpJ>R-l?zT>iR5(2U8x z|M3@*IhQ20XTgS-dze35+{`2bG7VM~W~MLD8=VM%2_m}(k9y$N2kwluixjw=a~H4@ zn+ozcpPfSBJX@sEQAam2ul}pk3_?%^=Ai(p%*Ca_IPz~0u4B58ERTnzPW`_AaD6z* zN*^BAWe{dKj>3r}{}SX1Xm6>+J6W1k7OP6nP~7AhU>l7vm4{ ziywt%JaB0V5$|MC^v!4sui)snKUa89*-gh{rN;nWZmF-Ug!iz3thzdWvK?k{8{;@g&?AhZ1iA#mgA<)`IJQ$dy1S{hr7p%qxSp6G z;3r4Rr-e~ZHPDX*EP5KmY?wuF@#KyG*e1;#^5JVDor|CB!<=&3Ghklbk>`1|O5=kU zET7Gj;u*wU%D(nEO&DCq=0`xju6X4+Xm884JfVgIl|o6h$NJCb-zLX+Ia_XAUCAt)(oqIebq+Hl9vAJ-K)P1$FBdsUGp;8r;;-2O$YpX1 z(`$OGzLbBvP&+*fVx1rt(Sk*gow*vDznp&zcW|jp6EdM56%A~#Z2lx|R8rSQlL6s0 zS_LsG7U@BS5fL;fT{!QH&0qC@wcXrfm?pwwp=Ci6hx2a=fU`CDO38z0zl>H*2N@i1 ziFRSy54npItGxyU^HUh`q#GUiCx}~4EJ_XqNhI0vb*H1#h;PO)k4ZwDnQnw@Roq38 zdC@o#PU&04hg1QzOzb7Z12z6uHiOh&Cw*YFpnR8OYOMcuo@@pu2Ccih3vy2=0xK`dQmxN`<)yuH+CbJx zpzGRHNr6suR=rZ+djt<~Q!YLb?GX%LrKn%=-+7@;oC|Q9isj!0GINrI#Htt133C(5 zM8+YedrW%Ic?~L(OOo2ggqSm}b86<@DC<$G_}0gu z)LfDTRwub2JS&J#QNidYAFI`JpMbbuac@ICNA&hMK%n!RfeJ5sSLr~Zt}O?n#heK+ zItim&mk`fY&qdu>q`?r&gh5Q(<3D*3HEkRV5TJ*nS0IzLR;x}ajuXii*-~E{y@yO8%0xU8xhdh zo38tx|K|?k_Gb=K^~d1tM>NQbjdcpoaOuZ?BgCC8Z^qJnXEUKb7D%120qEZJ&`2+X zJU%Isvc7Zl({{Dq3M8dOf-rQ(wj$tV1BYZL`1eE_IgJAdXQg^F+r(&P*^dP8aHQJhV7GHWW7&2ywA>Ng!6r(~RoI&~R*)A$UqK zoxADcAFNCoYAI2gi;qu;bS*jH^~iJL&{cpCQ1CfoK*txLs7?0RJi}G4ckc;J;|~+W zur5Atx+)0ywvS(+z3*g^ZiH^7G7fZo{am5ucV~s`pxjfg#`ywWg}E4EXmPBu8jx3~ zw@uKDG*cbhw+)5`5w!hCX#yYbX zxnhJMNo$afruP#vWh{;nm357d0|W6_Aq+Xa+mv2RdQO0A)aDn5+T%rGZn_?wOL92@ z8rIBH(hs%rnoAy){<=7wUp6Y8Hg z*wz7bdy&sdXtQ`1uE6ju!sf@sdP7(?tQ2XQCrf{~t|6ZAS;mY;O&5RqQeK4hWtvk7 zbl=P6T^@b^4~!iplOYK?qAaNP9Bg5j&#UpK0Gds=?)=TkvBCO~&Mhr4nWlIz!S32Q ztd>}FuG;{_E#u@=Wj)~`zYW1Moh-*+cz}ZtJ5a*S8f^W#{KZ)!fzAM1XuI4*<5guq z?WhcogN${=hXOn{>Uye#eMlR^>KlPF?C?CmuK%hi*KNiSPG$pf&&K{AXI0J=$9x6K zyq#ShC8WSmeEzFIZYAE4a9d8{9fqA#gQnnctfqvn@5@iT(`y9A#%+#K31;b}DlpyZ%>tk7)GV0|Hk1B3JPTL97}C0?RnV67pN z@(2(i=PHo%g}6nCS2*C{GruK}E2<~6(EAL;*22t91W#uAcLlgKMk97l^}|6$Dy-gU z)=w4S+_#+KqkHXEK<1Ym{8;!{ILc8KOg}8g0f!hk@h|n zd})>=KFX-;>(x7;xg4uaGSWLx!64H`E1wbK?4+*b zz?UpO?+a$=C2h7mcu$`UV8+p*cwZ%5*qS?m!lQYtDSZ*5D9?hbWar~{263x-TpDed zd;k;Zs-lU)vkc8e!mPK!`jDKDRFNzP_dh`-xI#}?6q(uu7|l?eB+%(_1ncE8T%0V3 zHt4~2HoSqjb{7B_jB>!^=D8`K!FZJM29C`e?Fz=YOktgn8K#T&cdb_U<7ro5>IM9f zFhUEXKq5-(Qtwclk!94R2d}8WlT)35Wh4G@Xk)l%TVSYXlCylgG@W$rUK|lP_jKV*)e(E`222a(-zvYE zdqk6+u9o4;g%s8ihkwgysBF!G*7-4gcWAFh^L2ec&K1BMAP{_4JS@UvZ4M6+vvB-P zi2J@)!<3b=6ps|iP#SO~B^Eo$yyq~Ay1ID}72h3RSrP6QvT(A0vh%(v%vRkL(#E4x zirTO2!9Q-NbiCM{&~3EGE`k{qv`PWWwDx{WkohU=G$NIb^xw}CqV3QvK&f#q6GY!= z$HCUMaId@sl-s2t1RcqJU4ls^hZ&asGio5+=uUaql##IGEP>1tydPOf4x9m%HH>AL zaAw2tA7SPm8mji*q4-pY$FuY{924Sy#fK6ad6lOcYt@p>^G4GbYe**Pm-UAkG8dH# 
zyQ=Q2#U(|0yV~Y&+18Psb|^3zBX#Ob$TXBux@wW1C4kN*a!zk_i!cva@L4TWEZUQW zlEpB7_{{x1XXS~A_mJ$744UocfRZuSK5S*2n*kU+nUnCMU4Z$cfbYoY<=DF(jPXkJ z06qK@^D-KpVp_mQMVsyYyl~*d&Q^N^x|VQ2ac*Vs=@m)^^m4JrWSl2}-dV;HR*Fp; zpeTt^-#|W55$%4U8&I_m$M%g*!bu6?AB&M_Cz*rSByYMHa?vPMlfeY7O0yEWWiTK4 zgiW_y7^S0pLeRRQNP(gfez1%^Ak1SUMJOAHRp;eMi)IFakAyg9sU^`m_QVsyTrKWb zvg!B5JM;4o2P-4UkMVp{fiM`!>dbvW=tUL=3}hn&##3PPML8;SO>8xBxsDO`=^vR-pB?!2IF?Br_+tr=1>XeDuH^8Z7y!_naW>RO*QRr{PU# zhqNF+ufJSQ0|!zz+*b=^rh2I@jyZ@lZi3E%3iKUowt;z!WJUt*plb(^2XR5KO zFb}we*d2+AQGD9SRTgeJDzr)w?}U z!|S@qSv)#`*!$S%DWAIyRvkukf};$3{>TfWq~FXe1?%Bhd$`DnlDI6vYFyU^l*vx% z#76V*GOqfMEGDZftE`}@r%VE zu97b4VqcC6d-DP^w++?(exb~y={roH677BYSMxi@;OJP4334B&sDwH$CCpVtZc9(! zAccGe%?`8DStr!9N$!9=J;YFCfA01_8P^8UC*$^WqzbbjBh!-{6S%w35&QRp(}A6i zGxRT!jImap=HOy!H9ivH_H*n)R}uXKAn*-;t)vP&G@}WXdpox9xIUwq%AMu5q4-I~ zS>g$aL`9Kj3UKW@G>OnEe=rC{f7PeUG2cHJ%CmX~bE_)tS^TO1Ekm(z#Ek;UN=|Kv zqms355#(9IqpAmw#NPza!X~2ODggeF|L2I;mEyoZBEZ#|h0>YbIc~1{6P)(tk@&le ztf>+b7U4&vS<)RI+8=|RjjON5|-r947u^biq{Kh5f}ayc(Qpk z&z)nhM1bR#f_PIS)br4UsBp^|Fg!!jVl=lO;?EXghGaQrR$L>*!wV%34`N`k-15*I zt3e`UCRAf9VIFem>v?NK>!?xp!GQWTjjL^oq{Ao-yW1Eg-SI7kG>p0sJ#+q}c0Jnr;kI*7X_!EI?5F`2{vb7%p?KW^} z4iy|hkd6W}Upf^N0UI@|)=5H1$m-LE8Bsgm3xZ&@0%RhgC%eVy_rN>~n~=mY)fWJR zh{(I!97782vwCH&T?B3GmYYlTiE@owNQ(0zqvt$<*^WSD=;XV6zA%r(MI*#1>WjOD zxE)m{pgQK z#RO(N%D%$ z<))|bkNcU>Tg0zr&av4Fb=CTD$dK)IGeORx0f~LIXJ{gV;@0}dTv2GGLGA0?2+Kx* zUUQ5C1-P8FTce;taJx|`_2hB3CF-repX#hoOc7ZejS|>zPRqX}0s)3mEip#nF*JoGv43`)dsB4ro zn>sqxm${8{S5azZ5REqGhE0D)GT2wk)`!u4Ymu&3EAFn`Ac|>%=qyHJ^Q`*1IN;}b zk{0>y_Eh3NL9QqU1LDWWljr5g+zJs#D)H7YKwL?220s_1)^`f9O1Rhb?m(<~J}Etj z>IfP$7V8UerRo^_Z9GAIkRYwKDfr_V*%vHz=}JtTenI{PMk(CT%5lbpKs1za5|YZd zy~r8hg~|{x$g9FARSO>dCh6(5i-Bl^W;3g*v<+zc1-iPk>YH0y zW7;nP%neit%joUBgt#xH%R#0biq-;4uRY4Z@B?A6n*K^p32tyno)gLp={zqLGO;Z( zG7(^;E4favIaJXdJ5I4RmjY2O9`CTq;|2jvlRW|y{}BWid*mM*U6=U_I=V)$^2PlH zfTJ^a#O}YG6wIbMH4P2%vIuwRgq6A!-@gJdtH>||3X|+-pc00GKAu|pd6p{0pzRs# zp+7DbMAq}>FRY0#T$yK$+{ObWIsvY8bq==mES7UN4c}Iv`N4SV>Xa&i8Ju7kxg@iZ zExLweo;PjSka;0hQ6?U~7S!E>7>S(#S@bDkZdEH9UeEP^6bZ4?b8`!c_;q2l6vKp;9Xbu8(K|NaV+YVqt_(q|ugJ+up($qvu1%ni6N-vsEo`j%SS z&^8h3K~?K#XSNh#R?`?nvW`fQ8$|Tuv@b`yl6iQdH$r3ABfu=HK^T%-I=*RfzhJ7` z$o9aV+Hy0btA_ZZj$)xXLZI`~!vK|dSOB%s#kH;!&*EP~XktfZ!aMN)pfhb4@f5HZ zNS81Drv9M%^|&P^pZwvtH-i{}OdNz3{3UL20i)fVX$X>rs#%Dey@)ZXV_e^DK(4D+ zzu_Ki?FO9=;RffG?&w!Kx4e_P856fhDuSI-&qwJLb8-RA$=nA|6XXu4Lw5{|-YF0c zv67Qkxt%mLo^g&~R*HQ@XI<&5Gs^rSp>Ud;A6jxRx+722hUZiYpM$sIouF=0dV{SK zw_k&<0}W!=j$tKpkF@fz=U8lZ7XXb>giZVoqz}D3gJ5fG1;-cSvfS}&rvk#ehzPpv z@kc=(J8ZQQrm@Tym}5TCZ%+wx9TuT7L&}(ZcaF@^v)a77C&V0`!Qj%3v(p5*qQvds z__)S6K-R!^-DpCM5nTdVkeD5%?%44jD9ax3fcgIaF?R0ZRTbCYpOXL)^yCp$t6) zSkulzD4fB1U*kZ4EfX533V{}(@_UMFrOSy9CS9k&;ZT85*RQgZ%8fK{rhf{Up!iH-Z7ucf|3 z9j)``nDPjVYA>@)ojveb0;A0sWarX3VX2B=Y2j92>9f%V`}^9`EK6XeFIo0?!mQt6 zH%bluBEWiz6bOhS;qZLP>%dfu5rKnw{!Rsr=2yxjIO!h|a!RM3M2OenybHfWb9O#a z%4G}?e^Xm@G;*bE(7hBP_-lkEG)!E&&yrO4LL0|=bcYaY8+PTXF<_~$c+*fFk;ctQ z39IxsITpL$S-5T9zu;@iqVnI@K*^iN)|(Mt~$P6 z2xZWAjTA@I-z5y|a4JftbF%(U&jEooP42*m%|dAE|g;Q}Mz|?x{Em z%CD!G@KJ!G{tIKF3Dg906v{?Dd4#jy0j2D)f<&4M|9xcXu3acl4QV6+~$LBKmnB$pGfHZy2u0$zyi$X+Tc|&Ui1;gc^8U(x2VP@-a z(?~&H1!u1JI)z9f zpa1fqQD$yrWv?i94yH47v>Hh;`Tr3G12PLTSLy^DYMMT@w zK?n*Na4RG$Vvb7J`o|R2DuX(U@sUvjTVg;+t~=LRC;RORLx5b)9dSzi4+>f`VS|&d z`l6uLVFqU{j0s3n1HT59%zS~fSBDcY%>fZK4-SReLw zwRQ`zanrn#O1!`sVyOnHO*He1m8#XNfL6d;q5xW){*tKR*59ubjz$YD@QeJ442zMi z{S|DEURT(f2RV;BzBuq*<}dAN-yw8rADww{f)IEhKeM2~Z#_2k(L{z}*`|>B$3l&_ z!x4dMwr4*PW+j|mSv)IlfR(Ubn{S^+5$J(-4agw@t;}s6mD_brK?U+B4YQmg$nqM4 z#dj@QG~ZvCB|QdS@YM=iP*1pHr?fNA^bIHQ 
[GIT binary patch data omitted: base85-encoded contents of the patch's binary files]
zS#8y-&rp*6>lbYGv*Ub?b$&#j1F6uCvSd;F5mL!>QVsqhasTAfjaS$*pwx8%Lv`a% z`cECPFlwjKr$|+W>~!gBo((^q$nrsaT2<*v*h=7q z9@n6QNyF*eUM*J*OEgNn2{E~ho2rVn83F$X77UcO2&Z^%c*r`y08-cWo4P${2mf9~ zqmkz8Rtt+;;Z&oys}P*Q4H<)Vz`VWSx%e)Du^co)h z%dP(Bwz|CpDuDDJT-OHlKOU)8cNxU~`?c*MO@EU?~(qdG3V?(z`(Q>17DsdZCpD2|?#M~u;Jok;nkx(xR-WE`8h zlmZ??Sp2x|j{ICW;;(m9rTGXUvO8R(t}lAX=(GeN{$7{L6Tzm_aP+ zapt*TSoUyFPW^s~6bX#E7?b7yE9>~}gL_|P`LOKkI`#7s+X%{U_yBVkOk&+d@(dm+ z)t;W_XI+}j@X?~2a>W^d6z{?B820{g`C`2o!rR;v_I-GenQTRfnK&Z3{yOG1#3Wo} z44tQlgJ?Lj45`xS*TJw84G9## zgAc*7u@Bvj@D2y6A+MLu-UygnnIew^P1b0iz}oj}HZuk*^7_5Gy7&H@{p+uPA48If z3erxY9S@YrNDbX40^4el8Zp&>zt?hp^QM|Xije13`8h~gYdw{92TZr~9}9_hAU9=R zgi>@n;=a2Cr!a0>%TD)Yq^>&VUgIkWsp98rfr|jIqL8wlhoSesR>M=SW7kn|R#$t| z!XlL9IG0w7aRULXW6kmFCO`^`t$$?YiCnEh#;Z#R$-BKtN7kS8U;O!c32GTkfh<^R zMsD{%yJ4NRgECwDajE4FF!-l_R%ZYI|6dzCefB@8A{-iT`^oo`2tPR1avw-}Y{8u6 zz56LbM0kF#dt_n_DY_YBzn?*2F}|1&VbWWzt(4FEkE@oq&8A045y|_jtmTWqI^SHz z^Ee+LuDskG}!5Oc+_oPt8eQL?@5) zANp>qFk!aFqB~N*4XLYsmPl#?dN7eFk^s6*{E+EE~-(-Qo!PEs7}Kli$;=*MAFe${$2G?9T zE`SB*0ClMP=(5*&u&aSwa&-7`G+9$Csb1_Oj5TMAOE9i##i=a<8}_55psvWQQ)9lm zf|i?%?h(6&lDwSashf*5y-O!52L~GhK7($8c(lTUeXWpY)XRb2?ArM`? zUQ;9g=b#+hrl;nh|Niyv`pyko*1xZf4&r+O!S~)qZB=|@|NHRxg_;#^LQtLy&8cm3 zA}e`2G_VDfnUf;5crgH#6vh?x#YALVZ8>dgernH$Y!gm>`|5hz!9vgPoJ1Ecj zsRBEINNsMs8j_vC{B<)6*wz2O_TB0WcK6r-S(VrWD`0y=z~@6Cp6l8Qr5o-eYi+E& zjU;j#;^}in!LEhs9!DHY1e~5P=^q7gnq{bCKsYsql=yL&Ku(uG*o1IWvMfH+9unwd zyFy0nnZ)L%!ODdtn-^>61KyiFE!F~%DayX-+P-qDkiPOFYH~h1S&uYbf>TT8##euU za2cVhMGlHyfnsjn&|}((U3-?b53J`$h;g)6-{II|2{SrK^d0+Ufa~T(f>R> zrDop@tOlhnfVu^y@NELcFqTk==9L;Hf6{;3F7RIt)X207c^kxeExnWXN)T6q8jjw9 z!l!p8Q`7F|Ys0J7INSr}b7s~m#{GnK{krxea`xz_6J7fFCqtkfPSx*9WDH{d)0yOrcxdL3z9^3y&I zYZs=ks{OM*o*mY8pNQv4t8S00HTy3?rE3m38@{%#@|vY$;speyc<`6YC^iDoz`ORL zYGk_!MUmavSyf;&WGYY(5^d?D*16Q0`VXK;=}^1w?nOY=(x!F}rCSjsH2dzmHILs0 zMH=mOr&>E7M5?skVc3GHc>AiA*pXlU>?^EVF!lPuU;b)ydRI@aLpi+tXRD3djhfm{ zmlU!G#^03NtJrq{A(H(tko;PYH3EjY3ujNCZC9p_Ci>p?(A5|av)ShezVDAB5b)lS z5xuu?6sPuSUt8@wj+hKi)ZBIg3}5^SU1b~oJ&0%0BCilhF{K^T9v9a)|4 zznyAzVjp+u+us#D`-~&tFfv}%CCaGjDMt! z%N9GTGVU);Ul)!5IAPZ+*&;#;^if+Dx&hrrjkQljq9 zV@{tyNoaq&?Qj{!ypHuRmHT?_g51sr-NDNhh-~UA7>1so4#9A&F@v&RSoG~-n%2|o!sKQ&pk;VIN*W6Z(CVJYRQyXT2&_*&z?FQKjw zyh7e>>optxwHDnY2Y-ci1M(ToVWEvs*r?Sy?dv84eyKEniZeqsjSje!v*_j|Vl9!? 
zZvj$!hsv#H0E}QxPUoG<7g18hPS*Wf)h=yC!r9PIzh1HpE!lS^9Yjb?>+ioO*6pat z?w|bOE36+>1U$aPZ)7JF^ZKA&g51n&7a|E6T@u%BAUA*XCk|^^wt{D9IVs|QjJ ze$Ol(RF3IdYrm3sXI!_+*b|S}WLv?$1}bB>a<#(L^ScrquCp17`PS|KD(a2?x7X?b z{Y|hlnA)$Qid#M9y~&B$s`pDM%AclD_MgD?@UzDZ*!#$J^uJVgJCWthDO+t{fun$G zm3@5&K_T5u(w0!}f(pqY*1%mj%y^Wvjp6R2s7Gh2P1dtRQH1^Hzs}n8Yuzs|;m@E* zuifVQ5KLm_WKjLr=O|RdGnO140r;|W(|MDAfgr0BnXx^FBsTr->B*X!Jppmav4yGU zCnapB{hsB!@poG{^gPIqCq(@c3d8#*YXx}2zrN1m>&4IuU}RHHV6xeWkoc!sOS%b8 zO`E8v*s}$?84)jgYu36&7}GU^hX(S?nT}_sD_6W6T*y+fp63Z55`B zR1&O9c!z{jOuhv&ABLl$HxvnPFY;oRz-YSwOrq?bIqKH^?NB9=O) zQ=YY-fTVDuE+k(Tri1BoWqP+0PhB{8R)ezwY9Kyf(su+SEFJn^)W7GmbIrl;9?2`wWPNa#uw88az+rz2!{(rEszzZ&&(k z7-$KdtP1jiuq;=<-*y5v26a1|n}SI0*l+5=p{m%9RI+VAl8&4yutE&Lvceac*~_*|Aku_5ewx-2ci9hfq5CPu2p+FkIYLZRX`W(tpq#qDXACr_7$r8$V+x zsqyUzU#5*f5v<%Yp=^-&C@R#0K|U?`W@&O(7`_(iqjx^zmP> zx;EFtw`vG7-+#{EJ01T5Qdi0;$1zmri~TpN-#7EW6zKd>J6&K4`aKCR3Mc1kA*x%xfkI0zAYt}56ZLdWly3>AW}fGn zIn{hLmJ)e1gN*rx|t{{o(Z|iva7m%7=#`8GwWZfRqeFB#GxTK`{x;%xYD~ZB&)r21Qx>Yqm28;_o|OXV!w$u=-=B zw?i0zYN%#XmOJxR>y7m7yFl!6k&@!>{HDZb>MpN6`OOxRVf9cz?Xt6HpctVh?c_*) z$~oClfMhu=@I$JJ$QVK=Sxc8jE=)Neyzy3>ZUd1^=NRvs<7g3Txj3?BSb;pSVZbe% zVxOrKbTeR*x89aIHHU<|@}QF0Jd}Dk{{eobzR;&|sIIfT*hg?NM|%kGQX*(v+i>YJ zh?|-jwK{tR#?AQ2@|Y3M1c>ucyHvPZhHFWHAP-l|c^wQ3BWBMvDp^EH0Y5N9W6*FF z@LdJGDV#d5D%N!J76Kh%hr*T;^TV1Ge3I~?inJ_D{k1o6TQEG;DqOkMlx?q|Cd*T^ z<%Dym$9(Uwa2Ljc<3#LOxN`h%Jz;!5V8(^iY#1P0C40*^Yfy5+YQ2QiS=j5QqGnu(dT_arQkA5Q#ki!`Z4*wgc8YMcEi6qd>~b=R;mv}4o9g|U!9 z@&q8+oaJNTpgjF{hG=G>tOk6*!rJR1bpT*4kyOM{NzX%)8{IoL%)Wu-Tjzrrcz%4p z^}&|_C2Yfpwk&ozv1j)9e^cjL& z1glE4DtrS7Lv^bJ3x^}rTJF1*s75K$T>_$j@!8csMEC?DCT2gaKMuK!+8KrUm#IHP zx&76r#w>)gf>3q-7Z|%>xSRTr@a}>bn3?^sjuG6;hgnRl$=&@B6wc6P0gIm|ew7wl zgTfNO<9NteXZQ>?jhrEPd+qZf3K`5cU);}uh}E*ieDk_}grq*a{IUM}3*j(0T5ZN- zFjX7}PLj2$+>^wA=~sS+#V5X7_?lDLv;6c?J%RE(;qfy@1Yagv`KXs`!G?cpT@mlJ zeccz}vZ@sZvr#zwKX3~}Eii6EDgHUn6PaWQn_IK%BuU% z->cT@8kh>|YJN*@y7B8sNpbJYiCT$RLk~z?~)h?i4tqKU9-8`al;r2SpPGf=Mo4~n3}^)=ly)TjHmmM`o|>%-k?z7t zalf9n20!;ux)MyZjoteQ;W%ee@}AYFJ=MFA)p)cXXblzTR*im-c>_3iV-w zTF;YA`T0ra%OC^W>d7{|xUNAD!R6M07m$*V2?vF3gq7mG>g6{HcU8)BH)$X?qs54h zzr@ZVaBV`<3&I!Lv;vgt8j8~biib7TaWj($?;%5g6Ybj4IUrV6^{-unIp2S}`h7;XkWb$$pZbgaPlw7?{Sug3!f$K+zV^$A zk>Oa5{ar}{`e`;U-066kZ;p%97UGY}`}%VD*(s7fCLBK1WBDA1BLdsE zKP>y1K*5?OUcys=gjfH*XLNBo1LD8|p;|wj3mQ-7IS*-pCao+i8`@5*_NB@uGCCIuOUlx`gB!@>p+CvoJ$tL zl*yh}d)4L!N-|+&W$SRb7|$Cfw-Bm9JFV6%39s9d#iKH&>Q6pFR=x$nWx=#^6y|Pg zDc(j(Yh11{D^RM@Yvl#f4dL$epY2`u`M--K-?wTEcuzQNx1AYd?)xaR+(&Pp3Z{sr z`(N9z1}gr8E45Dg8IauC7{3hY$9i$p)GG5}8<$mWKSz_`;r%RKFbvm?Kwm(~dwjyJ z3e5H~N<_q^qHdIXl5c<66#Ho(+nP=rtv*9ZX5SvK(|%Q1o}($-OW*lrJ-KjFn_QTx zqmdi_oplisu|-cJd;y7)+$>_Vhm<$Mid+M)T6x<9hQ;%%R@Y^)8HErW{}X!)kn+@< zs{@ey^1fLO7GFf@Ce^hImSM`)ByhMrp}Q@>x+-}L=KBd7Qn|NtdkCtu!}>LE-GNXw zrjEhw6qfgeTEJlI`Q?1orQO1*cze&)lzLBMI?7w>7=jew#D{IsEkI#^OCJuj`Y63o zVP(-cE|-3pH$sZK*O=@mM5lv$7`I?5$(gF<$3cnAPRMvdIMSP}YXuq26iQU$fJ*T_ zgCN-hdN=}`gTnct>3POJ599Tn<_om|!~kriR6EJak+5;-AR8A=e!1>vFA%t-!F7>`fx64pOW1Y7ggl{8+1+AE~#pJQl z$EiDv$ny?Dc;e5=v%ie!E?R_0#F)4Thl%C{e82zXtMh!HqIJUSPUEhjP~-cnx0rhW ztBvq>;^JXXIWubvrYxT){^=T0JrYhy4(Cn4FZ$^3PENJ^b{?aoxvOW1>ejX9EcOIB zMXrlOZHX}Zo^*tbw`HkkeR8Mm*Vc8H&wGlut=op@mnh-E72?$}Z1{IuRrzI;#1{n9 zba6p%&w^G*w-Hrk7#=UTvrRx`XrqcuH^W^`K5H%^y)Au=cV=f;!~h&ApIx;b^&*)3 zf7KEY)on$Pd`=f_>wm2)ouoF1kb+z0h%B}vg@tcx+t@ygg>804OT+><1s# zG;kLbkq+fSMB`btXg6~Bdc9UY_kgJ|%@3g+_#5h@yjF*Whe4`;qoh9~Toy7p#X3iU zGWuWC8YK8FedkfZWbjT6@Q?Md`JW#TY|s9Y;{+(}y{{e676?bCw9q>KnM6L~(z9AU zo&(c-=ImTEf%(K?H2Z1~a{=sXOr1h0v0Oxw$9VH)?UOh$=}j%SMJdPS{u?X1dQvuW 
z1u5$Ka~Ikj30DymeAW(;(LEqVF2ov!{P5s!R-?EWV4p?VZU6+q@Y{j+n_%>3fnEi; z1*f>}ewy2w1C;2Z95?ky)b$CPtUjtS_j3QmSaUPiB%mZsZhcz%mBgN{apxW3q*fQC z)+?Es0<^AL&5}(uxc887SyR(mSGW&Kv91Dc7Xf_=M-USplEwBKLfCdRsLATHq(B4j z)*$pDh~py%#-JSYV(2r&1A->Q4kp9!nq ztKU0YQ_AO%vb4FVc~{*}|0NRU>*1W{`LkhbV}AAf@@%^XFCgH@$|2Ei1iGqHopJT2 zn~)+;&Z_#<&3%-2G^NH+>>Me)zFTeEK>x*&ymFqO-&wx^4ve%;dW)jOvkfI`F}dtc zBM;`I^IbjP4i=a17neH%4sxVm;Xt%;AeTSvLLjQ|4E`L;77p{nHi?)vobv70EH>jG z>c6+&S9k9WBc%VEgGL}3Uue#Uqd@8LH{?4e%+i1H73M9N@{iSG+%Yf-jFI1QC@jP$ zbs~`!tQpZ1C`nL9FD$Q*+SF^V|L)b*JARyp({_Dzu{zl;B$^I61HOwewqCxdyBEZk zuA@Dkei@Y0N7Vvd5sn-_80TSLws#dNYI)GzNq!AQQm>A%=>A75a_z#dMU<3zc-qY3 zMjydY=tnizyqO4-?cP784R)r-#?lHcp+$AwT{55K8)nxYLS7Ec?Mf^g4AZ6g_Bh8% zVm$cERwuPJcR-SVRd-7?cl-DbV(>$y&3i~BY$C42_kpl<*tj^44S$LjmfBO5Y&&0Y zFeSFn03tlL``6jDaJU)GQaRH`iU@15xVrpDD5O*`tJQ5=U-S|7p0Z4-c05K&?bvHA znw|I9Z4elK3e&;IKhMV?>VfnV|hvKODKiW&}xKguDju9*J+jprZb~|0WBGF zy*2*a2$P6aD9Z|)fRz5=+4kVZ=KQK^X?M2tzs}SHcMKgNX|4Ue690=mo@;B#TVWg? ziVOU-B`QTot;P}&pIJ|M}H2|$7~Hpb_;hE z%(&=-saopYljxe^{WWwR0wuW5a!ws7tFC_-N%3o)oUH?4!MhmEC3X}g0(oPyc@B&v z2`M@w;7w?HIEoZ0>}$>4F<7a8a@kG6%s;5EcEZp^kk3ETZcYW6t5rHPQ1Y^`R9AG& zA;iSS>lN{NP*oT`%;*Ip8Cr_@UPMaq+tNMPZ4CfgQk=8QLPnPnQo!?PtPY`EL6L{9 zZlx}LxEkmdw^saXeFAl>BXM0vP}aTcn|6!AL+t_&Ww_CQFdw&sn+VDA%?tCbO5H*s zaqY1hx&&mH*$(M-UD78>Wy~C+jymh*5mGq&Bi-BZxBE|fKRZK@6;upz)=Tep2bP;L z3iG`iVDeK<0kAGG#c%d`KVMUixr^d_3P%I&J?%9x@|^TeO0@?F>fdY0@Q4aCei?6XdX2M=2S$dh@iW8Gu-_ zFU>qdOD#Ovu55hXV{hn5{>vU`$9cm`>t*4sRzr~&5W>!@tz7`|N)&Ia#haiEGsh>& zA*hXmHuqobEBA*jAW>UD$Id|i+tGtZwG3dmd$kSAxAs53U(O`kz{u+G1#9m$o*eAI z$l>Abu+%5pIA=%yqpftj?}Ss!w7YG?XID?D!_75{+l`W{rr*bZRI&$^QSCMJ91;wV zD#Jh4G{9ALnsi12YTwf!AE5RN<<&TruvJM)OzdgydQEut(Sbn0=ut!^2of{TbL&tWGS zwFWTms$r{5ugjp~9d4t-D}WTgx;BYFz+`5Wz%`h_mwS?@9A?@y_%;&u zSMQWB{z{($mp;P)uxt064DkKk{(F9WCOAI1htx^co>mF*K8h4|i)HyhYKu)5Ph3Y8 zYp6-sVd0X(XZfb=QVRftad$A)3uGFBo^)<~AoK_+x-wWkt8!G!^hMIC!I+i+NG`*9 z#`*~YS+D*8mrsS$9=I$00w)v#e1_K5+?Cohd=4v#dwl*O!|%Xft)-U@Ki9`xETeb< z4l|Q>!m7zs_Kk?0_{@(LY*YW$_{oy*=01X1TMcuz^aQ@-)t3z*M7nE#Q-!ef{UQoO zdwa+atKcOOLy>kThM1eD0ml01zK!Nr_g=rjR+^m=WfI`9#cY!t4GKRZpMeav#U zueFjflt^(Vujn1^2_DO-T6+v3^#!g+*R>KzjOK9X&pVM%_S^nH(e|LG5Yz;_$Hoi7 zsR>%}_PCuP16oRRz~Y-R15E0F{(v?@Fu6Ij;HWwVF7_m2n{@D(kYwU$WbX!D{_G~$b+z?KJ>wfJwQ3#%K zYyJNpvhF9w%Ot%MJO>VF#7aoZYSAuQF*UR+YfN{uyX-FOrrRVWkW?j*CbO#Wm|l~R z37JJgrXiD}%G8SK!E0uOZ0un@c+8Gz52L|5^ayv;y-b_wL6fw^fkPs3;J^WmIB?*= zfddB)`+4H~%eQ)Vvq~bq&oADHC!UDsiT_XB03tlD5o?Nh6QQ?3OHfmsPUk_yTj^JK&E1COF?N_*+!2;X8+w`9Vfro#t7hI;C&unUggsau*J;*$5Ci*->X-*0&2qMm zAEpK$BBsorw`a2+!7@8J@q6Y8-EWJ3Rqg6Ye)C8Z!yqi>({1JN84$ZTHjT;W-FMC} zlj93mjqMs?2fCPhYFX+P7)`)#*E_uKzPFbq)*CpI`&>UQ!M7dZ*u36{LFfn^!j^h3 zTfbCQpH5B3*W+@e-l(ITk+d#w1)9>n_tgd#L6{(U+Aq#lL2-Uu&k?fNrZyRVt?trk z=gZr6!kT<*WU<=<##*RkSe}a32`l^Ver7>1QnHu=i!41o0c#D!pSI`UX*qi zDJj_`tfAB>LXTTZV8!hL6fM9(I$RtCWHinNtU@1x6uQq_*5iU{Un6Z-XtKjY?dr#L zJ|wx1B{SRo%4@M6b`Fln`pb*)*+4_vpVr9!92lO4d1EUs&!dEGF7ay_>jFYar;b-# z%-6E&uMy8BFq|3Q$@?-~^*_d8CS8Y;(stLn5UwIf>3Eg)ny{jOx$raKIv}0)bWnYH zBVT^I_ean3?qc1)K)g?|8vWC@CK<&R&5jVm+l;GJlN{6IhPRrcCH_D(nq9 z0b5TH=`DcQk9PbvMHdcp)w}p{**97i1nOIzKN$WN|DxSthhy_}C`YG`wQ<%;#3W+O zTw|$KU956QSvN;n`_+gM?~xfU>!xa*S==?qSmT8C5={p*^!LA_=z__ab-~GcIAtIC zqO={1rK`O){zBS-asz54vak0<<;L!_Q+4WpQx}1e_}ePcW)xZehRyCRf`rW`R_DAG zQt!Vya~weG)od+*4<-f$pr-VN_=b=o3MR2eZNmtXA2Es_6^y}L{7L!ctqH_|E{S^9 zn53wFmqaa2OY=~Y_)wQPE*PO$J%J}-Rr#CW@6*?J-x&Z`JDUxm4Yq$#`aSn`45JA# z&LZ@5gqf}rE9X#TP{(!JQSkF{N_?gstGbXB)V<}o>NzeVq)Y?7p&3JfMsS>!)naSY zq03z|$7^131+3=SZ0gkj`#XNQ1}M;cO-8N@Cv67@_&TZ8nj2{N+tEC7Yg9L3d3pc* zA2pZyRy1xQrBFM1OBA=eg!U}l<9o734|h6Rt%Vrr>5uQCrLad!z3zeXsztwMTJOVY 
z`+98Sm6LV%;sGLtzxs6}pohYW#R$;cM?exgUNeV9FkCJEbN1Hc=SgCF?wMVE3TS#< z4Oh)Q>%JYV>EH7XHZM*u@}Ul1HZOrRxKHC2{uM$pG8ksV7zGsNXs*$UL-PhJwc!|^3qeKoI zaMn<5BZ7*Vs1tCTgp>5RBOP?Ui#%Iy_-H;tgfLZ}2)E{UJ8zC6mA8w5?)M`F2f;Ak z?&fKSLy4dj-l=mq!-2}tq&5mx!?;oX{yW!i#wZljdk9x?uH?#;T7n>)aC;3SYj|@|7 zD7hLewC%EIi9Fv{5uO9Favu0RX;DDieUS)@Hy1C{a6mB_jau&0xLzTAz3xzFl&X7u zjhbv6S#FL;-=GlBG1uLyCwdEFv0U!!_W|1RW!2EK_rv>SnXTnOlG!$4^op_qMgI0D z+_z`#=o|SLJu5<>jE(ZB`?sB6R>FtVw((vpKn6+x&oXP*t=5B; zUA5vLRCav`)g_#E40MBVgq20SD)>e;Etyl?YDZ1qHYE)#@itQDGB+cX0^X~pzXg!s zY|YiT3ga~&lEpwGsz+vH5Qrw$3GSi%v~6Zag2O=C)`w$9YG5&n5Ph=MQ?IO0bfk!K zs&-1Geh{TJlpC7T&_mrfZWf(tgQRh!V)Q5F`DU_9z!b{n6rJxVwY^cRHM1xY_HKKz zv>s07Onl*kv!Ei3Wvq1$iugvHB*+78=erMvs-L<5hH1y+nq9t#k{og_<`RrW<5j}H z3`7oIY$?-vrTadodkpkZBAJ~_qoiv{Ns_xJsa)^y_{5hIxdF?fo;=sZ1L4RK+=U;Z z+{(|h?!(i1AROqUsswisRPW(hZo4b2_@?hwpZB_`3RW%kK0=aq$j&`WT93jYe1k!F znBecfPuB&*uhll+EyBrj*0T;O_6b6yzN1>$QxN}C?XBl$0a^5V-hH_Eg6oc z%s(^cHUM6Zn!+5NZXNFm1l-%2vevZ{h=?4NYG=Y%p`@&5+OQt17WPGZ+i?vPhE7k^ zlFwQ=iSHTvtZHXnM`&Ya{W?ODYg64mP(kZ8K@~0_u960^w+Poeq4K&s?jm2KhWO zAOA`D%zpty!uvV$u2J*LM8;F!clF;;+MVJCumo#9bE4K3LN2(yOI2-^)=`d>0lx?o?Ba6=+(|7uJ za$|tyfEuU+ay?L^!_C4G_-Ji5ZUK|i(b|?T)3ddUakK_F1E7K(J*(#yOt}X&%kq#* zo6(}7JF+f3j3Bdta*aAFoI0!B<5~hefRa&G<>n2ic9`@aYMA=0rmKfQ?NY_=g~p-e z;Phdw+f5?K;q%%pn(lry$~&Q<&LV_I=Ein@4e;mCQVS>Aqh)7d**&g@?M+b6f#I^b zyT#>s6gk-SquJfBmb06a`im&o+rOiKlrCI?dQE2H41rwk;%_s@tpHx}u3!V@((dng>y^VC+;+D8ZxmGPK{Pqk5GwfOJL zb=#ACJ9erxTeyV$k86NSvjdgn!x=d~2h;9s%UYZzmKR77WIu(Z-f%MSy*>8|K>@e# zGAtLA>*Ie?mf=nJVNPwoh113k8~3p7ZU5*Usxpq$t;S`4zlFzZuVT4yvKi*^4C5;h zFm92>*<9KTrpQAM+tz4iRlfeMk5$m>M6Rbh>N43HP-2GIoDddaErPl|8S$+PB7~~G z^-xki;m$xUj`gA7@#Ec$^9JGQgi=uG{50N{+wxQM)UQ+C49cVWJlt)8lB#Zw(-I1u zPshp=#DH+*ZXH7v`d|<#a<^kE`VcHROdi^*e}+hUteq_w1+oz8xm72@@`Mtt?&$mL zDa`IszJvML{Q)iI5YS_cwXNOL@%+q_3TaFNQCxc2TsYEhG&R2Azo6HGxT@EeEvU^w z5srsF^w(hc@}ka?2YYpQ4n0}c!Q@&OJ&%&YvhVN(Sn>Fulgz~~`gCn0OW;y|HWvgg zci)=#64aIam?!J60$80hi@Datvy82^U56v{W93?>D*8qjrJnwy*>D=+k(qXi;ubS4 z2bB5+Eb+s*f7$)SbAq2c5J8df|}YkcTkjaRSH}fzvkB{F8$njVj96DL zoQj`s_wK~roY-yc`{(pOvHRZtHq94a5_p}w8h}&Xy9}_1eK3(ZjH36#N&2WGmyPdX zBwS4$t8J~(#64ObWDW?IopYF+ZyFL0CP2hi}=qYlu+UCc5496}S5dG~o7PMK zkQ&@k$2P8msRt9OKcN2k`EebdWx#KO_?tgz`CG3ZL>fc;^y-jyIYBIU^400~4j_dG zt8yl49DOgKR(o{kKrg$i)%QMtrLU8zVe>;MWf_@jkMKRpw?~_2??pgj7EqnR1-|^h~YM_o2v&h=%vj^+f(+ypF1E1SGAes%-??9xUpP*2Xp`^7g_0{S+Ng zQ@`;(JrCU#7vdTKq;FJJwLJ($c&FX*=FCbZ4WTh8?|=UT{kyQjS&eNDh)2N`F8+fL zBx-Xda8OveytgEcLx5b)Gb!fA+r=(`=Ewz?L=w|alN^Tb!q z_f722LfHF{KmL{;TsSpV3&dp%&ZFSM3Ivt70HjwlZM9^Zh02Sly=Lb;uNu6BAjS5^ z@MYl$WbtY5o(Nly^B!Ji>lq}tdb_Wl z9e$YKE%dg{$wy#{vR$jHh37>SoG$#VI-(~4-psctxNu~7(p{uuJwW1xh+xqr8>auh|Tyn5m0K%y(fM{OVCV1Bg6pv;E?T)EfM#sTcP@0RN_`7Petrh#NKbvh%V zSrqK?l1-hvU!87geu2-TNQ*;?_Gace5JyKHXrs_T1diy-5O(GMZzJs&h%Lr172S9>2*L&mz&?RQIr{>N?upx zeRwm!6WMNS7If?uf|UBVe?+5&)x8mM8K*mtbj=qC@GcM`E&ec@PWMoHO{4jKyx+xW zYs+|ffKth^K>OG=!fAe@>=vx?kR7 zxZgs(!t-8VS8(2;#J<$z@l-o;s;mB1S_VH2wRy7~>={U>$1}EEftHfghRs~oUsrZS zz05djb#|)|!`vsj*40p?-QT*sHQlemS$p$2#FK%de(!JMBh^##y|Fr)(w3QQhs;L1G-eI#6!JhKsQZbNpGor|jGg`a zJ~p{K)WtS4Yub+xk<_@M4Blko+iNB1X&@Z%#-s7qTzVEQ(m(ipMxaZerld9MJ(~oE z+Z^FsB0B)lYygD$nUi0fXlKMP1mktrRJ}2CF|pwq-d*a@x4upDgLdrSzi+>;9Kv_4 zVryfyVR$u^IXh=o@Tz~lmM^?SUUvts2XU9wI&vtDCVo_J!l^^&^{IZ~VDDYH+n`!> z;FInPlcz05J>A{>u&4Gc?g>X$+ueWgD2d4T(V`4>9dhZy!GnBtx^9g>6b>UM>>A)B zIE-woUAILrZDz{#TdMs8p*MZ2KBLUX)9#ZK5B`GkgQ=f-MCCbzT@#Xjs3!-)qRRS3 zT>+;0Xvv}fSUW=X3L#B`3%1sIyhceWTsoRC7HnGh29;182V@3>umC_$mgOU2 z*|(QU=%rC5SPuJcwzl9_2q*KC7Vmw#5+UDyUMGlG<@Yv7Xd2avwk-KFj#ZJeXfG>st6R z!W}|Lh3xE|85~AQf+kN|$tbMI?X*hoiJ1dk6pkvebP$fD4WUXq4&@u!7%yj5JdUL0 
zaJN*3WU~9h?atDfX#}Mz7faQj&VpfL$H4AUmH<`=Hv!1$EMVBQ2mM^)wa2c?T%PZ~ zGgXq#1vs3|&btH13WPAA`V3DIE+J?!^Yz%k<%Cbx`;Au;=GKJz3r9$Lf%fdywfy?= z{vDKGSjlKinV}nyg1AJ|&IQ~AQxHl^k#FVa({l?qYG3en_pJ5^zxqG0aeG3o^pCBxNu;rC9G5Yq~k1sGkf6NkWT(ToM z9-%wI$#T^2s>`We^A!kK-cPPW(0d@ZCbQnQ zeKL#~*7sBkqWcKptiK%H9_ZlyYODvr2#3EY_z)~ReNFWo<4}0BPSZ}hO`=Gi?51n4W;;q<1E{&?97-}d$?FH@Pa;iNZ9nN8f)XE@tj*W+0e0`TnhV{hGwr$B zi*Rb&c!a!aG<&I|ZJ#;Wj3zUXMC=*gV+Vl9!{IM}>3q_|V2>%*q6g!UPYBi(qq?EPEFcjr&7pTs z0D@?%&Fx6{Qv^J1`*@defv~DK?kjWh97-E^RfqEmP3Hwta?e2EC7eRoG;VTx)qP~L z;aZ3+Un4D5Se-SkwBMjbn-(rG3=j@AKUMJ#NFEDg!g_2+DV3K59#sWl=Z5%=O-njRpjn2avS+{S($} zN$~)fLJYR+$_Mi^DeSMa0*5-dr-nb{UF==$u;(PK4$@1t1)6CTg_x^7-&tXW@D7a{ zoC8#dGtV_(I}6FyRxI<)P@aQRnQkAp^_BDaI2XPybU(M3mGFHrKWoB2qxnGec(878 zUj`#02C<&8BDO1N_!>J+DpwQcBbLq!V{0tS7T2NP5|)8*F7Vfti5uv#9Mki*aV!EL zWT-pt9BBEDs|I}wRe{TOB((<=apC86@K=%V1e5Hv0R!EKOCYB`TWwYVg=YuG&NCmn z())?wi+T>}0hGENo;c--q>mmVMQYm|bFGK=9--ji!Zf&tdJ@w2Y`z=}O zfqdn@P#J{5{OBsIO&lr@vLWN@e^@ZJ$jntM97T}k!oBMAg=JQW=_n=6O&o-K!A5Ckf}GE%X}3Z~tr!@P$*FrbGWuXO0rf6>mG5_p<-7CC}$U`DR%T zB|;gl<|Jtak`$*VbPg+pBXnnd)ygV3!u+TfPgaAeq{FzC+?p=_WP1u?Eu5-2qhi)U zQvQCqAzhzOv#(R#ejkFQ&CEC`pwD16BEu%?+ylI8>bHBq1vFY5u&<43wgBO7;fG|u z6%2Q7x>RKhAfzerBPw9TrWEuFUZHjEg7I^1V4La9-t*-rnpQR4xG zq&disrap=#hHO?Ur?L-~!rP>ffw9n-1oM7^sp0ccP9_)2*|88kT=>l46-D~Xe zWO5Nna&Gk080``i(d{}lQ~Ohw5hO})|9#C9uOxOI=%zDQdq4jB$L(zLwZKw>I#PKZ zOlDNfg1mNfqlDdw`ak+Kd0PJr@)>}+FN*o+!YSopoh2c^KuASxo3WYkvisd> zr5f_R>V6+7WBwW}gJ%tm+5>5J9&=zs-sa<}Gi?z^gm(x%TTn}5|AgXiS{h;1^*+^H zlS4vUj-L8C;R)F`&sc#JR_o#b-d7?dyZ-T7fLYajWB66;jcfr@GPK0j=9FttQoda^ z4PFbX)E&DRf?xSX<6RFb*{Ow_(&+<|mANE?w*ig`sXhCRP_H%DZbsXK+Jq+IZ~i)S zAV}Uj>QMC-;U=#-jJFjCcb0jZ6Q%)_aMxb#90ZdsyN#!z?&l+?O&5nji5b0V(W5{T zbLOR;Ks%71kCDFq9fN6iD^Suj&2GC9S5b)0J!u(S>%Mp5r4~Zjhy2d3 zkVbw3NPdQ{YX2sj7GPrS>I~7XSWxj70bG@6Bfs z_%%xEu$E28{0)K%vNBz32ydY@iT-vnFK}s+yzGBk$}HX)eZCwi>>c6suKLXt9p$jI z4>h-5i4wWZ@*wGa&8#kU6=HJQTSvoIcVBUs(@xc_K_RyJ8bhv4c3X6u15;}w z6t7IiS(KDF&Wq1=*q*lfIS=r~V|uC>SgQTtmt9N_q^UcI_3zRA*Med0XdcLWgAi-eJMcMGmkr-`2@KVm<~vYJ`?5y& z%l_wy zVR6<#-oS2cN3i=+I`qiIjXn;Atg-{wH<$n)}uz( z^h6VmyGU#k8g99UuVL+Gh}hfHxmw_sK$)Mc1>~(@B+wQO;zDNtQPoVHt@b+zq&!^J z)o^48fi#bn0EdNBr)O$CXtawvEt#J zNRTMo#f`}>vgKSry<)@5?fDe?$Ah zXxE;0LGc=l&kyS+#r3a$YvY6)aF{wh_eJy6brVI(u0j#+Eug0(+VSmQiEK@cXzt{9 z9+%pJKtAQ+RxRJ_BG)bGwgH%*kF?YK4}dD|A7wZBA=p}bJ45;iO8vK+j!Zy)c7vfT z?2~*;G&}1e#Zxftk@vhg*98oAo}ons#!CkM&tauHweUkV@gl&j0K$9Oech%mZDaEl z8i}1QZ-TG8-|1WJPdM-QrlZxhd1isazeS6v)|*bfgL?Vz8yRH+{+FdjyS+x5%LD5W zR9zrm0m`rMy>IK{g`*vD6x=2Xs}NH}?p2L;%eJjXi_PG=&j3m~Ymk!bY*}) z2~)HMX1Qv$e5+tYG)`axkV22u%JHCZn&EbDjGoX?hfpxbXUeoqBHRuq`ojOM*^LS- zXYMEC;Q$;B%}J}a8;pJsJsEz}zQG|Vne0A&YOGq$I12fkocZNn)ODCiFy^ilN#D#N z5cTh<2Et`@b73B}X9Rio&OyjIw1|IaYg%XFWVoy6JmDNlL_X)mUn+IJi*j^6Pn=!o zDD{?V-QU01edHu^eRK%{+uX{xrK`&zLB9X~`%HmwSQu%?Wv{|%P1SeS*(%C$E#F(2 z7oZ#DL1m&nfc+QlnaevT+G4Z=O+ zIXlr#xW4IP=l0cGIECC_$IRG;eEz$?+s7tsUMjFlTyBnQu*;E>;sDnIwL!lErJDN0 zT=^(w9=eZ4JRs9X3agORRSm#(wyUAkX5BTh{AJ#>CW$C{4z8`u7w|`FVjZMNQ|+Qo z7xUEAX-%yUP64)^@&dR?077c%jCotL>l?doxTZBE+yti(&TO}t(Pk7md~rg7wsf&~ zwS$LS;a=}*j#tjK9{*w4*nxa=to$zwCcJO|9)>{}N6i?wn+(GUOuKWs#50;-mCU%I zn$UrKI$Rh14hp9^nGlM6sEh2*Rqck4cZAvs#_S}5v`@5tbQ-9$96Xq?4ecsVT{f9T zX5c>AVH6-3Z5VF5{~a7F=dK~tLZn(+DiWjn&!ndr5&$=(fIaAI$L{XNKC& zORqUU@PktJ6y_n6j81>vJU%^2G8%Rptu2Dd_joJnlYFfKl=t$dL7$bQVg>+;F`PME zTVZY0;W^^cXszBKscmHK`~`CIUAWdRS-nKihTF~aSHdziFw2#o*Kk#gk)GE21|llO zY71a*!3fiQHz#S|AxW!w;av8=wkF1>3=WqAiObY_jMeyK1(MX-5-PP=8K`_F|KhK6CRIWs@k+cQ9rjswNo-+}yEH{vao(7uED)lFQv90CYc%H!=` zE*K%W?I!6-IMuLw>Eh#b_r=F`dSe#MP@!6{qv}m7=1{}%FwvZaDChu*pA$?5Y{#GI 
z_TdPjT}@Ti3n;Rz4JF2<`B}NxMba4BrpG0eQo@>4Q0vQqRf^gGxB@D_^(T$+Dv%uc zvtUfF!6}kADQ%Au(e*BhZ4gy^15VRvj)k+he6#y*q^?udveYf4s-k*#{C3i)>UH^p z8GzGXT=6L15O+Jmev^4$-$PIa`rD>C_XCFwK`ZSCU~k4*Quw)bI-EU3CD*1CRso?D zji(uTF2d2^{a;^(HW~=IS=V~yqvtIADKaTpHIUsiAoWn|1#R~LMd7~sN(cR-`_(;& zyolFPJS){E8m~}N>Zyz6 z2&J8MdTNDm+G#t;xe`ptodeKlRwe3@@;$d&IFhepF9>T8D78y77n4w;J~QfKk<8ej;1@42qb8ntdE@rq)zY_ z7L;%{A0F4n=LCte#;nzb&w?s&N2PgASQ7n1BRgn45C+e*jnfOTyoc%0#r#x#Mti{R z5=@$0B-h#Z%TP}%{!q7f&x}l!T|rIb%_B9G9z}@_{0yXO;CBrnGP4kp(c*O^DId!- z;WwaEyX)*@e7@Ai%FT|L=USVrZy_qlsfl{J@ivf>e8>kYwTHt>J*U4vlNt!|rRlq^8;%g`ZiV(Vc8S!ZD-i`;7sHKl;v1K&&yRy@slFMWPG>a;j{s z&S!1`BQbp$nQrZ{+YZJx10Cj6Mizq|KHN$_1V=0r&8?-9ID!>@89qRJxvLJhf>m$CZ6tU zD9@I!X%j}9t}QVHXeFd_w86D?wfq63Wcqnot3i-NhHLsWB%JbCOsK7vVT6=NJ8DM` zM-%;U>GA>Lur#ur`a@yq>no*)(B$P&<>oYWy!+tOLAnp6DR2~BtAp*?!)esWXz{PA zozKEy;dF0DaIT9%OiFMzF|r5S90Q+2Q6_UfqXc0U;KCV!UjR~v!&X9F!C(kF(&_m+ zA1%R4NWE%0S=&FCVaBGsJzWv30@WG=SzgUA>#6WH!SKloTigA+j)IH!Nsy+x!wIFQkd$lDv#n=7g9$Z0 zou30*4TF8;_~AMd^a3IIIC0K=7z8A~p`VOibwAsjF}9-bDA;IYpEn7gJu9KN!YS&m zdVup?2dC>&=d$lMhPmu0bFdu3(wA-Fa76%d`eh{$u~_XV+Ew}ScsuC18c=Hce3i(W zE-I4%uZ4T7X}{1!f1;7L<8$ls)p5f&39b(!tKa_(N)2J!h`Skp4d9YanLE>Jgq}Hb z4WZUeT93XdpGjn3-yRAMhH0+|Xp>uD#d8f?o8AgU)ttd!y5El`ABxu7RT)G`TW4bD z+Wo*GB!%5y_njy=gwcW8?O-ZK12HgBZyp=~Wtb~?2Z5dlo#rs%htneK($aYM<>?2; z0h6HG{H#vaPYZ|B#d1#m!$o7+S+q2d+RCa6NpmQY>va)lp-8N)b?M>r^Hg2sq|m?e zxyKHCegP%Tw2U+m!aGWnOlB)7K?qEKf9CCkp%l|-fvZA<2A;A8sTT8%_-MN}BhF3qT(0ZZ%3G{KYV794)8yGK`|C8z!`K+@T_cL${xR;$*G7Jf+U z^NR-+{;OrL6Fyay_eMBPD|a&A26JPP3D!GUDkr#FP@rYsTWU|{b{+$*m8<1wNx{`G zDX!@7Wb+EW67J2GSsRQ~Ke4ZDM>G>UEya1&jBQ?$y_aX}3+st0gbwVqk4l8gcp**8xv&B#3J;!K?MfMeO6MI`wcnHbbE1)x?sW%FOZ4*m;YR*0am%1D4()lvQe`eA#a{-plVp8{LDw5#?WMfW6m{SD@nH0c^y7*h)GaP`V)Owk9!uZ&2Ohog6UN={}ZmlcnLreYb zswf-s?YBSp7UdVlMa?%E-%U_VYoxMN6PuGjU01KM$(AJW)mP@cTZO~;XY=*2_&|pj z|3#fy8HAUn;-*1U?S9n|VoJC0k1f5B+V4)1D9!E-R{vd2W?te{*x zjWtzFo&(dYk55c^?SgupM@pm6r`1vZ3kW^=*EFK73SC4?V`#RZZV{M9T|$kp>uKpW zmu!l089ljJ&^K%(kSj=P+nq22ec`Zwdb0K+ufce<9M$%0uY+k;JN6nDKoPX@esjlp zv!giCf4CNTZlQ$ZugA@|(UQrLwsds|4o`K8pv`RVc2V3o$K$;uGj3(c#l_46@1vpPodQT2 zQzvS3?KzP2KPk)pqWi#!-lhKhWj@&1rbw><71>r;+wAW(Lby0QQ@7>bz~O?k!Pbi3 z<`V{IyA>Xie20)sKdqZk%eJ=CFZ@;QJS_)OFP~7f6;Kjuj&Sr%D^W5QcOT0of~6w> zNka>yTdRSj|52^IuK^|L7HSRn)_(nCHIsFLQw#sPEt0GUHR|skG9D02-rj*aRY!+6 zph&1rhGBbS_xq`d+$-FKl1A>tP95OhjG$im9;b?1x?de&EAy}wA#AQc`1>qE_j~OM z`h5@~jmg?bd0#X#KnhRoRb;6Sca%L7XQ#?x=qO4m#62?wJ^+WSqhnU?*@Q%xHEL`} z!w$iUrBpSR8HbXEZM0L>0Z*df(;(KS1G|t2lQoqrj}$CJ5?J_AJ4QYSMNi6Yy92}$ zJ&Q&V&SYWm9FTHvtLi!5!NavycL7vt>mQdivgSPVV!obe>*|+)6mMw0CKs2x&mALV z0>Cu+?M~LZ09so!SGzbfwfT7sWG1*3Ff^2Z=ywf+p@}f3n2`! 
zQg_V|Iy`P(W=sGpUd=wreBJGS-tN>R{qL|tXc)c^V`iW}eITqROu_5I^g~EOt^%}f z`B5M>`lNq|_JbH7vG+sufiSt7imCLc2-W!Y8;%VIaj6V>5nTCox_jM#vhW*k-_q$u`9op{3A>LfB& z4pP?$ryyP_!p2${i`9Wp`$YV_4F&RpA(yt%mmj|W!3XqRIQ@~kqMbS1h`Ll9`<1@g zgc8o`ia7ynMhIt4l^<)@^tU9A6MMC0M2)s0RKsgiya9m3%70{;mcj1(I#ARqX(-=Y zIiZKuvkxPX*IcSn`Cfsto)#z12sW&rCOXjA5^>H z!i8efK-LqhE@{*Mfjz0QamQW7eUr4xIe6`!u?7e#XsmYb7z0SvovDsx z*|sK^6YV_oav(z6?sYFV0PeK|MbTIJU?p0$BX*?pzx(2twQkA}S39m{@HGi9+>p{* zVdAg;i{b+*{3i}C)d*lcii~y1ZXhZ^U!rTC^4I`KvkksBcAuKba4tvjQKDCM^mnvw zfNaj!c7e5)Edle#>WS5@0Os*p&3*tcc5} zon?{ay)MFzc9-gY@V<8bj)8grN5JN5Q>8Tw0b1DLl8PDVqA)@Zv(z+y5iR2TkQd+_ zQe+BHl?FGvhFAkAywvdsmH7;TbepMM_*r>geVz}@LTX_4B8W@(I(+srs5cmW1(gl^ zgR)_-fiU=4$>j~GR;KD=$=mLy{(7|U9jLYL>Zk47m)NnkT`h1qLQ*+gox}=IDg)K* zRwiuGl$r}mVqEiBEtnGRbVvk`YY^y2KR-${Q{(%MFPq6(i=1ZErfMRtOCrZ9!ul=| z>ytIk?#pi@M7BY&RLH={06*aP*q1ok)O|IopYYXY1hVcQZ#QSRKxrCQ$es5VVJnj2 zEuDNH03{uZJot6=c{`?6qeDp2v-eyphQm-Aw?{nvItnYfb&A?6xJ}s+QpBRwGWS6g z`Hd)YdAd3T5?i^~9v75#Sr)nuq>XG>DbrArF=@6QN8?dq)awRgy}lv)xqLKp;?%;w z(z`>ch6%%KKc7QTbsv1kwCTLC%uE*jZ~;g$a$e~ojHzGyHHw{YzN(7}m-CI~Ij!(Y z_s!`xCA-?;vGGdtS|WS57q{1esL#CF^s%s0Y^4@se^wNAbJz?7yv8S?-oSsX2=dk?{6F_dG4 zj}S;^R+-wYd=ajecepI*lP)&jNVU!T6hWmq;9CRMXHW{Q=4+q$5=}7x{Rtvc3z9G${ALQu+2z zcnFnpBNWG4qu47Tvij^9Ll3j(+>th^&t+iH~j;5h(+fBz4>^DDJD@aI8 zxhU&^l=!sq0KJA&MSJ4LhGrur<=O}3KS@<>Kuf81YY7{n*wH$VU_#dpo^C>wbRB(d z{oCez!$8*p$`+`ndX~%Uv^`-}xG$b|6_Y6P@rs=DEEIC+1* zaDx$R2YtIZzx<2OneXW?j)v9l=CTNA%40TOx1fH?8X!by_DPO#K9@m2qG6&o4OGxM zgfy@I>f6tQD$f}!b$$T|FE(Ar`EkeJMYPDZ4&+n|yo93Jw4U{HKBofMyb>^WtkmHu zkQ&`Zn%AI6f0746$zDeY8>j0a-VHF!yS`iouFa5cqRPqif^`rkH)rLHhm_+s9KLq3 zP_=G)2PJu&$bHSbT@)*^KP{tk4+T>TKRUATBlZF6wbhwA!tnr3Tm5`u+VDr34?}v~ z+qi6d{YY8)TxuBnkotPD`}**FbCt(pASvYO-jdr>uxVc#q&$NZ?w5Zqo9BthJhw%K z7x~Gtd>y`U>SS#EOwH6a!dHo2ZLLJ{8dQg6_SuIn{;v%7(Na*}BiOxnu!2^fwX_?z zY{ya*nl6jwFxEA#W~5etvf#2o16bL`oor`WRt1;e;dZKWHLPSOs!6R84*TUn+K^2< zS&J4q*PY(B7qkv3wP_^P49R*F418h+T3t#Xm?C^SHf<`g0q%8(htE`BT{^oF5tnW+ zYJi&nTv#AAED)Asj(vVIRX3rxB<^pwLHbr8+3#SJ>PXQ5O87pbPs_w%5UpoIEYc|4 z5JKAcL-GKUA}!pslTh{nOcr&hCRXPVTC&)2?2B2+jOQEr zdt}bsW)cjepBQ?#g+G+=FlZrZHqospHU*jkC8rMSb!A54ECQiS*Ch9xa8IOd$Lf5( zmDO+^-@5=3W<9^e3Ut4ntapbl6MTs&kmD#WrKOf|R74G?muXUVLVWEdYpN zX|R4hzdioLdI#tRkn**Y?<(RZ3XUD|WCMgH`^i#6yA4YT$88pQCs7a8*3eyHVlij0 zp~F2WEOH=FyLglziTU5Vo8AjX=pM(eaos}%Mdus(*A4W6w6xy6`f6#5h>>~?UY|gC z+`fIUK|y|T|D)F3pFt6X9-tQbU>rNZ+}0#s4fU-MrhK1TFoOW>3xVd_WO&w)v{dhA<@|s)t5jM8u`{Q+XVXH8fzV`u5 z?xMN5EZSg)-4G+#AvjEZJhF`zgVDt5?VtJ7gvyN;lC21Y5opoX+?v#iWbK&g7C z6>cTgx8Kv*3xcBbHT9mQLRcm>)_{=$8#`}GV?2= z_X0sO5C5W$2)^thkCj`%R~>YRg{FgP1C~B(vqzLSiDCt~4Q=0oJvFl^Nzc0!4%Zs| zvR$o&AMY5U=)y65HNkH?eJhaib)9C_pTm*gr_AM-5?GZ4+Lf)<#z+4q;)QcQq@4pW6UJERGAvn3P-V`qK&b>5G^+7kMTpegLHG7T@p`RGqFxW9 z(y-jwmv&nGM)&iXHnX_d;l&#zggdHCv^2ZDrC)cu1a>vQ z3-@5!syn>uq9{=&DP%YOBl7! 
zz5+2HeJVrsxH2Cdn>cJBPVvE|rr&emwi-zRj`A>0`++o3rx^`x>3@Ec42!RssAEod4$m1OpUAU(zZEyboED^H;Wjz>B z2Wlufpt;mx=IZ3eyAKbK)w4d6`MHJ*hcPw{V$;e@ZBfj2k+~w(uIAtfK!bF}f~KQj z@BQ`E{8xW;ABd`>$l8L6u`E>&B;S$WBd?2~to&Lw<`Sd;)-!#086ei#+C{sPa2w-Z z%_qn0lxb(zATqFKI)9=j{MUn}Z@$Ky+<;SqOufL?saTEPFm|8gfMO(@1em=8sgJy$aU|^zmeBxmjW3JreJOZnr zH3eN1jv_nqG3D$sllG*ewHq$2r+$*`-FfSmvd{oWh zrEs;jFUvE@t3;~@ClvKHq%>_rSDC*FCQn_mx_;Z?d3Hl@4omiU&3l*a?lh|IT{YL} zI(5{rI(5=Ot@2-ilv3)nW-Y|aXeC;Nestn+z3RCNA=Nb3j^uz!Q=Z_JW=(#ix%Lt% zc0Mkb6S*w8uKRfJ*cn<4Cc`>1VOvEKeP|MFb9kLQ6fUWcyWP0l*nKq9+#yzvxd|y0>eo^z4tsoIijZ>xpVVPPCsY|JYP}tIm)_qgG%@A60*w!|z zhT(`|M{Vqng7|Fas@?mtkpFcmwYYDQjrOB8Q&64cAwZ^Pd1Xb%p=5B3OC8D|Z0s=6 z&aF+uJzMNvX>Twxi%7+X_tAgB=!>NySFY$kN>6s}04O%kA>sNM>6Q=r^I-Dy3%xz*X4%N;yX2QaSWGdD?f^ulQ`)rO>V4WSzKsdio$2umkt z=Fd#j$;BJ_c&1Kv+!PK!ng=U!E8muhp#A|Tcyy%t0WZEfs z%5SE`Dmmtu5#Z)@GztWn1B@rJvRPh*G}+ zgj9WPcUqWqwtg@lsj3sral#N>o%(O<)Ds*Urr(0fB35$*bO2OXUOwGUXw`8LCGCEf zGfOx>gpkH(0m+y)ERLgwgF47j!)}_6mNM4Ko4P4LB-4ngB#u?-*esm%y}nT6q&Wn9 ze(Qgy=z^*Ra|cay&vjqU%V`%utFK2;5#uu@ql-YQjV$e@>+QSmEl$IiyBKXt z%4F|KN8#HdJ~>drofBKN)N&0Wu0YG&)`z;kA6t0%V`G6t=IhiJX<3CE2oaUT^Twgx zOh&zl8qWA*X&zv1p(tma5@Drp2PyTQZ43gWP!lK1W940d@aou3v;6nq%*&lq=$#o* zMfZ_a>gTv9rF{TarMi+e&%H0QR?mJWK8g8Wvnlkl51z z>+g)xGa&k3&xahZGab*7D(TARg)lz!nXQ$+1S3ovwKaHpg&<*@A{xSL0GBm8IOQy| z(!N1SQ`y-(qrFXx)>dtfv9a$^@NNoFr}qfQZRz5u zU@UFzu>A^s$^firPGXSxAb`6$4mqkl4}nQh__O&DpI|!9&0LDGNUiC;7!|bu_k&)nqd2SvUe5srA&k zz_PwIsW}T0&JK3$oM39hJ2Pz;_B=}R;|Wy9SXy`iEn3)mZil>T(0vg#tPf8(i@`h~ zDCN3ZsR|3AAD06icNCvH;wl}`6bu_mgby~HmejSaR7XF&^Ery_r@o7B_ zx4o8kZla}>N9so7El`#wE%Lt&;ANzA?2a%-t8K;1+zo*m4Aj$oh6V_|l_wrFiXp;^PSgZ6?IL3L+lb+8{i!#U>%T-uZ_~PrL`VWot`O2)_sXxmu3tSfHbL{ z%wC;kVgb-nPhVIosdcDL9c85FS$}i)T|4`osQ_AUp?1cKvmsbhfvw%gPWpHbn@#8_ zbqBECs~SWhAj3ROY6#Hi+BBgp=L{pHNGIHWwxCyUR*fR29?c!x-XgUFNu?Z#Fb};{ zIJQv{CL4zkQY`N@w#o50N(#?~Wbet$Ni>yNj=*Fx4W&7_KvlZi=Gn8Tq&an3)0h)Z zo1=XU1gOn@dp!e@U+RUKs?zga#66#EXBfa<+guB*>8Q!A_(Gta!xWgg~R_yEr;LAPw#*K zTMU74SR9`(TXqMI0BeX_li<7E$0zFn-Fslk?sE^TF$IuZW@~x*fw20mK7$62~on@Bi~h_5F~>-gAgkjb}At8K62$e24afX$fVd z*FvfL$sEO7hmclVo_*RH{ZFk6bkXX4YxUU|LP;u%!yCHqy8BiNzY#Tsf85p%nFADJ z8)*mok_hAHW$K#vY(Yo`9jlq>RuHR}ZjZ=v0E)($iFmBM1{{NEBsI`h9ETFUUJTHL zhJ)JTBaMejw|~)eIsd+}R|dY@h7jP1OUpQ>RMfELiP(RvFF-OWmNa%6uLQcS94kqIUsK1=UG= z?eHQ(GIOrUk%iK(OKA9Q?2`IrP+`x==t}pwK4M(@R}tj4opl!T8bp5aFmydXVfpM9M{PQ^f$jiO zc&rJ zvQNf|^L&@UzPdDb0gUuGCHm>(cJ=Kd8YY{AkxRlDODnjX9~OQ@K3DQX`Dvr*KrfXa z8xH+#9WS|t7UQlDvz2MNL)osQrZxxW>-6FcI5l;|<1j4L%`T}u^(dZgVx2u&N;z&Y zSf{IQcc1NbJ5r6`>7uB>=BM&5O5{IYudL*x@;$`JViyZ=KdIyjWVM?I2ueQv1%-Yn z9G%w=8H9X!dfsvYg8&hWK_2#=04W1=RQvHy6W1)IOvy7SMRwRu^ylz@E|{9fzv!sO z2=3iDVM{GuqK1j_V&WCJR9oum7mqI(51?UX*vbWK05El19LRl}PpwcKt(OAcfl5A- zj(*v`#&V@$A<(&wG~|kATegRC?}0RNTE)8ua^bLPaNQtY264_9hE#b z3HKTXzvA`bW|Sq}=8>o7m0OUoQVV{?`PM+{&`!1tB#GWtz^3_KgzdHEHPpdh-jl&F zn93foGK?^afFDoC>h6V?__3Vl*Ms@>y$`-m;e|0$Yu`0}7zdSctQ~}z1d^37b+T}4x(g`2f(N96TDCpn(vL;Iqyblpd;%%Avka}= zPXqPNZ3E#M7=@30c%Gk?#1VFy4pzPY@&{6S36OCA-ci~uERUJ#g`dl-l!z-<*>}!{_qN65gi^=w=27dx)GvT z1*^I88>3K)zZxOMfA4+D4Y4x6ZoPS}Ag=mrzcc;bIuv3xY-^^ac2w3Q$#?8aytbNb+R%d>fwXKypUb6`yayZP>5DkYT2Wxe0)nf}vnnsJ%pnd7w+dfr;MY z?Ljm`?fc-+x2Su7&PY{^cd-Y}AH4b?y-BpR%G}9mOiUvx?)IhIX|vsjZq&_t6kXPH zXc*O?4wr>G3#R;@Qp_a!Tw*+SwX^!-^MUm;R_(H02z1$Tt|p%s1GOs00S)lM)b*6n z-epLU>S6&Kke{rzl>T0YQWF|U+f}`W5;6DZ>fQBxfYUKPFm6EFfiWfJ7p`Wq;8{8n#Q*A7fu2YHluST|X!8%m2IOm_A61R*KgcB8RR;Rx2mYQ+AhEuE~s<4LaZ%|U{hbKI0Ul!tR(r8yp zB>4`M{I=Q*UG`foJXvWi7nT*Sybi0U2gBzH=T5RGvl2~77XEEpuU-Xd0%peQZ8eZ8 z{w=YQhs-x4VV0{N-){kw!?>#^Fa?Kar&QVV!Ng2&%2Va)x$E7>jO1<<8w@IJL)#sxCi! 
zb5{~e0;lKeu+CLD3gkSrI2;+&V)QlSsw*eHi?{1wPa^GTqte~zNX`Z3z|2h~+;CaT z9&`zq>aqr4)axT1=baAKo$0$!63<;3U*AJgH^XCf8T39t62JMCu|Vh}txg^SOA@s0 zWdwjA|0Cm!%VJ_4cNIdH52Q3>pOf0t!1A9^W0z;3^oMuSaX~4}*GDgeqsIHKG}H19 z8}!NTRX*FRJg)_@Yl<%3n}84eQEMQTz+22@JF+FpyF|(Q;IjQo#kWf{+s<4rM@q@+ zmYEDzAf)wg8#`SipsJ&lh-9kLYDBcE`-VS{`BHX-UN2b=J0_0#WK9>zMVcD9uSHPc z_R#M-;hx=UyDsbD7zfS=e^xK?^mUX8Exd-v8}j{VT`<@vObVuopBkC=ay?H^hk7EC>NZg^RhIn-3!T6&!**U-Pmw9#dI$9VV zaYaSzxq%SD8sVtcn{br6F5TSfejS-8ZM+RvRo3o{S>zosBAi?J>*>0Jbho1%nXy+) zsP|BoIvj^(YUf%)_ff+KZ$2m0&;x{y4-M}joC-FMH9OI+A0hRGRzrfev%H8F4ko{t zovCHyCn#F%FaPPhX806Hs@}q;|Igs0>XblDl$i&lh<$kfJ{BU-l&_sLdfEN@W%Jbu zN>AS~w(ytg@pUkhs@@grO^2OBU?Shb+N4IvKnRCbZM^xdS~jvI>;Aol0Z@L|+?p%& z72WS6dzc0&jP}=TbyfG{FV)2A?#JJ$dpT=BY1!FbIy|v96r8$k+dN>e>pt00a$66k zM%wWLh5;cJ*DmPR83XNQ1FH66B30G65mLVIzV{yMARJkHr`~UyVJ5h)P;C*E_6Toz zSzOqPppy3NsA<*!kb1Q?@sr2R3;iHkHMGA0%{hj^r2S!=X%6RSrD1|~`+?1CvJupQ z?pJTg==#A_`qasz(>hJ|0ZIOr_q3~VK;lYd+Nz=^;Ygxh=Vnc&5yD{2lv9FPwA99z zr8aX(14D+ATK!p=n7kEMDV_`Z($b~m&gavGznW`{{ujWocl@OF+-j2-kz!-K>0BLn zZFgKk#B;y4!w@8mcHNVFu7G5Hlq{|awkr8H3y@g2ay40x4@zNduD0%P1i0dHmevEw zux<<(u9T2&A!Zn%x#wAr>XdFHE4dLuJNIx0l;4>;1$|dIP5898YG;_Fdx?w#r(vjl z7oqg8MDqY8rQgZZz*LF*AySxW_ijm~l)w9O&)CU!++Yz!X}D{lh)=qo>(Q37J5Nzk zX|&Z8vKsF*wA8G%)LNx{j!@0ay^(5WFF;Ag!P`q9ZEh#WbjAkx@krY&f8G7)Y(yzO zoRmNGa0`ClCbk6$;9X*q>#3$1%SIb-yH9$!YsnZaM=MiNchXk~M<}{BrcM$oJ5pW* zVq8`sg+VLF@M>6L>as(9u?9?KI5ycXkgZJwzNL*OO$9)W=0m~t`Gp+rKE6oxyH9tO zb2@dh0fl4^x64Tz6T2FFal0wMO*qU#>j4el!QWcv*%Bn?ha|W)-z|+720$g=v7HSV zOhi6)%BjW>q-57Pwf6dC}pQmO$863WF<*s z&Mt4S-yQc$#ApgZOD2O`tEr{!>F#%1VKpp(Q`MEUMl_cQN}L6kvvBmLrcM?BGNL($ z9E-8DTI~6Jg<<=wy7mijSnhXS?xqU7h?1_jpZUMkeeS9r0|15#Z`#$tjw=YHxwmGQ zSGym#x$A6-c?}_FG{5%0P;<@e=y<9_o7KbKNE+Y#CUbsMIAW>0j;uj`TllwxeLJ7w zeXib$yc1ZS#wwM&U;kPUMc&JY+lTt;zpx_Jt#T0`KvDEInK3lbnjRvi?e|YloYng2 zen*PCPoFL#r3qM@ua|w$o}ekrNd}|L*HbWMHm}a1$6wT`n`a%pPOP8ku97`R*TnfI zk>v}3ZhY^MIf7s${pr;4+Le8Upb#hR#u<{ohL&pEs+1#TZ3F8Ka>O@iF@Qbj@R!Z8 z_&Znz<1F^Fj~n0Aye;=E2UG3;V4f*}lDkoN^>r&ln5d(jZSKA*pR0`i!$#Yy!6j3! z%dV9ywg53{zW2YGAS4N2jKmGjx{hECu;x4K5zn zGmh6RHvwVSmA2B$%?O(1!jEX>7U7;rwri8LwxY?&yFu;7+yF>y!`{hhc6|`8#81{% z*-(DO_wUWuXlocwQ*m}B+gYO>sTMNYt^5PsS6uJ;>L45zKXHiCxu!!1Ve9MDBI9Vi z`-X@0Q)im3Aec$?2=bRMElfkW^@N(b&}wGk=&+Zn=j)}CITXb+isxk0upl2eTfk^h z+H?82YT^supYOh(;|sV3kQY$GEUQ^#{EG-Z8D?u5Q3lZURtDg-m;ZF}PuT~s z$7C%4H>+?bQ6B#zos7xJU63HB$J)}%Jt#SRVjeu9uJ7k}lIi;mmIBfYPT6E(A-c#8 zNo(Q^1WKedyYSbwn7N41Q_7y}l%|wVx_HMY+6K#01biFSwdKrbVE9&Hdr8LmJ{MH;s1lvZ{zYVKES{HsvH*-pbB?S3_a+-K(O zEwKntB-Hvt&Sjc})*>fe_jR?ub#NvD+F#x6n>teF0X5aVx2~)8fxX`4|1yNnEgikf z2K4HZKGuD06z(q;3a*v{_?F zdLpesBpESYjD)aMP7%_uVDdCbs|!)12q|4WydQv*R5`EHf3pr<6a#tq5G?7FHTxSE zPI@DAmhM@DE;4^o@HC8Vji*hIX9Jz{wjawDfRY;hoLtWer={tT%WKPz83VK={|QwT z>3kQdZM7b)b9NWd${^0yS-p#ZDz$c1#}qC>a$mS%H4^8SLD|{rV*~P&#`R@QC9Z-g zcMhBr%{8QC&Xm;x`1O2#vc~l{gnM)NT68i_FR9-|m0zt~RqMV5CQ%-#r)(Ru14v2q z7}w4|xr0y@Rc7~Y_hawb+TA>I<$U+f{&^ z*z|aL6!{3yVcUD1-8Td6S(*j!&S&qE#i+$mDZ>j9)mvM>A=yQcWZ$nll8c39x%rsm zv6`osz+pc-oRX7E5mNGYK>fGG0E#v|I5uuZq8S8|j%~wl1=y2^Um*|M^Jyy+rF7qt zDyu-He`S^7E*KW~Ojo{h_xBBa)T-;D(xqR`$I8wXBIN)~m$BK*AVithAt`fps$CQIt9fM1FATiogvNeha;YhpMyezG}DKf)}ra1eW z<>_`di&H(po@Msjkf7>X{vlpZ+wWey8>65S|4$#!56Ok@+%!D?Z zi1M1+UAq#;;GWRbo$vNeXGsBcEh65bI(Rw}SO$tEBTjayaJZsHPr-_5frl@rf%J^c zGB^{2WpxI0HWB$)_tm-lL@L$;eCMICZ?jc?ZB<=BNylMIS}9-b2z%mj@Df5=ZqvML zqRU8fAK1KMXtSZ;eZdf;tg@~mV9dHt8To?6vv~)i*Yl;jyfu~GfE1@W;f#)W6GbI$ zWhw0~Ap9As5qcYx7IrZkuW=9e1Z(f2ld+FxjBt%$RnWzsY;xf~EIA{qJ%RNAR0&y? 
zNPE~tE`Ni@IjkV7UjDV8G{EzvoTsOVvnTlyyQ+&mP0YGrXoLgGSQloSn(++AyjpFy z$&x0yV0%mX4aZJKIG{{N$9C;6?R!xmYdcr+jPgYNvOhx{h=9^A4iVbQe<@;kVumz? zmcc4@D3{BZL%4CImN_eg)6jN*>jLpg1X=Hyjjy;>iTl>zu;HDbM)y%TFmeqj6MKVW z?M3BR;dH*aQ${wVwTHGD(Si{Rbtp!GXe!Nm)HEZPqK0%}a}#Hb^@C}}zTSS6F^whz z7p&_h%wYHZ);ez(29?H5bf!E?w>?;z1^4w(uf;3T4`JZ zrTXjK)aWnQA%(32G`f{>oiAn7ha#!k|Ww5aZ$6>AsV=>wf+d zpD>nz2rJuc49YI03QX`bhuZ~dIoCOcS?#b_-xDj?Sr+is~!tswbD={ zO0o!QeXum>#res0ec4Pbfx_0;YU6QfezfJ@6nAJ|B_Y}Q*2TE$=2#bE<^z<@M~cL{B?ZCql!4?9WB}|c+mY5mb|wkDX#QJ9!BlE`YPu=r<0$f;Dkrs*UF6;M zRKzrx^w@orbd}-k^8^k}wVkJ9D2B}EKW?B4_LQJ{fVQsM8mK2wGu~cWX~08DT#s%W z?10Qyy;#Tn!eNR{ESx+8_c}te_{(>PC(DoEIY5&8OjUa2`R+^FrVeod?y0-ED9|+a zi>Rr3AJgq}_H+qF$u~>la*`N2yjz{%E5X>d+1k%whpWTR)-vQ;540_?>p+URvBumD zP>uH0D&nSaa>R({nC=!z>ezp{ru*BlWa|+tL%sWv#Rc|%7|d78+_!7VaS!e_Z*A{1 zlid4Bz?qiR9sptV5v6hoAosb8O(YIjPXfJ)sSK6onZj*Y7C z=Kufj0rk&=1|RvM*Nz`E*pq^2Z`U-x;B)hZi_iO-M;4-mHN)?jUKa&=D!T1;E(X(^ zdq-@>e)oYQ8~b2HBNxV{$1Up!qb8af`Uv5t^{O@DibS|uPCu^Q`CC&uLU?BX`r!L> z-^0gM-A5ib!>H9=oagtz*PyAZ(OMJOt56CubF`Ueu0_ypLt8$#*^mcPO?OKS_dq`2 zNE%}utdy8?LH$q)Y;^pQ(GG_RV|&Qq(w+v9w8hQerdJJj(Wl01Ol*f!ouRrn)TF`= zw6s^V)jn3Xk4-M_^n-HUty{QGNk_pNYcKtR8D6;8H(Y+Io;Zo3MhAD5dBZdiF+0CM zBJWwmRCDg@cycW8K<}o|aWEB|9672MC*b73=RwQ=BuYl*U2d+LaInL43RRz+C4EjO z_-V4>jG)qtIYBhh14(+Uz1naN?!>SH>V4;tdL!)w`>d~!0ZG9vm2a5iyPsWuYv#F^ z^0Q~JRrfNG3=Aqxpe394+~M1596G%u#FM zn+Rd11JrqAs9T7=0-wa5?+^tjDMUO(-gzG58(vbknEbGdanKc0@&MMeM)tQep8HU^ z!ss%$j2|GWhik=cVe>FhL3zs}4Zvh*S!1+4hEwqEE^cix`6t35o?El44{H>&p~m_M z(3@P@CJORpyE4~yCl>t1d{^0D2aOBC2#z}^i2*4581;Iz&6`q*7I$B4Ds^cI*c)() zP7zs((CbeBIz~>*P*QyUpY)RDaE3;iOh{I@Skc9?lP}qoFoqm1>t3I%0##`D`z5Sb z2RIcMaZUH(E_=8HK(Omm@gcPqRwL^C+;<6pF81#C%m2W7xVO5!#wN=^K`l_!XjAJb@Q#m%yq?LOF2OQ&O? z)II4xMDPx8gm*Uf^ebh)PCGzI z*Rg$j=)E=q9-^ft88oBB9j!DH^P)_t6`@I!a0Y3vC?vZ`a5rx4CtRQz?#xgUY^ z?RTUO;aU%ShDu;eRl_aV(ZbE~(?a3Y{Q&*H9Ski(3crV%*1s6mcEhAE(Ey~oFbk}W zzNH8__4+pYfuLe-EswP%0@S-dQ4Y^@rM)@cT;Z3Lur5T--*Q};ubHNluCWSAKJS)O z76Jkx{cEsx^VWc}{f$qM4uY!2FyDeeIQ**@eoJs-*E+;h=#A!*b3Lq_Repo550Voj zS|zEqQ$K>7SWz_>zXOR~ze#Hls*X&^5eNCSuIdvs8hLx-Hs?>|0MwI-WrTaAa@&cP zCO?3=r9_P;k>Bk7(GCz)?dM-62n2gKKg$uL%Q1%2S4k(c>U5V9uN|#h%%UiG*_Kty z91Cn_O=Q>cK)q{-s$iLFuWE zyJA0g87U1r(@w{)zm_vLt1IQ6T8!%U&q&@gE3+X^7c^*|hH&S!6c_`Y!?2_YzF zChVn1--1$|&$JCk;sGUPp$9UgQSv)T@|#}+Zh z-o0N}c#t0zXy4&_1K}Z@nGEx<+-a|+*73t5boFSHn-$*Ud=>)*$vlCj$?A!$dJ1G9 z9WGl&@*%NhGW=HGThs?wV(?U5pryf-BRDU4_LOT}0WXGAjy zVNL&D>rP8hanYsuQgD_+DTrO^8a>Mp6m$37cb%6l52)1+#Q{i>Uatekm0*wCWi?S+ z-72JHSYE^?j@Z7{r&ptkyIy zCB^8u10z0=QjD?54_sB=38&6>>L^sUCFC}W8n$kpI9SJf;|O7%(=;OF`S07dy+uk- zcZ89;T0ocR2z#eS>ZI%#LNd1NtH&INlj2NiFDIV=+>9Dd!d5^v&DFZEl&$KnR0}^C zP_Ieiv|vj4PMi;)=@Q{rM>jnSYZDghI`BE5I!=tM&*vv5!aB_bKv`tgCcBG?sq@x@ znU|n&-VTw|!{X0nv>tzQ8gT_dPNjN~5#WFh27oxibv5vYy^v@NRIxLJ%K|6T30Y6;LA8Z1g`R$m&* zkn{ZBb;)HY5`25GZ^Q6rVgO7F+N-r2UL{?DmZ5R9?$1@aV!qkY5z0^= z2X-PzTgi^$@n{g6fqH#z98w4mB;m_s_hq|9Xs{!Mccb=*+oO83U7XSzthbMaFn6)+ z1xwdIju4?4m;UXB>g-ra5#7ocje-oqEpsfHO221DxvGgUONJB_wU$ zUyIDk!r^p#eZ$fs-(xI8LrrlN#!RiC=U?k$Z;czD>m9*9a<$8id_FkUwup%XI6Ru> zGN+`^t;DDk=7yM=m&wY>hTPU1v-e1SZi@->n>8X0PdNEwg<^0Jy0#H>OGCb1ODcMp4 zoTxV>)M{A}tAlE{on#tJMc6jt(JWkT(Tt&RU+@Jh?Q!2 z*7=S>52m&U$Bx!!>~#bM_*bh9_wKhLPt|Con+U1Ls3Rz&9qtLvHsIT;>^5S0#QU{E zy^};N8hVS84-wL*oV#?Ewf9hyvCJF<#&1IYpV2J z;~pWLX}6-(_({Ib)A3IMrL(yFLG9Cg1j@GA>{Qo!2B&bW|H`#s+Z|c3Grfnq1fw1Z z$M)s%_eBWFaf`)3Dcp+_e`_l&o2d2(bt-uvaI`h)3(jg2~d*3QHu=~ zSkZl=drM*^tcix+HueP##iM2Xy;@jnFh{D5jUKnxAmU>=6*u|;MIqh~m0OgxfyTj1 z>BZ{;OHfxYU*G-8Y|@|yCG^SpKIr=#_fTVl>eebf`9yNC1RL0Iz@bb{r?sl&KxOQv{$i z%GPrKLARVmDOOdR923UDFaD&-Ur@=WdTu~YB-Y&bl;Wgt>fzNS>ko;X%7-g|lZu}Y 
z6>49pB4_f;@POehh*ih@8Qy?m@z+oi?>v;Y^)N?m1YST0V?I+3OD}>cc6+`CXD)RS zETv58T>_qWDCvI%p?YvB0L7ZC!Jd2V?E%0F!@mSsa~+Udz1LZBZv<|t!CS){Osj6% zY;Z$qRd*!5OTULR*vYlJ4qZFvcM?exP}~gL58>3$lbN*~yN3{Kus%&^cP`tj_mP!K zPrGVL2f~9xwa9qb!I^U0@hDNfKXusX1~NW-PHWoo@<~Un2Vp0efYv~Likjxz%-Lj9 z*FQqS-VJrfI|X@$qL#g@Sk(WK`3yXJShGSr1fizC&G4qAlPl&Tv^3RgBWy5bxEMtb z+qFx*mn8i8y4kr@SY@sCE$){AsX>*gX`uSWa?~WIA!lYZ)rurAaZn#xDXd{;#%9Z! zZ55PynO@5EgQ1TU4(=)c2y61~r^+eztKCnd_0a8FP<b#Sj$pUb=Ti1M>? zD{<2YrEJY`(?|VDMAsi|7A6B-B2}WpannASL`1st#{>YBj=_y*se0Qx0+ZSay2cI! z)#_tuz{yCHyE~CE&Z1mb8x61>q6FhWI_FGVGfl#2pt=M;+H63k(bU3e9^gjEgexlT{`q-SooCg0ZFWI89M}E6Q@bt~QPLu1-*_o;-!2pF z%lXA{DDNm&f>i27oAOsX^y~7y20fQOn0Or_OsfUJVQW+(0gYt*`Cldu1XGGJqpWH7 zT?$^lukC`{2}`9a;Nd(!>p*q#{ZKDK1COf=~yLbHy;hG&VO zs20cVHRuJq<~u{P16T;A!H%+Rx4qYvyo*qKUC<31qdy-pQ=ZsMQY`_)Tsn?xr6rM; zqJ>>EV=k39AHn%*=)D7u{>T9^ZNw_7W~vnk>BQA_R(3y+k~s*g5Ryxss2rFs!L%AJ z1!GO1qpyMWtbHY0UlmR_rwWq>V6t<;zKlZFA(W`%T7d?T$aNxHT1_9AbXo69s~^^k z6Ly{s^$%|~@eQDaQL|3I4)#3%4#L_aIQP4_bQ)Af2WO}XiAv|b)d)nUqRW!WaU|3!gOg*OSlw`bg_P2WB zZ^`Diu;OFU+nZ?w`Dk-#_9A`=sw~H`^S*%-yB?HR+57pq&9v=A=|KoMR-P69*4PiB zdQYi1^Z^76{@`Tonmh(l2OHpqeiyk^ab5UnBF|l_OB){nD)6NpFB|m2=?h2OBS{N( z&kxP#?*bsnXv6TPezQHGl3t3aVlNJ|qOC2dB~a#2OTzM?S)*ksqRjR-H$lS5OuP7C zIh;}tm4l5H9elAm_DWC_jghab1k-};88zq7hBsQxKHo3x`0l@{v+^}aO6pKc#;@l0 zVwe-b)^cl+lHY4>zdx{afU=2QpRe8Z*5UdB4!3Q`ejxH{)4Yq-0VI6T7PZMb2&F3z zF>b<`VT9Dw>y=+MykRB#(igtC$_$?$y(7b{N#vdRalF~vjsm@^9^BOI{|s?7Jp8qF zW}``A8Si4Iu9(vh9u0lgs7|Bw@@{jWLj6sl}jz4!};vv5+JnY*sWXJ8q>wb8ULn97sY_1!syu5oF^%!IItt+S*s>AK|C^J2ZvJhpp zoAI{>H-d_?mui-}*TJaawKeC)P!y~i$ntW!q95GMCpym2%H&pl`_h+wlL~XY`|b6& zw;1GsL~Gk;{t!&t)%y}L0J+zFF$v!g9jgB>l|_Z@EUlnf(O zQQh@5(z{>xkBybV`VNFN++cHbs(?H50oSA!0HZ((f25c^9%y8?r(h<#uU{*FWu*#D zBSp+I)2XwvX%cc4F&(?@Nq2q$kW*H-gKJCmI6|ttg=Ho|0M9qO1J$+`I*C@}ewF#* zl(0tePERSmr@PpMZqIA%8MN3cSS6PIak+c-)>#SUvq|ajv?=JEaOycx*9OjaaHz(` z1u*5BY1f4=!YTMh4>2g{B?L^is>QR*fD%sarQuxZVlt7d7PyL#el%)x`Wn=0!P-S> z%h>B^(wb@}NH+rLffL4lzMcEl!PsSwE454vs@X%dbe368n%XswM@Z^AQPwYyg~P*lvp%m7DBnI*=W;Ba`Vj!{iOqyUx zfc#wU74#-b>u7%VRCtZb@$PFI33cIN5`kdfLugG4X80TFaYSMGBc?Sr_tsd)I?C9g zJyVk;%W;$@rfPw80#Mv?+*0eIlb}SKYR(6EhRG?!RQGNCJPjp_<{`;y^-R(j8$H~1 z+|Gh&afNO!Dh+zHG>RUe-sjDF88VvQk>&|0l5X`ghzGDI&W_bGzR^7@bQ-u+(QDkT35A<4T#rV$tHX?XYi zw%0;f>aM1e!Ha<2kito+FGk22t`@BWx1PGR1U+5B`Zm}9P*Uk_wQsQu#B;r??5LJQ zm5Fg>((k^#b0*uKEAwsZMlxLmrWC^^0*!kF?CPu8agDH~NDrKTH6I#d%)kuz{MOg@ z-q!&J%x6Dq#!s~QUfl;uZlr45FPz4yi@)v6Z2(CQhnxTSK_FeaXSk(&!`(N87P~_2 zza1$>Cg@5b*@2+`x<^fQJE1UVOW$Tg9aMn1Z@Kb2E}R17>2IBT5-CkK;|`bcp6DEg z8|K2&LO%t<68Dw)2DfxO*rFK6r zAw-zH|9r3Eau=)I!qpUypiK_8x&JB>)01(poY#CT*Rueo%tASzn1u`C0c#fA*gj>}|g~88y@v0%^~Ix(K%j z?1}vE)KZZ|K$%}ZvC}Em*AQKTtXO3oE5oHwYU1#_mZQrMlI*+n`*N_?HsxIWgrq4) zD^P1(mWBRG;f#Sg#v&7fYqn~y`>uvluxZv|WeBk5ImIHh9-w;_CAnD9m!}{f6B4Ll zDl@S|yPI*>I<$0Dx+ZUFl%4fRN=!v++lzf*_{s$~HN342`VmveTxA%5@ww0DD&+uB z8>d|)hH$E{Ly;C89Ip9lM?Rxc8V@^xRIKcnYgs&s5@uSJSG~p)fAnx|O-=%mwuEb| z-D$8ljN3*k#>^%L)0xM>%`Pjq;w$-|TR-UiEe$c=z)O~jrK zc)z)AI0Gn1ZDZH|;aO0Ju(q%6aV{|j-zwv(^NBh4y~%b&Z~>HmcYW$&;!f)?2G=+vL7t*8v#u1 zvFT+<^y4;|GH*5`Mk|E~yN;-wEE_w^iv+4^xqAVwAa zevk>{avS%;SX<)O5C+0}JA$^4kcN>W2kMC+4NxLwJ7Q!ax637b@;1N@@h&S+C1aR6P2pB zVSGHXOgz6$6#!&j<`31{C&9EZYnM`lP9gLJhs`+JJCz`x=&P^4X+P;q_sKSkGriz! 
z_xpx=lHnYf>i&AOi8&9aBMg}sOKZQ-#Tas}k}~i-fq?;I9ZZ>+YnRlxjDSN5@OS^# zK!3T_wA4sXT^vvRYp3&2 z?V>N3XyqG=o0IC|3sDr`jdbm^2uK1tEGf{z&z3}70#c|qZZrP#ix;%p$XW)b+U?~a z1ze7jYS%*-L_mk{{Oj6hTM4IS@0=n5x?d?VIK8^Vo0>B7eweC4?TMX;ru;GC7c&>U#0wk4oEMvs?1B$}nu-2UeP*~9SP9F)-!M(L+ z9?pk*c;Ghbfz*1Fg)>V7g1Abn_BZvt1he^I1~XJbtW;)$h@@I+Q++qJK4Uc|uqY%+1?Xr;}iMHw%IK z`V@lJomI=ziE1FU&DJxCT2pXD$=UpFtDwD9dJYVON6SxKv)M(`{9Bp2HJ3*hl7tjp zE~x{Liy>^QCsvz0x`d!f?w+lON-hK8Bim*hnKitxpeet>+Rn+Zc2N%2OG<4fyoRQ_ zQ~NRKdZN<^W~=Ys0Mj4ig7;>AR-pbem%Rn{`ba$kTYHGNyDxgTd|=&#MpPymA3|YY zpL^UI@E$^nJhYp$yS8|~pCowa%F9>};N(Jp)Uzuj0Fqic6Kp41kGdGI+xlw{Ydl6$ zsTWuOByB&jmAc)WcnYRcTi$%_E#trY*>;ujA9&|#Za6I1H=p|vUz3OiKT-;972ose zun0{(t*J@^FNRW2M#puRX9+^jsQW(~{N5g`16>lKOUhA3`AATJWoSL?(y}!!XI`8e z*8Ef}q2+!}mv6pmR)CeS8-zYx38iWZ|2HJW^T>1GBOp3BSjOLLz+Pvbe6N(^R}nC6 z!?richlJ<8OC+un&XA=@H)VG{N;2BxoStatVz{B$UK;A}C@wFI)bY>&ioQyLqkax{ zKi3{^oem76s2I~44^I*kUJ{FB$GV%P^G<+FXx?$ zUG2W~b~lp|VygQ0HDskKZ^mQ>kXf~KyLS0rS#O}G{ZzE3%$o?xq?+E;AUnF>=?Nv< zZX-z3jH@T!0W#)!usYmkw-1r9iJT}CoO?m<9c-I__aRdF&=D&ILCIJpm=GQU;mdHF zdmeSThBsXS?s1?FWt!RQ6SS0brexex5YIjJMN%ZZuhfiZ!f8s9Ss!ZC!-DE+nk!L~;_G!PunJ7+_>C*!u^J(z$Vqk?9;`v@WN-P_ zB_xpMwHB@mjk>icSmXeosOUb}?N#@-$-BP$U})q(^9J9Cl4j{^KF|8Q7>w2asLsCz zx@72dWo=4gAZdTc*oB56c{cxF+w%*JpoZy={KD}LEkan8+gk%V_h^3FRR((F!f8L= zqNoCGL`)*i*KKNl?e9&alzg>+f*rF#y&dLbP<4%6cyL@e%x$kOp8%84%*dXxGTl0f zqKh8?0-Yh>c~+3pAYAG5j;m46Kxvar;9yHt$6!PHQU)KGQ z>-o|^e!b3uZ{*9FQESqh!ius{|GOoaHZB=!c*9B5d{8?@cMu{rW)5@H)*eWrA)v~t znU@;nP`aPDD2y9B2MFrS-?b_2;XJ7A^GCqER4fVf7!EVrl?Q$0Nutjj zE=#(n0Djqtb}VDCg9>byjQvt+XU`B6ZSK0~v8)LuTkH+nHuZ1VW~_s~%loeSnRRO8 zvhDmW@=J!*0nYbwOtUy2W8wSt-pmqE%2U&xTrt!k8J9FNH|n;_^7HJjsdB8ny!*Dj zpt%B6$!1Sea$#k@mh`j>G^(`<0T17!UDUnennzYA4s&2mw~DZ)`)G2iJ=gXsLJI9R zKr6Ljju!SE7-MROMe9&>nlJ5GZQnpxGnCa9b)bv7XXFS=KFbA^bVes{#&^E#D;0H6 zSbcGft$vBaVT820$-tl%e|zE|sx|BmVM*~1&~_7d=JWC*6bCk=2ugBfq6{9#fuuzq zMXR37_h#pIay$)U{^6OXe9c0scr*L^tI{2g zBp^BbFIR`B@+s}T?KzwVlREQ^-lgv|Xp-~6UT;1hvp;|kKEFkaFFv0`iL7e(X!f=( z>+^_mwPMEb3;D)@?R+UOqNMY?QX#A{qg2l-M2mWoZ$~_TOgiM-S^?-*A3^C>`{={%$p2YB=1;l#^IdRYe%tJD z?P*}L4qBTj3;m!(-ms@cE&@`9<_s4D7o((UUM~wx*Hg%c-X~mI5D!4IJ}~$FdW(#J z=u#m@C(0H5a+I(kr+O=3HE(;ZWJHo6hM>WZ&#K{vQ)oy;M z>q2R^4^WijQ12a-Q;BSXqzDN2vTUy&oq<&*CxU15odWN#mwC>CigVB!dT;L(?>v&? 
zw98Q!gwuse2Q%Z~%|*17r7ZyP{St~U!G~wL0lN$-+1$4?*IfZqnqilqsRW6v0Cgql zTKDTgyE6mOBDrNpRp?;52z(z*?(N;ba`~(eJxC(HaqF>GMh_9R+{%AN z90yq7}sKqf0UGN?+0I{ z6@aSonXz(@zAk|IZ4LSW(?9Xq2c*E$?R2gm#tYVvwH6uZetmbWY@i0=@R%ui9Tp8E zXs1uVVw5NDmb&9py2cJT>>;{XsnzmfCtAw*(o3t2cQD*>$T#t9*B)YP$g^< z8qL#yqU~neA?H~rLw}g}4|huq_wM6er7j=uVw9Q3%pU#h1d`$&Q@oRc>4;6iDfepf zKZTkKPEQ=G>G?E*5==6nvkU;jtTJ?_5#&ctz54MSpg`2a+7oK0K$6;O5;E#LeBiyA-!kqK*|Xo0z5;|#%_;m>jQjjN_w9P==^7ADGk)kj4tE_* zMLe0^J5IWR5Y}&acgVz_1n&M`ZI9e~{#!dL0CDMK8ugA~xXgoslVm*n5b5WB;{Pxn z@76OU*D&s{>DA?nvALGJ=XaGp7Q5{GPz}NZ`q~5G6k%rW&yDd84i={$1zOCu?(;Yw zzP)XWQ7#-l)+WkRsMm7yZxUTG+EFpoRvkf#DbMo#=U;JMYQa=1ggfX4yKrhWz$~O0 zXDvcXTR0apS{5ftANyBUNry@vEkWw-5R^I`2S!xTrQN5jEv3E;PE#1gBLt$7EJss% zidgmA6%Zzj@T6R4CM!Yxi&svvbX8*3_K=@eL+Oik7uKh15PIFf(-?II_-aRKuP+5&P6D8&lyws@zQFss4~ObHe}5OHOa@${mDm89>14nr|HvUu zgxb~PLBtfQROhn%8}2BUNmpubbUTVl-uX9``3@jOvZ-Pjk>R`(6_eP}9JGQ3;ewHn#QNsWSU2TPWrc0@67*j%|l&V=X+xQ`| z&mpE_1apbk^BrMia=H|l3kb?xV?z61gs_pkQIqq4uA47`#L!A;vp ziy*Jjv`s=NWy$JZBTyCceR(9SIpPM0#V>zpB~c;V>vsRHhB|qHl7hdrt*_onyN!T% z+iFLM=FwIONXl0yr8Vq61SROw0hK^F333a#tLB;eD7|4bV(r%2>_HMxg{`|ckrrT@ z$8DHgKYWyQYhgGh!oOl%JwY4Q(LEo@L~kDwpB`m2>00dEi>YG z2oDr;_BU3M8-nR+pZvtjLrQaB&Pp)6h$T>oj*i4swJtM|A4rlP z{w&Fn?+Iv+WDpzqWz#$4g`k`c(Vp9cz7E#orl;D;;`;om4eorib^!X|>VYmK5)pth z*Um&0Y9LVDpa0aSs0v`JTc^`CsE1K7V^8e|ZSQ{FPFK{(w(`S-`Nqm15Z4zN;iCbMp7jQ5F((O<%G3QG5A8F)yvnlt|kwJ|26D*fAFM1==Oj~IY=5PHL)x2^PHM|o$%fM*W~2$YEHEp~+O2fb!8z`X}h zI6J)U?akx@Tx0Hir#5rn)l!cTQvdR*+F0^9DU@cQ4o|uiyrrQBJWUGN&r4NpihR^X zhy&wiaJo^mVbh3)_JZGPWvp`^Xdx5zgu^B0zokpS z)u3;Z1%kcV?@GOLRvH|UfQoB6(xHi6F@av&rSV$36SxjeVMpF?ckb3B$o<$JPO$*CEHsKa^fEh@?1Q`bAPfP^p{m2ZX#lg*QZ?M69eA)iE>p9a!QWkaHxhSh8$n0N61Sb+5$rX2^A zvVVi&ANsq^-R4Ok6)F{@c~d=wlxF!g;`?-BQ<;soGeGz~ICikR*>V;&4K#YFj3Cc- z_(*(;!t))zr25xWgSm-&vHLWRv@XGzRDO93eINx}`3Yk`QJdGVa*A~o zPKq^{v!Sl}t|8;BcawV?BG^0NUmJ$I=_)Xk3Is!6SsZy{(%PmeUoe;d@s zTT8LLBdm0uN0a}D0oKnLbuXZ-PYik>UEzyga&`6r7?#^ zjd?lORso#uGuFGa{G_AMd-v68!Bdnp4qqtWp7$H~5h}K9$g2KT zTfXJ4<<;)@u?byrEvW0*$Eb6ubx>Mh>sZgoWql%)mM;H3NdD!fCGHOU5tPp%Ut0tW zfJ**})rP-dwNwegQVxe#6-L48Q0A>c6RGDYNjIz0JI}~j?+c7jeFN4+BSh3*S@lYgs_awP& zALB&#oh|F#heqlFo|9F7i{1Gi9Ga*LOCTo3686 zGMguh8|Pr$ka;}^uFU5VBIUBJ5#0b06C5mwaS_1jxBILT1abB?rq-ngK=PUUuHCCE z`C{%;*>GG1(v`MNnGvt$H)^axUk6ez9$D1$2Al#7a1Scgn+UQSqPW}=>`Cmp#cqLo z8zs91G|09?P)ksbRlipMi37hUb#op&>knaE+wddv&^;uj=Cqbr5UxJ;l{%s)7QnQh z1M1(eS(8G5R{e||ek532Y3I+6^Qn238zEW%n4;RLJ6J~jPf--5nV!^i@)0aw)|ZC9 za4Nmwa4ZUDT0!hW{y}w!g`nSiI-PngLXh>5c4=ubAZ0479a0Un1eWrlI^tWJh;!dA zr%KE6!_>6lxm;LP4U(pcuK=}VPfu70g>7$**Rp06oX*xR`;~ZDo$qUyw9yXsSoQy4 z7F#C+9&MfnR-h*4wJ36#nDJDMF%Px=f;;PlRcKdx@U5@=@XjA(N!X7PL0ZOO9Im!y z??67HXGv`k2oK8`ObLe(QVIV5^q%c-iev1xvACoAq?!$bb|NUz)++0$aOzC2WnZJ? 
zjw31L)^-*&38Y9pFBwf2o=%i9&o}-7$?d8u8pnh)23{-g!p8&6i*0}IL}E7gwkL&^ zd#eqPre2f9wENlaqc5&B%AxSau1cMZoJUX-(E(HpA82iRn^(GsY&VMkR}sSR&rm3erPomK z<4a%qm~k)Mn;PrFr58);?Cl1s(e)Y*-4skYhHARF1*SDOwzpJn!_{Hh{gyjj+&60Z z_aP{s>HWUDCoILI$oB=4g!2;hevqHu++rm@Q=y)EP(AxUJo8ve2Hydbdu`Zpzd zo}i_C8%uM33Z}M4%MriiKSJqspgQ3y(leyALp!%N$Is3uY(AYnE$fo#7JS&T95scJO5N>R5kflw zZ%$QKCY_ob;z0~fPn;z|Rzs;V8?xp>X-yZu|8PA!{wkatSf*AEYZ35t>WD(F>wX-b zYL!@zAa$d+I%OXe?oH2qpG!Crv%e!bM4~#1HGtG}{9pcyuPghtnnO$W4<^0$oTaPn zFqG21;le>3uWm<4cX+Ms1?+$`^VPX)s3rL{l>C&Y-6@zw2{&BstIdaF`QGUTqc9>N(3X%#^(X;d^dUMD;dBzgz zn`@n37HBj9^yIX4Wbb>`|4%0+?RKOLxX*yhqu7 zD^a3r2uA4Jn)CYe-zYwm>>CJaliJ8BbxwLW5i=U!>|Tbsl|-gB*=@n_qx{a*w!)o` zGI8)o^D+KmMiWk-98E|-MUoeXJD$Jz|9PD!QuJN8cWrMu;k`1 zZ!8LA_u1skVz63#vMjZh0AW`-x5a9I?AoOOZ>O&Jbz1)eK&d$rG#e}R!krbu(rU#h-W9-LQ zlLvw+RsU4o=dH0Zjh6o0bIDx9S;Vx!W|vlV@M8$dezeT)jti%*uea^y6R-?kveA7~ z5L4{<)XMHukRJ6L9PgZlBr#JP9cP4-#HMzo{4CsCE?(8&%J;}$Wmoc?Us96k_^3UP z5I&5VpIM&GzOQ^2x&&wEzDM3%gcU^AZ<14&x=+MMIo$flIJ9&MsiK!c)+Pzgr%r_I>Ry%vQgp<=?`5x1AZ=<9} z8|!@yvH(F!%KCa~J9n{!)%_m$R+$w)0Fu_hsWu5eM3|qNZHxCD zSU&2grPkOxXqwGq)Ksr5F{uw-Vx=yX0)Zf3a~RLH9|d?3&6@357qu)|RBFMIR)@IC zqcDKI?q34yNcE{jXi{7K!xLY(JDB*|xIH?z1WZx*oiq4JqPs0qJzp(p{N09(k;P)-4`J2 zuG7YP)$}454tO|r-%L%gmyl%C+}U65#jZt|ZhZyBoOj)o#z>~Mr6(N=C@+sXhjxo#mNO$0_hQm==VvT_SdH;A9E5^V$kD)wz?hry-x-AO`T zSGY3oBK<=o{NS|))9&SW0-~KbmzH@yX*dl1U8>&Sk_26xttP2vQ1B25%jSN!?TbEw z6paBsZ)W%ybro=?#f^0_OykMjL%HkymS&+@x6F^w&ae!Z_M0e{{?(L{8UYq_C^3h!&x!+J<8_jW6zg*34#Kx&aE-Q z-MBvB%5l?VmUnTKxBABl1Xb5gRo0b3%37+^DkvqgYc*C(}lFvI8c4z z;9eK0sRf|E&|Y(#7EbNhztwRUS%8F@<)BVN$Drzzd;7)pnU>=S|#*Z`dtS}HRZ7;64zJ68OD!dwbMh-ly&;|{rcTPPCcTlGzpe=7e znCmE5aj-pLasx<`b?DYsQ#X;)kKU>ys#}T9v`bBH1HBoN3cPEir2HMU6rm0$XpJRh zK15VyF{<<4gA}3^tlqV=`zQ)wf1ml-gZ!-1yi3b39z8@!KEA2lI9z&!md?m>UCkcD zk%GIY=rgvn$w6g&f-dQIn-cOAz}ksYpFYYDEH=b?2Bem6RA*XntbMd+s)qMMAUu=1 z!v>=tC2T4+ycK6Ll9UgYvFs9IwJl?Ng!d z;mu#`N@RWWHyx&|5|l$<<*-^<0rD!Ig|5M}1}QaQWNW;G#qq-vwfnmkN)YfisApZp`ZNY#30QdY0+8 zYMg_~jVn%aVMl(}#M|mt*v=0A@WdY*=3tT@VVa}(p z!YoRUll4qay<}C&-7(};VY6+Kwy}R436BoDkm$P;kRrd^vrsr0sEE6#NQP5^G#z@q z;Sa`*!&ZItM=lDS@qtdpB z-SQ??Akp8kYK;}kWrQ}Q+91nWP>(p9i>KdSqv^kiFlfSB**dp*GL3(&8YKM2U!j&#Eus~ zY5ErqS3M5(*NUoLSE8kF(;Ayvv$rJtD&&-KphV;9r1F00{cE~Z>Y97bHDB#$*wA~* zYHgRqzB%gMy1;C~lMdY%C7JTvXCE5g)2B&;{=}!~wwZe%zrFPN-*|~&0MevyPSyKj z!>~$H;|T^~)z>k1in#;IfbkY&)nq3^wQpO#k9NOqt6Pl30zwjI{s2$H;aG9&&i88h zJKfP7SC{o)Nr>m)wSA^zaM(9e(*1bgI}Vp^a z1&^dw8kc}x>ra?IF9%M(cXX=mhFt;4%HelOg?x2rqV(l!0M1W%(N3)E`RTRh@bU(L zuiG~NI=h}>f&fnTu4t96e+wal%f0c;@VC1-2R!nt9(NE@Ynt_RJyAvtKvUAgJ)56< z;Jl=68>RXuhTca`v)=i$8X^z!6_uP|qzr(`{NUWT<51`kQu1ncn%0y=16r>y(M2Wu z1R=?41eJISr``O%yWVd3s7q$g$bq@PoU9vI&(NaIGp|Uw)o!in`=Icif1@}J>F-D-=$f`SfFzgNeup=oLK|O~MI}udxmwxGGn{cB*N>LUy%@B4RDLHsJu1zG9DD$1GjE0)o0(pQe zBOargJKxzZDRb_mqk4)0QVk5+o3=O(WomTPW)H|dkT@q$Wmo2_M<+(h%gIR;1>^+Q zXgUS-nw_B$fdEI!y~4)w3=|H}%>5-%0H#eg#RBvkLh3S9I<-QaN72PTO+;SE$K%#O zmGVVU<1#rV2NFI!X{mNuI9+7#SUu}h3hxy(MWhjs5?2E(bM?P#KuR=FE1&D2;*z?x zvv&hZ`8Ft@%HKp#K7EgK;P=XXz%3LhdoZhyZU9JZ!g+no40pir!fxmvkpXZ@)_df~ z_xI2;$hOr}31wkH3?M262~$$`L6^+dv6|l>c9@Z<-ygxL!slvz`xs0vJstUP2mmy^ z8mtS(PleMWFa3gn-+kQIE+GU`rJC8uf-lZ9fU;CN%0dJmTiWl37D1_?LqnBW94w`n zF)o2M-lslh%nRZZgGD;RGN30!>;6Y6P0Nwe^0OoRKe#Tj6)34z-%^yN>eFblqY*SC>e=T}uHF5* zjj?YS@m)d(m}^xJINrr7#Vpg(3AEJG&8DU)QVy$VdIbF&(#An+YxeSI;uQ>#*IrIvOqA+vO2d?H@u{XR<7C@<{X)&in z*OAi0-)twkH{i&`-f2sFP3IcGYI(Lr}Nm3Wi9?SN-M#0wG`7=Pvc@ z3DlG7xkAu}-&537-MwTjjNKn0^;makq}?QahLURbW(@Lv!SVSqLx(Gq?oz21c0?jg zRoX4SMTj+8YeltKxHsB4#H${?r2C}pD#Y4&X&3EHZc~aK%R(vBf8Dc;`j?~VWglNf zVhE0W+X)C^#as6Y+XjLWFXgX&YG$fl4H!i1O@VP3Ka8Lf 
zwR_PlT(-kum9q+adX@?ug^TZ*+E&|%qM}|z{9}@$`?>a%s+W(W=qNQ8)*jR(m~z-& z)soHFY&uCCZM}RpAJnkexQUPel~Hnanj^pC2w}AI4ZJ!5r%%}?+EW%G)mA4_lh5Jj zHkPLlHN)5>J^ZwAs>TsP+y6V$QD}F~Kh#;2@Mrhb%-m%H13_J#(rUc(-LG3_n(guh zgtXd*(YhT<)44)R4vRREL8#1{AQX__5*t^(ablk&Ln^PymBF%*+r zasWwklXDkK%eo&#A*-hdA3#`QL}-hLKrd-VWfRTq!y~k$M=#+6OFbVWX$P{3NDz)7 z;op{m0H$>vm8;1|-OpZ-w=C$OgA@YbOY;rHW_gA#wh#fo+cVf1#*4Zr4p~aHEPhTX zI}oEEp*M2>v0i_m&hcwTGxAYWD~`2BCrj|GK-J9*7_)i_d?2d~j^?8L{j>LhXvtb=urZyHD@ zbcYjnf4lY*X3^3L#edUuJ7hnG+B^Dp5c;FFIPRA;Dyu_6{{*a%2j?{K$%NZ0ai@e+ z#Ik6xcTF-NC3{)@b<8==pcNa+g&mdlEU5X~!mzEh&V|sPJ8DM}=R??H+VcXOo_isL zUq5W5G>z*bLTXeVS@K#f@qn6k_Uu60Q@o6l5_-phRMwVPkg7CS6%cQ(g6T|0SaRta z*SbV#Xsy#O%fakNo{a|;CLRrUFUoVOMTx6$9d%BBQT73hup7PJx&DKAFStt zo`5~MZ?|Ue)SI}; zB72>L2Fj0&AWy829B4g>$8~~EyaORSv!&~o*W`Nnj|f2z7e7Owk3wObYgO_ehZS$2 z-Ta#bQoJp^+||r7ouBLaJ(&TdzfRZr*0KD?`+!6YBx)N96$QDgZ@q@+GQ zZYN98PNAg8M9_b3(xEM6PNP4!K)U;lyRP5rV}tASwa7n@!j98J-LfOs`OU*in-K5&RA#@U@A3yxc0BEba=~=nq;m96T4bK z6I|U#rDn{8Bo`Q1cNmeH}=JGvDx)eQ4*cTww&bPab0A!XTRkw~rR z@SXp!YH|-&lfIs=bN~6z|8Gq+5Aw6YK2)z|J_J(|H;*nU`lI~r;J>|K_ZU<#y<1jK z0KNV0{Bx5n##2N?#E79(2q(4P_3LK{Ve6)vfEN6p4elPVtp;)dl=uh#x^_4g0n*u1 z+rf*4rDNt{^i|8jB_X_SLpqmEmm*}0ZM4_vie=lUT83&c&nWA1!Ct=`+v^UF(X#?6 zg)s<82T+Cv8_AHLOmu%#*AQ2CF?YRB30wm@92*%O-CTDjpPSUa>IW5w=vf;u^8Wks zS_i1Xh}U7t)R0g>ka-;dm$Ka#s2TbW$-h6}*5L(Fkl!4Kw7XV=V1|pAUvq^SC3Agy z%DC-8<*)_1luHst6LvW1fMkDFekuBOD?q-{q8DpbrKN|`98;` zQ$PfqpUv>K-+Q|IN=<4$uU>x!QRNJuYRa>araV};-p(cLIdCfpVQFu*VxlMj;bW~b z%PZqWl$575h2D1jB}A3l&9JzZf|tPzw>?dHywYJFQk3{AoK(yIZ=F|MON>#@M-Not zuEPqqZ|;JDFI<(Kt;Oq2peN10q0ySf&@Hs|QQIms12Cy?I56TVXTu*QL#(|%VElJc z?9ub{Kmwqo$T?uV4@;9+ZANVmzzEytH$jWf57Du-x6SzoQqsY8CFXH{QtUbodIBZ= zjm^97)BLKHw^qZ-$<%I=5g%sYF|8gb*a#(_yR=r<@d;R3A8nL|qxC#wVKl!m2 z7JOV#`FJuTfmZ{%hnr+=o9~4MuOK9ScM)>hvnC(#9fWtk(*4fkwjrlhuOg|yt`bbY zDxAFero@_RinVC+{=yetSnx?f?5KNvepm--QIANq`(~d8rSsC4Rug~uX~?_Y<-~9! zSmVIcR@LsuP#5j4_<*)8lC7497v%b4+f_3mjF2;U%DyHq@s+m;HS z%SUpl*NaG7xR#tl5o(z-RReb?oO+Gy=7z7Sm%GqX)Hh2ccS*RxQy!J}Am9?m zswz4Tq$>^0);{w-7_-~+*o4U>n52d_ys+S4epI@nHI}A93EAit^AM0uTUP$HD%J+G zh*-)~-U|zk2q&kSyXbdtx)27H_45l0-bYGh_OP}qD*?hDEiKU+POLA7($wXw%}$^w zrG8lJ=&wRaI6HwzCy|mc+t1qR(jR{ZP3ajnVdE(v(u*@bO+g$qyUO1~SLm8n+WPZ! 
zFb-_rvSrH)3%(AgecTr<(Rv0UY^Xg2eZLy%54*2sny<67FecY2SaIo(0!t<}^A`}) z+{=Dp!Ji1HE`3vnZ8WIM`TTsiF7N)S@cdL`I}M*T_yxo;dRIH||FbSq+i{DRm5YdB zE9HCQfOP)?1wXbmdy-4t&u`T8R#sd89wjyP%uMm`a!26uwI+!F(0x-^#cKcf3QE@} z-27{Yg5N=u&*xYD!pr8l?wj(}RNs8Bqm=8|a>I5lA6TdByx#{f=VQP2lcqr7C@h-~ za=bcqFm42YfUGyTR6c9sz5%78^H;3@uA|J1mEvW}L8Zn{s9S7a?AYgNi!>gBDt&>?@T>$`CpI~iRb=WxZZ(Cx(!PAUj@@vEVi}Xhp?Ve zH)$%%{{~9mRq#=!{64*h5Czq416N(Oba`RHKcZ%oeh!Q7Ll``Aa3ALVQ&K2_!WdBp zeSi>tlonhQ=?_t`bboEwJ?x^78zrLuGeQdJio-BPzQe9WN&jD9b=gO~ek>R+zCJc} zXm3sa{~alrZKfQRzvCw;y2M+DXB{>Vv7RhD!vBG&#CN}0#d!)O$2^+#KVd9UgAJu7 zeAL02qtg3dfd{z6siE`?#EG`Q{=$O)4Pl=va4x@MB~@A^zjSsX$GT_43T3M4f1Bs-FNdCbIKZ}q7SL%5wEsC%l zQ4(z zKd2-6k9S|}`GED?YB0=U%3cEE6@+wzZoL*l6A zNL|?bRZx~z3nc5>?t=;Md$ENt5%Nhy8Q0-%ntfeT*j6Tt`t+wk4Lo?4A2I+nL&a=u51&PUAQPQ(r^Tu1-bL6#cL?o1NJk!r@$B-8~rq!v|AO z^?^4L=Eb79hGr<+!S17dr8aH@!)`C%?Vf*|Ww=YKFC*t2gzBqxinv`kU2Rj7;h%#s z%E^}#!yQ1%w6Qj;zW}C^%=*3_Ym%LbVpp|oChiJsn1jQyV{VNg_G#f zv0B54as;W@!EBpi+OaMU?l}6_!QSuU>^t17&WI>rP@nlTSbuVX%!=S*z-H!NREI9+u;sUX zt`3)HExJP@zuz_hE&~c`hpU;-{s&MUcGoG{mHbrPy!+cPEcgzH_qBPTVXh|Pi=Tgm zl92GO@+){vI90QH!oBaqH6xK6*9F6qZDmgV18{!a^~^%)2Z+7VIo6II{thAOar33x zH({l4m{!xz-$RP~=@*%r-xBW0^6NZnf93A))MWe*s7Y|Q-D{oZHbP3VY3Q977W@}5 ztS)u0{J_u%&@#=~>vP+MVt^u7M~StZ`VdN)UZ;G}3*h9zprxES-s=)DyvJ)h{~u8l zbhwNa?hB^`V|6*$Y=L|DW@#k)qU(@}Yy` zs^-5yJ#PG|4pi4`ze6cNRSd6Z(FTOWe?4K|q52cVRL$nM=KLRU#yKfwSs>-7NQys9 zQ!o3&|Ji*(cw)v!-4`ZC2RKUhzmSsCEHxTKN?Cq}s4`QIu$cVe!~aGITl&kv^@6Xq znmZhj$&Uf^1EQ^t)ny@CTFLR-_sdG_$5HgPK|Ee07%rA?qo06!1@Lr;jlts3;zXeb z)WyP|3fk|OR(mgju-$cQU;lJ|n&Ny%<1dAJjCG>v))=p5FD&?(d_awn)-nK>Cu;lY zXNAMiq4tE)ayT_1J8-C$fIrv8;VtF&C(fywVU1XaCr3%0)*gN(9fHCn}N7*}(3*G(QHzPCk(&rm+N;6n) z&j&jA=7u5aKnI6wY!8C!bn}~f%r;?l8XBvMro#dI+xsi;0I9>~c6huU?sbyhJ<4j5 z&!I|dsysUE=!L5RztDZn`37OJGr#Sw7v**dCnbhw);f7WU=&pq%&Sc~*aK!ts%^Fj z_lS(V+5jF;JiHtz_qO{$jJI)An&>2yLeDtTpb5a~3VaGmqK)0@E|QDuy8j_q&hFN= zGp<<>OJX#51W30TrV}XqF@$syet`(75}xm)DTCEd3Gm}k+K(RF47Dp2*as1WptJiIyS0_4&qF5vyBLnI{_l5J>x)^?{`(hgrUrN&Np+vH68)MW} zTNS5~REDD9F4EVN1Zh+EV71q4$nD*2kDX&G&|cUVZ@x1Evn`s*4$a))6+?WKxog z2uX8GolX4(n9|I)dybdjRF)-8OrrjMN9x&#+e1LoKW#$WKIl?#*Hd|}bkVt%H?!+p z-$6>-ZP=gtSyvOuY}5RbS4(^kDV=I&qMihAn}OF5Q@pQ@)%)$e*2VX8^z zA3Tphg~5><2-v&o<(;cvSnzj%avz#f?wbkU{F+q$UKrakdh1*C0|<+F7H|FRA3&ui z1sz$o&Ai(vDb>EQ^0x3_;Os(G-+lhR$Q|@Z)>^g_1~HoYugK}_y+?isz7Nr=;rW5l zD*p{iBh8e}_PySZvj0fE0&EI8~-A#ST%CWg6DA{u3;550hAiw$}?@* z`vlfjuUX)~Y3GHC5Q6yS`BqJyA}CC~g-boi=YBoHAr18amKme$R`CB7xY01Kf&C0j zM;NZ_4U~fXUU#EwP%ijdOWH+T&HH13I!x6ys)fR-vomxF{dkABbF4}q=_3rvhg7mzl&8AYHuq=+}lnou7}i(k}&6W*@th8W2Cz#pVHcvaZK3 zL+N$wxcUBBgr1BwaIIGB&}w;-!K0(4nEc%HkF4!gZUsUboO?(ckJkS$BBf35X4Qr{ zE0c_44Z0h(0K}5112e1xlx0;S~^rz4~BlAV1aD zHIw`bC~-!JQ7c}BQ_(%#OzAei+7Wv1n6E{s9#^ZnPYN3r4F75`a~%{WTk{>N?)>Tb z-^Xebd_AO~dO&$>XbxgFB9-9XN&^tok{&_9$=3kI;GwscWSXlVL9Rz8^rbiQqbjgq zDJeOC5GHP^r%Bqq**DQ5GfK-D46-j(n*!UQaBOC3@<<~MBT4`D@=X4YaEABxEmZgI zUF%i7X5L#ARgmbyGdi6EaR44!1MJ_t8?FHtU$}yGRVFywgxj{c=ZhslQ~Y za-Tp`sk`-bJpuMrFkIL;Q9ASVxIS_cQQhi-(C-MU%;8#ooDxoFGM4sNr~W;ZlzdNZ zrPK&Jjg-P}Wi{&o9US>OTCWF_E*k4hmr~iGmCe>4qR3}p^N{&oI09!wv(WgX4zo4T zY0o9D!#&G=;gr9fk~ngf{CPy2w##ak{8K>Y^^li-?2`)!YWGX_Z~jbBlWvUl!$m0V zJydGOU*yLP6cV!QQV`Kvn%2LE{|89&#|)V(f~m+xa>1L*@=K<2!SKm80{#EL!5uSliRKQiNf3{RiDA##kF5KkPn2cJFT6WAreu4B9Dg2E~65ZFXY+`)rGI<>E=w(`$B5)xh~5 z9cIy_^E`#qv*#Ves^kCcBGFy+i;v*&w7p97zrZx7`-pO-ElVRc9%b0&lD<0Il(Urc1#-Dt(|! 
zp}oXtdjKJY*gSFwgO|e^ldTgyAmO*GU#<{NMVxe+{=rn8Yl8M*`AQUB!XdqS{$ls@ zoj<7U5|e*N;Rdc59DKa@flY7|KcJs~a;3Gupb|`uNoS3)`q^%+zWXas81x1uy)FM< zMM|-zYX16FFvZ$1izixSZ5M%2sfizy;E)X%$*+S_`N@fC8GITJe@9p$NN+ttx=L+U z%p25i?4sG5D?`gZg!FPgt$GhJ*QU&C=&I5@ZS)J{k8>Fs#~VOe%8b+7njSz)*)8|} zx}GX}6Q$Qt%WH2>a~?!hg`Q#ZHYio#aa_v7aCl^Q!-1Acypt%ld#D|T!**CL<3Ol3 zRzHUz#i?m)9~%QuhSdMh*!{>u^39WmO`Mr&=1YqVnwZjHuuMB~)xYNN%jBO)S2L|hRO5fKp) z5fKr&uV=kK`{T^b{o`&b`MlrX&u9I3)~{zh>sf1Y`ktFHTSZ=nOb$t|oOe3)=6ZZ} z1lvn|n}Fm>yX%(xNAiMYbtxGGlGx>KJJ(jY>%yG**lbe--&Lt662L_B8=MSq zL5B9SqyKvS=!}`26ioLNC3K|i*q%joaXQt4(f}ud6*_+r>_$|@-Cnksz5TB>#-;~)I8$2ogP@ol7;FEK?;nD@?!1oGDQjOlBJ$z>vkkTI@+g>c zweCH|&zXIoe`Bb;VIB)~)o;_c9tR^MGplOqPa?FORW*n`24Pic7( zOm6!7-d;kbQq^uh3Ir@f?X65FuRtl%!8%W;W#}L-iMYaTFG774A%~0qt+Nwf^PJ$U z^Sco3^&T&ooByvg23Uf|XSZ6Gq%?pnwssxEq5Q~@+g$hF4pbkulqv5Wuet?aHER(QSumsQ(!nrE%V?=ce z?WW&PfUeTCbunvW?Lgm8@q(?Z$C|5}6@-M-R%+d%M}0s@>gDCS_l4)=WWX|Oa~1wF z@5`%dRKv(uhzN3EuFk3d`X}6$D8A|OvUcz1w{ZHpI$>9*RVLo=ki=W7)TH%2RI+c< zqb7h0RgUHTfXT;SQx)>b7$;Sq9QNyl40qR}ceVU*d{@iknRNsrf$=CB<$}m{hR%Q$ zqt?~TAxD9dBa?M`($UburdK2DbIlK-&Lc6#9^*IRUse;8wvqH$M3>lqE<2(G6Y6o; z2x&)KNjn}EL3gzO2?<1W_7GLwKCyqDi-!*9BshUu|DN8{Et8*&RpP8`BzOu?#dg_R zN<{yH#wt|h)%<%O085WYpS>?0qkwIr_+H*YKi;UG}1E=^n-un#WOu7`LlNS zax;KaFRYdrcD!;<{|e<&Kd~4V95t@iGKt_^L}j^_l;{;u+H0MQkn=Lm$0>wz7^oV4 z0hmayZWZZ5ScpswGn|FBh>H*@r9a(Bu?U*6;v%z`K*A!@$x)}^T?$s?FsZYi(1Etg zY&j6An1`qiGzd)M<$|n#UIj-mWr>#d)d-@#s+@5K`X4EfCDXM%vBYX6L99cFdTL~9 zH){frY_H=3m-F(5a7pT#GQw^E(>~h0oawBuK_*Qcqowg$I0Y~>x}`R{(;-l#;n=aN z4%b0Rw3d5=4)*%|Od!h!s0~o!+*T&95m4}^kcn=r|9bur>kW(wh)Ahj5J33bdgjo> zdZd3(Yrs?=w~G`eJxk2YxBzJlP*T6mt`#Z+oa+46udO51@z?uV8?Xd5enV+QJAovy z=^eZCTbx+-0u|9tLEY#%g4pDOqc-^8luw-((?;8yL6O(>Ty4ev7MOf>wz-<6_+K^SuKw+jwzul;9`Bf& zf0m*UY$w!gJDK@jINkK>;jPnK6(luXEcf9xz2#T1DR_+J<*seteE=hUHst@3wg4pm zOX%m#t^OfYQe`tn+07qDM3!61kN(lX@ovAsJ}`wdF}#?yM6e$#{_PZhsm|t z{3I%Ymyd`nKZPKNUh?`VTC$*IMech-B!{qk7TpzQ^jjUP2}%ZC!$%LSeuqVn;-BK-s)LZW)1t!+1j_|uST{*vwG z*3n#CK0`|T+>CzlbI;PNhi@w27huxc7Ja&z)0f!v(X}=Gt9&BNtEcqJU*|`+T-D;- zH&APL<*NNHkOCZK>X~Z$ExyawH9pk3!1sB7{DxZ3_yI^Mm>QQt_#+|_|BP%8yKOsRPRQrP!z4%x2&|?3a#cSGOcv?ywGMtV zBF(dIr)Mal1%w)*h4b8N{_LlsRD${6*7V(1r@<=0uT=3x`AK}MZEmud0H@|s{l33;ZLh@|wqyga(O zu4mvz01AkS5x5|~321xWoN^(Ud@j!!7>54-4ORqWKiJ&@RN97lQtd9Ip>cpIE$tG& zXS-oZim8|0S>m7=_$37}qd6{xdcst* zG=Xank!)?ft8lNyB+XZpe!dZu!=bJA4blss$dZ5XaPzadKJWXs_3)d(B&+I=bLfOI zf|IWs+S1GzkcvFIyI)1xiWJL~ zI}pixE!Wc$^1k`gTvdu$P<1j{r5$*di*8M&4ePs6s`Vb-%3hCYxj*eBpebObaG0Ty z9BzV=i8=ZEmOCGN#FK15Zwkx@>@oCwPVR|$8@7kPPJF{ z?!bw5lk-P(Q+GnuUfQKkclEC?se!)@4tL`csL4Z{=ik#arh(Qk2X{VRM|a{=lDsQ8Q2(uOVeHPtQ)(Sv?Sl%}N5)JXXdh8!-p&|SN*9tH&- z3#$(v4IW%En3IF{!Ieek7m|G}XzTpGIuP}7NQUQHHF^?IMArv z@H9r$+lP0!Se*8d&mD~_tY-o7XhHvqzW;pQH2Apal+FOAK(p&(v|M3cK&C*bdu2DV z)O{e|EnI8=BB)v|sWIav&q=IYz6>O7mbmrT)CElBK0E(gWjVFr}^*Wpz7Wftm4>J5ba*iBf9(wk7?+oHJ?b11)C`luTFHk3YWcqI2!a13fy z{~fI(g=NPm}u;CeghIf9;#k0vrZJ8lMB=EzyYL3rLD= ze6}XxSxdf%cKcPnaE6@H`8q)Sr7QqRb}2C5LXq8!g|8ZmzRU0Bd+5{egA8ibSbl&c z%E?_m{Sl}-StCW83LN%VE#&S%r^BIS$Puf;JtDu3Z)yG{M?#T|?GA18ko5+f&|mw_ z!M~&*0I3pf>*FzCs=(GgBh3R5A>r=((fPJM=s1kr|M{hq1CNP&P1|U80-V4GOS?M} z?D}Ch`E@QtC*_^_7jufm$$?jyan#b*DPU!`t_J=ewIlgdM50*PF7h}H7N-41X1ct7 z+s%WEkdk!8APvD|DwFkC(_;cWBfr*0pEEu7D(mkSIcCqJOFF7xb^T-dq`9*fVRBF_crSyWlZBp;2r9g^}TV;)@%MmGwwY9>t5{wj2Z!^eM!5gNg zb3eptI3?wKH+VJ$XaFhDnq27pjelzq)r?#3Og#W)cJcMB1|E}ITb1K2K7@)0*K}4i zx&bb2a>6gPHz1sqI zIN6730g#S5+~f|A^3DFDPp=p6M5KV4s}J+pU6^Ft`S(*h>R^n!dunLPgtq$8dr;&# z|J=xU`J~?qMxM2+4sjoX>{Smsg69-DL#XlU0XT6EZr)1$rx{?Ra8RfGkVgT}SURhb z^5OnnTm73``lFbHcId(K-`JNAh_VeikM$q0fw%hqI7U)7Z=)7HnUCu{kJhg~73^cH 
z-Ri~DFo|aG&NF~it-72>(7#t!L^=UnVb(ln|4+ZoKS3pWA;0aYYJ9-6@aotF1_VIi z?H%U}MhD1G^DmbB)XPBEQPoDVc4NjXd13X6Wkbe)K+D#Jxi3^58L0wkw( zHRE^}Og}T}hTW|-4K@BF)q(ErlHVEq5ec~+gXNVEFi|a9>**#?r3rqRk43;qvy$}@ zqAE~H@ng?ow?oGD*{ld)gf%nQHt=fwpXL?cPgaHc3`~7AO{jYQIYL@i)P0=kPreA# zu3KO{5zUwVN2WYg%3mQe%q+9BYLpgRRr_mfGQVtV&F;3>#N`{LByJxcY1@3i1r_y_ zB{Bw#?;xq+2VGWAZ-0-7gz4fma*gzdydwin4=?YB9}$Vh^r%kZtjX+Qw=ZN}KSxsa z;^8=w8Z3Lm5uTIcvUvH|EkAAia!W{N*sStiqSF_{y7$6-;?5(Rgi=w-T$6XJ`kE3P7arEAyPyJsQyC4dg{9<{PBf=BIJX zAxdW*q;v+giOS3R{5WU2t6+y9Nk3bF8$2cjR)I8>Sk14=>qd!M^SBn0z2-}|F~4X6 zt5#49`mYTs&Z11Ky&fAuu5Xgs1gH8r;i_FpIf9Wxms={QF(8d-aMa+VX=&$Mk>VtI zdV%MJclB^>9GZY7|L12?2|T8vUp{yxg#b)YbX85ZY0A?`5-hDkt(t{W9&%Ss6Fd8F zRLBxQrD=D*;l@~tWqTos*;Es`8$Bn+He31)if+?wG4vSi3pr zgXk{BJQ65g`{@X{gsP8g&cqL6B9`GjZI$p*L^4$?kd`4S3jJ5kBO5RC|6`cQZs+_H zGWs~2jP`n`JUod_*bdh!1M5>E^qn;z{Ufabq0(GE#E#Iv+`KVq4v56Tcbo?HJe>Ue zWS!XGzf;E^(I8+VtTRBV5I~|gg{?~UA|hGaWdXFcf|vRLVjy@KCfDII6~2<^X4p8` zf10B)h4yOy>09oEr$@kvZu^vxi8TR{dS7QWjTtI^^#(Gz;2gO%7V9^0Qa!hQld5+J zNNKVlyK#y#SKZv(=!AYnZft$0Cpa;drtvN!WxI1~PjmEr52IDoMGV#Zybng8OUJ7L zeb9%nN`nx`hk0QcgS&Q0lYo=f!OgXywYQvH~fc%G&mkf6oX-d;M5}W>LQ_Z1{0wc)o0!PJlG)}5FO)afe=VPEqhGt)TVWY94v@yWGLvzgU~9;)haM=S=PDpkFol;HL6%qJNTV#8HH!_1lzJWI+?20tP_0;+u~R6r|?~eC69?(H@)7oM9e=j;ansx3Sjc)8VC-rmA8?+qAJYP4*Rhl z4%OLHTfwA>8a8gmO^@gUx!P5UH}DmrOW@?CIi_))rBNUV{NGV3=@UTmzS6|CI;v?* z>ZhKKHW8RRn%=UN^oV?FsYeaF4IpE&+L|I~x98PeqwSc5JD^M{`ojdf!TV0XB##^B zpJ?k`cVUu?&Q{Wf@4K;bVJ@Y_@994(SFqBa@5LBj*Zdi~LjU8`$PLYn;C@UpJvctP z)!PsBueg6;yvCFVG0Esm4Z#luF6`BOxDS9Sa<5`~6d^UdKo#shV4-%jQ*AMHeo&qC^rQ8{6#{M)$G*pOxqBjJbbyjV=c{X6P z6Za_$fc#mVYW8pTh7i?FIWp55FCfwa#)hk)5A-ig&p%&X&x?q(#xj92TRD}iI)In* z;wum5KV}GoMC#t#o(G@4f>9~vACIFWB*}Z*81^a<$!ObUlVA#a4X315te`h|)Z(_6 zS^N#p$-JuAy2m#$3$?ipv>|#qIfP9W955|=8xq*R{u%p2A3*7?W&3y+)0IoBY_-7j z9!~Ncp- z(W+*An&0bCIF;%%DCyz6H9I|5+hRWNzj3f?TZj4rqo|Cr}|AnWiK_Vw-MVJ>boqd8WHucqTrMo)bFIBp2?fC>C z;nTpw1qw}bL!@-c@jp%Ua7g}$xxD2Hzv~*U{aPQyi- zD{AFkcv&hH6JKGiW?fFLoYsF*J7f89iH z7T`sQB%RK;v!cBiBT6%`E3eNdXP1J~sN-2NNPB?Ng4Y|2s4A_8SdJCtip%PT z#+CVC{zW@E9Hk~N9uRt;ySRj+!$hNC{^k%KO_T_wRPcc(Cm^udzx1k=aOvfy9XTs zmy3#Q-P?b5)zC7^1DFQQE?%voD*gR=r+n)oGY|IPyTgUz04DsgI=tWN zMF0Cp?R9-LaL!>GHFMt=s6Xe_Mr-K5c0qMj*vETf$kt@UlL(b@-8$-mM}nU;2bYhh zp=5tuEh;|Ke>jozEuYQXGdU^ac_`UjIC0fC`;k?p0CA@8ftm#wESK%)JnDdUSC zGdPUa-lZ}qy@b+Bj+binvS)#8Zc8(-o#XOc=kJT^{r;m>&Cc$l4|*z#PQLsQB|#>JP4{M1=!0^lb7%Yf7$>r^ zDL%lKDIj9qEWl6m`18~02_9APHn5ee<^N#7`s*?xeeQ>As+gJIhFwbz_;>i?~swNN@b`v{0%0O-^3U~iGU*#YoE19&SUC3 zTvy%d)Qj&yDbr@W{{tY8l@1N$N2tH8r1KrEx!zm%(j_b?}g?Vhgs2ljIhdB;RRyWv% zN`dGxOC)?k|GfdGD%pvML_rcx+_X&5f3l%TNRh{XXJan}$Rabj9 zn83z(RMb@Y98Bu^jD1Fnp@g==+SPQ8H|Hh&I7xmucP^z1LOHPmG&ae02g=^ z9KVM;W9&jG(XAQgp-Wm9^~@G#hB~hCVvO9(IKjqZ1<(~~8?j1WNJ+ zHQKF3$mE!8t@HT59(|(nuFcDQI%5}CJ|d)L_&K?@#@Q~>9KuAb#sF1Sa2xuUtZ-1p zc|!Crl@++@jn|@*2pwzn{u>c;!dJ?XuM`eY=G*SeszKMoDa^fI-UL-WC==O;=Y(W4 zP*wY}e7cDfxS@yAkx%UqtTAdEB(ocTMc>GmorkWJPa?VsS$kX6svVe=nw9^uZ3tl+ zC#9;*OzCm6{YOhC_q3BecVZ%wTFz>>sP0Ck0c6_BxFf#3eMswD~}sT>}w&gO*J+o4n_ckhtm9k2*=;yW2> zHHbU&x}2>xD&CdXMQvJd;&eBh=*^Vu;HtWH501pz7SY@zac{z)mQ~5$2a%Qgbeo2E zKPUwbc!>M=1Nm8mYpQ2`u!n8gJ1G?ikTMgHePvewXN{wolLh zH?|3^M7Zj z4plArz5Wf_3_nLr_E^4HZ>(K7zaU)NU{%02TUU zY)XC1f_D=12~IxOueglf;W>?EXFI@7K%b$K+htqT`Om?u(-?5FVp0Krkw7MD+whn9 zqUM05Ab$lebh&EUBr~n`3?$i;!cC4$`6kG@MJ0}J``wj(XL> z3hRgd{b|#wAEDICwR(l>&89{%)(CaLn>b# z&8!Zb7=qQHJR+Z`SRfR(O_!>}$$&(#0@t{D3Y4OL>(@kHtF#Ab6bXTZN2Z&zl1xK_fK10+hV% z^G{at7Wc0^AMA$aKZ!|7)>I8@v#RrOiDab>XKWJ*qqd#unJ$3H$JF6>@@Lq&5RqhZ z{+1glzX;XWcsF`QvKM2O^SYXxUgB9YOW&;1x-SI2qyD08WnPuOlYNj;S3r8C;uR?c7xT)^(scAFU7AJXMF5 
z4*{a0<=5S`RsI%PHz1{)bNBosO5_?axpztf0}$$3R9BLYxT=}V#(dM%rf2C3U@GpS zGieJDvAF+(uF(H7{{U^FM|A_)7?cY9v!7{_&u!6J+y$f(neTH4=2}f>K&DtO{$tug z-giZ*3VkmS*-zGr-+dshld{@B1SaQo{ABeX z4`Y%}ih1>hk0KJFiH%CQum7ZG&!x^kh9Mox`SgVTZL5ENdlF7UudIgnRR1&0pAepg ziD;F3 zwdxo9Kx$t)qXb+{d$`U(c^M$dS@FM;&z7*il#YE6B$)LzF=#WeS3}rnQOdD5UPF)+ z*Pxuz`8u2oxZp32`){C#%64JOf#($OjjR}lpp?XF`emi_ZA?mH(9x~Z_6{QX*jy$1 zZvVy1=#Y>aM?jj*!YNA*2GIOJ#wUq$ z8uTYn612u%dsX4n{JeYnM76Qcpi~t5I2HZpaEii(g>$jye}PLD_72mDz62BEQbsV9 z?W;ZvMt5cKHC(Aw)u;kMr_I$F1^-^B(p+Y~#aFg%DpH)^K?!p^Qvo@CkBDS1liAwo z^#evJ*nCVU@GRB3i}EX>5&X?U3Z2427XT%IbEKe;0KCiVBLm2haM#1J_z`ruYim_U z;ZyJ{$0uu{nQnlRab5Rc*2aNjK*8>9gXpn9dXxbtyv_gp{O?K)Iu4s;voK85^5O9q z!5Y%~Gk#7$C9}0@ga&+KPiTK-xHt)sQd!bA#h(m|*?EREwo`zV@5(Ew1wq?f6h>^P zL4q!?PpOvXMTmqnH(8Q99hRj!%c6341}Nagi)z*JOdt}VDmYO_cxR*Ja)d#vq}Loz z=mYu63EisOVjx*|VP9)w=VFpShv(MP+j$5@*WA<2_nZW+AFUK$0H?y$;V@1XRA(>5 z$)R0AwP=1(&>Zit)rXT9deiEedEOC%jf`5D(})V953%*uiM$G z^H_-?g84_vxV0)^dppu{HISq-`H5-(PEZbeH~xdE0adT5ApcO+XdSj26!tijji-lJ zzrO!+u;37wG}kp2b-K`o{)MZCR?-K+#Jrq81!VxPH2${M7d8fLYBm45fDtRQlmK8s z7HLMCFe!?fdU$0dL`>3kDaRNh&Ee|*X)DgwytuH#m1clYh$H2mJK?$NV>=vU5=aR; z8PFPIbt^l1ZjHm!q?Bx@aY>!UqGn2&#Sm!ilr6n~Cn$zx1Bz|8XE8W9N*ed(hdp&I z`;Ga*@r%-N6Oa-atJ->V4+qD0vlR63s+vLF3ijpI9j?M)fQ_h!ED9R`VTssW+aK>p z02UYKN^iLnO#A3I{!sx?$y1#iQnT5+^GzG8>9zNO)gbF^(PwM7!o7&9+qJ|<2Y}4J zYB>5si{kz0B+3jxrFsBP%tqC?7e9!q;;2FAAdcH_r^yiNzvVqW~| z=dHudvGgTOYI3(HVcI&ASYF1b+%0vPX~!A#l>|1>Y?}vx$mX))8d_iNKWTmq&y`9^ zRp?n|oii}V@pWvP*jR3gf1?je3Ab$&dK;I#>c`t1k?&v< zs^!S)A>Ivfd3lSs(}CaX-&iv`Qd->mA=YqmW}8qi;B&zGA1Ax>h4L+R@0z;Ua0)$Jyqbe>^I+r+H@U>Q3lCWL>U7NUl!ANwQ;bmDfoD?r${Q zp4@-$aGA+EJNy)k#y(xMvs3$DYx{7C`7{jiwRxEr7J;%~x2m=s;-|w(`Id5HzdC|qN)CkYQG(@+Jl^Z|}$5Kht{<4~V4Kgj&D7&44e=Q>MyJ(KJGC+q2l=ZsO7CZG2-N2-FPE!BeE@?~W3$%8r1kAC0E0`-kKO_0Ij2cBivxm$;5jpooCQj>dEo~b0VRBSc2CraNfTUhOYz^JG z4tX{ZzG+vJp-8dVQp&y9cU-@U_etvScmJVPHNTH3{ z`h(b-U*p@o4d@08P6Dp>Fzll;itAQO9<7ght~`Uk=OpgX15tnWVUoBT=J)TN z-(Rj}kKvL(4uhokc>hlIrsMO^iT}xP|I?+;PvNAAJD$acK8%^s`F}9}!x5uBg}Jx; z*}T4|YUlHwWuf-V8vlW0VdrQ&!T$wBq;+-4=Kz@W+2zc#{UV%Pad2u|&R*)@h!*)W zEM;5j>Y`UXC-m}^6!$?yH>WM<@aEz5)?dH%YxV@cC>C|Aw2jxGM5_dd<@Fxd>K}gu zH~|mWt>Lr)gjiQzPQ^LoIn8fI8s6?-RY}Ga(mROG!KyCSEG?v+MwQvXyMA40Z`NPj zu~EIrd)SEdvRdY@G4_3w7#-$MC-9sYYpLo(XhD&6h|*@INGS-(9v$`*Pw6yepnLG$A} zrVpdlx3RXt;aFrOTDw-L3JGPYlA+BU--lAJ9h++<_5@V2Qg?p45m5a)5vQ~jCWXN> zAzQC#2Opf=zh1q_)s#Zk0i2v!Fnx}m0HpJ@@3HB|>BLXNC&!tdw4QwtGDSucDs}L5 zM3S%9P%LM_QZltkA9JSXipIJel_5WDnN&~C$wzL1pKU8>i{S;~>ga0AA6G|YH$gNV z)<@a(NNK?8pnvwSXbc`@)JU&^&=2RDI`0mpQU`4=DFQ1vfOhznq{Wbbv)NfOa!s6tv#&j-(cEr$HlDf z-`H$5li~m;!F4;fnksE;agrmq&2NhCkdJDz)m)6$!)Zdx%Y|VGl-k*;)?IGM*JUTm z2B>TD^Rg?iT02B>$j_?lQma&I*G5E&zIJ3aOKG#CT$dNyF8*ejy&lo^+2q*#hBx(r zRrOF_BR!#4sZMVUA*oA8$C|pg73>7ms`fTmF*t)&K}`ViF~7g9_)S7T)raYw%^I_# zCml&YI_ZKHIhf9OYq$fF-Yk?-X_h+r$xckfHCuahcK0vTvdCM1MVY`TV9r<6h&+$i z28)~e565b@esd3Z?UA)xz~se2%sTvA;nX7INZG<~L&(pvobYfvlyvQwbJd>!?&v?5 zooh#M+?fxCZG(`KyYh28zP6Rf-F*nP!?lLudoU@I;khkY7`+$Owb;&$SxW9h5;LQ& zGP=KiV`Ox0_xSwd)z3YEl=yUu)wMo|NX6Xl$SR5+?jamOE-YR=49ef&4)=CbCK5s! 
zfHP^{m$&)#HN)0pAR(W%(Te-y`AH>QUi)&N1k=j@@F3c!`j>20eqqyp|yW?1XFxtlU12Nhyu)t~=`X!;*2;+T{!ho{440aP8AQ0LYEQcguG0V*lCp zc0$NYaO%+i_Q$_qOXz=gn!18`1s0Nx+@d%LRQvra!?kUi6bD3_*ow(Izt%~+E_7{=&0B+@<(Lol^KEm{{uIubNwZg^Bbm z@t2pm)1ykNUnh{cI*It31Ty~!w=07EHc(22N^AHoZ?fr93p^)JD}46@l-5$`xR~D6 zHrF4K5%+AJD%VVhhyBNv;;HJV4)-j@wEwnRe*_r!K?{KW2`X}SWWHHhn_!RfoG>pN zU)Wf5G)}eGxlZpHbgEQNqC5th1pY!xITlK%GDu%gCdVO^-4=;IK5shhtCoOIfMS=k za?|{jTf$H51913M&38{iq$aPbBP~t_6~!yhoKASo~cIIVw+ zfw(!5Ey4(rHqiR~(*sF=^OP{o2%OYOR=0L0D8jSO`V0Di=Tzb?9JA>Ja98*)RjDp# zasS4l-;{;=+(*6~SCQYD?(aVv_=}eu) zN{@oy^VIo}SCcPTWXZsMAE0h{)cJ?1oNZ{T58@1k9>f+v>*=hlDk}d#hlQFJ}sr_2;%k zdCtc-{~enHkb=)dXUd&0s@)xEGT%0;jos-vbtS#TT|Hj1y7m>^4M&tR*$z#*2a_o4 zfFRZDUIfw2bhq2z7bsgB%2(w6K7btmMq}t17lgJM@Pj?$oaxpRJcQ8*HvW>`!DCWb zn>MHmFgemew~KrqM8s3=5WdHNBzCDX8P-WXjuFmg=94Eqs^W%$wyXTDJ8S#)Q^>AL zTelP5(+G8r?onIwUZ~OO8I=6dfwWPY0+>kt&78i+H_!J>Rdn|*3zD2Mzr7Ag4dYrC^WK2dZViv61sEdV#3hqTq&Z$4>X|aK*1-HWhFt7mXuBTX z2^2}Q6TS;3&uiw&IQU+Eu9J~VgLxlR2d`ZJXDke!m7z;5rS3zZi61qeG{qpioA)e3rH+X`p@}I*gbLV+g75xH{2-djfw&nIq zlq5OKLy%tq>2#LzU^X>2gQrDcWnX@+#ZV-`v^6RaJ;-qhrOJPcmd{0X(Z_e5Mch^^ zn-k>s2x+hVa&1}lhrA%j9iwgL^dlm|sDn(Zu^o2L0>jUz%|Q>xNb39(?bPW0XQkBc z@i-EbWZqCVv7^9bey3}A1$Z<<8tmZ9#Qhk!YL4@cW%AhkDn%@2YVL78w)ad$J-){S zqvbz(Lh$V9_(Yr9oCwSQnScH>`h(|W#tkA=sbP0soED5;OiBfp#tnwNH&c}+pEgw8ffdHl7t`z}a2naZ>C+D|b zl;7(>Zt6q6CF-U^ah92%K28aG6gGS|XThhRI6hB%ZX?usbLgKqS*PE|-U=x;@$|{&HaJy%?%h2$ z6=SHrC%6m%O%J{oPJj~*IjWxHK1}3x=#kb#wp&u~$4a#RwE2)e047^kPPk*Brn3+B z-;GU8&AOYbIi)^?P5A3ww=^LI4`U)i{d1jV@F;?S_wH%CDS_y}-LV;^AU}rG$Gq}S zXHg{bcD&@D1SF%K(O#aM>g7|Y2w<<7A3Xx@GC1WxKMLbBnCfQY*6=Kp>|V`|xv=E) zJXUsV@?TT){b0tFg&U{a`K2$Q6BiF1MR@=gGVii-$bJ#(%1Q&TG3X_Xd@NhGf+pcP zVyHDb34aBVYGK1BT>><+*3P?E^GmIDmCw&>piqAG7iUr-JSP-A!u;Q}I`qUfOz_3^Ybw(*>9$^Pr$AgKF6l{4>=Fv zQ*5Gjmt|)z`wSNm@DZv0`*TDBU)oM8my$0~3xR`Q_MeP1jL}`b>Iu4Wt#W_ehfxdR z&0O;hN^T5vroG?h19q{_r%8PuWY1Lz>jy~eY?T&*7$?9k^UkPjCa%N& z(?Y(+=xn*K=5U;^)|Lm3hJ6H>Y;rQKb;%r_J zo@V36`XzE+PUUqwWpnd94%=CjZAR~MsWUzvUpc9REca6~z|?Tn>vwf>z=@cyUevam zmbsH~iDF%yCu{FSwZfCJ5~!0MQX;27$;<>hXtS-Jic-y6UQhF!@>@~|(=F=Z<(Cgq zJ3u9~^2+sVs2%yt@o#jFyhoF6XQG`ACb{ikDHZ)3Olp7bM%r8TcrjAS4*pF$+vnUM z=`4NsJV-)H_o>0{e6VYNb559|Xyk2IgVap(LMZ)1CZ-p`Nx-Z_?kj+c^FAwCp-cMT zIs7%9^3tAht$k}JOEHP^3N29{mLno^9>=wMud=Mf$pYtw8WvYU>8m2?W9AOz9nfw6Emu)8sY{ zC%@a71IxxTi;>!$<;%OX|8=bm!8K6t&i6u}9^FRLd*Po}sY~^u__g~5Z|uLFncE(n z0+S|0YpHeQ=W7Gs%~;8s-&g*mw*YA(T<2*Ofu-WqN&UBha=u~7z*Q^g6#1|!F_j{3 zswobcpicz0lWnE?u6#KEh}9d~1eA1G@0qfc-^>{IV5P(L!&auJOkX0VLIHYdvd!phRN- zUh*YW*SHP;x&+p_m-Cv6q|C^Hm|j68XTx+%$^#t9s_VPN^D08}oX+R%*MLr_Wj}r$ zl%D1s^M+^jMA*x;mp7p_{2jyHw(mozWZo&~6MJ^G4GVAg?`>FE3w{R`vAM~MafuFr zNg7#gwU+n#kjmCq-o=!l_mOE7e|Y6p=}sT`r7Nte({1+P_nIj8D{}tU-dDSih3I$d)9Mnz1zUV({GYT2| zvS${YVZZ8wX~#`+K&>yoPB20>G<^f4gsxPID$uw2wNBtRA=4<<9N`DxoM zHj~v4`NB9WFF)psHO-}ob>iL@Kw0{#85|y{SZ8W#egvo}%Ez{Ci#rmQn07NmYv)IW z@c!snTg5*b5$U(9``XgwF*voO^`9?NZpimolz?eHS;{;PR<%}4E0N>#JilKjaf0XM zej+#eoQO#HgDSMDFVQFAbliW;+wx?OetVpK<3Cl&vH4!-tk=2|s z0BPw9^Xo-^NDZvJJn0Rv2!8%2v<8o<*6lo__=cX@zrSO&&fhp2PJOAv*Q-vSgOLzd zZj_cG1Xa72i`-Jnos2DOblbcjBfLBlXvUy4YsH?-65miD*~#iT#Ll07}wmWEbB zU9#&4iOSci{sRhuZhSSYFiPR8QD`9FQ>p48@U{7|ylqNA>p+>RWwopjtVbmL0rtIS zFBn24fdea64pAH6^g+9fYu7*u=@*plYduE(yu#YyEF1IVC1+3-JVt!R+uw8YzCHwN zdU^G$Pn-HTOk!$vVgx~&tcbuEkc2I%HIAA{Y(*u$TDhz7XImeHI#ms8B0=bY{5lDw z)!H4%00Ab%Wqdr_Ub<947oAv@EV+U^A zB>|~Cj#y%J$n#v?`+ajh*}kU^AG-xe3_F~@VP9zVrMDu}LUgmCJoxyiP;+`q%(p{b-`OA3sA?`**4p+?k@*Xf^)Co5qpL_ck2FkVl zK9HcaRtCmgPjG+U7~i|MoqzuTm@2|V>x&2BRFN8AAIi&mtDi0Y1tkHLHq&3W1<%Q8 z>)d|20&|}q65?ge9sRK$Q@DirINX(G?Gt(uOoW#$r6lCX8#w+~NA)z=)yuX!n~4D< 
z30GgfoMzDf+E&@0rygJgw!@OWYO){dQqnfQz5uK6gKKFA9tE>x%{t0K|Ju^2a>RNG zPNbJveXb!rxJAhVO%!jgC)R^Q%FitrLHMPn1`)Ea@SQs~_T$PD;5RO&lMgYE!Q`k@YH`EMb`mD!Sv}3Y9hh>|yL-ojC1X+hIk$wq9-dBw7 ztQo{t2yxF%*2=`!K;j;Dtt%a%$IFIiN(IQvXErzX?|@{)EidCEWz_y2m9$b3n=9fE zn4J39ZYQ^q#W-9mN5Q~h{52;$YN484id?=+)HFX}B61xBuLAsWZb`Z!9ZL}5_F6w_> zSp{}6D3x35Y`aT5SK&1Wp-X|}_69l(&xhbP{8W8j4yOiBjN66Elz@mhHd}wJvwT-! z((JZOxwq2=OmqUA)Q=kg1UisFnwdzmrWGJ0p-%6k6XaLj^=vY`9+4sKtN40ui!t|;|>2UwA| zBPOST6vEgggO9{!ub6EMJ34d@C~&5mpoUAxYJMi zUWVj?%173(09|7$AJ~InSJ@_1$5LeJ0{uI-p3w%t)IDvu8&6)(E2SF_t)dY0!IWaD zebfl`CNgrEasFy4$cOrnEF?^4viCMFGIy+@$lnRBKC^8Ed$<2)i0;8&tZBZ7lcVxL zqYwZQ?(hUvr0yX9044v!lg-`oLr9h_-8b{`M}c~rCAGKkV=(bcx-@>$V}~;;nNRb2 zb9*(Aeg=wd!ylYA`Fa1vW)8Tt066lhGwE{p?Uy)ZxaeQd0z5`6W22gkD8I%i#wDd^ zf8#m1VcXo%W**<7q@O+EudCUA2g>|-JN5PZeA;#={@}SXADyHf`|4ooj$pKi@xjz@H*IOdk)rU#&iW%22!(*gSTxkuKRei9;S zSUqQ+Vaz-k6R{KA@cciN>H8F1YWtN}$?vIPbVq8VV;#$YP?pTo@JS6fvzi7kLZnh0 z+E+3^9hN@_ikDh>1{Af0vyKxyGgCXW|7Nr6BIyHg(&OTmg)^FPvb5+=`JF8GEX?_R zW`8UMP*S>Mk0{Oy)X}%oJ!9TK2G;G_PCy+_p@Jy*vldw$IDKJ!o z{>N>jyGURfPReIS`6YOLwrAKc+pgiA7=qEWt@$(R0hIbTFuY6V$_-^NP81vj1#n|t zFBc}&^(LsBe0As2y!C5J0+uk^tx(fC826qCH!9a6^xvR&!8onFu>SYcjx(AWcNqG^~y(p0#{F}2vY=`dole-cPMn|V5yYW>ttq14qft+<{J$9cb& z`(s6Y1{Z0%e~;n-MnFrL{|VrEgl02dt4jMlchYTMd@sPMhRI!<{2j;#j*+EAcurQt z*qvGRQvZ<&3J>{a1%DZtRIDzCvsb{@{TS#e6_CMh`2ba-e{W>|w^WJ1!EU$S>tO2K zxum2gVOYIkC!$#=TBfI zlCh=qpidD=q)!9DtPduGrST z&WGE}GVzUPf?85lo%JB!x${gN`3^{oONaNgJbN5a-Nn+YER(A}{3noFjTneln%IXiO6wSf|!4#G=t-Tw8$+ zIv?ee^My2RpVIPA0TsgHT3tTXv&jB;f0P3tLDkWt)!!}3tCQt`e)>o1ak; z0;jh7M*hwRyFJGDu>Aai^{4DV7xdp+Tc<97$rH2K4dtSG5k>-QL7lz;L{`qTs8ew- z$>&YAWEHymV^&L%tx9!98m7dQjE061}#KtW300+EoL z5Y^|>3)@QE&9IE@8bA1Jz1c00giMvY=B@d(xf#$QI2s`2;o#qu0(d)+b}>1+i-s;z z)&eB))?!4#J3*l>S-Om!z;lwf#%Q=z%d0o;#uD+LTLHSqql7Fe?S~cs2}!!Iu3q{+ zFx}J@ZOpzO?y77Z)6hwW##%}q^}d#q;|AC2SmfP6!H$gaC5;iPQblvhg-%TswrW>z)({?o9`HBDPB zd?r7qf7Itb3wMd;ep7}0JVu~@U?Qgt;NrK~ZJbVvdI4P(ZL;qFInci|K51RF{OW6r zdl9W%c9sGDCC@3xrK3CN_ea5f8JQGVwb@yscm4ZXLm*$3)<3`6|2k9WbG`;9 z%<^jNE=_wKsi@bLeZiwLa5hTKmEH^@ow^YI5R`_jieyv%+o%NR5;Ib6n^)dJCW-S; zn4rk)yO>D6tQMo=rG~wSOS1W@GcJG&s}1U3Tjj65>Vv$d9X02H55bb5F_VG-BqGN$ zHd%b!{k*eQIX=m!QopJE$3N}iL@CRk1*&ar$HV8KMr%Ib-ADBWBK4XNjL^P>6It!U zDNE&7`DCQ`B>%eqqzaU#04Iy1TU4fRLGtTvb!%GrcPQBzo+uxl?*X!}3zk>V7NEWz zJ)^HKI|ELd>d?v7Ngj6pLW%4gb$m|gBZv1?b@p@RSbYRaGC1lFR~0)FOhyK|b5-wt z6h<7(Rus#w;cR+SvZdYlZ>`Gr)|+YHm?wDlk;(40?a;BY6B)U#gv^1(5QI= zD&m|nZKXHB373m*#)`OfjKWNFAp>y4f5_S{oo37u{Bt5bnQdu44K*A<loSjS52O#lZ)~3~G!0Gge_N7DrJqqfXKZVd-55Q^|(tl2VaeuOSTw8DidoFRhg%2+*}7Fea5pamaIo5&?~IUurhez9CgdxK^7k{ zNzDds-n{6j5Gbi-QPCNV0I50c)VHx7wo|dTg30OZoD+VnJ|Mbo&oWne^H2yTkOKVu zE!EFX0`jU;*KJApj{G>>wlz%yWZjL-vrtkyYAab8T6bay=K8bFp-gyI1xFaG_j*hs z*Hufu5fqAA_9x!E2}&sZvA5T84L4&_W;yP>W=OZ-x}5MlLEPGZrkHq)Q6(_4VF{Wf zZwFFj^e*+{jvjArekXVKxK0G7O2A3Q>T*Sq@w+kdvSWt<`5w>7z^*!4_1-{6p5|71 zA1E#*XoBMB`{8cwFgx>qr&_e@vLDEsa=woBfhqwfr%i`%ZeI`K#JlXuHB<@DNtK0d z+hwQ|eF)An62d+>DYN=amjF|~bg{Yxpqkg?NP?MvqU82uKA3-TV!k}bo&qDM>Wi#H zz>%|#X-18CChtpfGmSsn!^`UEr007$Fe|40pfaD5k{3LtT^JONr8;b(9l$Djmo8Ly z@*xjhc=r9qy0jD5r!>nFOr~D>P3U~{9;Sf-Qz4zw+|35nHZDf*V zN6FlRZQvcOTsZ!P{@^*`*6DV&*iL`IrAeDf(Qzf`eN>XOt{oQqK`{MSFQ&u!u>ZvF zCH<@l{Shiz+F-IIxgR6c1bRT*HSkIQJC&&F^QVZ!v2;)AC!fKx#{-b$KL;YRp;lU~ z444Y899!w>U-n;bV4kExz{$i)zDZ_u)+kWj^pfYJ+%6np)vEqCeodpeikn!SPd7R~ z(zY6Y>lZOOh+(o!XWxOz*ba6vqW>O|?qZvWe}MWX<6e?7LH>wKZ>p9Fr8CkT_IK?= zT69&m!=dDVIsJli0avT|`_j0M1f)R$wOQa%U?QQhbbI8E#`Sfuwy?K4cnmhF*LAd)7bHpe&t6Uj8wan-jj11I`LO)`z5V&u8G=$-62 zS>gJp`kVqwecLWULpc>p>N|HWxjzk;bPB%A^@|YcDaz}S8dG}d=}0M!yU7{-C%a-Q 
zJrf}sU4ct}*(g9cak|OqbY{ZmU=n7{P&uyFe0XubVslbaXd9r?Y!7yqGex%k^vi+1%7eBm=g!69Y6%I*@+~o z{H2J5w`7tSrF1DK;bm1;ce^}amG4Pin8r+jNXo8uG~V9fT^ji+oMLL{B&_zV2OQT8 z4R};aQXjMIS~&IiP3PgQgA!RCsWnr*!1{bZzI%taYT`pZo*CY($Tq;qg46G7IpCU} z7&TaX<649$G=Ul`HbTm4S$C!3b)cmEf+xWB9;;&QmikSeg-X4u_DZZH2q~iy_sfK1 zsP4AdKe*b@2mi=U;kU?PaPZ0wX2A|4(x}t0_(f%IM|g$Fu{i2y5xs zNFFWi`Q5zpds3XIBmC&P}h4ydH zqHu&>Qv3fO^Bln&CaV54N3zGU>7CXz)#^#O>o&^8@+q+Tn8}(rJ>9>{cdoPzIH{Pe zBM{gv@^;PlX&V5+4c8@6lnqF@PEl*Cd@lqx3CYg^Fd;3wazpu5zKBS&%$e?^Y`~Fo zb;LC=zudp^)@^UyN$cqGz-Z}O2VoNVg#_Q}!?NDs%Xj;*7Eb?r z53|s@*KwiEV)Q;zDlT=Z;6bp|+t=dr*+b_U>dGraId4H+iPacS0`Z1}lBe}Me zEB+rH_7yVa$cM4ifv*usSazp;11H_v%kSpf9yX_e?*i9UosqKdK}C4h&#kWi;8_O7 zodZRM0E9aKM6=Ub)OaB2zQxp2fgTQ4&C7v7M?fjwg=y)Lm?X+ZLUr>fIEA@obgM;? zD&V7W)#+|Gb{^wd+-LpjFE&ycfRysp<2Q`EkG1VQKMtFGwe51pgNY&L-4kG;HOtwF zo~2INT*fMTQhu@Dh8@DQKv+jxUvo-cylS#pEl!1!4a3F!Z%N@ajDi~)q961>md&iv ze>z5tTjw9Kg5f#&YqQ>esw#aZPAbY;Ljq^_->>Qp)j0)5rFI0VFBc&QZkd^T z8=WqO1>6+dzoi%C=juvZ&v~fQN#X4gJ}=6;y$| z+y;#{rd)&Q2AX$s==m&PxJxR-4F3Vn8Xp8i?eWusbh&{*!lMM^8$mF=W( zFX|^F#G!wz`mXC&Q1|(z>TfNH-0xYExKLU>p(=n~?Xcx_y!nhjh+B|UJAUjTL=w7n zkH+Z3vT!}zzt-Ft=nRMmY;t~|H0}#_QyiP94&pJGyw=ep9{1Qve1{T$vj6S|E)Dbr zI1;E?NKLVx#w10Pqr^mOK*$9>pe;o_8>mSS&Z^GOgB5x^%xgc8%27>JJLz1rUce{k znS@mV9l#~#t0trUy@*K(?x;M!1SX3dk+tuaVUk_+OM22Pp2cc)THX%=T^{CGLUdfO zVp3wm6AqLtE97hWV$ZHR&+v6XP`^4$P;Yo%C;_LOQyvfrZvKh7hKTZjNlF~Oq+H+b z->`1!zI#doMtZD#3gF!U3JW#hJs?@Pt<-W(CGY*7n{>77baPt#04GUPbxQAtp2fF* z)X6f{oIZk+of$QrWZ5J7AU1EEf7H2(isBPo5^hJ^Xx*6jDMr(^3%!#48I){rZ&BAj z?{RIYqCUV$s=l7Fu!Qj?j&vw-9QCVye}za|>_Uwd^=p*OANsGA-){hgSohx?a*F)7 z`K`_bVSLDMs?NH)D)4(y>ITa>mBJ9zt=RksB`H@};s5nZ+6tyYcUn0(UAY1Lql?LRTquhoj<@`4Q1 zWXggD9En}tRd*{IxIZvB{S)In{u47keTze}wJ;L`)8esigMX9U`!+cp`U36hUq z>>1kREp!Y-lD3Qoco{Ix!RS;rn&U3^nE3S?%>YR}=OV?&Nak{SN(NYUCu?WYb3Q`) zxY?&;y&!n`_{cyr$6pAGad~&IrWh~6q)Ql?qS##wY9vdmzFd-zb1oKqY5(e`aSK#S z^RvuuYS+FkhgBAM@G6=GBp^%ilnT!&M5~ZYTvgDkajNuAt{oJM0IT>?v=&JHue&0e z*ZMkCYSx-@JG|)@2&uMN&UP2dMIbr+u2wHLfWo#sq{yxTBxJ5uBCpMRlDPBtw7`vd zZ={8B9gt{;cGbZun(Otbq`T(!#viS36Hahv)zup#KmGn#=Kj=(J{YxVS6iRhib|zf zW3aS{(SPQS!oO5g)zS%^46WqHRT-KD#WGO?>kiN4YI&o445stTSdAOA`G`K*_JHpM zlUxTIm1;%IyK#xTwuM^C$mh*B`o{kKS`CXo-c2~^xV)|*P!>0XDbq7AWl(_Bo+Zj(Y{aZ2W>y^~@+dOu{y0=0wN#lRPMk2U?%)-WSS(Uu5I0-_&GlUqD^8 zLQUvHsT;-0Z{u+cnNH=3m?xnw8xNX|IpUDY&~t4L&}P3+_Z&0hxFZ0R&NDa?VGsD* zvSdBm!R}tV=fMcj;8jzL{fLrv^Z$MUP{XFm4s^h?N}`j?Wlb*zle9{twY>zV+~OdQ zdby{}DWe{~f{NC7)#XE_Z^Wp25Uq-w^~aP3kEuH?i9904lz4XdWW$%m1_8C?=`G`8svFD?i%95y@pqubOR1`UtFC?%rcuIns0DU*7yNj)Gek zQ5w|I0VdF*ItGXUM9t?yb2}DQshemPg5#hR{SCBsb?*3lo(*p&07*M-t#x-N_Kbbk zcA?T9F#pTZ$7>2)m*T0}Z2Yo*vY%3$Yl-WWp!p{z>o}ZKq4ZVFf9*6dG0kkAf4u7P zB8;T3<SsoZJ_hjk=CaGK9*3Fi};=C6vqcax@z6j36R=3qySL+XQ_KndDP~tXm`;^4>iE+=#8p6bn?>wrU~prP^CV zwZNvHZakf4(SO!XX{fCyJMzNXoT@wxrI>cKgX(AD1bTV($_|ICT2=NBKYch| z=OFBblip@3xv~FwS;IHMBxO>}sl@YUyWsvCGmkoSCnjkcC{K;Mz?6778n$HJoo~90Q>8!L z(}%+GuZ_F+Vmdjuli{Mx--nHym(~IXBqj-K73G2clT8+^N85RkmND|lUR%<28V^B{ zyhVOlc^FRhHnA;}!=rhjj;Lwf+dh~uayjv1K(cm44Gxck$@@$>bt|$b^EM;223L@;prJdunZczvr$U&Fq+e zyyn+0^kEEc-s(y-?pV|gB+Z~U0lb*U!IhK^k21wG&DirYK&HG1%01^5NEX-Gd{$O~ zgP=6Hj4r0rSK-84=hZZ^zlMrV!r7C%P-kc}Sz#Vd(SRe3&1_jVvy{5@X8%&1 z67ffe9qM1&KF;YqMesJVCTu@rq3|f->snF08-9&!z89X@Ri^iW6wBO>+81aQ1EZAf z)hH!ad;Ktk#9dh$40QsLdU5q|_1ho!FL1OjL4JZDC7n`f(VzA|cL%3@hA0ipr1Eo* z>7JTB&3-3p1TJEmoU;~QqwJTc)TO00&VB_7->6fueGMfGtIF@2q`tuj_41!itDr^n zLF|}+juHWOD%P_GG``a&Ycl&iK6QL@Zfd7G_(T8Qva(?O2qv=f+b=Kk!~V;HGz^QB z<-=i#(+8HP%@I&C(>xz)js8efqFCa3HN;Vf9Fnqx-NV>fr^HlCKRO>b(|WsEh>C$y 
zug|*v9J+;PX{qgh%~PDUA|EKK|A(ymkMVTc_e3uuBI1gOaYbBljAJ)r_Yn~hBoQW803ZuB!f3s%omM`q#C^IJWB;*RhQlV_e5JJr1tzIJWH=V{B1~h)5F= z5fKp)5fKp)5s~})toKuoJ()`V@x0&P=ULBMpY`kWeAZ`umfcv{#(r#12}S9sW62(e zS`s>S=b!7#=y+TtyrRryCxEHq-OZ43BJ8!Bs)3y3IwDx*7_6#+lQD|ywbf2f32GkX z6bmRXb3%`A{+bp~>+i3_9Zv7>Fb8#W>oQzA05iuzH@uvQjNnFQcJ5O*XCbt3W_QhO zdH<{KP+y~GBl70*bk9U~PM)AWsLOMK3@;Uf#c12Q=y}+5(Zy%VK-z)@SqDMqn6M1DLKnoH^+hVV_0?R#N0 z7~wDb_{Ik)8ZhAxmo~f3bqcksDS$OvtncsNy1k;}Z-n!Ln$AmA+k{E&ZfUzVKnXhS zn3CZ-9pO)G>uPZ`UM5y*w(WcGN?3WAoy-hZ0n(e|Hc?9M)u2Yc=T8FoUV}K=YiK#HJv-*uY9F+lEOkSUuWh5Zr-~rd9Zo#1zy? z$22f5q!tAOr?Bl?U^b7_Me0xk&3>+bz!652iE<#A$8}?L?NV8CH`$$Mcl8Gj??t3* zcb_g)3{>R2reu3PDF3VWwkY^D7T$nK)n2xx`)q(9NJ>!P@RU zZFN`pTI19`)C@o5#j_LLIpnvZ6w=OUT88Vq!zC;K^GsXkI=i25xz*1ZHL?4n3Qf=>D%}8PmONX1${Pn>Rx2`^50Zm?}JrXMWLq< z8CmW}82m3Q{lcZfGj7S(gZ+a#6kX~ecxeQn=Oh6A0c4uV>hiYj=Ddfo3V+65^SFM* zwLHqp)>2#_15?@xfo6|1`88~#W zvS`ezM*k9&n%Fu+&6mfQ^SeC$%Zr#X^61J0mju!{d@?Xyc=QS56x zeiN1A)H1=$bu~#G%tJeByBre+BzMdXs^%RaRnuZ8M)7zTsc2_jOTlnSr25YM%$EAH zc^{!T%g#9C46Ys@K&s2eWx9py2yM0BfZib-1;c zt!lgHo3{H6E_ttDtTYUGsU-=uLB~KO>kN)m3$U8$S^CZo{j(NsJo_VD@*S@U_K*h} z7@aH6fI|U^P<`8%*kPcT@*v2%*5Scg!q`H^|2rbLa|muay^?^thOQ^^I0tY&kY{ZeHl@m1pB5%Nti8ItR#$ zS4>-^v7g3FaV}D3DQ%Ud?gaTP_A7F`9gRb_^g$bAXOB=#tzQpVu{3=b@<<0euaS0=1^l>TDgZMim2} zcdmz0rw8wgL1QC^tn1UalOe-Ztz8*&`}$UURgxN>;A_#g-CvD}4U^P(wBs^*yk`(922jtHHP^<0fw1sQ@at$)at$4O1n2yr+>7gZ}y{-u)P}1dHs5f7jv>G zE7c8%1;U5eUt|PH+Wk|fOzcmrt zZigd~irFLkcjVdi_3WLlWg>@+-<3NBfu0+0lDHd@wpOPV2(F~D+=EL_Xv2zeFRW0j zE%ZtIz9gajw=W)A0V0`eVGre+_(q)&QIR3{$!G?mA&$-=3po1GXZPv$4N zzd}3$Is%+QwkEBnz_!)br}1gjmu}amnvW$)#^y>*e>T@EHf^F9xK87~@XzT6kgSx` zV$Jq1fRW{(5Ja>CSZx@%DJU-mm`&y6qI^c~eyCUhm z0i_CURGl!Ys-!n@^17v(|G`}EE4#znuG3Ox^h`It(?2}x=pAt-<@sG)nzd=U*+Spz z?;34`M2&;*SF4r$EN9iD@x|CW{i11&RTfG-<&#ZPldp9a7O3ACdm|u9MkKrj5nlRtNe4 z7XfZzlB^GzA2DeoXD+8C{Qc7Jv(7%L)we@25#a8b8lVn?z2UNoadmkA+lsdB=mnok|(5G-ovzcEB6a9(`Aa<4O5{$!BwZW^Qt`sk{2ubB5 z{|7FYcqRUywkutWi}q39eme3(=QKIyhg29x_v@5k9FaD{LP>kb@7X_5)5*5{-r|Rr z?f?=1wSk+T5>3Gh>=-&yn+D|l=NI`Hoz0CJ@E+ZQ$^hp*qRB8cfZbSAOR!cld?oBg zN^HBWkGv&-bomWtvi(6kC~I}-dMMREVbNx8$gg(L)SPr<|7+s%-qd4JB>odkA-J>~ z=e^Jr;MB@c9gR{x=(nQMinel@joN=3u4~$io#iuhJ4Tt9i<@oYj-*pTXllN#dF0OA zp^yDdJ#bfG%~93x?*_XRpRXl!sskp~BWgwU`+Iw0MSa9jA9{k0T`sEkBeeaC{(?~< zzZplKjQRS({^pUn<{0`ABDK&4o&!A`uW;=TgQ*mC<|Asdw8ck~1_Qz9JaYd$Ec1{;@X_?M4yh<=M>)-Mea#t>T^KfcnXq{Rh={B(_qqEUAyk|i)V6!@vv?A zdlpQ;uTi$_In{Zd!x9(sbsgyWA88MWwEK1&n@6eZD&hq{c>9i5e!KWPS_Bj^tgrJ= zUjkFxHh@eEV&{RE`&(@ErcQvG_Wp|H@|x?kKK5a)@AcferMz~?=M7MW{MpZ_5&fOD zNI{i=lf5n^?9zFuZzqYB+)iu5>N}toS-ryAOAGP!E=u39?4#W80TI^iaI~Kmulf!S;qC)PhCc4 zYsz5vS$?@>qlFBYNwMjA6bVGvLVlCAzAy9eTFOXmul@>*z$fOMGvB7*uW`x7?shou zH}FzpoEtB5j#~K^nOCjdxN+4GRU#?14~?bWmEYs?`tij_ca*yM1BUQcyMrzPbmM4w zSv+0tQMDd<$b(BS^rj^F(nO9# zBs06};9a*Ig;DtCdQzT%RZU0tk8#%?Gi4ov$e1L0`Bru*{t~EUWkXflaY@ElLSra* zRrB`oxV&s`LV$Sf;slJ!+*0G>iTx987n`qtdPaU11cQ!|r)w>mpekFZV91T<5mB7l zKVD9mHV%9IG+gB2>nfn|^dALW(}xJbf0RM2nA3?<>pt^8V&a z1$r?)o}HUDWw}2G=p4TCouSf{+oXK1Uotk(cPkiV8-&ioMuL25?FUet^HIs|#uhUD z0@$FiKHI}Cgd*QlufC9`(Zg-EU~(}iX)p zHBhIsl-arVE19;}VTs0w(*@pLkI>h4jZrIH%BU!F1g$k}f|Iq~b?*02epdB-Excd^ z?)uyBEPH4On=#3tkHV(!ut+2k&E4!kxd(ge9YLuECxiLU$BS$mOp2T_9;Hk4Hxrthr{E+gC}qXh zRwMsP)c4B#k5T2r7^vtkNnd%dJF>0;t z7T@nWBTkvM$Mqkr{(59Gz=q8GohWWVrFQFF=NqAukNn>Wj_ChD=dFNZDq*R0-sJa) z#L|Xtd^7Bg3pJJA(x-Hp{N9>h2Ik4)wp_PJUAMbVIm*skqr)ATNQp$@C0K`y%>|6ZrhvCT883Q#Q(KJvbb5Zra$6U)-DOTl9 z`8e3MXGdij>~#Ala1lbSUd=BELcuSh!Asn}-Y&u*UdiZaCv#Depno zNvn?D^2FN+1um!RZXNj@OlLb{?9JCEA>W>_&UgC+Mt6@^1APzCHMGvWQMv|_jJH_< zrC^eFUd@4OO_QkHKWn?(Uk?KI!i_Rcn8r!jhiZ^YX8$7y;MK 
z)(6Aq{hbGIsb!)sf;CfTf|!ZFgqK=FeRa2Gim#HwXj{Vj8c1GNjVr}Bpt}6)I?D9h z{--t7MZe2q?6!*ZeSnT;So{IVP;S|rg1mi8Ic@!zTQ;>26NmhEsgd#QZ{c_k#VFL` zfqBElVSrrJ0fF^>dpMW|xG>#0d>ny_knF82_xvOKd$c5B=4I-rq*9x(%Cqihu*+7h zjF(@S5*~x~UR%G_=*d9Jv5twj#Wp)7zw=4_JIVu)*ELPfpm7=|U3vp!g_0@h>9`bq%(ACsm-RO( zxNp6h63)bFas0(=PCpAuy&FU9E^Y^;FGqG&z2J$nd$_B%+`zDAyAU%$Xy(rgu;i@<M zYx27xr0fvnaxEey8QNFV`54?wg>zc7F^=~)8G=>KBe-DF`r0+XYQ< zE!@^rf5%GEZm0*j!{ocVzT7mcSxl;avZ{U#B6V3IcYijqk20_Gtte*x6XFnDs%=5`mMk#RGgu6!}?Deb98Y*yn(>`LZUl zKjdDS+*qOWUg&?Goq2`*=S4W9pie=e511Zu$M)&n+~gJeWmt3BHK2H}y7s04Esk2y zKRUKh3-eS1OftX8mJ%6#BhPNDNRn^5*1lKHi1|!0fYLBym#^{R?cBY8pM1XKI_X5$HYj zkv2P@qX9lfMS`o^KG;v-ymz;7&g%11MDo7QvY^JGnm)r-SN@x7IG+RFp}KryPz?I} zSN7*geVJPr##)$}uL9Tdi>hpY4a$OHZmel;-vq0~8h2<1aEe7^`rEgh@An-hnY?0R zt_&LABO*Xw&LteG0xI${zgZM*k%@mq%D6KRJaouIOD(>+93Kj%pj~`WSvw3D=@_qN z<#0F#BsDv^eQO_e^N2pJv1&#~=D99#8+`$mVGq$`t2Z5uNH1Y1tCl`rgWfS%t?$qN z?9V6+d4v?ycDZI8he-YeJ;K%3(eb&dY|IsFf!5I9WI2?>PSxOvIE`dv#lZSi)P_7F zU5o3bV4a+weW%Qrk0V6UYvOLPEEOLI;}`|KXX-sxtFDUu1N2E9v^|J-%*x29WjQ2`*Q-9B%2?4LljycrdBlgaO z5n!ahXQHB9RWsV$Q+u{;cMzR%GbmJ5ni#S4*0skwbGcS%GP#co|ARG`X(Uh*W^o2fYVTS*3f?o=yj+09K6+~M7Zph zsBeQJk`;9pQKfr(?%nCj@(!1>#-~{k?hL5#F;oK}g8jwMDF#qdNnBU0;U1KHZ?C3( zuj^!Hq`ngF12b@Bv9e+jQVa6HLY*D*fNN5y=HTl{wLX|1g_L6xa4l2AZ7t&fkXmtg z53Q&M>-tfWpq5Ma)jfI~2)PL4*>>W!>B6Z+K zCGlLJ#ERyD@;uzNSSHoBsqF<+y7OgQn$7D)L`2Yhnzv6H^F#%sC1eNrnY%>WU_QX~SiQMKr}i@o6&P3bR}(-2&yz?<53`h!VDZ|e?% zdK;l8?ENUC*E{{~(_)lxa)8rgvanA`xJjArob^rW)jttbw%Y{PWzQV5=u; zspJbdBBygL#82`|9EqAvTbz=w0zI;>K&Df&_xF7V40B z`ayn{QC=OTs~nE-xWMOsS5}fEa(B6hD*2K9-KNvh3y;d(9v`m{iE*^6qe=$QWZ zoudYqV?mwQrN+C!;UycV!JVPg6kBdiZOl16kNnjtEozx-A}hxYg*-C} z*r+t!Ok8Kd>Zp$U-Op%5D$Dci&hq{`+qELs_A7cp9wn|_&9CEJSi+QxnhVc^Qk9oh zC<{tKf4eiZ-EaY%*5KF*--omVjP5w;MHjhL#v+FK-2RTnVK9tuxkvZ)AT}pN_DI&)0#NSNDuy@hxKWHJDWJ zE<-$}08ZvE{B!;tV_=e1{nlQp!p3plo4ARS0b=vZ#I}G{duNT$I{;#AAD!0QX$6SP zWrEKvjMc}YYMaI;1H;X^X|~4#IcR}%J+7r{+5wy#N%_CmEI~uS$)~z|FSWY7%5KOL&Ag`<-`L+#_H~*Um7z~$wXX{*1DqzZVS6)a+=9?V zwADZ7jd5%LbNRRGQnw+Jrwz5pe|w+|`CO(tfGLt_t~H;^)17@1+{{WXxeJkIGCtv> zL&@$=Dty9xsnH@pO?rDBPkL|v^r)3p8MqITSFo(sZgzsY9~VgoM6kbE+#f(yQ|7}$ zk?4OnROAEmq5ST1S_K~fQghB+pt?K^S2rOw=n+VEvNytY#IEgrsd9u{@cm4{%D)!qNtor*j91+XPA)&%i0+rcvRi)EmtKCr1v|Z3;tw zM{@^iedu{yI%vfU)Bj8MFXS-=sjr+D`^Sb$#cHlIFZCQXKrPl3^)gO@r)nYpRo5wB z#d~eZyoQRj#&~_;rS$onDI*TQ?7*FFeQDQ9oZhlsq*ZX4MvYw<_8RKc)D zT@C1Cj8c|Mo8`A=O8W$*5I$jQ`T5iS5zCeBE9WzeY%RZ-A;Dz?a>e5Fn$s6>`p9G@ z{$>BHW&C#b(pUWrmn=R*O#qdCp=QT#TzAde;$}?l-{MlW?ck7v?cQg&Z?c|NkJsp8obXAT71gq3Y`H;rB`J0+Fw8f=ORlu)l$xL9kH&mGD>TJy>E(% zsHAm4ZXK!-_rm-nr_C|>A}}S_ADTt!VocXe{;AfYnY%<@uId=@OR`;R&~;7A9mGWv zn-ZpXH9}T4+soKYscVA0@2&q>-c{>hgYnR1myOUQ0QqG9f3{w*5lm( z+y1N}Ok}Z3Sf_3|X9Ou1+YB1De}YW`(S2doQpfKVLsP!8PmRTby^%73>4t2_GBkU_ z)hMm?&wjzQ;8La2MM`VD7E-do%et7CV{pV#+h1d_8^?8;-)hd_;q}!DUwwhQRtbzY&v9WP?WA_LyMt zH{m5WQy&sH=X%F3-SrmNGCeR=LCS9hx?aCft@}2(vPe~cZVwu+EkD!-NQQ@+J*OtP zJ448%Z_v^Z?m|QaD{D@?WreYQUL@6A2qqI#5Wdtd*^Sh?=q z56X@=46a~$fMj69#Pq`2=4|m`e>Zy?>q8GkkDxDrDO&<*JeJY%77rDGtOGZ z?BLp)?5L?~e>|v6aL^MFiLJkE<)xMyfK&kGx;aZcm7kposo6fAC*}AOz33SzS=eB| z9R&eSqhQ{gp(-&epehr;`jzzs*IuMoGkm|0A1=CpVdzEI)f;bp>o>J>_7a%3%$6)* zFG*IeUq*LrpMk`@ zZES|>^m)K^^QibDH>$n;RX1Pex1Fpj|KgCZ0=7>q{yk*?=reRv4aJlKT-x`lnr~Ok z)ztniG7?@}R_X70xOqT}{vM1Fhw1%<_yZz^;>~D+{t-?GSiRc(|6iAy&(P>Tdb&zF z6jddysCl>00YELI#;@kb)$M0H9G&cr*}6)dma`*pX)Rk8pKfOeADM@hzJ05g7jE5n z6uN69rFR|;b`>9U$}vABWYwYuhGQ|wb)7VN9F#I`*WS%FMzHuI$0{Z^H5e!hyUX)sHJD26Vt=ebc>QRdWtvNT$z;!>oBP?5s^8u~|a`x)0%Wb@5H-mz<+eL9ZgrWD}P zLMaNA1xi64+1_^TT-`rnif2o$wtCG^k5s&k|A%gXNy)Cu?*B1Fa&bvnyvIRpbu_;1 
zlTcc~5J}Sq`lqeNRi<|!(#d_;W=AN^07+OsD=&a)*V1Eds86O@DCs#ZsI-FaR~w*x zG|fYi(H2IvX5ri2Q$tH9;O#|4G!;X{-jKhcAK-{uKPC6;fxKiVjg^i7r~B1P-;GdO zWZ6m5J6IU7DMwoqjziVWd9aSPQubT=WVYy}w?fIq?(JgXG%NgVIHF`jm|y%Ibs-Ow z|FGJ-BR^_kgSEZ=PEd};d^Hc=6`-Y((cQUmY2o`GaA|@~ps0H>s<62}@DHRVAm~F= z1bTmd^A?7ZT48(uK^QBoBsZJ@}wq?lUvj{ofl;~2=1+@iOIs>Hb?JR%w7xIfKxa#jkNVZjZGjzR_ z8z;&y;bqqZSXPx+bC)@_nUr4(B+oUq7WR4%*>fofpnR_$sw2nV1gcsp{8{@bJQ%{W zOomm=w|n9W=PsJg-a&MOf)SaM07U^Mmlj`IfXDa1r4CaQ;QM_NEB7-_QVBE`J0GVaxW4EP2SoOOG1_ z*yd}NJ{0GrS#PB}^tbThu~DTyPY=iC+ky{aRQBeYa|Aw>Fg!nDv6v+RL#nm=P^BIP zdAF14Hc=P=B3@r@@)*}?>%;S-Bg3eje1#&VCg2okp~5zt=~`u)n$3cy3`IH%l>ygeTmJdYD`h!0O~rog3EPXyV0AW9 z>3z7h&-imdVjEQHl!TuiupZ3MtH#RnkZH`Kim3DR`!e zyV{aS4HOq)RKrAhuI#F%t&0%^`%9l`11?h|!xeUYu!k$_!*n%B?9JuJQUsLh+@epA z_BsU7{IaI>^?7pn^1tv;I{-;af8dTBzwb?`>Zi|_p=c<;+KGlY5>Oi<=>|YPcWM!M z&Lq~fuPf2sHM&E)y2`a|iOEu11FnXY=C6h+1TMXTNkQ(e1=91Z>00zbjrCM#d884b z$hfSeZI+vayJqqCYO^_KZo_r8>2xBxc#Wz%kV}izTxN1$0DdY7YO4ENKzI1p)NotMT3fd*da|BHco` z+=?OhOH2;8<>s0TwpT#s+u_t<>5IM?GpyZ#_F7J^f2f+ivwtcZ8t>}y>b0v44|l`q zT82)lZdL>D!FugAKW9#G87T-iBx*EWp=XRZJdvpkn8;|Q3jL+xV8G9w3o+G+Uj_$2TwqGom~v| zMfYTWZhPNp3jNRaGM26DX+$b&Ypv!z1A6yJIio)7I)&dbBY1~y{TwDm9ci2Yp3fbF zKkXQJ0izL!D5Mv?7(gQ@%9nt2mSKyFZS;8=l~zIgW4``W?5nt{!TO5-I~E2od0^me z0ft`3BoE~zOo!<4*h0AlzX{91;-j?|da(a-PkZCr`O(*zihJize}vzKBZ{e7vUo3W zVOJc<-v=YuagtNI4-g8Z3pbN7Y3;2?Zm*%UwTAK=DHkLcl4IpTi_s=)EF8JdG@En~EHC9^R= zDS${XRXyM2=NW!D(6>-mKN|Mbgu3`H)LS*lnc!+5`MyuYNrkm(8jv|{6jx${=ZB~}(2D7Dsy_}O z!r8>?I6i29RWR)VBI?V^P2@z^T_!5*PtDsW^;E)sE6~ZPRAvH&Qzych>l5;bnE{hd z(e^^OW_w!yoZ)-%w=#Kpf0xe?_WA25Wy^4}=a-vRnW~z%^i#A7zob#I!IA8G(>T5Ed}d0f5Td(VVsYU!mf#Ov$+6B zIpuD$#<2@A$y>Kc(CT@H?2G)Ol6N+bM4AJbtXePe-~gNaDd^qOC{DQrDHwKgy@zsE7K)C!bxOqvH79e~uuPi=b$ zmj-X4!bZkcK_kfIdV{q^Dn@@3O|G4urxRR>Oi`M`*iN~mYG9N1!Bqp7UqajH)7rLB zcJgcB2-6XZj(encV3r0d6V4cDzFIjz*KjHKdz!It66i|JmAR_PSoJg`1U@BLS>J>- zK{?)mObLWRQ1Mf+hVMMn8f2%TrAFzzlNOe878l{!KNbh?xg^rI%hfnA56g~73r3UO zfD~=@EN7>^{nHhdwZ(_skIGA0P_Qjk{nz6<$GSf%!3)ap&rJzdjvM@zHpLsR9sV{! 
zZ>mY;M!!U4JQP|5-Gqsh#aJ#w>dlDwMlT)yYMigAf4BG{!W}Nx_*=oqWyJYW6cJeG z7_Cqcx98Tj{n?Xu1eU*UTh0KLcUv`VCW`!MVK`_RP6k|QJc11ZsLO^x&pghFp;`{9IKtBDU{6vgL&?+=Ox*ealfe>(uB`YzK# zo2B$&)c@3Ay1hD&B%O+tUIlv$l-pXJ++IF6kHfOXwLrUh0!Xsv^zwD0f#h*zUB7lC z6(o;yPmI0yX*kWXEIpQ5YUqCknbHl6?yHwRn?&kpygC*7x%{p{)TTBI1kWQ<Ll)O5t7xWworP$gOazh@c9;&x9@SJ zGd5v*`@wZyJLZUM`UxCyTv002A&)GjzJmRn;&dn?zb~up#fO0&ubunNTyc2+vn`ia zKETc-P-B!%(;I0_j7Ro2bKzjhNLn(!r@4(B-D9UG5c4tp?Zfqbd@PuDGcr>{vD_Yq ziY&T=XpZlR6+VMBfD?LRT^)%=JL!o%+b4FKpie?1*$PM7wyvMt-y>{Jjrpg9SlUlV zGYLU+qrsJOei|ZT9iEu-ZGSo(u{p??KGOd@I5DA(o(UT##CQSD0#sm^nKH3du<;A>D7jNe zD;U#-KRwD5gAF71I~=~bl3fgDRH_|srIZ?#29QeNJOd950%@gN1v)G*^3^>fsCWye zxCRr6TvwCHIxtnYE^+bKBU0{3`$XD6vN4ZX&t<}=g7kNYz`A{6k6aA(sc_^kMSdiY zDBC3!xOj8_Nb~rr;q*#WmZj2Nw_@LYOLXo~OkG0Qsf1Y3b zox1YI>%j1iZ*@dQ| z-PZold#N1#^ZIl7klc;vhwv@DVZG!YT)AV?WS#+?x=821{I4tPPtrg95b0P` zoNo@w51{0)x%ey9gZ&+b_1?-xmT3X-o-$_UD<-@2^@ ztp>b@CO`*qoknMsfJk1369~duZzAiIfvaqbulBc&URT?YUhDCO*4kf(QzO)r1-tzi zM^Ik7hYCRl37}(=<3XU>lEvMMKoU2$I9+p5jqDh!%*vEra?3PJ+u#e-ouxGcdzVxk0K-yaNE>D13`F| z-7w7%^kaVK>^YZBI)^;El*X{F+sy;{P*hsR{^r+z7@XV+LBtx8C)i5(z-GhJ5&a!g zwL#-ZP+>*8u9`UtlH7>i*$yf^8rC#ShHChj+$w5S*`SVv@}|MTAqq#JHHCZ1ukUy; z^*KBv+}NYd*W(0aWT0Cbp2*lluE=lxY6=C-;b4`GW&Zr!wS79_JTCwfsOu{9~qML<`>O@F!=PJ*j)0_9ZOk~EMM7Ju7ZL^YBTx`VNv`8hCkEB7ZD91C(2b|YhlhOI2M1qt3Dm?gvk4hnj`OWEtwj@ z%E)^+s101Jncw3wvR=*QQOCMBzg_SzX%sFceq9SOc|RbzE!DjqaGmlr8*r`vJ(!=@ zRPqnG&X7D|@4pXrL-qk&Drab>BF;YCkHg-#f}=<6 zc&rblCvv|`8)2UW6nv}kOwW1>N;Nl^V$9PR#iu~GZD`Mc$;)Khd;TmO(d&IBN}n;b z25btsMM&s&_WARu6s#FDTA0=sa1o$*0JWI>BErkeRm1ho`4X5mI=qq60Oi#cSDdEM zKRv1F%Jmu|)~@nX_!Cpd>wX}hT{C4SeFNwklp)(hO=mzw$?07~Yp(IjLBHgkLQ}}m z+prvor&qJhI~}ScY$y6Be5Z@Y6nl%UYQu37ybQC zA0pH*;h%;JD!?D!`cLIM^%Xvu_Z7_4R0{pqxMa@aQPVZP>6xWMe~U>K?xzQSm*49H zXYVFvSvmwpajZdWYd`dl@0r^_@ydT@!@zX%Z(fauJhn6~i-s`HxvX@vLy@`Zt^d37 zdKj$CK8l+Y!Qr4+#HsLz{@z-RpgzDU*VJgu%uR{F$@9J_q(?(K@7lptGzixz@$9~> ztx}K0q&00)fo-GCaj2*cTPlw4#&+NFNoAl^wi8^-kk$vB2&fT5Qkm-wODE;d%gW?) 
zvg_ndP(?jyUJadsOY+v%mFOafBw38_G$eZdeYI%oS@^4Uxv#P~>h!S5eM^)40|xv?wCyB8YB(t;L_`Is?Tn zqeQLRR)Xi}j^SE)xxjVGP@#`nYrZf)^Yy1khAIB-3X<8J}YY~-h^2CWY_+g z$Wt3bc;z15V!R*8wLNug7Os;Gp;2q~&nvIQdBrZ@K?bL*pvYjD0GfmP)u^s@|Hrp} zZKVXR9@@GgMPzloz!P{=p+`?wTw8cS2@?Fsa=8!TdUa1G>fmC~$NVq_> zkz;rNocB|D+m_qC$drAJ(12|KUMDBH}fnB<`5f%%HJdK)V8Hmq1lIJp%ls_aNlAY_Td2Q|ZW z-eq6$R9lgL3Y7tBs#g4;&fQnkN9r@Klhi7nJetw7h%O6toOi88)JXaqI$2nJrj9#$ z9!`~-9-3q73m7kE6oUqH1eiAs&+n@#@v{)kB+$afex3(dZJMFw3(%`)re<}@FM$;6s%HKF zs>gK%C%pldwZZa-{U+B773$|(*Akf?ty5XP13E?IfO(w0SYIv_27F|)x;}M(0J~|W z>3sXzyzwJ8?SMxa_cp5^|KC!c24?Dm?NHdeY{Zt~!vHnMkWrS*!~0v;IHTRl2i%Pl zR-9|rIub)X6xq%r@~9+G2MTBxM<;=b$lh{uOnzmaD~rLgK}22Vs^cKV`FZhmX{B8cWc(N8ib;xjT?SEl?`e>e%3dgJa_3sWceH;IThD=dYZPQelX8E0I; z@ZdVqZ1ekRU2`3=d04qBMT+pxhx!*KNYVu;FM8|O z)q^h#5G+UMy$DF%)zDq0-it9x+&s&ys15|GjTXjeu!qxSw_FXTlsw%_o@)>hdhHj^ zdd)hV47Jwa!?V_m*7whCaJo2AZbT5}Se;u@i|U*5jMo~FXB;AF!J|v24jgmtNF_6Z zlGnFx*7SYQZcZW|GPg_gO4!I$=cHZL|0Fm*Azck8pNT`wcWh!*_#aU%FX;Ug!N*6=#yA8Q8rJC113#oe00AlcdEzN zwXM6;Jr-D)JZJk9#%(RCCNPIl+1&{V^H9cp3+=Q0F)zE(sh3Tot$Xi95XE1Z{Pw#{ zF*l6L!S#VPYg91S;dftmO#(WVW`7Ki%KB-r@l}5rqL2y@jEFYZs~W zS)^>P+DuV!>AlV|W?`EwwO0z>1`st<}a~%9AVWgXd+}WYwH=)by)h zMBW0i(+~2yC#M$=&<}dJzYe8)1I&Aer)H=`Z|3I(3x_oc9R$-KE6!bgo4<`o>D%^& zE9_o#(0!=?XZR8RvoC!tZ_vr!0)5 zRZ|}!RHg4FLiiX+-gea@gXcfNL=-Im@TXw1+BwTL3*2Y@V=GMp)!IJCB=gpV7|=^^ z{36c^FuE%HFI%L~FR`gn-gWacwPMZ+flMxUj0$W_l?c(Iv?Wj_B8{k7gUe6v+y0?l z**W+fA{8;!{9L|=lPSB8Y+^DBl>4Haj2^S>Byd$vOrwp;ZYd%;Ny~lcr>8=h8oNLr41c} zPz@8)an?K*?99-9j>Xx{EmZY!eoM1nwVI*edNxBs;w@v0GO@5fP!7p_Hfk z{Ygopwi1y2$&hzfKPgMfDPT9YSSj6B9x11yG|QdaHSg2%th-+kE%bD#TUM-HBhR-@ zdf6w^C&NBkjg796M`u(Pft&?N^n$C|W|#LjFFxAE=RX_O6>O&BJfG9w!rsg+Uop;2 z3TOOk8M8!xyYoKWem<;F<-J|6zM%h^OOBr}?6DOtnYk#pD^+tCy%>y)m;Ik~3n)!u z)(LF&i7?nde))WvTUWzIs1=veEc&0gXsPOTa5oB>-`7K_9&M@owl{{zr%O#cn-E?5 zE9VBCWeAfV#42)~<73NKJc3N6ZLA4wbKqNlOKZqU3UuEvo5 zFMnaCy~cGi{H0R*GyQuex^fEBI`h8Q_CzO`7S$C`IpXNQtlPC$!&Dk~r--T1DLjO@Vl14KaH$Sj@5NQ+iMAO*4usRg+ zxYUFD^628u|to*w`sF1@&^BoFpX)4x3aQ2)qy^}YjOGFM|3t@jT!44lmU zb$xX_;<}#{M96B(8IK{;sDA#JG>o5;D3z?OGdzLonz4wQH77ioXD%BWtP_)->YrK3 z995h4C>gjkvhAj0!hHszUGn@&08J6czbMIZO~4A0k5Ot68?d>GUFM)2B?-$Cy%Iz}@(x$7gp~!mw=*#~P z+XS3Rinv)Ao5N#Uz5TXN%Wzz~Q8kJ0aLTiSo3dC!8qD{Ie7JLRiJ`u2h5i8_(Y5tn zrTP(*7n^65;*j4hWwpk4UBMu7C@xRUTWzTm{|>|G7%M7P&Ec*ihOM>r;RsNIJ|D`z zhFX!I*=XnM?60E&RjyAeS_PP-oubvIy<;#0xAL-$BMcU~TZlSdC@dCWiCDZgHFP|b ztk=#-dPJUF{_%}>(;o%oP8PLV-<_;)glY>|v*)Wcz1l{QM%;3LqlcKST98Grw(j9v>Y7NUm8v{kR-X zTK{HY@zJr0!*e!Do^;%5N$2G0#RD}ppBpgIrke8tY9ll4A;6-HXD$Hp5(_noabb^r zS;@pjaO6<2V5)Opj8SrLtgyYLJAjah3T^D?K}hk|O;l>D0jV1l1hjVRnmnr`48~7$ zU2Z_k&s3vY-`~J-u#DCl5#3<>Ae+NKmkDtbE;YvV#Rnqu$`DpI>a-ROVC1Lg92#nn z);c%iQkIFf)sZUEr^H>LmIhjMimR~NENz~R!nNXfk<45JC_0C{ZnNmMxpQV**2i3@ zDzB>6JPvkx!_(QgU0ImKO3sJ7;b|L?3f*V{sC}#NK-C-CiLzDJDMV7euBNMLuw-rM zl%Lrozz%b}95uyc4pseas`Q6EyZGqb%;G=tg$Qg%J9v06*tPKAXA;|w>9YJx*>_6e zqCQ|{S@j!?Z|EPol;T@ed?O|uVTFFGj&JIT^;zw?8Iff5ud;BoHTqkyN>WW>E$tx> z8AaNjAi4vhYy2~%gx(HE4uk90Tg&Kwc1nh7x)V+_;kDmIe7mdXF0n4YtL>D%yMOM| zwxj1BShAZ-8Qn{50F`xOtoE1O2beC~G39nqW*)yE*;-i}10Tra3ef`AJ_r*2%F9jH z54rA4!&XddWX@OcfuDG$z8bC5I3LDGj_ViL^r;buv<_!UX5-^yC^i<``*stffw|(? 
z(I(JdJ9?b~{t4HS)8_g*e-f14+Smg<1*ImI-dh#?G&V0P^J^RZo{1m48F(TCd$E>nBL)*CHfgp;dnwTki*C|7e6V`IAN%aE7N zlsWI!{K5uYSg5x^Jbn_9oI4&9}Fr3wYR(td5dUF z^oIVqOPZ1D-5v|DMrC;qPF=7vwmlt)^py>an%lLM4=@qhHlBlRko^$l4RhO7-bb#L zaGp+OnEn__>oio^AXuA$Kf!esRpHt`1+_e17`{n91C|uE@~>~nf8IZ^s)pe&!1$3= zls@j9WdN<2>q|d)H6g5!&##~qr6yU=eT_)Q^+VR zPLy?x)_{>}DP9wEwH)ySLiX2QvSx&)5HPc4#vpgd6YbXRlM`F^wGA+bB2w`6bvVIc zpeLCgs04@SW_jQ08$;HP2qD%{WyOn*M9Aw3jxNoO^e8xa)0*0WD@S7_cu6yp9RtLN zm&Qk*tkI`i|321F)y5}i+Q+$01&z-*2c#`^9-nlqqRYn#Kty!uT%Cq+A{?=fjyvy! zzJQ2WcX7gQ9*-xZQj5G|Cb&9P{!c;5-Qx4rA5R66s!BHEoCXsO$J=&>@abST_dQpU z|LU7+87{SZ1>v3jZnr!?+h|Zj=-E(2<=e6Cmp=y+o9(JU-Nt(^ zI%U~nF|IASo`*_S?5f;nVNwe|AD7pagR$IP@Dq{nw-+Mv#{JWyJL)`#i!hR|W0>2l z{3Ly%zlq(iRoVcC7|KVfToVTaH44!V=oDZEv5JH}w=-MU)?hnXITqGFaC7H6T*@)L zpz=%qUyq9TuKjb$1*8eIg@;Z3&7%voDM0~;P-&@qN);K&vsbJg9Pz!o8A|CZ{*!!N z*%Omf3$?s)6(XhQywJ8iT#ZSBBQ?BTlP8Ayz_`~UG!8lS_!yApb44A-oUroa$fP-B z4W+CIlbDRg^^Mca?rj*@hBq4jg<|e-S%cBG?NzENDCzoWvVXLBUrZzA%rw=!k!Ayl zd`Iceb70DP`CQxPGLMkr;%_b6P%NNiN;}(L{b?^IlBW|?+%|RzRNA^_J8koNST_1i zB+ZU^L;sx66Lp9J{Q{MDJHD(`#G4R#n=^dc68FuRB)v@pYnBN(l{~M!w?ZlJKCNqF zu6<|S)<1Ia&f183dyj1|_mVqcFL5q3zwsu|J9Ed(ZkxUre`g5oS4`A# zxc305<+W8o_ktzV+`de&DMNZIANfKwiLi zzMWz41SXO*#hB#UBKnhg=B>M`hdl+P*9}b2!+icyC;D_uIL&9~8B9u-wcTfXJmb^P zx(BSO&eLz7cS%^QjpSTGb=VgW>QdXTx$Q+rF^xg?64N7cw_>)B6RHFv`C;8uL{)qgUeKoeGI15AA5Yo+8S{W`NyT-ool8Lhr%gDEe5CIABIy1%it8dZl#7= zf$gf&^zYFl5NX*XKSfG9vL_RNtS!|Xg-ov)mS~MXM`OBKh42>tp1_Ym5rW*$ zHgrCe;@Eo+D(Y0 z#yg;e0w2olD_5+bHMo}0;v>!2w;Adh&z5a6v!C9O8`MVKeift=%aOlCb2XTJnRiPe zz6Oz(QR3R@dM!rYc2%69G1n@~mYnQm8i(_`fjZ}{JUAv%UQn-_)5*4hdBGKS3#qvs zh|Cq;(9nXeQXBFd4WX@?PJ<;M3J3YjLVEGQU(prvgY<{jGC@F{r2pPmFl}x(Chz~% z8ESSf*mdGgJ*O(*lBb~wo66`4h%~WnqA<|TtKHpzOvBvN!WQ2Mt8RuAKivf)vx8fo!cJI*^MgO)ur7-gqDf4!)>hiI+aqf=%Xb)&} z7rnE8eB8%QGY{N_QZ-vAjE#5a?(!R}QRE&_ZV&!_tsdN)`?gd>k^5XnVk5?|@@Tpr zle`Ym3~Qq*1);x-Q$y?S4`L$Mzbe)LA&_+TG?mBu4#2Wrf%f&ShoSVi`rvIQ2-$c9 zo6iB3y@^@Q7mJUQDZg@=_Ogo) ziB!KvNY&Ptwn^_BFsV`^Cb-0w+V?Gzkc}>N=IwV-ay`0h-*t8P!uLHx=PzoOH;OEzuogZQeQ*Qt+X*moP%OP99hI+*Qek=f|c!AK^O5m>qSk zBVqYpIvMP!{#hPG)i;kukUo3AJRFmoHQ`G(T~0a3Z~M1bxa{KqIaqe)8I~wqr-+H# z%jA$>hgKL=PjpQjV^ju(KM9fn(rcgHC+EhQxdl3a>mb@$jvgaVT+QU*29gp z1a>-D!w}yFiU5#;mPv^k04K4Ez*RcwS^d+ys&LB#7k{Tc$k*9<-kDKp-{%B-wewkx zI_CxtFKzKU502CaR$Wr@XwFB(rd6twuZh@;=>mQrsqI^RPhIF*>6N|N^)BiY*)roR zfD(X6u7{g>iXwm^J^3gL>0tl+RGssVS)C;K25!qr)EWe-SWGeqtqb5XrR%;=U}! 
z&AAGxUzL}~)&1|jKWdG)igFFI3q+t-7>@2~zWje|*5`XTKlr^HQHp!MhU76^1S(>e zg$po+S~pYcuaj^Jwc36?L+v(%$`~A2Nn6OXhMnKk05%0y({HP<(?E(O9J8%m)C5c_ zVpc`XK}nb$u{!-cB5%H`eM#o~N|#(6(pS==~Srn3h?Z<$*BEqj9N)Wy&EEIb6IR8A0F zFoMz;@~nBMjjs=bDZ~mZebt>G!N`(uk?wvBNT%yxa;L_XS>thJQms|o=2-p&iu5Tm zn!e8PB%J9(SkDSX$TCQ2z{==`+N1Ea>v|0ohqOmJ)4^mG=^Ni{A>Ap z-nG6mt)sr+GIf4Q3r_PQoWhj;S_A$|m?T_7k8e{g<7He#w1TznRVcFDRRw;n|8bjf z_jM@6Fp4NYcS+;p8`!*d19v;h0<0*@&iL2Y(h`7bXL@^;>}`z-iPwYqK8Z0MThfv+aELwyXKG{xL0#q(6sK)dZ9(&nAK@fv$C3pMVm#!ZmLA#GVjUg|CmX(k>XY#>ghEtg-#Yf;@chnSN!*3s ztG*);alJqo?BJZN=~~{lT?nS|1JN{Z#sOt!tl-CZ1AbO95R5MtqKsu95#tM3T*%S6%HRrGQ&#jvE(KH$&BD5goLk~jxSnu2oFPNQ=X1iNQqY*(|cpVuc)!9c3BoR7&{ zXU1AURQ>h>Tq=JDdyGg9GzygR?S9!_-iva*v%E4dcAbVhT%*|lm<;Th*-@UOgNWpq z9@acCR%1Gk(WlzyN59X1jbD`Im75k`d7e%ImAKlTE?NaB$KCSz|@@&mM7o3n{tpRnpLVbk7p2Gxu;eA zvw6Ji^Ua*lKW>4q>5I=}lKFK_3wQyJ{3vSG#a~PcYGSIwGQ0$)y)%Y(k+V^%V{3`( z)&6In#uNb}cn#Gpy;NkAe}3!V8#bS$FyO0I*AB8@(-Z<*If(d8P@-r2oKwR=*O8lF zZhx{+v5rDr*s6EOf1LflkWq`(9D*y=q|bCet?f`8A#^J zM)F}E zI8(mG-vKEJkGGtl^gSxgY^)rge*opQ4X*}p3PYYR=Lf37Q|TAZaXf;&={JJi>yk^$4}B`-73{$i5#(Eb zEpm1e6#2%7w!VT-#>u=bonw`cQ~FyD{>F^dHv624ORH^;-l7GUW6fz;8Qd~4>OH3e zDYF=jEmqeuOvJ|myRnS?JPwa1LFU^13as*b1#r!W-?|djFw$3QzL3w6efQ$6k2o{viN)s=(!lwhJW!(mYsqAQCn6LyKpdfJ1e=ZF|7vGBYRvKca79*5b{&2)!O?y zNSQ}AP!wEtqe(4=*m|mw8cjCh<>lWlR3qFJu&M2a83NLbd`VjD8G+MgY7W+mH|K5} zOU)lw2JCK!UR>2by1v=uu7>5RbK|)NQdjcuZ0jg(RJ#`WKh=6OM~-3B(Pw977qaZ7 zT;u5Uy1%LsWHL~;C(9zcEf4X+UbG5ku&wEo%77+}fAurQ2AA@)?2K!FY4va#NC#m)IT|EveB4tM&x~_C&vHs7PtkcCb#de zZ^B!FwCWAU+)iuV~?Spi;pO6t9uyPDILGHnsT6(j2hF zHCD%!?KE`U-QQJ1XjAV==N{}*=Wq5lMYknbss)Z@xzm&#>S0L1YUWpU8vP>(Md*+BdJNVy zE;m>{o}avMsMX37aB5|KfsXQI|MNgwba<-An=IVWD&VN`j03jt?W#~pZPET2zsS-n zH*eWbXwO3Fj$HuOvU{qf=kRKMXPv+EyzA75_0sB|FCbE9v$ems2GAE#8TS_dcD#jF zlgLX+WXr4+MN&B_@%G*M7d>xR(fh=IX0p(?!?HKjF>`fF| zSRrrQY7XWOk*#WrdpkbrZIrihmXnQlfaKq)cx7^X7m-G}X8U~Go%^4;@>30>pSV^k6}n+PQv{U0 zE)*#NeAfT^3%0D!p`_bR0&8x9U*M!WQD@nH={ho@j0@kblDh z(bieN=^wkadFzHXc8I(|v0&$J3$-eG#1 zU5E-cjm(@U0391*r#jE}baSw;HdJkY4&BM%Ze6#u3`~ZvYc0a={^|FpqrdIV?+ati zrn(dbVp3(zqjDr(pF{gN7xxT5 z%YZWq)hBA(AoCzf7S^m@;Z2AaTg`z83NHb|Y zhER@jm)=XyfTUj^3H%^{RNJIe04w4JO#_$YM(Q)=DNvTDW@e)8J)P%ww&l@h@*@!s zww*Q42A2cq%s8#(Ik@uj)LXa8&9A8zh%T?S`1L}5_q^rqvUj|Qh(Lxbra)QvUdlbS zDEQXBRi2j-H8Iw(^QvpPvvO>8AASu^wQxsh+kali9;fe-kOi zsR;}B2VIlTxbcd~0!S0CjY5cb5UD`>)@ANpI09tq-X6d1_i|4;d2Fv{`F@{B87(F9 z0YWDE6i?K0oLfH3BjjM46`_9w%k<)-GHCk$7?h`YYJCzwFYw@}K;FMH!LP{Rvpn3E z@`&qmFlFHQTcghxh;Hgzs;4jeWb8H^9d9w>zQSbySUY5q1CoV_UHfJ48-TpFRj>Ni zb&|hs@pm<+eTR_u7F9!GzXy}dPSf8Hxw|RrR_Uk}h*a3{zggcjghQTQVl0+T*M5sb zF^1!-ms2NPdh=!F6@Iwu3>_=R4JKvH?K<5NeG(KIjw)1&o>|kGf{wzZU2LvNYX`usjybXv(`wo7t6l3FiW- z`E>~zavnnVM!7+^EjQ=Gq_g%?%7e>vyV@sJLy-acLS#lv`b~YUH1GL~ut|1M=v?FQ z#fX&J21_k!0G7*cv2CzVCa=uAt=6tZ$+~H(#U>dG)GVf}B;#Px7e}DvGl_`2 z^a$N&Td+;a71)&u(Le7{;4|$L4!S zo|}@u(vLUiN8%V4UZhDA zoA<^8upDeJ@0$l*cOCSPSv`NKXSA)IwFGqllQF-Hv+>q@xaWkDEY}iq!y`D^vI@}l z(LV;JSxmLVjvvp@ySyg{zizHV$`gcn%RE?c}}Og!Ft*&F|?v{a!$oXv+5bqU&U^^P_04 zcrRhSsE=&*GF*d0j>LWy%CK!jqD>QF^?=vVDM1TkLhi3) z-MO`kk5PH;Fy%w>K0$OQ+NZokG<`|a96rUTjCLX#yIL)MhD;uJImw86k<^;cR_p&? zAk+X$ZP}H+gfzk#dtplQUx6z0mERb+zRr_M=jgl|DZfGFeY++cRUePMZ;>g|@C5xp zcE1Z#p*Ly`(@VbZA53W0meC(_TiIdDVa0<#V!V7u+0zdB!%{I8A1!_UP&j?qX*cu? 
zFk-V2ARldqqmq$2DYU*mj_98e1BQ+PlB*4>n?-!lMve-hfCI1xjz-itmJN!>^gkQ< zDu&*%h;+7E>|hMgB#!HE8mdKTdIp00bx=VkjEyJ2$*G5G$0%JRx9pr>{C%^Rp9FiY zeZi&BpA3;g^ZGx8Rx6YOu z!dZdS6Q)ENS`Mb`mLJ5d*uJ$Sb2c{Z(D9kIwdov0iAP9%;yf3MG(5pnT+%oXmpULq z+B7Q#hB(&KT3fAO5KQKNW=psbmS(jRHF8n^JmXV40QX`{H<-5YaHW?F;8N~^+8j3s z64I~EsBImqffS`3+*={Gs2gjrUEAn339ZAVaO$KQ-1`2O@|UQrZ^Yy^KAGtnV3!jy znRp+%WeDfhwL65e(ce%nO&8sa%WF1RG1fe;L@4jD6Ookns{WC&@*Hgvy1IX4#-bKM zUz10i17DrwS}2|HQjr0h&l`gUGUc;+Hc?Jb02 z`kU7dD##Ql)4suL4mT=HBf8#Jt9<3iHk*6YW1U>)p}C}CCT}L4c`(v4&UV3@cH>BE z<%;1|bPga{Zb9tiYk$uy9qoI4pNKhWXSYWKm+jZ97Z(4%4jXTq1gkb~^izhugZKRZ zRNW7VpGBH4dJz#3S42GG6%kKP){WV0Mnpv1Opcv&l1}V&db```=44&_Z@QCCcTfMz zpHazXGFgx7xE_zk>ttO=Jvpw5BUvYt$z(GbuN}Q!uUDdoctu1+L`1wIA|fL9^VIWp z#SKY#pYN~UdaIs#s_Lnyo_Z=wBzrSMOE%s-<;dHN} z<3#2?AgwhPZo>=|C`{~bYoUsGPW~3Z@cS#dElxB&fW*Pk!JkYy*sN|30vdMgv!rl5 zgoy%{9B?IG@gjS;=3)t(dbNLpT z94;sA4`l+HUbFl1JOBe{!)$Qavl6j_63-)#WqfpomTR60h*Q_vJF^I_ayN)xGBO%wF<3Ym;4yAQE5{WU0s3PYo@zDc0B&t>CGr<4gmYC~aj z8;PLeWnKCmJWFrj8g0(Qd=Je2v36An(32*hGmgx+oC#0H^b1IScV*kEl{e8a07S;R zF%}wUqoT`{c$0?lbI=&yYJGNY{)DxI&RNhZ(2Z!$3{WjGF-U6}Mq z3I_E8LZ*WVD8Q0{Gqv2tEt>VM85SsRCXny6?W3b=RC?M+nXG{Kst-^Cws&j}R5?!JKV^)wp+yVGO zu7_N7d>@Shs$#Zp!kMkBrkNU??%_o2-X}^0)OS{Ob=&svoNc{I*kQUv{#H(|ZSvfd z@P9?QnJ#MTiE=dPw$ro&02B!{Mq0O&P&<7{jG8G1#DSW2Me!aSLd(5u0ffzl5&=n8 zcAoBSTY=fVdt)3MMW&4N#RTDwc?8bn&+G$aCHjkkuI~FQ6!%9IrR{D|e#<3*;fW~OJupqTJZ^@&*K?F?U_9=4y$_XK zaClN@zrWJ1f1(sWfKG9VeUPHNc_jfat01UeD3Srp+w#mw8B zU}8Xh1ePptt#i?0AB8B~PSMqkAdjIm$@anc#uNF|+`*y%9Yi$)nHaNa6?hUBViXQA z|Hi11oL?|O~K)8P;}%jcEhIB5MmB-ter zpK0iDP2sF-xis=fO+j+h;V4>-n#%=_)n8jW*UImBrR`jiq7!HhwK9%!y^wG6p^s_$ z#hP;EOrM52iB74{U8Q1*ez}6-Y&DSUoL4fRsp)c)*{i4&qBSFz5|LzDi#WQ z1QL5XC&vf0!+WUgX5G2YeLqv1(RK&>KR{ytwosvC1@a+UWm-k&2#=89;71_sI$~Fb zSpp$iOF!&h2ENoo{sR*F4AR3=kjOd}pk#uSkNi9p4w!w4r03 zIzm~1;KL3l7vyzi{e4o6!_K@2lJ&74JW|#Oi$SvAG8%_Jmmspl&IRg>ODo;OkBIhA z-|upooZkoi0fYtRW*{TywaDaZS^gh-V;}tirrxbY~F4ON02sTihfUA zFhp+vsC!>ImDP^OV%$U_q%zc(%N2{A`SLiNJC7mufEqhtbR_ovH+~t` zkBym?`0PK4_?x#TNlc1Pxe1vc9x4^wj7|}FV;?)I-gg@W1J|~!pf%KAZ?T~g)#`&7 zjc!}S=-@H4>MPen4ONEf0F`Qu16Zx(X z4q=@R_3t2rB=yH_7EgNCl!IG#?KW=u{8NA&D7j3=W=6(yeFm&)C}UGGhCGW@$l_rY zS8_$4d=8du3MVx;&d1Do2$&^bKf~c}J3Rv8iyO2kQFj*LFd!R58aXkFLZHZB0P^!~Y|n8O-j@)SSi2>BRcR8)tf!sq@MeX5 zU0<>}-$En2B7Ym1pn`=Fd;vPS9!3W{0a|_`EQYPvwWmp-vY1nYTjq}G z7mFatoHh2L%ILZnl+0Th*@t z=0~X3aV0nX0)&IYE;PE~b%3m>W9IizFwj|<2u3qfW{w+b8j-V9cv($D;r*?Sz-?hU z3@f+A=Io80ljAM)*%e8x?dS<-%hpVH)m01W7oM{@4Ii=T-3IXSxUG=YDmA)wHPMhA z2u;(TiNYsuY+$9>RNA-$mYsQhi;r|xa!VXu>Ov}8iTK&$S^FD}wado*LC8T`h390% zEtb}{HqcqPzh*-4-OYe(#O__&OEl!$Ao7Zs&J+uTBG@z@i>SSQ_4S)|cdrisvN3LU zKET*#QzKs?`Q0I7!^kWFuLy5!t@NpXjtkO8(b+Vbia&H*cRvPGwU%4gvPIPNo#iHy zNwkjB;-~5Q({NcM&!V$(ta;(ox3eag=$%0Ct_gTC#KDX^Y64f`;`m+YMpygis;m8$ zVgb)dkOS`NgDrb&ep~6%I>0`_{CKF_r*&mNFj>qzcp^@7++E)raI-gw z(_#nEO-F8A;2+6XSfPGP*|hR|6oBiS8BHGZm~}L*hc{phM^6CBi6;D6blihTjRKuAv%=!J_=P+JYJ}i7Ap{Yr^I4ewW&ei~e&0 zmUW>GnNnUrt7v!W=r1CCRLEa83C}92GU2FpBAD`WeRrD)hZd1Zw3~1Vd{n&kDyZI= z6c>hgk;s&Q@2~kSn{nm1VbiuoecyE|X@fWg!liL%;~o#6V0r}vPnO@nD&aAOn4eFH z|7|p~uK~_{=@5gaymtUuM|Lkg_}xlR&4fwxJ#@?hG&)6oAEnZ(gJU1mloUZJKSU?z z%XTgmg(d-OI%zt_d|Y3Os~G7Nr|G#PSpF&6=i<&1_4%y+st5bz=jbLox`Fb10Z2hP z^*|gGrA}0&YcLd88U=(1uQyDz@McYyfUki>*jXN8N}oWg*I>k#`qp#SgO4tCE`I+G zX2yzzy1zCGOUL~l+I;=N^fNi*_w(#Eow$oeLa%l0@A1|*79W+ zUFW_Ynl%(gARJE(DL2%2BPvJOv6n%#*N$=#?Q(=Kb=gq}fw~ciUn^UBXczfBxrOkx zW-`M>d+g4wMymYwnI;rwTmAW#5dU?^iKXL4STbG|U_8AQji<(^aICOk-vr37cb8k86=-vXbWeMb zvbhbUK$J?v?TAJ+O5n(tZTe1Qac;28Km)M+wwczqhblca6Z9}TqQzK2~2Dw-&~!2|zRbhv7WJo)lBdBs{AXCx^@Dr$Jfl@WGGxrc4^JMlzU7 
z$xAu?wzIy>G)g$}WH(4%O7Qj=6Yr>*xtP6w_gJ`A-<4@JyHHTx*;CVq2IB^om|6G2 zNRVl|91`7!Oo=}2>*vk=nMzJ=a|bCONR7-HfZkDm+7W&W_aX@g$FDf|d|&-h+-N#S zC0CDIz&wCVeh=&@VyHM(P??6D-*xPWd#C7(c&DIV>rbr7K*<5T5k%O~?EhaMH@DTLw- zZZWYv2On>wjtQHl92`jm+xtW__jAC!=4zRbS!J)9l~A4NLKunETC3$7mH52y>vf-UURa zqcPsUhlsYe@`^F`{Upl}r<5Ncv+sA5#qWptvudpGzllnLY`$e9Pu3p;vQ`e6~*qZRnKcsQkIGT0GFGT`~ zwL)ISF3lyVY}?gq+gH&e(AiB&zrGBaY-{#Ui<)(LzO>%S#oo|0u0Ut|(4@!4ca;9T z0O&hn?Su(%Au74c6ACve6%Yk2XXh7rCX5`daCI>vxw12tv`YZZz`nPrUIttm1yso^ zC$2`%_oc06ytoF^G*8$GkB6jP3(KxfOQ3S#*mWS^TTg3Xn^2wWGbbAM{8g$qK$@X_ zW_CtoAIl~sT?SUgII*vrE=Oh7tVC&sx-oOop4;Y*%!b>`3P=+;@-a%u-xXQ=z5nCD z_V1}1e#eQCa^`9^A{%F*oXTiRTIdFjp$3a}Xe@N`F*U<;He5>#t{wGN(XZ916Wz#i zMC&XQW*00u*4WWk!~^5k{^mfq*xu0|Renl2vsp`{J^C5tRf zL%Eb`7$Wn*SSoGxj0bViUG%0=WR`Es+|wMevhW!F)3blHZ2# zEt&Rg5@?t8tjDaFU{;WYL*{q90J@{SO+6*_!-%=|4Hs9i+ z+45=dtpk}MHak`oOVdYcswOl^$i<@-+-^gu@pO0$m|aa>jMPyAtbPKVZMi98%SZ1! z2*MEd1H_Z{wGsCiaws#OD#%mdY@uzVgOTo;nh%+`7p->Bf-sBPRjzw3)9`aJjkVfg zuF??>p-IJ9i173IJP`W)Vb7WW9pm+yz^I?m(NBT0?b#yqp#CtTyjjx? z?sQT<8u~3zGx)F-jg&x+Z^N{nvXS<8>N~g0N6vir^gFvx{_>@N3akBlnXIty4YJ<1@5!Geao%=ZL2H)3g5zKyqZPcelB+oYVWVrWzP81Ls#2=)e!0{u+?X9|_;+ zI7j&nh{UapYtg=~=~wc9)tKJ_vQV?dqVhdjp{9z6pYd<=+@{WRV~g$HGhu3I=!${q zEL2L}v}+tk$nn{b%+RrSDbA_14W55w=|JZqfvFOJc+}t zd8xw%0AJf^JuGZ_Y)xJWNqI0u#9lHMTm;J!bhLE0P*d_P!q*p@PReu%L?s3Hl>AbJ z3dI#JsLN0qz3iRf-Q~#4zguv)you;`<-4g!;+2SI+~tlC z9cK|l-fT=|Bv_o}st8K#Q%jIp?5<)3T8a)v&zAd>uSUt*RcD5uhb(XuasUx6Zm)^>{ zfyiDl8mq?@$gJh5NA(HAcq=69(O*u-twuM)#drw|+6Ksx#}Us_W}Y%Mt*gmK9DvbX z+5yRWcWnxW5+hAV#du{jhch~1S%Pq}jajU#W;K*|yZ6-BLbRN=B?BAlYvirOnYovmcb@^I4y~8?8EjcISVglz46g!9nZM!7%XL3sat0!cw_i#(4aHOjg$A|1=;JMX-TqTxA&X>DhZjTMAEc{4@m ztQSzp>0agodAxWUFz;!rlL?4((oB;t1G4f4F>kz5X}43#@>O)RhWkZX>y%@ruhmpM z*GS##XzfrOR$_FN(W!)j<0aOLTD%Fu#GbV@i<&`C*;RfU-HdZ_Sm>SlbC`h`JrCc^W4s=5xx0Sh);G8j@p&`tp3akq+DtJIV3sC(1U|tpc~a9Od4!G zKJq2dM=~TQJ4k(ppOCru>q?(`Jodo8L1)93TVZGxfGm|eX^SuFcQr{zFMl6i8Wrdq z=IhD?a(q4${d$+t<4geI8QEU8GtWXP_sG;LH;5?W+2|ZM+PJC~={YBpXwz5?(Js)b zUBt9#teV#@;Hq5gA0nwxopp)xf!PIHwde(zL{=1&p<)}NU(_UbGSOXx$~yKr{2Q#i z7?Mr%(>pcjB}fg?(y?Y0#R8Em>d2+bW%(;@klbI6$h^XyA3foU{Ml(N)9ivISP@LJ z3lU8(%Jtxt71%Cdiso4a$lL~Odxavi7?J}PMih;@=m<-|Ih2IcMqUNG6qfR_VmxH~ zYP3rAi!UBAldnPQBtPMDd2LPEQZ^y3LpMWPxLU;F>HV|}Xnvz5l&Z2AxuIewe=$1u z_}Dg6*s_|Nohe0Lj?PLrk5HBxH$obVatvd6rpBsr1sp>d{lcWBDO&+qkR8KT3Qf?9 z)lhwH?q7VY&9kOy*Dskn5NhZqrgDnD9hn~vubiT2WCFs#50lLUnKf!>=&|t7LR~N! 
zQr{%#L1YVwPZ#I9HUcusSi%QaZ-r#9b5zFx|DyG5s>vpL8RtpxW=INoIkOs*%x$2o zTyc61$042m_KK=-`pZE3YKC2Icq!UO&2Xc5qJwck^bjZ;d`CGRH;mR1dI*XvwxTeH zv7{7n6q!{MPBvmXj{&lTVZflg(l|iLpYAwDRwQi_Mr^= zD!LPvEYf?+eyW7Kfmzs1Wze_-o#j!ysNh`y<=yRW(=b8oslSVU7vT^00@RMG8Jf_( z`g4cf4(+%fkS#U8fV~@%U+iXrq=3{{ZW5i?C+-Evaqib{dZc;O42(C$ndbh}$WH2( zN<07|i6?)dH$UiEdBe~fs?I}5awNPz-ad>-4wS`Pf3cPw05;41Sn`|B`Boj(+X((6 zev`3#N?AcNC)N(@u*Whd>BD;uPZY93>B4`hmlXb;i0F94FATua6y z@Tojs1UB-k%M}?v2~vVpU4PBI;W`W<_%e z`7TVMH|OtjGe=b)lAqm=w}^A{ZK zyvgnhO0iqG_r?JGe%p^O~mJg+FdIKtjJd8T}cI>Gw1Ex$+r7eBZBC$MQ zjBTlI1`HI&>KdADMH1uXGL6>LZ^s^r9vlm`)ljS~H>rlB2V2?df zKG;NKX!lF%Uu!qSwJ)wzh-T}^+@`k8{UJ`$btWmdmF;Wk6v*Vl8wX-;=s^>emFp=K z`PkD+lknJFx)z&>CH7fS2{!p5+m&ZoS?6ztsH|v*RtC2rlBaH9NsHR;pzJPFI|>K; z0BWm1yF(8eKxTm=h&aswEkEOo$`5B+U8mUgw<7cFYs0WdXhtj2y(4dx8G|%MFdLUI zPJr?g@BGXEyN{`&(u0FDX5VRaHgt`1M|%L~x6CZXV{<1Y+a>&zcB8Ty6X6bi2U4+n z-A6=UKoC1>H)SDz2tyP_0g)xSDOCk@ex${6BZi~>HAVkaY_;5t&gxj|3n8rE+yk0- z#S$~kDX1n8^}T*jkC?WJ<rJlp0j?f z2BL>hIXG={)4d*}a1UqdmbkL$K&D>V-qN*-l8_W7D?BV7MP)VH%S!jLN=N*0N&-5o z-5OVu$@w58X9Jz;P>C11H?}~Zgez<;Ij9SW6s0)*;XBXNd^#K5_SsC&Rop)Ab4dtd zE$yM^7dBPfB1t$OpfraOc)++zIq;k%ccrZEeiWT0?e29n+%R%1UloAd&&LsjfVG`_ z1ML8*GyU{G(F^K}DpaC5zX-uzCx~M|@+2xhaN`O|T_E-zN>sje-V1@tSd#L;nlXWcK-v_Aq-!7&Gc=XNUK5Jt55Q&3p9DB2b zh(Y zYutON$X^1YONu3F&PNpP7e;-JYDRI^I2jgA_Zw*TwV2pvL*l;$C70QJY`Mim^&M<} zUWk2NElLRh;#KjsJ>$fDt>V@t|N5`5>`kqxHf@1_T&PHZQC>Z66;E;fGpo%_e z`^m(QP%+LXKMPqh_a~s@?IXpCc7DyIm{enaU_iM5>Z=p69=p(U%JD>8aH$p-K^jR{ zGui-Jxnw#TY?+Di9igmTT5||*jNtEO5cw~>^Do&NJY#P$wo?XBnfJ~!fX^Sw2Bjrt-~Ho(9jB{BH3IH7W!tJ$Mm0#eerw=VRArJ02ZJR0n3 zRFxU@HRx=>?Q@S$_Ll>2*H(=0@lYP2zYdm-8mAd1CrY4<>ubW8sPWeR8Mp%~3;BRVPfleOp2vw4ep(fx(PT*Ig^IH}XK1Vlz zgkY4*c-A43T|!HRXQ3?(P`08IFLZ$GstXat-K?2#I5=BDmD;-5cyW1b^c8rH+#$~xv(Sna9gG&R#sTDx*eUPnOQYl z6Vs&Gms$MiE((E1Vzo4)vk!UBR$0wO!x@HgqB#uACM~<`rQTa%;w;Q0I;Yhh1)I_r4V&BWXu7`Z>cBjxJzF8`%e2$) z1m!pBX|^?J2#{=Ux2a*p9cUFh`S)g#yF6zd-uOlEJeJOTDps}#*zqY0F!i$5jUw+u zH3C#z7xts&xMo!cMZt4O#@s<-a8FI>q%wxwi%zj<3EkzsOuJQJZTbMB=^++_B-4i; zfMsRe+*vyNgEbXXX}QajiU7;D4CAWfRcwiJf4C+UGr)>#w+s)!ni!4GJDD0$seYCw z(ntKF+$LAa9z`f;ai_(R$B+bTC(ZJSOvadIVRjIaQr0KPsro(%$nr)cu8^~*Y7%CT z&=jA^3?hmf;{rm)vn)r?B~HO4T89$9rIm`1xUFI3I*h2|6Cu=T3Lv7%XC&yx2z(US zG=67l6w4qZ`-}JZ97M*5&8Pisb?% zjxD?llkLW)f4+jm(y47He@=ZsV2uIP4W?yCeGQV8>6vl$XUP8RAcE8z7nq##oOQ69 zMkRU^orN%D&;8nRk>UU=Ux-oaZA9kBI%Ul+k9R=w+Z}_)yPh>xNV)0D2>Ko%nJ^j` zdB2}z-*~Rm92%3A-;EG#F+i0{u)86BE-_C z`Le*$n({M5etlDDaGxVnl$=V7b?z4cJ#Ox4o4<4j6lQ4NbVK_pe;p9rhuD3M%7S!H z^QnM*1JQVGhU#xU`cTWtp0#mh?RUszD?cb~`5v7gcJ*E8q-Xr`2Ni#a@?I{fKNG4r z&EDTxsQC}3Sf35a>g*1c=^SKZRfz?r7-M*DL_Zhq>wFqg=RFUNVY@=VI6r?Qg)7RX zIv1duu_;b1{A>7BTnNh#n|7z;^!G)O{P2oc%wJp+gxMx^&r2XF78_X<$~xdukjlhl zAcxCP$&uOdFw0zyZswvW`ybPh^osgUT+Fv>2@n;T4BkQq#v`F`N;XLbBM_OB7a(dNnAUYKSj^l3oKKjrK5QaV;XNF?S^T z?{%5F%U~K#g*GbyIE9tPk@kj6AKoR!gL4@=i)=6}2Y{Dn4y%~vdTcK;DC8^nwq_)T z^A+f9p+?`+!mTx%iJ3u$ecA&ghv(+{y^)Ct0^SDJ^jkyATjx1z7He~rY0n?ak*p5S z(Gz}c^N}3^r54c-%HBl2smD926?-^6`FluF&n;Ajd|80`Fo@oYY6^Uy3yNv*pP3P$r)Z z|EOWlnOS>Wct<+6p3Wf7O1VOd_5e$!bmirDV}KNi78e+ED5E^o_sq-|Uop^4*7x{D z#i%+BASQP6@n;r+n_*ieQae$`i49o3+oRlLHZ))rr>{FeS%OaC?$q-x!2FV5U!v?a z6V4t#$Tb2#$!c%D7uOJ`EodJgD?IC1cNB6zBsHpVjxy^6cy|RGAEyBC0cNe&vm>)^ zQzJmx=U2!2`aWb9lVfw(((eaoyzX%Lj%I%#X)<9;cP|S=z8~hcIBoe5vQY~DXP)Q{ z%W*9GAI^kv1TR|YK$7x#>7UqZ9zo`Jck_c%??(YC2HhOR=YAEP_AywFgyWN2dR<=7 zaO{c9%gM{KUO0$M4hbkqBc)56MvB>wE>RI0aM2&0f%u@K@nd4Hr%Uy-utuI^4@KCc zH~uU2B2y}Q%;4#le+ZTxbI^L4_f^g<&x2F+sS_mTFk0IR*BV>XM^IUx-v02(3Z*rg z>1d|%fn5aU7%E$+grqx;%r;CN?w(GsKZnaSR-#nciFrMNUjLiU1H*&gk<3a zH^APP^A+;yuKMUTuK+S%wxa%R#W4SB%_g=e(42aPl++c 
zHQvxEpIiz+TB7gSjk-j zD_v_mq8eO_^6CG$;3_to>ktaoIyltJibZ0s2UOR2!AX^;DD5#K-{3cSn_A|co}YY{ zWd^=KMZ?ql%K=##?u+FroEst9+om?H#dGq!y*Poi*3_&f)4{6MkZ7c1ISsZUmG+JM zf-URvx0&r)+o#%*^V|=<>2e&`SUlh!QqyTJplkrk+qinNs{$@82$$}j`cgOB zkBuezONB5pXR}*DF&+<&g&AuTD(mVlEE{H27fAM>5}M|=N=`-~#oLitdj-=I`p`v*pQu}m8Uk->&ipOhVe~AT*6w^)8D?W4cSk1O zxPiXnF)QoL9**xp`{dT*Yq&RGD`paYko%H!JmjUlDz+bq4H5sP@xH$sk~N!|oQg<` zF$&+4$$Xn0!%|K;fn?Ds?$Lzz)g;?Tc9x<0{)&{PAsg}okSyp>IqUZzT0w{1fJ#3> z;LX%{^6g7Gf-vQ6r)l|b+ANHF^GWp85$jj#+MP_?aBg>NSF^CeLcD8?_Ch2zk zG_;n3nMAc#Q>*3jBp{o;JEGe@g;a*&xq~bko|7-*5k@(UFV6zALUYf>Qsp_cf|kHM zu|*gwvqLa>mfQU_@$<-J+(2)MEqEB955))~(_+6l0`lFz`SAi8hv)pnigE$svHJea zc}?8$d|xG2h5{XHpc56^Dr!r28&$(|w)Cn0mL`sOGjVVrY(?)Oajf3D@;+Kw zh6TLy`493}iF=9`{UKW6*)HPfO;N5s0x3JM(jvIH+7 z&Oe1G8$(ZM9NIqv#t7h62-*fBOW!xRt=P`K$e;U5klHU1K23Y&>K&V#UjY=}hSxT4 ze_h|^`BHAz{00)D68Btx>p7*jt5=8m4w+2lNE&<9-vg5K#cE*5N6+{_=HKa?Gd!G$ zOlhN_YudBW*|aS}s+WMcXM-|Z{vyQ+QUS-h<22y802LlFdD%Y6Un7FFu{?APKr`b$ z7!H5hFi!Iu;V$rtx?*#*^My5?6poga|4VaRlv&2kLHfF048V4S!@NJ0DeMv;&TNij z+?RSzML5k|zDG;B44Q4R%Eiq^>wskSMR>sPD=OVG7OpM}&{_9jzvfv8!1C}ki)r&p zq?+gPf<=fFhRM07!y_fwu{d+Eqh7-Pv55e(5MclhzpJH?ECfrHZ(NPeX5A8t;cJl1 zT>OBq#);%&>9`giT(`YQ+o)o*{hKfxPT+8)!s6m?nSy;Pr6osjjb35A9RtX6>5th>JZ8~`cz@b8r-6Xw1y}lmgyl$m7OA6 zD=3wTFoJCnONi7y2U7karkg_^8Hyk27iw7Sil%-62^ zi@#Vz?Lg!=Dcc6TSMtaIxOC}Kx&|Wqz%v*nah`j?{x>4LgaxNkV44xz+ zIl0sfZ_mplEn;3$MG%T=n_W5RAISUw7shh9=fX_gTaG+jl&NE`j|IrZNd2s%ovM+) z@Z7cSN83Ou*2J)*5)rxTGIW+CV!4EkKDy22P?Ai`JXEI5zYBB56`50;P&YAJFFu*CwudZpf_r@J7*Cg#&S59Wvq9el+rSCcfdFz}Uk8_VIeLcXaz}JMf z;W>+}w!e#dF3aSKyh3bMJ!m<=SYcrm9r{L87N?`RT7uRA^SRdXm}goMnWG?tGdsMp zI)9Es19n}KFl$P?E(t!z$?^a?yV87wg4i9RNx*Sxi(pig2!yh}QKGj-huxZh`fcNCB%4t{ zeE)cfh&_hNp{6@D#|dOIxfSd8q=*OgrGkL@D^%DxYj!`#&N@onDP zUx6O9@84a4+1}#5bq^pL#wy5NY{iZDUZ7&^>Wyv9`!a=br`%h=9Vwc?l@Lh}Gh zo#&pO7%wM|9?V}Atz#t>0+l@MSZ#QC7%6izFabP}Z%P>sKQst*iaKM<)BQ|-bcjq7 zn_Wc%c&xsoC;I&fbkQEzC*_a1#Vn_SC=~hQ$oAY3`Yy@110akI-y@UjF{H+l@{GB8uFp<} zrSwd++=lcej5`aJTq$?a3H51dA$Ut z*Y!u1mwF~4D~qqyWr&or?K5%w=5llvI}B{5lq&#PWeR@=v0IR7%GK;eo)*?Lu_;|{ zi@vhvGsQttFIWWdA&XHKU!_=##@Of#deD-ZynV35aajsT+3pVmNNzq}4OCIv2gQne zO=iL78@sO8=8xoJM>O1Z$gGHo>ZO08Q)KF$afbx00+sb%&zwuA$hWD#pO>}2;y82cMFYF?1y%}-?5Qto4=!v(YWJJN^m?o3 zT6r( zghlF)xv#H$YqTsq#G@@r)pM znqfY#)5$ME;Td|6hrVJUbB8ErQ{Mt&cz z?`t5^4#ylpHr+0E6^keyg=J+~nPMj-gzXs6=MA$4fSCA?0~*0% zlb*3)Isr-cuPEV!UO;EwjlQJNFT%7avt7v8Nt6a!bkzdtMgC$f5z6!{sI2XzYbl86 zs{q~7!pEYjEM`l@#@E2UId^KEU=>Z;orD{3YVqR}ak zWSq|#Mtp(DGFiY02CNWYg7PD2F8r#}4w>pcU)NVw_PXsc^!0B*zIO8O)%aV_DF8g4 zFqe&@BE>(PkbDpEi8^GSadN&Jw00P4&#a`NqjUNSmm?b;bC44l)bNI;;ns zn`u`Re~a@FIUW)I2u)lh^n7R*u)PVxcL9X-^^VQ`i)OtLl`=LoGqw$vFRCwu=WJ-6 z7ejRMzr2!h!XqJY1;Jpf4K7VmgRP1?!Y-??n$5T)Q6C^!)e#4@uBeI2g`f+Na$Rps zTj)`h-?;mYdl?`ynVo}jJa!SXsa2S}oc4(bJd2?iL2pdyU4qV9QYeb&=Tb=4t9xWL z28OEvJ~B|QBDe;Tx!NfxcDU%?*FsY~%Ej7oukM(Tn`oSYfWHDhbODZqcZz9HUR4ay!yas)gm zBTyT#`DW~1)ArUuRFqjTB4GTURU(1XC{CA98UQIZ{f?W_8qo9AjF5;5)&(LHapKp& z-GlPw;-wy;1b!RaQ5)-9;oDcnj9WqK);ks3s+$lhT2s_% z&T&NGepqIF>YuGC%%Bfcq&+URBts8Yq)q&uxK-t$nv-#JF#UHPhNa~7lwkM=(Ag<> z_wEQw-6H@c4kI#41tLq_Wf~sgmM_I)36@~1aSWmu(4WZHPW~eHu@54AWTd#9;>(k0 zRdNn3A}^(R@u^H>z8x!me9xds=i0a<`B~2-V|1_tTYe7Jj2l)4vO5F_xsFkSNIZ{B z$)S;MH!<1ssIM@Ohmqt6AQ^Z|9J7+-C?pxNWiVW>kJS{D~ zg7X?GD@6*7kf7H=P16;3>r?35aWS`)i1|eeDITV8*0igO{q-$$@-UQAnY|6j;dbp0 zJX7AO$zqYnPX8_-ixE9ZEEpOED2uVG+=28yI$68kx^3>)lnO|*M|LE-l4-9v^L+@% z&@eU7ED%{DhgHI&{4qejy>tIE_uJ@9pP(~enu-HA_CP9PE#p#OngyhCDJ;{p3lP3< zwV3(BW0us`GY)^5sms6>+csvcuTCeKi2>?sG`6(xXb)GKW>xzQG>4d*V(|PHNmT!4 z;S#!qXVr+KcWKc19uWO!AWD43OQlVV2O+frm96eDGWX>e1kbA2sROa&5?7O+4a=H~ 
zlu_2g=hRf1pZF8R4B=HJp)^^RvblZ&CaRH<$%its-T!>D-ZE-Svr?V(@5?Bt%+*o<7Hrq^# zz?x=DsAh{jXVt^5S$vz8fC$Grzb^HdX47c5W<4DnH&~7PbmkZ_ zbjr!`>k_+jqp=P6l{*F$9PH`&X;Fi~)d9-Va8_~&-;gQs(RzQ+* zJM6#gnETDZZs}T~zO$Y$c(%@JK-2HSQN}=a?Y5gBlyp) z(5zNDRgTk}AS#eogvYk(WWX72i8Q%s=X(iH(qeaiVy@DL6LbZ#A>jNsHI{Df_ zrf{`wfEjf>(Yljruh9Xmu+$g8bKpNqW z+t{5ah^5D5%}KEgSkq`_bRJgKW)aEeJ_r9xr``!N2@KIqHOy|5@0NW&t3>n89gwWS znxdZFg-(vKU^bfsU9?kL~B+I{D15;7Zng6=+ zvY+)ZAbAp3K9v>M0Z@Ljm4P9zgM6gEC%D3pN<0ei*_oGYaXjWZKhi!En?_Hdvlvq| z95Sq=4?>Isn}0+%@u>D~CHU=Ai2PL9{M!?&pJ!k>()5pSr_nSX^3Q_vBhw=hrsFwu zw*RSL#Wn=|yiPNV-yzD~UHaCFG$p^cARoYC1=++9l4gu{MOu=RgMW$}nL&81W3 zs-pn1p_@*t!ZB1+sZnKDg5!`Ze8kb+QI@_ZYAQXs*dkwmsILK&qV*yoxozVkxBE_l zvcF9a&pkc&M2RHva;A%|GUfe;H-1iq0i_E>PFuxh`HPwL)tYrHpS0r5^%_VU zm6Noud(Of}^im;m3HK?OqOI-d?qLu?q-2aa(y1%G1;Fqc=J;(icEtH7zrK_2t|`Yi z-bEyDTrpF8tKQ3(@ve8v7B-0Y(JG!>`k?{QT0ru9Y46-`cCdt?vJ-X3H2M)z(YfYR zR6J+VBU~v@Z&LpWOwr%?ud(L*G!t*=GLL@dIZ7EJb|@#PESMlz1}-`Zn~5$V1{5sYqS|Jq};;B44GzsXdN`^NC_Eh?GTl2>+pm%ysAfr;@lxqP4L zttw-G^^BJb8^Saftr+cgCMb)@gyHs2Dhec7(H4HEXV(OA?O|xH=RlNgM%r^d;?u~8 za-Qcod&o<_47KolG*M_al&)^@C6@IYq8L?{T; z#U+3&W^BX6vGPkHb)o?E2SYBa$%Y+v2k;X^Nv)k@J{|-a zo?T_ldvy{Mc0TDJnO0rwzLkmZT1e*D7J5%v-ChUF_L=F|KG$asoc82>L*@{2KkC*4 zc`kz}-|DruvV?eUawEs-=zn8kJ}CDRd6#7b<_iWY6{UD28JYH((^eyKs+D}VdCWmD zCe*M~g!yA#Civ+MR1J^Wj#R=}WOV=(VRx+fV<8x#(FqFy9~dzAb|I2qoxM)Xt3eNh zT$MwIvCQ3w&W1Ou&j;?h6-d~G@s5)in^1D3KFD!1A_w$HuVW@_Zv!VAJHqevc4SJg z0|@9obe4PzQwymW0QiDUp*WE`l*#Q0i2$cw!)S$yl^IrqCyCi$D=@{(8BcpjvK<9w zVFa}*8f0-m7|ZOi*wJg6=L9-isxbjkBPU_WnJ(d!Vi26J$d)Y;Er*Ix-|OL08297Z zRe@oKKzfMXh^!XtNfhXg=GPL8%OZ5=YUkd3sYF;08`OPh0=CPTa(_+hK*esr0~IJ+ z=QIpJ_R9|D2FL4NG8Truhu~TG>=_TEL#Fp~RM9jClEmHLZ<}r9S=*0Lye+j|%1Sq93?s_)M#u@X26}*L8?>3ecbIOyzED<%G+VT`S`4|0~ zvZ3nFz>+teaafvZ1kjWu2JGLPaQ_e`d9LE&$m33tCV<@`cxk~hd_0eu7o%Juu$5$r zkPT|Z5mbIqe3r0x9|bgBI;_1R$v>xkKx34PdK|B>@VMzy+Y{Q!7Ng=um22u1K3bun^E!AoJZ_fLrmF$k3}m#pX{qe`_ls zYig@#XZoqU4hzAbh*jFDB<3D}>6es^B)G#HK;J@OnL_;8RSYHhQ)f$eH!TCv3<{II zHgDgp^pNh%Hj=Ln50;}8@8_%GpiaF&C?N@-8eAL8ybm+&>X!EIRTKb4ccT^Mr5}F>ieVQoG6C zT|s?$`W2gXVN&@D(g>KYLByU<ZMmlQHg`>3y5Fu1#$1_Ft0^>-dz}jW|%42 zNFZ8#ZA-hYnoPR8T*+{K{&3X-o$3b9+3j0K$=os|Mv2Z44xP(U$yl9byD|PoK$f9p zw6B=yR@5XbOz`HO)(Z5G&OI%Y)d_G|wFex{)duhll?_|!I#g!KE3&K*+99gt5>SSL zj!Znvt`ho4CXSHAySA5OTV0SW|J1Azum_p#vC@$+_cQ~4Re7&KMi>GW%;?LI%lcI`lA6HkD-PVnK(QZ~CFa?e_O-h~$@8uv3-?rdtT%k0?nTEa z78Z>A>g(21LKOS?eo*u>rigG*djQ#R&DmZn=5ng-L1-41#yB5j^dX>(Z;s*PVb59R z*&H7Z0F>%4%AY@{lc1BAYSSA_)<+>(75$q(2XT1}lq1HB6%b>EQYzCEU@{gKA_9F7 zm7ij{4IAu}fTqZ?e2<+y3g%O%(F`h6HZz}rrhKg!oSi%3XnMLhJXR+wEWmfsjc34_^-_k-O+e5LrO?-^Ap16p$Y^EX5Jw-^K}ldI;Qz_Ma^+ zj(b$a?w*#e9y$oBsZp3%!ac;wb{)+U~J;n}08zcEuQzX6UOYg2={s(~6&$?~%jcr*mGm@u`KJ){oj)X;%l7dRstamE_O9)y6b~*_`KS3z! 
[GIT binary patch data omitted: remainder of the base85 literal for tokenizers/llama/tokenizer.model]
diff --git a/tokenizers/llama/tokenizer_config.json b/tokenizers/llama/tokenizer_config.json
new file mode 100644
--- /dev/null
+++ b/tokenizers/llama/tokenizer_config.json
@@ -0,0 +1,107 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "32000": {
+      "content": "",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "32001": {
+      "content": "",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "32002": {
+      "content": "",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "32003": {
+      "content": "",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "32004": {
+      "content": "",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "32005": {
+      "content": "",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "32006": {
+      "content": "",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "additional_special_tokens": [
+    "",
+    "",
+    "",
+    "",
+    ""
+  ],
+  "bos_token": "<s>",
+  "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<<SYS>>\n' + system_message + '\n<</SYS>>\n\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content.strip() + ' ' + eos_token }}{% endif %}{% endfor %}",
"clean_up_tokenization_spaces": false, + "eos_token": "", + "legacy": false, + "mask_token": "", + "model_max_length": 1000000000000000019884624838656, + "pad_token": "", + "padding_side": "right", + "sp_model_kwargs": {}, + "spaces_between_special_tokens": false, + "tokenizer_class": "LlamaTokenizer", + "unk_token": "", + "use_default_system_prompt": false +} diff --git a/tokenizers/mistral/added_tokens.json b/tokenizers/mistral/added_tokens.json new file mode 100644 index 0000000..4a9d3f4 --- /dev/null +++ b/tokenizers/mistral/added_tokens.json @@ -0,0 +1,9 @@ +{ + "": 32005, + "": 32006, + "": 32001, + "": 32000, + "": 32003, + "": 32004, + "": 32002 +} diff --git a/tokenizers/mistral/special_tokens_map.json b/tokenizers/mistral/special_tokens_map.json new file mode 100644 index 0000000..2e4806a --- /dev/null +++ b/tokenizers/mistral/special_tokens_map.json @@ -0,0 +1,74 @@ +{ + "additional_special_tokens": [ + { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + } + ], + "bos_token": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "eos_token": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "mask_token": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "pad_token": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "unk_token": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + } +} diff --git a/tokenizers/mistral/tokenizer.model b/tokenizers/mistral/tokenizer.model new file mode 100644 index 0000000000000000000000000000000000000000..85c0803f3d614c4324dcc494a36cab796c77759f GIT binary patch literal 493443 zcma&P37BN(Ro;KW#z14)&0;WnSr*o48a))+zvAvJ^$LI|lSgb+eVg%CmrN&dh0y!T%D zx%{_&=9%Zb=iAOX-}%1#?X~c{h0nVGVCS)yy&&Q5g`c_b+=XY|zgMDXEnK(o?E7zj z20nk`Irq08Z@0_v=PZMO?w<^+T%#m9!Hw?IMTGok)}P4H0^PuX^$gK zdmL%n<4DsUN1FCH(zM5srag`{?QwV09(OnGad*=mcQ@^EcheqsH|=qE(;jy>?QwV0 z9(OnGad*=mcQ@^EcheqsH|=qE(;jy>?Qu`j9``iuaZl48_cZNsPtzXvH0^Ou(;oLU z?Qu`j9``iuaZl48_cZNsPtzXvH0^Ou(;oLU?Qw6@9``owac|Qe_crZuZ_^(4Htlh5 z(;oLW?Qw6@9``owac|Qe_crZuZ_^(4Htlh5(;oLW?QyhekE2a{9Bta;Xwx1?oAx-` zw8zn=J&rc*akOcVqfL7pZQA2#(;i2g_Bh(K$I+%ejyCOaU(+7<#YIM%esv8FwaHSKY%X^&$~dmL-p<5<%k$C~yy*0jg5rag`|?Xldn$8ysi%T0SM zH|??9w8wJO9?MO8EH~}3+_cAX(;mxBdn`BYvD~!Ba?>8mO?xai?Qy(mkK;{y9BH0`m{w8u)*9xF|ItTgR$qG^v4O?#YZ z+T%pi9w(aiIMKAniKabHH0^PsX^#_4dz@(6<3!UQCz|#+(X_{jraewPV~^_=p7ZoO z-^RZe9C~c$*B8EU;kkyN&iuTEX9M@Oo>DA)?!ssGcOcUsd%KW%vEV3Oz&QAuJ;)-+ zfnv{s+!Z(ZWEp!8u=q)3*i)2NkX;qi4sxKp(@(NuBN6u_%g!ekzIx$>pDyC3)7C*5 zD(_)c&}BgG+*Q!GRQxr-DENKFf0s!KFtz{(kngTyuLCB|Wef5bOiIS0iZTTJN(D9m z(}>hl(%-K5tAN=j7Zw(t)!(@Y{fAY+05Gq>HsCKJ^;Q_|DcNa989h75t|b1PAp6qe zxlff=-2^N?71@X7zvw9zz&;BrMXkNCLi;N1DOE+XXQkAYmN2^5plI8v?D4Zq)>+L9u0KUb!NW*V+Qf2tzIfsJpWfD?-Q4Znk^ZSj-&I9?D#$en^~+B!+`G_jr2YSd&YV`d zAEx-#$gi)u|Fs~)xqrRV_^VGug z7Csxf_BW#w{M|@)pl0)XRkF7LW^UY3{?id>w-5XG-L#@Y{~*|1^)!DN{*k%r 
zr2e?B9C_rQRD!I973YytcVD*q(@1lm&gRc5Aszz!x%txRFDm|Vz+VP`p!k0ke&yQG-N%1cT_TWHP{=#5yiu|G~Q@PU%oODk~zt{z+ioPV+yYVG^VX*tk@k^_u zD}XPn!0Q2DUP*Kz1pf+W8{P9)M*1zazOSm1oB@2b#oLuktt#L_z>6Zm?YBc-U-4fI zc(D^i7kER&2`~AYVE4CFg)a%Z8(*6nD^o@Xcj)TEmo9w4UvGGBX&yn`Kn^!oiVWj3 za7!tqw7<0qT8l5$Z7yqDL5D4lEP8uY?2ZE35otFP+!2uu)G+R>B1L1nt3tiU2|MC^ z<1=-)lPITqsz}i#-CG&vNrH|#MOgB_h_+VtX)mp!Jw~*zjm(C!_x(Xe5f22}S0D1S zAfu0dd6n=i{L~3$;H4<)U{@Y}EaLPphF2|HhNy+(RYTa{jQ)2e!uFyoJK=coae1&Z z=uUj>9;#qB8p+8@ghwgs6_xfeAbF)_eu98gRjeBUr=9sl+3QRMZ2NP6wo1OQD(X1e znvC+Qpbw&HpNnh{-30r5khf`P>O#cm@9Tr}u<5AUS4RPZgYfJ}DluL`tk*>1fxZN< ztxU0DANaaTkjs+sb&*{!jQ08}WKTWQ8!8l|(>GekFw2{wq~RcHJFB9_7q=VbhH$G@ zz^cSvi}b^&fc2oeyK0X;)6x1jB5|1RVvt>#XS0kgY5P`7>_;6#PV-pA=tqTa1$#pS zgl)5<2ib8Z(J2l>{C#`Su(I02q_yeESFxEjP;sxsC!_+k)QJM>dD{cwtYU+_y*iUDD@}4u}u#H-z%tRk?a@K)-cA zNxreN*DDDDP|6_spKq!%&;TF@5ppqx*6*l76fXzqqZY>d=Ae5T_~y_S;Kk}_za=Eo zAU%h4Kp5k1R!Cp>97JaQ80LJdX?bXFdNsW5+k!ukPv)lk;i}&r{7rdx4((aiT36l5 z->M3^KtsrDv?W7-M-{cJHj@J-L73n>O^4A+I(o8ED0BU-M2EC8L)GkeRmOZZK>$9k zyQ-J(w#d6UTMli}HoNkp?}=CkkU3;n62+45EmMdOO-46ST=KWCF5I$k!(ad8{B@2$ zLS;!|w|7>eo|J_^Dl9DhcPf!q^AN;>-`846nq5ZLY z2?5_12@W5I4|#au#=kDw|1GB;p7!+x&c8qMzY?RZA1FnYY^DcM4L?{lPsW?{!8zFa zdVKOjmA)6{bs&`5in-+vhrrY~9a#==4eqI#j;+@&JyH&h-Mgy5AE_c8(BN~B6@C*z zKN_*ZcT7H@snU;C3AR-Rh@6J%EPg!5HHKU{q)U!*$=|EAVPQaNz@)jTp8F@Nh#I%% zAOiaO=Ko{}!dydqK-8#GygTS#@S$ByS0AePMCySW8zc#OTk2bX%6bo^H#F)oUY39U zv~!J$Hr>CdCiOE{7Zw*jm#W<7)8ni4bWc>;w&4N+dkSy z1{Uf+p)T{6>Z*zvQ4Utv4j!2Na?pdz>iM7%L!Dsq4}yFPvyB|G>?~vc^D7mM5l#-Y zh$pFlFKEx#8H-5E->C5RAilw1()Ghp|7lebmpxd`VU+%(0o-p^!QIv9i*pzR_sc27 zKMP5zMn>DY!5};>`K>5pwg~!|Ob^0geI`hryE|%Av#Nj{6v;ujzJ{;* zBlzD`v4>GRK#LO-*rzL`8D|bEeLIG`|F$x}zUlzv(Pu6B{VG~NtO9h7vHtY$B3?K= zB#zM4H!}HyU~7_@n;fdL{(Y3tkBL$aAbKxW*Z#1|bEpadQTEC?Qv8SNbJE}TJS2xx zE+2U?_D7K?tw|OJkf-O<|1km{j=5kCAj>UP{U29qs&qo-4*00-`QEd5_15EoJ)fD&)gl<-fh zc*CydUO5#xP^-qrk?B1LX_?mWoWt|gS`J%<+$35fiM_D4PyH8 z=T#1S8hwBW>Bso^zgMYTy?-qPz~IS>cxj*dH1b2a{fG@s!UMk2g~>N*b@kaH3N4XbQh@4J%K>>6pb9SlmI)cdk3K76 z4mI^jO%LKDVR}0fL8B$(L`n6WNH7>Md`O{@q{p~2hx9OUSboC`I4)}7;a2k5kqr&| z10)65#_N&XVR&AOkOtHDMYsAnk!lcKRSIybfffpKAZiX&X@!HTL2B~3Rm}Kira&aU ztWFXVZ8k!i!u(h^SqOPG?a?11!YB)iSqG59cD347nC{2o$LB?@BKjk;A&7!v6kP{s z5MDSSQam@}T*R6wq+O4-b2&E{;f67;C;+O(7@l7jY1E)o$Y3)j%uTOHHJQE=uJ*i$ z-jnMp-e&l(e-Y+$c#w3D9drWzlBmn2Ps*&m!M9@|kCN&s@*X95$ zt`4$*bQ0M%`9j-Cixep|k@}jXKoV)+ANm4hl`eR_@rx?F+SLFk1v|_59D%IWhL(v7 zU^I+amUuxFx2!%Yg{HVeH&UG0Jq^|hXlE8v$1jdZZ(?qlLXy!=Hx^pVzm+dl0qOGA z<>SehSR;+NQ)ml5*oo;am@IY=q;BDeTV8{&BGL>c+Y2r9fpWizU&9>(jME<;4}wQ8XpcdJwWMUTitN*FN;v>ku}g1V5MX1Q==Ogj29qhdnKk6 z$(M&oGKFK5^hb*n24^;t_N5*RIS40A>ucnZ&H~LzsFk3_I!tBjxL|+-Au%M>d zP70!uG_}edfWYg_r3z40p8V*mLL2%m2J0z+Qg)SH?f@@T7Y^8p5&Bn0&gWK$Z?dbF zWO9Qn3rI>}M%Il*(^ix)IM5T<%Zd$(MH|P!6yKHdWgNu&hFSA$f6>+?A}5 zmUU|oD+*vTx%PH7mI5GYVtk7Bu$WqdE`PCg2+}2ru6i7h_@tucu^J0dek_#Br*N$hBcUF3A=xYZe z9iLAMgC-i|5FDZaQ1X-7lDn%)v7{6r%Q4jEbPiMoTI?w5h+VnjkqGHYq!ikTp3Z-O zL=K8gfuauDjdh;8que(#f=nTu=zJ_n0bKz%Gp{N@QaILQ@39QBOA75o`!Tqx;hF}B z1<-ly(GMl}I-M4DQfQ~!RzuH$*n%|;*{=?JZJaMS8kx{*hhKo4FqX^m8g?}{E`W&1 zh?C;(i#+KhaO@OdN!Bz5bPyw2jT!*XaW{@Sywq7lHA5rTg;<`>9k8YDqAX$3<7s!G;I2Us-b|kbW72ZK2X73`F{$e zth47R%j8K(2kBN!=IU);7O_^&kO0y_j5mpzgDJMFk9+~`@5tU}xi$InO3JRs%P9;? 
zv#vd@97yWe#Vt(tW0fQgyLI=q-<$$eL`+7YF$nWT%@PZvgKh8kELC~$X;~-*qT05c z9@0WHVU}At=!#T37O}z=A#Lnf9tAngb}aA~4z1=Q%T-xV=if_NKt#Mg7M4u!X}7n4 zb|zZ`wX5WKB-`qFYdAIJyLDNL=0I|Z!BYWkVNPPFWX1N_ijkYa^dr~4oiqlQV^6gJ zxVY&jV(xPyGQ1SyWYgXOfW|E;_v?@m;3|ocS@K|nt;eoT3Xtiwf}J~ntpa%!;PV$2 z$YFo8Y*@)dRpINge42ulHm01=rDrf7*B7{OXn&3BNlr!`Xs(<>lE_^Zw6(e98JeSY z8Bipzh-i$%NCIv7FGo)RM7O>>D+8PuZ%JMmA%n{?egL@BW%#AeFo%J=Xy96yRJWC! zs-kr-#>tKp2K9mx(RG6;jBg)P(gJ|&JIX#eUCF*3yTvJ(h+F;Gj08~B6%DN*E&N`b z%{UXevo;j70FVu^8y~J5OuD60q$|mD6m>ST?yg3A1|Y*Anx^TSh_?@3RaH4NvTh74QdlIsD2;PyG(6bS7qT$GGxT9OXUnpZ znL?7_aP0a)TRSapkOu4;*)5NCoUcUDkT(UPybjytkmSFm&*aN zMA#1+>0&?nwZV?|kU~?T{q;~G2ifQ>iE1Ijp_1;SNcVMB=>D$8Qz_Vbc|;1N1dT%q zNV~}%D0y9lmZ-=SM6@ThSz_|!aZ*4c>Oo8?Utg7je)H3KB zg0KCm6jCLPh#h{jk~ONJ@PKDM{-g;W@n)a95@Ve3fOcoN+Vg-V=~gli zV_6I+-F7uk%K@&j%W+nx0FzVrLb7T(wF{6!lW4G|qdz&oMu-iK0$6o+J$2v7niFab zI)z5To>pKXU8}2V#2}a4--#0#>s6+k(bWJEBu1aP>A|{MBs7AsB!G)2y(&#V#{a-C zSy=Se#54a>G662jAD*Jd7YA{6b|Zv5MdhR>-*79bAT4iKzWfa01Cv|~G4OkJ9tO$d39cz}b0AqlW7` zFj)Z0O5gR7qpiyC*vgH*DTO03wTPWVJF{*Kt$@_xRt)lz?aIzCc?rdWB#xXDk_`4W zNG!~L@&=^bsjTQOECrJp{k%qu27RVaB&uV26q^hxDLjU#;YMuU1F;digOL_w1?kJQ z`7PD4?l; zuf2|BPliHN;6<#Lf-Fjm?{c8^+X_<@b?{n9`t?=p^;o?E5__#zZEk_7(B8#ZdUS&9uAu5chpU!)^UdF09x8D?w&9CKYOQ7I4|`)dsT zGHA=r@d9;>V2bEI7G5znBG0R1N&q>VJ?%u#fC$cFr|kN+$n!uuI4x)e>}gV+Iv@@& zWYE^`y!t7J^ktN>L;9?gfr>ibIi#O~{Ap%-8fRCMw^u^5Vow<@F!{0@8S{G(#UnHo zml-g`N5~vt1^PSG{TNvGRo;mzVAvSU(lN%Y#+Z?El5dFgvA9ZhEuhoy?Z^3|6ow3Y z^?ei!jT98D?zN7|`3JS0F$`Qw=TQns3Geyx3{=`U)=Iyzl8DbceL@TD;8+ezfmW4K zSq89mFQ}~=9e7eZuEW%<3nQ()Ai%7Q=@WIyH${njk z0n=v67z5DoCfa8KiD0x1b)qv6M(W0i@psrrPsm6ugX5m$0*EzGtPI-KvgvsPn5>Vl zPzt1VK7~~akUfQSKhYh`94;fsNx#{q-npoyq!!54x53gK$x|3sbzAaej*TB3Col&| z(^#My8*tc>ctxEo_6cS{OUr_brp4b<<5)Xwfzj+hdnzfkMY!0DDQX78`ds@4%5>Bi z&Wxd*E&U{YP*H|Sf*CXhxt_Jyo zzi}OOf(k1@r9%5U={f_Wz`FJ!lW&cZwGh@ad59TC3TZ7gJjy_ndf7)B0Jib|fMUl$ z=gq2}8bcktF87}SQEZ^^`nOeeyHS2tT5u?(%K=7J9*C$qOO`<*AO~2`%XBP@j!k=v z15M%sZy3%T(%ojfoqW4f@6bE8Ot0xmQwnXR!Z|<`cm6y@IMRW37RPm@+u3X`9BKIi znn1dy=~^Uj^0#WuV=1czMrg19n?6uak(p+VjFL<)TWl_wHr zKo?B!7}JdJh=O7%w*~Fm=&9pQ9Tbb<8MJ&QPLt&Tw0)8V^+l#}o%o`z4j0NZ! 
zG&STc_2hdZ;Jn0aL8G9)psCrrnn7pKw)Yy+=SEuS!!yQZJS8toUwx0f$#g$<8ou|L zwXS{lCM{T$be3kDLZT&OfXFa&NUV`Ux?CMB8-viG8*7SAgFVk^!*~WnAtuRMw))$S zr|Tar)9X4YmqIJj`xPkz*p|asZpgvNqpO5tNL!sn9+U!7?1NHpX1W^}uijZji-nC= z4fnK4mjV&})tX`dF5;->9iV;4=0V&R%sFN`Q!l23DiGZHeZ$aBl zdymIK*%SuiYxs~sTKv;7@&{71$IN3#i{%JNyX;Q6;6HQB{*F4VEw2n39XSRcT^y)vI?iJ3R5vmUDj+S>uGW}mKxFcvx8(0u>2|dDf^sk@?LgyQ zNY~P4>_TQ>O24A+HHUVdv0gW>qptdU1eS37`-(hs!ri!r@O@RLdvT<`1+MCw!Ep*y zLR&opAhEEh+i_&=y~P4axz?@6Zht1Zjh< zF|;I)%n>;zLKWu*EIIu_F^fs zhHI*aG0?AW=ocn---bt$zz~Ruo<*#j_y49u?XmDKd1%m^n;-~@Dxn?%X}xYI*7wOM9smLBNpw( zK(dd0=fd=xbV*@WM;pk;k{^$fnNI*(0CGR9V`C;yuTT&qIrTTxZ2Ea z`7ib30|u}lt8rWB?^P;n$HCSX*mdiPbo9z8Ov;EA>I|C9n5bdt+<;Fv?P?4_zAoe7 z0@8+=ekj}>T#j1Gqe;n6L`AS>8a@vqQ-)CnBAtlC8Eg9PRZuGQ- zUVv@Hht#>vfQWxPf7IE#Ba4&E)EJ~YfOVR03X+oX5y=c#PG&}sJIM+zG=_A}=hgCx z8q(!a5YTdL`q=qv94oo>cU2+^lyPnzN(V}4i z0~!14n&i&_iGH^sWX; zGiYh(Lnl8M`E@VFxVHtO{ECF9s{h@rC75Ex+c1e z^$B}KV`qas<~4dvVc(KbS&eVKB=SY88pIsc|8YM0P2z~#$XJrAy46WjI8mb zSsjm29m*shh@u#7Y5k-HDqX0j{wW2!=C$OZwfGE1GCvm`r@sS0=v@$)q?%0Z>NgVN*z}+0zlv%pr%^t;``U&sMDHjvW-o=^^FY zUO1l_;m^k?Hu=S}pjf^Ils(6G93V>_%6c#<(0*sYbS-6-v zT3i!LK2jBXGWIoq#L}u{3Tb_+rQHlpHRKt1l9bmhRu6m;Zw!l?zDb){1+=8Q`aV>e z0m%ClBdg@2l|~G8)il7W!V44&X`?;$A=Gm~X9eXT$!lw%^Vwqng~aupqE2y!@7~PO z7z*NP$;VuNyz0;bIA?9Gry%Ujr#2>T84w9>)KKl2c;q*Bzzw&eO&#zcGS5JW5Vt9l zk4I{4XSE=eZ{O#3Qy{VWs*(&^*4WW0$s9nX>LyAEFm}Xlc%ndRKiOAsE zJG2ti0#MMkxA_uj3M4;`fUOYN+QtmXk@4y{89H_#AHbM2*HM=cYzC6V^0cI=yn{Vk ziUgv-MhtpV02b3yeg;Wd9I992%t2&uN%YwC-T~fOm_E>4X9g`(?8lnTCnLdumd;wx z1o+NE3UHg))+wdT@p^jHB(H-mskw}SNWfuktu7QGWfJE`X9nKLNuG+LV&d6?E;~1W zM^gtf=A$fVv^&g_Z*Fpx3K&C5?47tq;pl6dswHSQQI-kmCzGocUX2?KEilUr}H}1BhhJ&!IqG5S8P_mpnPurX`l%pBTmOs-lrm9tEXCP0ZOx} zkhF&TGEN3WloKb=4AM<}qg+23*Kt>SnlJz@09C6g^_oT*k;DHu&iaIT09UOHAMwFB1QSKj9*$id777+XI#-@7; zC_e|d3^HUe!twEx9NO}*kFJIQCeNX!vyiGpc5o>RU^(~VEYPn+7HrK?7&NuB@_4;* zpTcp8%=FZ9GDpFP>*Ypv4#(y0#U|Al?EF`Co1_5R4jR17YNQ+L$v>>>mhC_9542z* z^^H36U{be0GRPM!JdaY<5bEW#9JY(%v=tfy?Oa@;i9uum2gNVMAa!QIxf60sezj7p z?2=kF18<27bTBOV^utI{Bh_CHLdrvjC=`++7=g=s0cs$+5IcjmaB+n{`A3mtRc8kv zZ83&n+L8j7H6q8#faKVZDPvxT#Zt%^h)`?3u2wkafhSA`oJgnetc1Wod`7>BoBWdyMDOFYAbs(SD%&g1 z_Q9(%*5Q;4j1Y|WGPQP{O_}`CpQT`bUg)*` zEze-tDd1@Gnb&E~#H!C2EYH(txPZ3Ev3NEE+D}-WEH_4y-wYL*JWePd;PSDo+CCUl za#}XVYar*w$d-bEt_F=x)PlxvC^|;Yh3URVIx}d@cp<(-$v>;AXGwKyG~jl-90}wS zV*fY;%HLhBj>)`B$7#n#IPF`mLr;*6Qm&WJoRWGf=8(gO^||=x;mYSxqg9fWmO@&xp>J4ZFiZlB*sO=5uPKV<0lH+Z@v{5M|OE>5FJESdDS%zo@e3o1u?&3x@OAR5hnSN?Sfl zX^_g2P7Sqk5Lxo03GIbgV{cK^GPGy69Lv;d@t_ia+s+d6)lIg(cK zDaHX;KL|aBw$mJwOFA01rdOIds9efR{#8Ug?}fb<2otcJ%}|x<0@|b;J&BdJ%+XoE zkB-IBX=1B=avnR79mn`K)qzwb#R9Pul49!Q_d=hZ23RdR$i7slWFgO|0`WXOwojmg(W&P{Wi9v33hAp7{t~^N$BH~cX_nGM} zRdEhYxr4RpY#!LY!7(ig10wSUTFuPllZPp3R*BE9UA)uLfCxAVMyo-OM4B%gF_l{kK)Qbgno@3oMI%?Akji z7*G~Nb0FIchE05VQ#D#A=KL!DGOn?PLlj#_-_R6r=HO-48DsFB{JzWjXso{is@lUF zQir`d_V+S?Ti*TmQz{74#3B5#qp=hqBO9<)fowC!iglXg-$g;3Ka+-ly4PBd(}F21 z%(HR<4I!;^c}pjMhOs+{Gg&Yw-nVU`vDSsyt)2nM$E=RVkqdBMV~#+R=@@Jg+3!}zRKTE=63y!9>@d-Ol0U4XU-ZG077U7gjBYor z!>S<-46>4{R9g;m&GU(&U?4(v^}u6c`ij;WLu9Yl(pr#~bf;{iFvy{& zpj>BRDD}%NTw3`%qhchJ%?N`fT&;g;TkDF6t$;_225WT{x;*Jn5R(}+8GEQYCo zQlTHDFc5|5_Gjh*O)J&WI1Q(j7%hxT@L}!w6aeeO$w3+M85}R9CjYex_koiZ7)kw= zOAYwe@}SVtCHLJs-IVOJ=FZ()q>|b&x($Gmx{4rSRmxMV6X>dsL26#jQu#gGzL>PW2Mld08-lZnnppAMVwbo{=&wj$7w;9 z8N`?;1zY&j`UYe*j5Vvgj$Q6zfyqA(TR@^FowDY3vpVAXK&t}D|A>gHmlmWw;E+z7 zI^Z!x$xN=O?jVsuJ-nuvV?!!79)T;Kf$`x5VyQEJzWOgCcZLiSqXlx+af(+%kralU zLKBkAQOWg!Y7OLiA0{-2DYgO@o+T|(GKF&a2=J~h=^T)v01q#Ix`yja#ZqH&qzhT(K19U9>_sq zpwE{13D9_GD1%8+e%ZK=e@3l-478A!<1MnH#=R@@-&u`G&8R2&U!kENZfF5;hsTJQ 
zLRz_1eb_Rft&3p%^T~WSNiG}GG)OacUf15Pb(g|m`t#}xX9ir`L#5<@M=4JpLRrYN ze#_4y3^Zd=mNKC+K8F+NTylWz;A<*A-3}A8p75?s;k5nuN;$31tSA4c(j|rhEufmP zw1gysuE$>)NQLVHPHvjBHR$C8%MYOyq|LIfYpgS%(r?%9Odb*!Bfkn=1dYWenEqUULGy`Cyn{IePGKWM-YJDFqlFx_$J>R$pNn#%I zK{;rb^+^0CPrJs0*fmOx&sS^t9j9H53k=W{%uWtXW$1vuc5ZTLm#i=Q2;?G|lp@I( zZ0Di7)K=021~RX_hI)duVPY|S3Uo$KvUofLm2D)}7c{!NatYYKwg((a>F?Wgm>c|WhhQ9q}s0yi^G=EB-UB%S(3OpW< zg=N8Xf-N@t;UG8P2Ry6|u$xT3m(Ib6tGDXyJ0J>KVzQCd(T-EMLy#q8e^mWj4&#d! zuER3%d|$cOH*)$X--x=9mO>8!s`e&u5Sz=2R4X(>FtCYwH6pX9SdgT>B8lbz*Ya6; zfAU!sT-Dy$Vhtaai?x7=zb!2xDJ&k9N}dz3RjnW=S|?!`ODQw3D{)yP z?>W##@HmP-yvb*WqRi-IgGEp&fnUpL!LSbN@$5w#?CiSna2(LnaxS5ln<$++kG*mn z14C^_t6D>7G0ux39Ku4)cu*hGa$b&i-9})Wn5DB=X@x<#r?rYZak_n6xiFhC z9J)eN9v-jH?hGXVU(+ShnI-2<)cf`SSaOYsV2{Oo6hXa=agB|D&pgFezNWfT5kw99Y`3d{1Ho$Jy$35jBe1u&mh$ zMjR98K<)17979pZXOnjr4^&KHVJ3D@sPP#9`G?qKUZ;F8-X?l(l+7Wy%GVa*5Lc%X zTaYeOU+Ol9dgFvLdOOlZeP!xM`VFBBWV^EPOaJ3ESAPT{a!1tA3-=mg8+mx!x-ZGfdcu0A2HVZ5Q*0itq$CvG)lbzn#1`ymiJU4K0~Lz3lk%w9$Sm#1U3 zC=4bOmhIGTCywfaQ3b%I^Rp{7)H)?g`jjwpN^Go&)*M868;F@azp}u(ugWjNpbR}! zMsN#+IJ{5;XxDM~2fjxL!xgYr?XGrU`GXp9qJzocQQYzhnnIEvn^X9FUdPo{4%!QK zI)X==80ZY`*mPfe_Y-LHIC+w`Y4*h!{Xxqj9HT(M8OYU10qRZWj@plgxmdXZ8%q(o z2#2y5_n;`WGTL6%ys8b7+FUcGCIJ-v01nV8`B8QTz~nSP_d!5%JE?MWXj?(IKK%OR z2wv%$Z;SO$FeUO$)IRORsbfp9fTlpTjVYuAc~mma02Ej)!_6JGi>}n;=U2Hu7Vk4I zf{`ZHCRz@Qtx&BtwZZlkexL;XYWR@cs$36(AWw6O?M(t77+0&NSg>X>=~w6O+sGiWx2VVSwUPc_fLGG45#&jD45 zeo!L$!cbV3-%x51RMHeoS$-vW2Ec&oU*>hZ zy;%28z9=$&<{|k-7`7xYQ=lo3q}xcVSvhJpBxSta{02 z?l^s&NK9T3iHC7|ei2|-)>9HKP~{d;mo?t*#l3B?rH_;J9S5IgZ80AH&%p9V?OG0@ zT|@eb1!^R()5auv1eC{Z>nEkgPRPz1y4CUaHT^0a#4MajG#!U(XV573^>m7JNKfbE zHHY%1V)Dhd(V9l%ivX*65@WVNRf|<`*#^5NySjB|i9(sd_UYy_vYXskGn!I&kUjEU}T0t1`%;@5JZOY`b0IR!em?wap_&)`^% z^BVYpDEe%ybthjEX*B}XI>aK7a(Vx@+`@G*{&P-8ZQ4L*bjt&13z818i`N05>P^~O zf^;30N0WvyEK%&Gvlf;+_Bzfpf<$`MjD46fkRtRHGzOi3oYg9KRq3G$7?gP}K1EZ2 z97j*7&YTWOS7-xsP}L#oP9-KyURVjC)3H&zaLU>}L+u*ry(%(jgHUIcgej!!!q4e+ zYRu&bYA>rX-)FpX2zJ@pY35)%>eei+3o=O*evH82^AZk{s>2$IZkwA(aeS!%P6 zrTYXa$foGT5D92g=tG5iHNF_1Ns_gPM|HT zmIh1a5>lhH1!1v*apsGJqjrSMDX`aTU|l)ks+eZU-!yhi)%Bd2`P_0%RL#?u%{1x^ep z@QOOl0%&bGC`RxUj68fV)kJ0>vh3=6JcqUf<$N*ustAlzPm9p@ABJr$fb;Zk-?VGY z(P;cXb;7=$OzJpcdFIstaj*>%9T za7&X%XD9@x(!`c-w&Y} z$RccO#i9dr6>)zmR?IRuD52*w^hYhiu&FrTqi>0G(D_Y`U`IgAdhMP1HQBMls>>S_ zhw=SGj>3Q|8LC4^MBkWu%z(C>hP{N(YX(oJYB`RpakAACW?O_r(F5I;X+b+7OKW<1 zvJJ!f^&Y2(No&Y1H|o~Wb=nl6vO4^-Hdco~lwoR0SZ?5-O&HY?_q0em272tqq?p^| zez1Q67a<>2g+f+_^)C%h;b6ySHkTY(FPh9DEp+@OP4c3UVlQ4XTm%qCYmE0;T0kqz z@kRAw?V3@&8407FLzh1R+kscMY92{&J;TC^!%fUGgm(FHtuhC?jG<3vj$mLG-Bm&L zI`X>4{*xL;<1Fg5Yg_Sp;S>htT-G~0Gsor26u&+=2V$=H0fFl)CtaZ$7D1M0J+8R` zRrjaSwhmd9o20-}FKs!*Tn!fGv4KbJnqJit_`^D-pEw4h65TF?x{>3>5OWMoVP|QC z6DPs&d@HF^$GZ9++-wRhM~R#0Gsn{{SbgQ@4&!_j4J>(aD8?s4nk<4yw#)Ch;JW~u zS-*`!0c{wFeo8kPQ^#bJS#Ap`{X^GiGlwCRreXj$&qrd#DhJct%LfNWARA~=Gfvq)yHF`$OnhV~!MI!711!_w)eo42W?KvB@=uq#Vqyu0FY;l42M=-6EKDOuQqT z7SJS~EE0)V~-X7J%CnMlWB(OunY- z0?G}_SdUtSYlFpeUo9Xy@p51cjzF#rvSFx7U8b;X8BQ0$4QqB_mNP@Vy4qjM;JT)l zBi0a%gghNVJ~;p*?CQrNM|H&WF}@swtsJ+K; z2I)dM2tvd;m~wbKkV=0^=&GZzdKY04h&;m>Ew%uabH{b0HVhJ;)wm`#9gVvK?E<(L zM}Zlbvh_yO(9t-ijDFD8;W8TNl*Y&r)r*fIF}e=)sOlz$d|X~Fs~139y?!Hk3JZ0K zu|lhyfh;TclC%mlhk;Dmpo!hle|Yz0l5uFOe4R(wf06 zEL<9fRlOE3HDzE7wj008GXz-5m^7(t&ta9mq@P0=)$q73YmI@W%kEwYn@`|4|E)L~ zUw|!9`PAza2BM$Tj_M4W!hNV_ZosGo^CgEWXW!GuA_k0$@SHVIFVioyz|_(K4Yh6Z z3fvUZIYICE#nV8gKNs z@R&B(wx>6X-=s?6SesR!`RdeB=pJIVF@s~L(|~4JCtz1bPM5<#p?KJIWSZ$IdP8IC z!;JSPkmS(w@4FO6II(^NU<$I5j0PDw&>&~9WczsaZSL4Sw5T1Xx9F)M00s3RB41fXH8&}W}M5+5yBZsz(sNusZBd7GYaSSNc 
z9;h3g81q_{=kG-wz8pV2It7#Wf##(%XiLATsnNWK$8`EBxixa;3+zqdMKCEh;ye;L|zNhhYsjyNI4UDjvq1d{l>Zwc9WTqUKul z7{UaMAMa2?0a;l{@6h2H(>gJoC3-rtp6HpIe0T-@ZmTTE-VH&EV6wcG1(6nH^@WZ_ zFI6TytIQ^*K3=KQ_p6g19WcuEt4C~rR4I>H$<#w5opwdO9AuGW_OCI|$f;?6@mpwP zU|E`v(M@1r{Uz`u&&%UQpyPoYU2D!RD~?vJ67YSU^ilBoUyMtgX7_i4?x*j=Wr>U!(Z;9Zm&9+ zLpPy2$da+7OKk&PhEHv^9l5+yn1T@I>?wS?j<|yhpf1??a9|XLftWPt+%(UoP_@vO zIbPY8#&BHXqim9(Fc@Fa1sJSY7%h9ba|$BYTW&${nL}`rOUOM3m3G?o^{HB_GMOs zIZUUHxz0j&WTY<)IV`6*P}YF;Q$h-WMH|MbW(r2z1KD9#hw%$+(iiNK;)c1F^pZQO zdR@lQiy%br=!4*AGadY(Z-=%)uCCbrOAR^49A|7gj@$E-zZpobPK{Bqj#*z|1L6_O zc*F=2abx;8hPL6j*rH?;C!@AvZ?*tZ$tQjIVCulQ1dJVKK2 zFuAi5oZ%{J0twrSr+Qidgz225S|JgD-H(s^ZN3x+nd-M7;%n70mOydeBLgG-l1?=Y zp~bj5YZUL4t}VU#)t zQmTLVCArI~P-PL)wqzY5zN|11b)b`0ZD<$17t_uZOySk7;0{oxdzZF=GJ`&QN9YJZ zhYRHo=s1XRICp$U0Oxr!9#K1{=(DRMm@cNT^@kY(d(Ldm6E{foLKRrI9+?CG_Y(VvHx>M3ErmwySQ*@y1cZ}O@oeY+2@OTW2 z9Cx@$Z;(S%0v%4AgB>}jtB0}14q9e3rBCYwh`dbRQ-?0=70nqmLg@V` zlmobs2XUz6-q1it@D?GR{c%0<)dH3cJud3B!A_?hIR%lWAL8uPLASB-mDLGf5wA}V z9o28Cg7rAgA~aUpmd22BgE*ku2Dk)H4Y)IPOdSI3KqC1iUCzjyik&p-WLT%- zclP3e%iO_N->o(M5uh49^Z0X|ejLNIYV2syS4?Vr${$CtZL#6Aol_X@V>Qez&q&W; zqVENtyPtzR@++rCoN+?!msTpN#?)&p!a(A$f+rEs6-Y(Ls1H)jAr79j4VYjeU<$AZ!%f}u z?SRpmk3)%$tO-#V0U$NgF7n$MM zb9Z$mZ3?vK#;JP;Zj}fB=0GdSK8rrw$^BLAW1vN&z8iWA2=^U1=aGKc31Cj9fNmPU^YiLW~q88ZJi=WtO14+l3 zOs#{YAS7eKle+ByY>xGwUvbVH%t(&lp;KajSCBFXIooK(qdG0$&2Y}Kqrd8ag<~`Y zxn6ji?3LRndoW^M4U+Y2u-%7SO4= z%oX>_+Hgw0*onL3Y0Y@V$A3G{Vy*o0XF#ecenfo;P0{`p<=pY~sz_`*0%cD=jdEtk zXqrQlFFQoFLfTH{dcz0?x%&C( zvB3tNCh7soOzHIeBRmB>GJV7As8Pq|BBQ=G=Ib4^Ii!_fw*@ODX_XCb#;j2T)eue z$x>DBl2(WpfzCmv^VWP`6kb_N2fLa!w_)I4ydFx`*MVIc4%OR%Vax0t=vP28M`q@P z9iVX`4n3<;=0>aj0g4gG1~}Janl}bIn_;{=F{$x}SNWg8u^~SNBNNA)Q5jmE%7c8Y zV$VSqpN)Ji8zsjoh8D<(zX+r1ijYiW4>)Qi>~NEyT_<3pN{jL-3<`?{`i|)n@i->5 zGr|`8o2q#@*!t1v7IK4IT@lsAN!CA=(s+yC_q5 zwvBe1(HE==rD3410r%Aa$#NK#-S_z5ECMO>B$2n$-K6 zGf1R+qpnfRp{?yj<|XKKJOsi%dTVzPtK4V zSncY-!Ki)RAnP| zs?KiA>Xf?KGY7df^1@&o$~s|3)OP10v|K7q&a?ms^%$d=Hl&Rmk1V7$F@t zCj;7uuBjo|CF=xyF-Zoh1rmTz`V;8_|5@RU}7@tJaZ zKLt?$b4#^XfUT(WmKu)C;aFCV62?1c4@Gs}^dy=?O3pYA0!`KG=bO}y+CXw;Ven(P zu#@NqSUct)k5Nkou=BI#G~GI?AXU1rf%ynb^16)Q75m4 z22(gL>FT@D5@a`wQ~vWBKApdd>OEP79`AB!WD$t_o<2`Yg+$*y{c=XzG$S1JgC=+G zx$9@bIv@+rR``~FB`t#^6R)e29|BzXdOY0aFkZ=~-HZ(S+nPC!0p<9kzvE*P6Iiwa zcR)3oDqvWzr#?cP1v%@pnn28ehjO@=qYP9L}Q; zs0nS~!CmnE6pSjg3VfpjDC-Z!GAE03l|v`vzJd>{=EihWmt*^Jr_RKT!e*Xy?2)G$tN&-Ngxw2^?3oMyBy`EnuP4rq+a}&`9AQ6PN)k#hy;f%xic~BeUdGRT`a9Os*H< zkkNs55LR*pNgbCP=^BC$CAZyRMhd*&sSfM@T&YZYD0XDA_pC@s0I%1yJ zyR3`Qh|Xw{WG#TJ)<3Y?2D=`#Os?-o3dagv_9=)C3=6_9X?uG8vv-@G4gyZU( zbL21(^m_e#)(9G{%Gb2UAeZk`a1*f0wvaATLDzt#5$8CE{uMNkFX?um&oKe1x6r*^0sZPz=zT&$A864+3h#i|6 zp4Yc02bOJz(KPCVsqYnBVGQZ|)z5QJK<;Dq_(5-G`a~|^k#%exy%+U?7CByjBErlO zI29XSJO`5b!yRh;Y$T;;uzv%cl$)g7bVh$T#MLkOwClj?eZ~|HKC%-F03Cw^cBk-f zgUfx*42E^W_42ju91b39LfX}2PpuyVOKxQHmxJC`T+6_ zhE26WUh_Iwd!bZH(y6rDV`)l@FN<){Zd<(CL(vGkezd=ycn3Y%h zq-l-Us5Gh;Oh)CUfVok5g&=uVl^2hZqr)QD#$D~P_KVIiDEA^{yAI=od@M88i8)q) z0v#}Q{ct=XnE~vDXX2)b%rS(A^3h-9rsMYvMo!4fmU3o*Ye>%-Piiz}tWY>2$1|wv zDS+JK$6IF5D~WEe}}ZD^!mtgV4y z3aZQ!2ZB2eE>}V_5Q_6y1}$j_P%d((9NP8IihV31jNsTwI5g$mgfT3N;y!~8k4}u} zyW^xk2=hOR_e~+ohKIi1pCwf;B<+Aq$;bT>I)xmPS3ECQXLhZZ723yu@) z$FEbj!KguxL1m@@RA%d^Iu-6vR@A7Z3{bkRmM@1u^5%Vl`ssN%Sb_ajJlQFAoQ~=R zpl$ii^u$4IgMWb91(ca;F=6VM1FIjtnmZ6PJ*e8fP^COCvo8W|WUY@yx7C7W zU77mJnQMGHww=;C2d+Bd%0NtU;kEiPtPJ4VKhGBv&l|#U{w&k^rbqq^i$O;WnMbf( zB){B(kYg};ZpKffOaRt(Nvl7Fp+6U>4wUt@{5*pu8_ooKr)3Uy!&=oD^3X1MIO4y0 z=@iOCQ;dJjq6Khc(lo0L!kZqtK=ndexN=7CAnkxszXq%5`F?3@LC1Fuc^=81_u@BVwC$Fwz z(q48z74ieoN5iBQaHAk&SEjNC(jN%%%aO8WS4L 
zL!h!@rulXL>MX1Z({UzEa8`qh2AX`O^;hH?5C*aBCJ z+H*LVDxU049;rloI{#4=2cr0SKFcjgRN%!bsn7-?71!O!GBr3@?`sAKuyX5~J!ekV z^})^|$f`07W-gW27{|sHMvi^;C$H*(_Av;-xUYkz(5hNSn}yk2YN2YO-Bij8KQkCE z;UKQ{&%v$@d_8W_Qp2yQ5^)+wD+Y@&?6rD{q6MPZRqc7UO~ zKsRY)J?=HcROgnnevkg#X=uj4(3c~n@dBL>b+q!9$bdN0vEvlJgpR4YwR zx>;H7V=Th7aF2MQ9PA=>91`Da!>|=DJVY0f*74VSYqX9ISgy$qpc*g;^To-9q3I{J zUZ0yDtWifJXqUJgBiS+7#?r%E6USs{0d1jiiZ%M-X`M)Gc6?dNAY#p(B7W^8d1K_& zUytkUiy(_0zslPJ+8gkEy=s7Ruq+EFI8~`&YLSPW^&zd}^NY@YNGyXzjt4_e3Wh*L zXT4LisT_p3d-7q&lj~?YI0m}p-Gd!ZLML^My*MocBqwTJhXAG^yFFtv4~HGzrChR~ zgDC0rDIEAsk#N~p#TJdY#w`U}AlK!v%Qp{oVIV#uk*?M@Qy{g~y`-k| zOE1Po!x}#tQ>h$`086h$RY=R)-}h91498}V<-!RN)yg9)1qciKw|u7n+eL3og_F); zxvS(AnDVml93G{eJ&(FsB=_iN5nw5K51vl0RpV=KS3_%qNq5sBRE4xms|V87A^P>H ztPY8Nnjz3OW4NyoQ(iN)!jH#~Y6e|mpHQ9YJS@_WqegRpEfUw!BtX)Q47okvYjr>>ZAJY|%XIgb-j1@+NWnx*HJuvupNL^% z$MNEhMFwr7vDKpSKG^jq{UmY@r>sAn0vgo`ytXk0+d@1GuP^=t1`@@ghQjpK_p!-3 zg;rx?wHGY`)!1mQ$965^&*7D>qupPMPF5qywRdQ!w+JBLbNbA*pe^-o{2WXhY#Gl+ zyRCyY<)wbXmQl+@RCrcLz%|Hw2qy37*z-DUKb(71Cyv!UD!67a&6D607}#2iaq5&o z$in@nUUbWKY-ZD{i_JOb(K@>KqsoX1%Ksn|_OMEk9>}wUXL`Q8vhU z@3alJ>G*xS*k(%MU~T5K)LExauiu9veFlT3(4of|LL%-XtV(MuJa;-)s)#lMpxFOK z*1ZMSnPvB3$0I5gSK&ofc*p}ZHTIyT(HM^?S(3qPgFR&Pm|%B9mMsA!0Rr7k@`xb2 z(K8+ek}L_bWD7}b8M1SM5-G+cu>;E<3A#i^Oe;Zd&S9>|wNQD&BOdXX-`fB0e63Va zr%?O9_HFI8_TJyVuKg;HR30RxR@@491WMyuI<%97t1^_Lp|NtRBRq$zMmM7$mx=Ys zYh``X=v!-LDq{*rBx9KBta|KC7c!npU=MoxDl7z0uJ7Ieh;R(-YC^k$kjn6+M#00t zv0!#Us&;8p`M4uCcFUUi`-AxYkEhiHmvo1ISMepW|o_PD83J!p2Cpp>c3IVdtL*Zb}BZ~`P*rmkM5GasR) zV1`JoHY3Q#0VAdEJ-60;RFrzXWk!OdzpKnp;&v5k27wUnb+4Gs1=ik6t%)pv;cYAH zQ1O=7gBtd(fU4j3e?JZPKCjB)?@r*`dcfLeU=Nf|8Ov!k>mU(dSZ6xwBVMaLj1!P- zT;!}qGYWE9y~gL~30h0&Wo!R3p3K^mekPudYg}8do&vG<@V;ssAFZyfG|ho>=w8`f zfJ(<3wM;)qNX5&nF0h$@`h7)AL^Fv}+YS^-zHi*E*WN>rlIv!^a|a14r5|Gp>IkZ? 
zt>!f+++EA46Obb?$<&gqN_URXeSlw#SI3d0tP$dIVxs{UJt%hyP!{`WH3pf9*XFA_ z5jhvH)!a5;fT}9iNMgX9ivHqJ9YCT-l<;(`9qQ}hR?XpeK$$2@J#4rKrAiFi`r`qN zxlNai6f|J?x-S2p07u7)Oht=D+#U$_a9Is6vl40RR~a3`Klv%d+{0l;pls{ zvwaYFqYfnD`Up%mb_i=8dV(MuJ8||3sAB|6>GoYE0ja&JVfqet zcYVLzptfO4f_tdxXX>htr6P^?04+s$?Bj3*MWmm!+0{vWYwAxqBPb%>`n>d;!1u>y z9WG&lxpPm^KWWYzH`HFv6s*QMwT?1_g6ogl+Vva|aeuVYo_bn{uV+cfdH{rtS%ht=RtK)k=NCmAJNRioPq355g4 z#z^+Ek9VzJMw$3Db$9upl5)4|>V}b02B|d_Z4Qd)^o?|zXziKG0e5ovW(&F!zgRis z##TK;w$lgm@PQ=u;B+CjIATWp0Eu90^HKqUDBH$gw`g#J7F~Sa9cvh&s5C#^9#7`R z<9O*Yr6A5kcd=$9( zI9GhmL8;!M<;(ccxl56>`n;t$XtAb%l*h+fOPzs<%sv>k#{K3WmQJ}-Fvm6Znq)LU zO64}_Yl}|^*-~1vcGfMv4-u)T(V)C8;%Ae9CfW?IwV3mE-QyWs+a{d;Yq zbOdAao~C)ii4Ez!x9Zz73M_lKb(Y~A?6`LO1K|?k``R=2#-#Nab=GSphAqJx1C^MQ zXsVA?NFW)nH&M;O6y2`5FC@+)CI+0Hg6tWpOuGIRG{wH2V&K7E~byTC8dm`-%?^IO~blvTRIt`5XZVsZlZA-=k>Fi(Eao{ zOEdhc(o;Rb-c-#$XAn|d>!NIX7;+AYG#Iv}$ps*VFoIVF25@A5y>0`pfY`j9^N&L) zY(DP{*6yH4%V~9i_kgNr_&nf@EG(7Rn#&PF^10QnBc8x1e+|G!kX$z(y=)cpJf7x< z%@RzI8x57o#LFhcm-Q0g6t_e=EBiA+tB=zaLzwy6H-8N!08_qtm`Lk8>IrqaT!fRJpa-iD{Rlz6UwH>d=iKtO zCQ;uSAR>SM!T@e!Yb>e9IVue+r2Y7sPtHJ{Uv_9-;M?SLhdHYnZe{g!I-eE`y;Hxcq~%~5bk<#ip~I)RgXjRNvS z@(2w_uiG(k?(tW_W6xz=@DeHf$2#JxR@y;c7Mp@n+U~~t48r&u@-%CVDD?a-)nS1% zNnLNBpg$G%al22t(g#>cfsxD(r-Jp*YwW>^}voqXLspt&7WN4?*e6Dsnp#dH_d_>qg5lN_K=+ z)gt34Ad+s^M@JwP->#4NId>ZRpj{fggu5DNBe=8h`B@9^Qkym$cU#Jt;f{7|LB0wy zheEppBg}<3A5*{}aje!VYTSbd`s^Wh3Z|2($;1vq_-s$z?)Cn+oz)ll073G#Id;UE zyf>faBG(Cul4#%AJs-g(?>4$U2a=UX3+o{DB|@rpZ(n8N^RX!2YcEJn!Qtk!`nu2b z-cMT>IR{H$%TaR{dQY)O3lW0yNWY6Wik)mb&-I zr%)mq=0(^*S4(D)L})Jx%yFm2FpOWHBv?^zK2U1TuBNhlDc%Y~M7UIyX9%VxzWtlD z4JDtte%r2S?IB6)2fm}^oW%QnJ5F{KkoT%jfTZ$>q(@LvsS_aWPW(Ai*l_5(4p3eq zq*6PQ;oDa-nD}BGsLoaq{S*RTS8X5G++YSwx@AAFSsI^1sv7mAJ}C>m|D%3kswcUU zRI(-jB!V-FRQld~y^aId+;1n|?Qq*3cbK&3)ft;RK*Ljw@ZSD{N}u4*Yr*7%8#DS0 z=Wa$obmG=Bot!6LbtrAf)kA0Q6#ZK*)kj@FH3g=Qt*zS&(#~d3s-SHKI|tCoHg*^K zkhzsb<0!2rNp^)hVt22owgQF-(f-Yn*iP@i9@`Xq2+6qY*w=*b07Y_3^>F!7?>l(? 
z$Ue0qqhRah>pDI^0#XawiEJo&D?v4+ON8Y8=DP$k@$HuI>qoq%;%<_e;g+NU@2~5< z;;~t8Wh`(@-ZFG8?+iMhE%T}s?&vA+u(soF34JRA$DN>h`MegVpy=lF&Gjl{k00Ba zRZcjpcelPy5D4K_y;d{oJ-1>c$6TsQ22*>m4|)#Y2#JKywc% z3fiiJ%88IeV-#`(hlzI9;RICXH@~UV7;&dsRAa4$)bsc@L!y`5*r-iSrTxx0s2c(8 zQ#XYY#&Qf`29~fB6-pYn(-exz9fn_Tbq{I`kRm9HPHme6A!&Wq)?J2hc>ZXk8ZU^G zwMQ>0YOfD#c&mFkh-ZxE9Q6^M4^xl(uuiCv#R!h1sWlP*|+itmVR~oa@$YVh?T(4WHMp-@T>WiQb)+8w9OsG^DwbfV1zE)rTRF#VTK- zC&ITc%MPl_@|pUu$4Rp7a_2$Zo3Cj4h&y8Jth1aTM4z9pWsPvuQ(Q%Fb{5W2QVy0E zuyWZaQpV!tHu0~wV%Ii2AxuHoxPMQHITQ2q_Tt4HOi3cOZhWEl&Ma^c?;0Gw(xt5Q z-mA@w{SXfGZj9E7)J`AZhUu$P!x8ZN{Yc{gO_pwVZIl9!z*MceJKesYCts5CG7qIA z(MlcyK2Hjd#|QtweAL^cd`^_92NOFjg4ff{D<*;P`E2!z)~O++Zn}B@eG}7xce`gg zW)e3qJkACfw^Zu~Xz@iI>6-6CwaN1WK(012a(ZYno)2HPQ*}VH$4(7yml2Y^$p*24 z5rN9X{=1Aekf`hnfrou;ub0&jVmp4DddnTouJN<5Si0Ut!J&FU`rKW0Z{v%7IN3g^ zH*^nrxK-PvhhVs9hBoVidxR9m>)kTF?=gb5*KJUo^a!xLdXdIZtz{MR(ga@Jt`{<=WGvHXDDHTg9A9yubLvR2}*kt$SSvi0%DFAj>mmq51{) z?&^CwK#t_!YZkqhVREZ;%_O}7g~>Xq*6zY>AmL03avlOW9z39H&emqgza6?GS z1v~AlHODQfJ0@83oQhy@RSjt&f!?obzsQ|T>S!^-03pJw)s@z`mXT5n-(d$0YA~~s zG;Y+GYlAxiThCTUpM?0CId1o)EQ%7u4icW0VzXtpPs0LO=^a*USn~Q?)DB=W`mG`Q zLE^dxRpmK^6!hD~ca&(G^iI)0*L&84Ay@QI5W_5~O6nA*e52)dT5tr)>aF7t^8Qu& z4&k*`%sf7Do(N7-wzt_Y;_o}soqM=Mpn`|B(R0PEj6A z0a2vyL_1Y59dF+z!k+29J$cteJd2{p-&9TJII9LeQ{m>h6{gI))`)T;Ud=@KB6k#R z%w3gpfDl1gVE_Bk+Uq880xsgUREU_8_+VM25|b!A_6gHiId< zaJXQdr5e{BLP|b9LAc*XvC<{P0jzwVmciOXZnfggcPr`@xrcYjG$et67Aw#PX>!&%uiMS8a{X zbEZ1|z&o^?nC&*(@6?9(B0`F8DTIQ-h_-s~YADM+r3nLDD<~X>c`>?>&)bjy*0wk;zt_5LyKL@p#?kY(tVk;%`2wIma>(#vB97 zQ>82M&(jVYKsc&b42Dp|N*L_}yp7Unp>BlifLQ)ot$^=xW4TTX?r~z-jED&L0ZG;# zJgE2q|F= zs@eemJmK5o^aXb(vRe4P1XIGb?X_mj@(Lvq_<)WlesLVU8T*>-q1VqeU9c+XR(*M< zfmEGOHtXih4BRy`v*ohHI*X!sZs^h~clcPUEv$J^O4jSto&_K&E$W%*wZ}1YIq2~x z?I8a$EG;i1v=fvoU)+>6Oa3Gw|kWb+$m}8Kvhq1h>~EQ#)<&I$P zwjovz+bHoYAy=znopVXEyoMGzowY55rzJO3UR5f1Y zj+&R+2I2r#9<~5#adA0_qTc)?y)l$pUDwvzrr$;ayvn2NLnt{d@>Tq8grr_(A!rs( z|5GEv9D3bFi-=4lysNF{_7nrUwxYI=qBM7?=72LvKKA%8EyD`4`r7v3QSY~S=a7*n;gp4)vgHsybX+Tm`bi)A0)>bG_gm3poA;`g}2iUh2W(d9l8 zao0JGSBi81;!K^1zz^XPN1J9H0g`I3+8)Ql9z6M-0A1B*LI&Y9A+Oa2>4>|pcx`%k zhEh7s#OfT1Fy8)B9aXsKv8yC{pG!E6^IjWFgLJhjmx*5ndqfH~B}j|2M`g*JQH30+T3RHO$t~6hg}2!~^B`G3xSKaW#WNfzI60d>>n5XrFC_ zlyYOdd=YIBSe2aLRLSUH+#D`lLiR;!eT8iHWoLwAXvjx#iac zc3c9x2!V`TK4^!-6Swy2qMEKxL^lfvz^Y&dVk_eC?*H4HG^HDB2V(%9=9bXN3s&?3*#Z__js!9S}R`(8XP zJk-!(A5!Xf?`qVXIPj^I>|sKY)3@!O9>uF}4N)u*=GH8P>YPpz*0U^iCifKT`fGhz zjpIl0m)7i8sJHiT`@xESj)K9bFY*}b1xWgtLEZ;f;skj7)2&TxixmRoBKJBtw6+-%F&VAo{p2*;=Ts(GZy@{ZcD z(1+iuUDHKSp3b6ZuiUb!L&D2_sP@X|N@A{^pq2UTMlRrIUylx|uWaQ$aPdAfxF%!mze;^CH&_VoC9n)^LFo;CkLvobNq!Y8=dXwIT{|9$|Cw=+|4lNfIBjGprOfp86}m*k9FX&NC^p>zdjoMEv>eLf|n z(Q`yuv*gq2&IOpPU6`Vt%N~1gjwG&nocr4oe`_q%9HH>>MZw;B4eF=3G43p1q3QT+ z)*8zUcX(-5cCuVDi>TGMk3{#t#T*j$o_EJu=0T-dTGWC$xt|aocr6s!N9)Acd>$Wq5Nb29iW)x)`NG-AEt5L_##pEN+8j$hk)2_qb z3)Vy4Yeq))`?xo&Dj$G&{!STbI^a+uAZWPJE6;KaI!N%J$WW zJ4LQDb(9NBZq`kVbI$H0Z7slTS|g}$`USG2-oJM57QF%~)Uz5yT!CHNFrTfwCVs7@ z_cK33qnsj|PpdCeK&do#)TV>VxGTRIC_G*JbcsI)I@z!N_LMdZ zL0Ldij_cQGHO@~|odIXpc(Sj)jF7CJvBD@~`pr?qEIZ3L6V-8+<t(t;|^!r+ppel8dVYUIhX+m=W$Jo zXXD{1ik~;0ipK|cA5trKq+*v$z=imEvWiv{N%uohg8+-xQdmx?)#tuKE8IBV=uQS~ zfOz~^r5eTu^Ub>Bc6|PdX0*ep%zWxy_FX7t>5k^ND!7NLER2e&&dk9S zLTbcPQ!yyZ``^@v<9s-v-^7x;MTGja&LKo#OSU!Wxes7(w_TP;=$eR?RoiYNEus<8 ztt2rBGL@)fpUaSZ>jd!%XKAKAGqwSYKULyY7e@1l5oC9fe><`7-K2ZYG=cV@K+U{& zktFCqytsS4|JE0lDvH9jvHH+hI?!d$v|X$|IDn4b?%g<32#0ny>!# z1VuS(rmIz*LXpHxb{@N%$%#Tn8)bTA8^p>|yYsP=a9?@n9opp%Terst>QK7sQB8q@5mJflKGxPqjW-Vv zQ&HNDrdkU+L<;}zjZ!q2;yr9{A|1n}mK;a}DEXs&Mou9`y-5qBc;Nfn3v}DXaU_^t 
zd#}5O-xp9S#=14y7j04G5=}by?$K@n*K-JpF!66SiM^_O#FN}9v75l{e9TmQ?>zi* zI^|Y~W;@WL$PAc5SmIEq*?42G-SL=9%s1aP+?eN%m}(#G<@#z(M;7|bmVpS}wVY}ND+rOV6V+z63bBDiX5B*X5K8&WR+TDgsc#!CMblRsXK2qT z;onM?y@Xv@rf>gxGY8WLuj>%4Cr43w*R@;YIRNFTwBJ!x4k6P0-1p|Fk7)|sj+P$x zlse{IYnCS{$$IHkZJ(Vc`qF($j@NCgC~ZP~2CL9FYiIPFTb`TjNGkIMlrdP=)VuNF zC2|T{r+BK{yFyX0VLKc%@wfAFT`hHU<9d5j^X0@5lk;BQ(osyq5xN6NwWBqYc(>ly z6V7r+z7|7m;ngU9E)h1%PC~o3FyE(8Z>nKq0RbBpw1Gt+63DgF0UUAOAcSQo^~E~| z*{7!VKFG=njYDGw+ikBQ0zuuYi|E^N+k2#D?$nlIcC&1&?jj}gYi(hAug6cD$%FkK zYt`g*07s-Ts7&7|Y0oc9kVhav-se2#R1hVub(|9@TpIJ%(Bu?>5C->aFLDG_gJMv- zM*3%9*wP@>l5;rrrBvCnnq+uUy17U+t37U|>$}0O(W?JdRj*L6Q-@zC{?0f}<6&x= zgu}RZ7^xj3w6&kshfH(hwVqPSo5eE-Nw~K2tKXeXY^xn@F>4MSi}sl?Ld+)-&Egr$ zbO;LwrS%);S&w)Jg{aHCF%=M2|l<(!F|BR zYKd213d{Ur?axpA`nWk(raE72Pojk{)o!ySHiZ&7+-+-M({QBQ?g-7q`$fAVC-^Mf ziqU4-bA9BGnkmJ3xU|}C=q&)rtKGd4c@ZUPH@oTskPyD})vs&X+|o6)t&VU7Qpy`T zl?~4DCl!@=2qz66F)9u;#A_qI9Z-_Pr@kD!+z|*hmUXzjK58=|y$@nm-%BtDK$vAv zx6POi`_Nxh&vcZ~zW72P1E~HJEVv)su-&%zdIRqQ#mQfb?O;(1&^I zLXb$y2z0&n(_VU_YpAPmwZpK{!@KRJ05I&@1TKBU6uer?i96ik#&TlKt#=c*Y;?T+ z1^Naf(D7t$A4OsE=Ewn%q}e>j=OLWrI&(^AF>~)@JVv}gq_n>}t9keSK2XNP%_ip+B9_LtQzrJi7W57J z<|fsbIfOC=rEq3?dh}_y)aVUZI!`qB&*_|7iflu+Z^T?6BOT3(-aIJL>hwz6(0EgV zA+&Bnx7bt5ZkD0#XyNBE1qzK`LsTGEAg&TVH;&_z!9AflATRKhJ6rrx@mVC zruufSWGC(qUaAtiaX+m~6nos{x1?L$=fptW`fhFG0LH+*?&!fGNFYmh>V)f2!ZvQ| z*vfIjdJ(tTU^?l&E&1T=6vl~R6a^=!sO{yVGq9`wO3@4hoG0{l)$R*!@~n(h;7bS( ztIyy318q6MZq{zr#Ba0=+9LEMw<3K(Z&RF+VGA|g!{M9Xrd2TYw-&(lQnLtMkKs&R zw-APg_4d~9Ja;PayK6D2w}4XWX3(U%>pp&K$!!&5(9@oIXDOFCm%CVw!ilxghp>&? z8Lins8#k3$P#U5r)lH+iZO&BH=dGsfz@7;A6x#pikD?Q2iRAVYN3WEwbiYsIV;6sQ z=WvSN-N>c2!#<3GhSqolE3752I`ef5MYg_`8h0O9`=OCjxa%5O=BR-W)hCL#!rQ{v z8K_A2$l#olVq=8i0wD4iHS@oWhe>z!-ia{#6BPdU#=7gq6O#zZ&vDevoR*tH!=g)- z?F_?oAEe$Jd(n;q%%G)ymC^XwL}=z1zNae(F?i#e@mRvW$dcUxl$vupmg*M~qA#Da zHl`b(D8z?#7;>2#ANr*Bm0AI-YiZlJ8vq8LtTmIlLny7GnL@2fxs8;(%9;Yn?j!~) zLFK(0Z{w(6tqpFPMo8^ot)=!ZdTi~I-VR`lU8@PmVcaij)9r{GS58lxlOFex*4wJ! 
z2^=P#=G%MP2Vgzgg5W5zS3MS?+XhlK-mBJs4$AStt#`HOKHk>rC)mG)tN4y*Kvxif z*L&&{e}Al)r_dyXx!B#90;Ko2-d&jH?xoW{&k0!v=IIm2C*y_Ax~uwghy>(|oku<9 z;pnawX`11#gwW|&u#n6mEO&<3@i+jwZ`Rt&*au(k!@EXqez^jt{C8`8X#)(8K1`2w zfkTAk__mgLevgv-6z;cbxdSWWMt#9|<9=B?etX=+^^xT|eCicOQcvzLT^)n;1t`eB7`M{j@fgF0N zc^RJ0(ZW-I91MjtZg=N#EcuCu!nXoNCNCaaMWyV7UVUkQ0vv*=E#`4~%5*y+)GqhdHD@r) zKYR0=%C_5kw`(4IV6^6RMSGy0-uob|a%<6GSay!7RfEHxV&dH%2R%X|63hC!gkwN0 z`H>%?@;>Av3al~VDN5IIdyjUrU0fS|No!9G)SB%XTEx=KGM)Dc`5d%$+>4$vK45j( zNBN}Y&{yDCHjX`&tSA1Ru`q5n!?2SuVb^R*7c~{%s*i1iG7We824w-3#oQVO&7g)c zPaLQWpptu3P2uK%6mGK~Ep2zI=Fw>X&2I_3z)do>y15wg^>)sHJ34y%v$d+V9KW?n zawYC}>%#K}ch@&Kg5MS~he%P*qqTG(+bF3r_w%O2PEvUDTk6v;H*Oz&QSE!Lk6WE( zRk!_k7w7a2gsWOg)Z$?UxqDwfdz64#tT+aeqfv3`_#`3qi1f{^$V)eM;+&D3szAd% zyjP2B=U`VcUaDtLsv%yWrNDL-Ghn(zQUWIkuz1z`>-sWEo%nZKCgbJ6NiYqvTUM%W zX$rB_p0Ubw(BmxA%s@)r-_M_g$1=Cv(bmwA2%^~pp65=PhP73+0805rHrp0^&wKS= z^8h4@dM$97Qz_p5LM@j!OaS?rq;RL0VOx^0J zTYjfVie*`zP4y9=S`4Dinr z#-4{ydJWnp{=MkinV}jROa`ugOA}VSDNquoG}T*9$Fp^f!p`*Y`+wBx)GQb^+^(JF zwqrMkhUM2U&1L5M;7iq37C>cHPsw5ts4nj>(lTc>7pv0CJ+`-_POre!-C;)Jh6?~?Zmd1(=wFVL6v)6ci9ExURMqGIFtLZUAx_fCBId(r~~fQ zRR$|4yN8cz2k)qlx!Mj&9K%HC++~UGr1!nsY!sZrY4hvOO)Az1A^P+2&oK53DOKjh zBgTQq>>MQ=xFf20gR)r768a^iNS?4U;JZpl6AITOY58DWEQ`Y`!(@-W&?Dg~Slkyr z1=I{A-=#W;M?Etr;g$|+7&nW65j%pd70-dBJ}b7M*+S$asz5YfIi>{{fMN2Nf^ zz`ypde4kMN|KIn`e)9ZZb{tq1Z~gR&@N)R5CdC_Zzo=EAVG@1&i`5Zq2dE~Byc6Kz zdeYqmQZv*ym-rr4H@4RkA34+C9oyMQjEHZRd69!2eprLOLon52qdll{1Y>KlmH>{q zmCsI~cqfTh4pag$p<7xbG>olrzn=iF(pcznx_4EpD z1C)6ChA%Xy0yB<7(;-)Pw7}Ul+U*)bFT#=H+81;&NUY6g#t_TBS9|_;rHAb~kBz_> z*Q8!Bt->J?wkl|C8!57~dDnJLcM|8-i@I4x*B}<(slClT?o?bKhO%d|-xJ>aQ8Vvz zfRJ9xYPE&1T2??KsCE=Y75Dia_X%_dDozm6ucm5sZ$X?QliAn4M%kPgdh@#! zf5zPv=ykV2c#dR!K>Ch_`wX;xt8IYr2=k6(H^XQM(VT&Fcd=1Z?}NFyQX{O1e}A0E zi$~o%s*^pbn$zn#fiQ(c!=9KGWxDq_Hmr>|Z5}D@_$$WUvyd`0E4FjoQLdM7R0B{# z2IFEa04ljQ#;-Ls40~{67tb zyDzU_&m;kga8aCMW?^N!(XQ^#1&scvtoY0Wf2sxIYXPOJsg{xv77^rLR$WwzL6D^1 z?$S6EUT)Mr&kCqa-}p0>&Djk$Upq+tJoda0NyQ*W+vko1%|9f((_^Q5`R~F>`m6N} z$X=p)1B1G`WmC5j4>-gB*g(@EO44;$KsrZ#2qyble|U@{F33AiQheAyJ*0@#})LAmg zBQleyDNntuTNhZSP|`A(&dMxi8YwCoZu+Kb-w5=Z^`TjyidVMCF*FBuwa+B#c^O7f zBjyum{Ql|!7;R~}&$qHRvxtcIwOZmBaEFWQWBVP;NHQJVy77?a1I&Smd;>_8jFILV z#0=4*&UT$-yNAQ}(lQvOl~HHP@4cT^+u!Sp}KWoFh!yeygmKl!V zYG{^PjyWUU^=^se1SPC?mx)O16peUmhbWWZQBqi!*%@aTtp#}PyvL5dniyWdk<+R} zeyvx$L?Wjcyu1RUI)`_wf=v8&ORydRY#UIM!K&BO7xm$og0UP!Ywc3&bSOe>b*}}^ zpimSO>Y6%ty3^N&>X?f9{T|0~)!kc4V1Xd*(>ND*l6hU~!+&>aW& z6=)v;j~>LVC4d7+QQv%*Xb!of&3fRugn5LL^sP`ZFND$KK9GGctB~3|N>|tZ?dqye z`$$ZTiSuWk05?UXzI7`tf+kHiZ-(SKCwK(>-2q=ZR)uV)$&WI(KfW*l1(xxp>^WwoKK1 zq}9i@>AnC*v~~JM%teG`>{M9{jt`I`)3Tmk&1<<&CCANHU?q}UtKA!Y8g^8=g}6S( zJ-ffEKp&*8EVjzEgG5M{R%`aW3#H1m%Y1vF!ab-Oy5D>0m#DvoKVGUl1k-J;w~OU@ zxebx*`c;x+n0#u-hw_tvO}oa&DVX9uFZRh~lmJ@SlUidu1G^e!O{Co+JV(Obc5S3z z#I0whcyzB7&J(R}iZjvLwSD54M!|4>7If`^ zG`drxvrtqxermUm?M3ruRdgODL(q0`?aLwPLQ+z)(chtaNGX~a2iz%Sw_Hu@NNE8E z+rCZJBdnlNQ#&K@SgQ||-lH|;7=jpWR~qRYN~s2GFPg2n9TY`gv$Vd;8MRry%wnQG zKJjm4>1jVm;$QtLwL?_8`fQjE`4CQpx!0O~4?n3d=5gYdg2DO@JZtukdO{VT9*VR>0);&7Z~+h3Rj zBNbLL+mV(j6x_A<=coI?pRMWYX8OR+7PVc0S(LC)S5++P)c9r&kzDJ7_dI8mX72cD zteG#MQHujQ)qO7ZDX?Ct?X@m=fJiFpTiI%-dzc*RsKV$Es2xn9JjPz1qzwf1)ArTx zQ+t3#6|?zmAQh?I)+_PvAVuUk&b|v{_b=V0@kI6EmFYe}5NoxfaS)H2?e&{OAlYwO zAg7lj1nj-tF;w^++V1^7cglkjP7tKDSl!2I!m38585(zB+iw=~3=F%r%d;$Yj#SO< zd5# zheEn_9-sL4`|x(obae-CSG^sww*6ES;6p^^d{Iy19dU=TCtu$3>l3-vEZ(2M>esEm zMESkf3oj?pJ{+moYoz_&+lw}~YR-F{XJIb-;Lpo$$7SzriYT=!xca-7ZMR_JKWYNp zFixX*Aj=5jcPrYbatb;5->WtM=|p+)MYXA!-fvhJ7-m6 z@Lf+}!_DfY7ok)ecZq89H|T?V^rD(KOrkGqO1_e)#``tI+2}*we_nV9r=pb6=(atz 
zoyhG*&`$4NFQv84XctKl*R-B0)n1?q{%Ki%-v^bdCP2FB11QzQYE4@*KSaUVqf*up zw;Jg@FgA~YFcNFO)yk0+^GgdJr-}T!x_}Y4N^s*2EptZb539iEozDZ`?%wC_3vn*N z(x*8-p`pFqpCdHNH~yeo0TPnT4MFlj+biC_RHest0tj+ zAewX_q_x@ulvKj9Rjno*B2ew)b{>^G`PR#sZHe@_55KzgMfHUzJwZ=U*7r{lQh;t) zQ{6clS=ZxCXK`M?PsNjh4iKh{|F3#t??xD`0$o*a|zR$+J`7L9fIc}0QcGy%k=Ame_9S&Il zsfwjOnJfa;gDK#EGllk&@-TNTmJ@cd8LV3Yl8(V6Eyr*4Pitvs*!w-I4Tfzn)z1nQ zS?s{$!Gwk4++5s6#Qi`2!*6K3+*vs29)oF1Yx}5C-YvF^+Zg5m3Bz^g?vS%f)QW~` zU2W+IvD#8A-7ye0-u_a}zE0q=Bz~Nl!6a{LS;zQj8A>{&>ld15`!MgeQ@e9;3U;?1@tX%r#q1Jm+CZ4}C`i?_MTDv~Z963)*V}2J<#>GU zuYO%;&7BnAs|SWQ63&*BiarFet`XM|X}d$OHp)2OP9N`PyEFo(Fl^0|?_RtYyEX8A z5Th^K8tefe*@q05O7<{_HJ|DNj-aHo`m7Gj948_Zx0>rog6c??ZqaIw>aOAliVj$D zZF9^slvJDX7>44`5mVfDLE@s1wOZSxgm#HSXnG8K?NhMLwc1(HoA^hqC|~)pGT1Q* zNUolHt=nExfz`W~VaMqNt;-VB3rP5JRi9bzsOny92hVjOTQ@x4`&u_E&=kJoRNi*)$AVWI zeIZ0#?dgeKP#M~#&Ar~My{xt$=+sAh`TYQ_=1Zc7oKa~Vp`qd)j}EHOIEJIAyqwJcQ}1egYxruz4^`Ruel=~`yb7u<|T^M z!P=&}>ir*eFPcyMr(>4d;;7@es^%nO1lQ^cZ6lOM%l^bPH+lPR($@?ScE``Fx1-{- z$R%j?W{$J#k*p9mix=}qcTbQu97`cAs$ zmdWdnX*%Q&9%w8>ZmAo+)hu*7J}=5@$qsk-Imn=aigwW?a?em_k2Au{q~C`bCE34Z z(@f|C1lhOi35VR#TRon26ra_XD&cV-jRlli&^`>+o%l5e0m|Q_hyZMQ)(J>U^=89PxSfeHP52`oX z?qNHqwF6?tdL!NJ0$q)3H%#|nf~pr2_T#)?Qzg!b!^%Rv8-0kNtea*0@rb+9+p3w* zaXi*)!SRGUTC4Mw)s9aS)MSJA2uPxjV!-Y!afh|+c+MTZ9G!9u?*guF=DL>4sdS@1 zc=YoC^c&IhN+w{N;5QfTF48klzBt0OH zxrEJ4(s>{vW<`Zg!LX}AVPr8e%U%@)44@=DsDq2zEkgKOuP0I4>FyP@NQ>rMsEwZT z>T}&U7KSLvW<9&sa92A7Xj9%Dq%hXHxm}Q=nw8)^ZY5uOc&SJ}y7;-9n}zVh|2P;uO!*7oldcl5Po#ySlt!rNc; zT~vc+K>UHQ(BTmqpC9+HIWbl`2m~j;^#$c)UpN`d77P2Ry z1YD{Ok*Rpzx9kN?L%Hu;$2FTpiZ&*kL028uu{+BdRo+`I6A^Q8=|NSlmOBrU+pX9; zU5MwR?`M5?7Qu+6I|DvINMdWlcHIN+8jdruwYs%}lA6K>T3)o@KuWcI`-|EzgthlS z&@snjQ&BXz9jHXr);D$oOnk7s2gs$JkV^-$k3t`eSZKWQRWC^3^;6aP5G8E8pi^U_ zBLsQg`H*6Jzjte*qL)VKLk9@SgY6W<$iYw4>xsPb(ej%_t$xSUZICT4#|jz!cy;JC91rF9IG zJ*CFqwKY10qV!&?sN*`c4Aa@GW*vAYp4hkUVtgMTC-TEUCe(O zrA^8f;Iz!_Jz;ARDN=a4`LYJ=0|d#|_o-^)GN_=wwKXkYfuf%G+hzL=I0e7fP6KP( zeF*bK@wfZ<)sl94Uz>A!wq00e z5v(571o!w$uV%6K1dhJ0J)rua&79^P^8zK7awH4&nmFrkxr( zy95*go7ZbzcGZXVnbcw?ey>%kYt@t{xg)LWG)f**C^+6w)2BI;4)tr!VD!Q=Y_;E6 zgzE3A!v5W?~@CHIkU)RD}mx{AX-__ZvmAN7QF36RWjPjIB5KK&;MnW@wz!p-$&i0u>+ z6E?!D0~$fq!#z}hGwu}53A!41!_>=SZs{7p++&M)mz z3?$T>KcHYJDsTJj1U!u*Hye04SUdw0U)eP)>CE;Ko_IQRoHI<-wlXPH zC8fFTBTv(CDZZY{QGz)TH#OMiIUi4Fy3=ukGP8Uyp+z7KXu}CNgaJaD+|~=db~^`* z6oz!<%^tx@AIto#lG#9@lXf6}$Q@C9@J$+z&uUEocev$j@vFLPfJEO_5BEa<&QI0J z(0xGkl9MP0kc8Ks8MhsBcWvuMmGTIVBD%-aOU%cJVYaZP$A78-Qz+ySvjo$6F14GMWoDsh$~&fyWIcyKfHjw%=alc}dMq+5 zB%)_nOQVZWiSVhuc94Ki-Os75gyjU3Ls{EgNx%p7vfu`Ha?^F!2xr(6%*pC{)pk#C zm%bSSPV|R0p4jCk^QBBM_xhkZt`fyQ0!=!i@^RY}I!M%JE%A^$Vq%Qs*K2Nd6klI? 
z>P<`>n~YgFJLyAvhOQZ_JVhakhusZ?5vU-smQxEHXYuy{!$#Xd22-h?wAUmqdhC-$ z36}{j!*{Dkz5=PN7GUW24^vgk@I3uOFZ7t&-ifwu|%AFT?7$ zs#6-FDCLG|XPnVT?I7sG;nGzNfG&E!GUZ0&J!bSt!dGw_^^LN$HStF+>iq{dsXXr1 zNl$Ty*S9}w-e!AcI==t>55KN0b4PBgWxtK85eS+&9xTiOVedU3etL$hJ(u}mm3{%l z?dfmm1{S%a6Ft^vO-ci#(y=Wf-9?C4+7|d;4?k{dPSlrev>h*RjJ7erA(+DILlyoA z?kfH2^X{_wF&gPulr$ST0kHbwWt+vF#;f*ss2GSe+O?Z1${A9mQAYJjeCH@BTxSij z-LAYq?FtlwRhJ0(@{!Dj@fBP={9c`RpZL$3rwb`6n;v%(R=4UA?kP?QY1p6Etu1;- z=&Yu-D?9ZXWyP6E8oH8=HuRtEu?vkV+8j*Wg9o?N0Pe0}^<}E)3rN-9KU4`gOCm-^ zG%*O0?j03t8NyBt@zU5=5Td9DwL!iSf1&BER5e71K5Cst#|~qoy#uwAxb?-UVa9H} zZd6;}dqwQ+F2R*IUiZdiVM63R#tpFSWQPned87k3Zlqkh<-8$iT zf)M6R_p6onQR=0?!_vKqcd6)PC=v3~mmGWH1*SZohUICF) zJC{H4$K$}{Uu|eIg!-E6L`9fFNSSLvJ}_jCxCKpe$3;PnbOEw!YxxQ@n?uS69g@5+8?fPt^q-8k4QY+Xy z2%U7wlKn22dVAj|a1WBv?K|(%dEDx^2G5vlsyY{>fNZ; zGnb$adDJTPe1Zv-Q?y;qaO<@TW!;O{><`iOmtLP1M+b0KsdjDg5Qr45EOg(& zqXh7cQtV?OD!id*Xd{Rdq~vwC&F4;GYENr{TpRb`3y(_osDU8>2nmtX%nkz z+MtADcc8}X!P?iNW5-ksC-1uT*w!T`{_`ff2y@Sx{puLTuM7u~T zuETUShTemu?L-)Lb5pPT^D5i{kovRQ8g*hR!`olbg+s|_tF|2I8bLDK&m?)m9g%JM z=IY1er)#8TZdrIkK)|yAS9`H0jB82Y7k+NL&Ay~?4i=_xM(n7mS_W!(< zjO~(Y<&zK{&wf80b0+Pos*;-S39cYkcRquV%KT-1vuLdYt(R8k0y1oy2Z*o+w-Q@` z^2Kfj&bsf^B5I^ykfH}2z$Em|TR*Nbb4z(WZz%y`&MbRV>)t@NyDmVbfqm5>js)av9rF-_D+xfR6 zP>OXRYP|V+jMVA4J$7^ghv(HNpS4p0rzpzUUDHVlYL9GV0{jfAllbd8=UL@EM`9 zueA!g19xSpyGfLt&<<~Czk7fx^J7xk=Ol~U^{m+e_c)PS#X3x!T8*aOcrF?3G@&2E z$zjzRknZgSA!YEuWp{`66fH|nnm`??XsS+3>v&b$3F8bwxip0uM4kt&yUauS%$W|X{-kVC&48qt z*IH+R6s7uCb6Skdp^@1&3!w7}>-pts$N_cJn~s4<@LHRQ4&dUqJ*+{^3A>R)3Vqm5 zUstEQk+8;AZG<`Oy|;u&_l#h&)x)S% zX92#Egm>OYY^!k>eMB=^BDsW9inVPrYaf=YKFWvfwAp_#&dbRYpR&mwvu*M7v<}OA z^|LyYFbybOiLFL9Gho-k+K!C2-N&}xR-bHG>u_zK*16MDsC<@)crPHej-fqLvIwO` zd=M25;w`(;A?5ep?<$=(yMmDHTj?;gfg(HAMe~l^Ho4Z#9lqN-`3{(V;9vYr%7>Cn zU9+hkY7Zq^v(Mjb7)o!yCvMfpGPi#Yk`mi3)rlNJDPenZszh{zlrk}IT{a+&`!IDC zx*d=`L5jNTP9OF6_`TH)?2O>B`>v{c22~mBlHoab%GKV9xacElaXw*}z4u3V9_rNt zSD$u6wg0k3&LRlKgQe&;IGX}A)<5^>=seDkZqYYq%2!vGT5Xy|B0NoGY;9-`C8b$! zA?D%m_^962T>v8xCxNQ-T104_S6js!KwXVhOYEI#4$FNC?^cht66nBD`-pDzzVFpi z#t@7)>jk9NwWAQ&iyBey^ntI}?XF!=nXM8@cMph|m+n({kG}FFdUgoc%}D-c83Z}( zz1MZCM?u}$qGKp5JYvVLdW91d;{3LQLZ_T5|E(BY93kK#@4=n{c(9C9yWrqeuD)y-20|&34>-66mDjiHQ>X6iU=`tDRE;tD)8g z+6;HsY1KPDwocTgS;Xkx;D$vVNz5T-$)uZRb;Is?bUBxT=o_$N&~DoQsekm&Lyk}C z^w)qpqW_kxmZ6AfOZ$+x zr0D4Gi`UiB&G%t)EC6W%DcrYJ=0#A!TwS2>K6)F~&^bbwtM#CiW~GlmtO4-`*y+-@ zwJx;{QAq6_bBt}yuvl+Fv;)gKNYP1_gm(KNkIPgH@bov!{l6J_lea`>u#y>l%kK5NAl#o5$pH~5Zt`>S5F85=n z(INr5Z+rhUNx?_gIdAPgDfndQ=jH;etE+7DK$6Shx9V#b(D1g@CS`HoDZ||ZZrSM4 z$Jcb1Ly=80QB$oP1+NysfDIse$U(#&epKJRZLm}Ai%;;i1Lp!#tsnJoz3w8XBXjJc zE>l##1ptfzPzZ&i(B)O$;QZZ*TdsI_^sxA}% z&6uCXHng6E<)RyC4=zoCiP83SOE1$%QM2bn+wSH}PkCS^r{*8CDAl_#A!Nuj2a(IL zK9lp@L$qf{=?><0g#?*}xE^E)D&1*OMIPJXU z5L6-F{&H;`YzLT5)Mp&%s(qQ`@*$7!du=qm2ZWh*>kY;1J_=@R)V7(-0jQ)}Qya3! 
zv(741D>(w?`mimJ9Rtz4my`9ECkd<6?Zb4Mu=SCoYaj*Eg}LcZq-TBbPhT6lor7V_ z`J?t!-9;b3`-#*ZxYQO8s2=R(Uh8S?1o6awJLag~9n!_aM4=Ydx>|kML%`ybyh1SD z$H@7Y8MqU?O`3K*U>1o$mR_|n*j$h&2Ws9r-$&20Yzr{?u=U|+#A1TV>|25k;`PZ| zjlY)xRruRqp=wTb>BBpEf(`EEZmLt`X5DzAm7NTF1|lOBvDLJlcpLNc-UTS6K40eL zS~HAu=L_nyvLA$LiDJ7N=c9l}?ReH1kQ#rz)=tmk_y6qtZ4$Tu!pb9q4$bQlq3b+r?I<%dw1~z) zGK1TR|E{H3`;t{vC&5m2ZVR_aQ%I?yY)@88?}M}#BdYe#ASvZX+5QG_*-qwsPc!eJ z6_Z-@pARI!>veo@0VG{R`;zq{l&tQwH^l}$-fG{@WmrBRG|v?vdh`l#>k&433K_IR zL&H7+4amCGHY__exmLM$pl%lSDz_p?U>8xXTK6&D(Ogvh1 zTT2&lj)KGM@6$SGvR$gR_RIK`HO;FySH7+@|CeKCssT5hP9n*Z7XG5c!BbGgZUUfU zO~Xm`bq#T6`uH_iZt-W4l-=nj-bwYm+!qW%WX_hlQeQv#9wg(7Qv{|;#594n3V2-#WoO^3NxW}LbskjPx(tDas zQ7tG@-Ttp?4!=gb2w90Y99**js1Z^zYIq*BmvYbH6ziJ1y6t0n(UbC2xtel`gu&Of zadXAp_2Zg)*RucnajLaorD?+{`aMOOf>Ml!RgCE#wi)dVNHDJ?JsTgkf7*h|92j0* zk@D4??{S^>rDj+zMAZygK}ynD1ly`4#X8#kwk+Umuq_Zt4mZ%{ahgSuQPk8?;_ zau?zv;4AO^g+D{ZKuVtH4(jWBg@&Uy-_=1`w=3Rj{5#26iC9UbTmWNly-!*PEvLcs zbF|n$oSQKA;o6{l79_OSj$w?iTFxOw)vs2|Y}kBa=rp!!J$3<%IBs+nXp;p0cGjx8 zszD#dsJix@m-{r9>Im5ii0kkCWStP+=>4`{6Za5QxS!I4ZF43o4<*$~>`qT`9i#Tg zb`jFBYRS5dc-5@EByoF*nmJRLCu^@&_=9*oF`Pc+l={OmfzvA25ke}+SN^;f4n@l^ z>V2LQFtw{T*P6ws(1%9xTUvVCJ%iJ)VAY3wDJA1Dv%Jh0@7-ljvfU| zqu;B0Wb_Xb3+wOI37$b+?aip<96|1Po*Q7+=kEfJ0O}ld+a0(>O3U{^eb)fC-W9 zt$9FdZ~n2*#R9hyJx>7^p=e;KwoM110>%XTSu+(;rm zjO_)PAt<-+)QR=&#J|m2qx!cKpV_bcS<2?_s+he%#oR*(D?e1Vc^~Y)F5M^j042iR za=ocp;W_L{-Esk?BPn0EM|BKIl=+XWwSnC@vAta1hJvSv$>}DUK2}dhiKWyp%gWOk z7@3<3R5uUH!Xw7@Qq)BPFFD3lb>tFM`qd4ipDXTUcWb+B8MMv3iT|<1miT=^CLs*2 zbp|b`z(}QgOJF*oJDW2zQ1u>5w9P4>`;Sbm=D0h_)D@^2ea<6E_w&xk&_Y6gQEw_O za(7jz?T8P0{9&{5unZ@){-o_nPx{ywqHPv#Aj#mv&Klzo4C8jts@85JsI{A0HSF2} zx=~GT6yR_-36R6hnm_D;gz8FjTWW!lT)RA{og;)PpM?xv5Bmh1wxf8k&!gse5Ko>2RrdYfegHDcAGP|J1&?rK0A zcVTSuv7j=pb~PzU`n?sqzTbZtws0m)^GR_C1vJBc@cYOLf`q zBU4BoSv&yb@Up%>hun&H<2EgGrt;phDMrKb`W%ZVoE?j`9(xL=@jWvZsQZ1T=041` zEv%pQkQF=)8%((^DAeK=W%s0-mb9Y*mmzJveq?VvZThNDXQ`|yO#IKS{DgI9K1MB%+xJp?}uVfg_~Rj)q-j#+;BN%wBpY@*jyMoLeFn@Usnpy$7g;HylGrOgFI z(wF=DvRH(oeUt5GyCF&NZu>U)5H?26wdUD3DtxbdMta&N%IM z!W!df=G@8axus6M+ipD9s(tPyno9QOpXlD9DC>F+mJa%u_J7;@OZ?rhXxm#yV7OrV zxxG$z+{dV^K2&>xK+HcjTkk)eCb~j4JGmne%Z?dW0nYlUbzGXh&J)#xFnGQIy6Rjf zhE$776ug%8oD$$wptSGRd9sQBWvsL{C%T6j=1n5XQ2VG1r=V2Yhs~VRG@L9{&6l;> zJA;CG4V21ezYN}1i{{||ys_Qz@v9s2=+wmcE2VSeaojsEf?W=q&s!M>DB939En_+H zu9Xe?l|G;WReQs0BLVM`%aBu|4g$3WwQUfSHFu+HAZ^8IhO9;G#=n-isT=6W3Tt(N zqs@@_kyVjbFP0R%CKd;Ylv|UBKx)N%H8D8?tCy*z@?&liHB_%Pn3F!><61I41tZ{r z7r!*~1TOulTxY#+%*CF=%Heb{VP639*sPZrFS*nHvI)>)Um@b@L3dVU;(s02aStg} z4>XA&fo8LIiaUHLEX7a5;5I>+IEN@o z$81!q4o6@Zwa{8^=Q#da+rxqWx2#rnvN|;&x(&p1Ti-a^6P5KRG@@5YOQ-33|l$8 zG6T!>?QV^!X9I5Ez4k8M2i&{${zKh45O$VYo9_JDmdhfLema(4suc{-WaBd*%bm-? zweQ-LwUPur{h}J|MxQ{N?tZIU#t=>2eO4o&ZEl6HC#QBeqowf)soey0@_>@LQ^DFm z#SR8m_7THg9?2-x9wbt`eRjy5`c}4!+Yt2#DW!c`syqhCNQa1n6ClYwvUIqePu?W@nJp-tKts{nvEqpn7C0UxTXkp9_R>Bg+QOC5jC1w)xUkqBDYE zm!7`=w{d6>`dD(OaFlR+tD44CA7FfIZn~$mR3o(E~c? zR0=ico045f6lm*{YU^MpRU@UmsXa&pMta-O4T?}y0W--JSfT#p@7Hv01EAjfAE;Zy zxZi2BGHzwL(e5-+cc4r^uhGjcm?pUOd9z}+hmbtd_3y*crIMOpkj_CLq%N0f)o>}Z zof0?#u)$~uEyv^D{hPOF8Aw9URy~Pc>qw_aSgXNx8?Oao?Nu!~p7jxOEK!2zNFBG? 
zlDa^U+vZc9`(^LhE~9DI2$ElW-aYO2zqhb2YCC+AJEe3bvos7-*EdOII>Fxg>NhBr zJN3Zhn3M`C=(W2`$}<<>+MD#|6R;+xReu&hY+3rJ)_5t^v$|n02(-!73}`GT!niCY=|mJ(_uhwqp_@yT%-L11cHD6pt`^d646Fc2+j+r~$Ob}sg(V^wLQ%ZUhELn| zuWh7c{_U>OTa%ei?;#{}id3$BSmukZkOx2*Z|7GvM5;!U|Fswg zKkB2KSNl#L!{LYGS+bYq2~zmES09tpKE%82@g9X3^&wuCWrH&i3pW<8y-(jj_{-(C z3pny{yRJSZmk7~Y8T4#gM~Q^$;nRu#bByl#LQO(Sbn{Oit)D_GReqxQ)7)Y9T3ykY z0cq(ce}KX{akcqmwKN|3><`m*oUbgoE^tc7cY%JnG0|p=R1Ae{_onMm`Z9vl-u|t+ zB)tMeX!UYT8|-W#rBq(rYcug7iUf^fs~K!V#B!_kR%-eVEbGl$%G~9a_U$`&Z}}uA znnqs(x&5G_M;K~=ok;fQsFhnuZ+&QN9rD`q8e1nqH{m>i!qt`qkXns7?J4y!Y~w5x z3H`)ZDU!29_Ug?6MFxz1zdm>uK%`TJxlEKt`lYKb)c49L(Zv5Xo+ub2x@FZ>*-2zM z+|zMPafTr#B--iOX_T&hSR%#Y3_>)@$SF1UF|KzuY35LHx!Rs*oCi|l4Hy2{)rFs@ zRy6rNt_3nqV!l~o8gQp(Za9NZrLc@%IVShW-uz1M?HInQv;l|J+JGoY4N7{3@GjyER(maOSbV9n1cGO zxBbM6F2Jv|aPX_t4dU?4AJwSj3g}vfoe4Zn{NIgmvmc&KB19#5ueX}`6dLAzK58&C z-J_rQ3WY-%gw~0&IzcbOEMh6SO*Q5MbSQl0fsO%I3J7KaAu?O99&HiqdbGz(2v#RL zNR+p~P}c8iaeL?b213~VnjU%hW#}Q#Z7}k-OG}(Pa5hx%Z*zow zP%U>iNocoKQ}&=RRqv^^5&eFjOf4}|J*>n_Z4h<{q>vxy4E_-UeSc1R$DArfTfaWx zPT9X%b>uYA8CQGu`f0GL)<3NW^FA(@^Hca{e{pf>D=hK6bGZJ?L#nc=!~vki1Z z_nn6Bf@bKM>d7ie$2fO+x->sFW zagdNDV3){fq%@C2Z10orkwk9I#C}Pa86oruESAz^~LQf+fAgXAda)!dAHk$a#%QH;BiM-)=DV5dq8sF zX1^U|=m`#K)zs>7qH{I{tN{_w>Dno9B9G3|BH;t=a1vNeNWI)j(@+@Rx3FlS@Cuf> z6-=dh4dlwV!PX!40^st=z>>*#J%ss_wU9h4EIrG7HR7jkICYC-!?sDd8KpDL%z-Gq zC)KuI)7ds8O!NM>SchGA1Wp-I*Iw-|5SL^Mj0fltYb_k0>%E8QS}>w(7oceyA%fUb ztI+cxwwJ3LSQd^L#@ju`)%@q^9gPd82}vvj2mN|IvF?-v51~ZO~IXN)lW9 zarK)w;V@zgqAKKV1dOnz&uVaZr~lu>+eNsCAjfJ6jq9tmet?44wz2kDSP}Q{rEbCG zl*1@b;Vvg`7<~5}Aw@o0Gm{r!Wbb5v6&eb9g;b)nh(O_hY}#3uq5oo#wij^j&@!B~ zb#k-n_HZAev_jQz8jZSzxEYf3)LgyIOvU}zPCC?5+O|ISC%D)FVQk^VLY$}&(k>*7 z<)-?0|M}UaV^!-X5mK;2b;2blN#`a=0CSqiQZf|XOtb~mJNZ`w^S1u( z9yC~VX*YkXZG31J6i(UiX+96;`v~uQ=c!B`A=PwBjbDY5LG2H=d#d{85`^~a^aNDOQ~!eOyQ^6fymo%XKBubwOR3<{)_es;JqHUy7T~)_Tj1r zkNdx7t4M3$KnTVr3(G}(n)nB5+v~Y-GKS z|7Bx)Y=P^#I_|w5#>K%lw-(Njrt{*|w7fB2s0dc8napOmvnhj?;%o$kz-q&Mn_z^b zp0M&h?x!9HuKsElO4N5e?^cc@;G4lI#3T@*o?oa@^AT9ezs#;@1dGu_(`;Ng6{$5g zjm_o!f38;4SB1mkN6e~@*<3_vmfEV!<)DQVt@XGH$z;{w<+@<`y$c`cQG`(HEZ@gB#O$|M|ZRrj4?-VwAx z4Y#q7b)cC(Vrq3+#ynbzf4(L_%OD>2){AGW!b-RQ;69DbMIfS{YC^mmyzG#B+w;Ae z$nU;en~T?hlzoZ^&Z@#U`_B)Uve7ae#k<7iezux>r~l~GlKaaj_fQ6{aT|C&Kv0w3 z*Bd?->{^R@sd253O1$;$Q-n_Zb^KJ(o};7$?yA^edI858wS;PFwr^gcCf()g%wL1} zZ0>ivJ%geDs)=Rd(4Oz90YJ(@_70kp=rEG%RWnNUbz@>LnL4Ne0ecVDTIdK64!x58 zn`?hhCCHa5!7R1!fRw%r!|tIX?LtTa_PA%SDNh8^y93pe2w|qi%1sPMkWx45xYk#H zww$rgppoL(80`xt$?rRzL;Wz7dn}lMR)Hk<$sFhQWjYu8_@Bf#N8)>q#-CS#!Qht1 z?pmry_j0{Y$llPI=7jMkQpEhTKlf9r1E{Qj$ng$B8E-qIA{-`vm)~XST*6ky&Ad^sOvSG z>(q8Ra~ooEHnXV4ZwG>MX;*<=0X36V7l79O&XyV{A=1?WHE(A}@@LggOwHuasx~#B zoCi}4_n*z{dCMrtc2oh$cNHPU$Z+stA7SdaN3Zng2+B3Ohq$jM{84)_MmRO7IeD)U z(@mrlwATHt)K*b$BX-AK47BQ~;kg=;-a%FPd>HJ^ZlFkUs~**J%$nEmyDm0x78*JGHw03MFDN_o@NPYXt1<%XOEb z|E8fjfv!1P577)apvpK5bbM8A8^IKLw&u^d)VdjQAk$LW2+SyVYZ$f(MjAE6EuUX) zjQ_T*AK$TOM$%Sy^!&ygNZRdzFAihHQr6`w~5L-oY2 zrl7~4H}7?;Fb21NM=Ky0@hun@w?^+WlJfJvtcG0$QvL(gOJC1_XFsV8s+&NvaVmpm zZufD=Ylw6QOfhPBij8{+l2QCOJt9DQn_%LtoH%hCc^VR zdO4<|b2u%***bvSiv21{EY$11uZ2?%16g+eqOr14KB3kNr)s{vg~EeQIY*-1+z92f z$>Mr*{_KwFjX){jye9*sv<)F;$Uf^1xN1_{n%M;;sZVPMZan`THV|jz02a($DwiWc z-sP^QcqS2#o+^8gW!{E7!9mxYsH+dW(joa|;h7ePAy!4H^p{^17p-dDdtrJyw0 zd5-JC>dqf}aPKAxR(|2__iC%pzb!; zIZ4!bN{@`KZECBUV~k}j!(#U=)Y^J9IZvr^}=1Y*lf!~!$<^? 
zn;aW~`F5;)wsh}fY(pZ5&fWBmd^}lGbSl{usEW^4*E$YH@u!ad zu)I=;GKnT{&Ai%p1PE`F16SiS`G5Ck`xSY#-tyo_S zpksaJWgt~5?^<6Cw!W9u#_Mp3ezs(P6CAXduJklN3AfP(7B$zlI`8x!eNyArdte6I zM~}4=S8Z_fp#N;rMY!r39*0uSi#qSkYbaFR-h+B798SJhru95<@9Ny~lH&_7MdrMv zn$=eb;j~R*wIAAZG=g>YSA?PezNKfQy4`AC56Y@JDi{_{Vx9ZtjR;YKn$Q0!C2dAZ zsqI#@DceX=I8pbmw)H6-UusJ?I}lo})O>aHthx2$gcoXn zza|{pe z5b1Pom#jw!1LrMPx9^6LG$jZ2Xz~RWa9_7_z8O@&{kefO0!2OC>(pB?+YrJNFZtB9 zBN5uORBa(;S0a=Tt2TQXhci5^`w^zX?cf@sOpJ4CckoC)(P-3yfhquFZgQ0V`@hbd zs}-JQIQ-Z~tDW9e1S!>vkesuw5gxW_$Vs<-oqiw8i}if#=Sii~({R-M**yu01@d74iR zW?1d>{Hggf>L<__up$_Gw!!wRKpcEjZQtu29IcZ)iy1N3~h@9}o6ZOo|&+#z{ zq&|~CoAVq&p~GMLfI0e4(@wiMVL@Ue z`o$U_KLAU|?$VRSB8P9oB1>#dFm zcaks16YOXPC6aEANm>&$j~4O1{q|PX0F=2tL;O`hYF;*D)51xOD9vN^>L?NRr|n&` ztNGN^U--Tbq+CqMGL4FS6DbVWKF88$?Yh8i#MGa2db?++n%|6idbzKJ9sXb4C=r@$@m3|GF$zS+AAP3%Ei}e zkY1yuFse+vSPcD7gJ|EL4q1;@Cgf&?f(%30?%onq3*fM+_-X(tHfR9m#%NBzLxs}R=eh6lX|)EzV`&xexi-psj*AlJilCKws0T}P46yIWMIoB7tY z146tFkdAK;9-`)aSSdLzRqHVK@}Vw+iiOFFS+|3RkMn6YmHu3VuxC5$3JV7)_D641 zZ0Ki>o+tem9cYu-S3q=nv<^KtBY&L;m8J&#Rb#r{_FXS5Z|^X+1=Qg{jP9=i-NwZH zpw?D5_kXq5SV!{Lu`0ke;jYfMCCnXgs=3E>+KyQtVXqE{HN&Z5-AQX2MM{NfUH%bJ zAx3KjxPP(&Ey)_dlo00=3Ac5FwN7SIE*g1_wMXbzfzAkXhOt_Wi-?3ceT0SulW9Al zcoj@J_O-70Ivh4N0@}wi>pse1!&Z8RWoL;}%69_XNTZW`K*z12x0&+;gw$C+X z%P2VcXo{M@{nJJ78jw6i8DwsCy}Ie=J;{RCLOZ(r0x2xJ8dV0}r_;P{W9l`EOv(fO z(EmJ$tSyoMdN>>nPGt|HbOLk2svRFw3lO_T!tJv>E4Ud={oWn7BO_3!ycMf8+rSjP z7JzWI1A&xIwO#RD{l5-U)PQ0f0f+DY8MQz#Ssk{;lFO|}^6}w#!UmK#dt_3chvjT* zyOn6U|DG>C6VF=iT1}KPzBygs#U7q+2hYLqTFQgkL zb4yNk&w*&$abWmG{8YGmMaw zPZ-wHc^`qRwyOA>^Sz7s^#tDtgsZQ9z0Rm@>%aYIsaA$}z$r$#)Zl-=+H$nikhX_1 z-bbmu7*{c*If;f_9(j$E(!AiS>q zt}aaX^&($1O@3xu3CFLHuy|-UoeM^C*1Sv@+k-Jf|LY(T_xIb3X+28x&CLKdhT(9Q z`?N?Kks?*EDAgEjbN|iR_PWCeOkt;LnP{6ZiN{roDv-Y({#c*BOE~Fx^}BY9#u1X2 z#dt=JG>KH>T%DET9D!0?w!@okUG6FSG%>Zvaup?2&x;EyZTfy4 ziS2x%rrs39PTl&5soliJd_m5<8Ye>ZA6Nw#yme#Cbt!LDx0Xs*k*MkeHTzScTDwxcj_N#@x>j+7N?*mnrLEc14rEJ^t zw?Pbz*Gmg`gbCsFQdQ}D{in_SL>*dwfF%FAHThUD{PIcCmKWCg`1{JUef7Ujkrcf> z`9aY^3~+RcmLd6`Ss=q#!bz+q>b2|k8YLR=iivUz{qGHPx0D60^>7O6UPqnA8AeDI zR2wXeRrVXv6vwKX6gLC%(<#ueaH+)ll~1?jpZy>1rCs5qYgMeBeL{$|{@~(&Lc1Ud ztWw6Lpw#Wp5!R7__?(*oB(I>8GMmp|^PBccF^o-G zk%#_|_Q4q|fK~CC#1}NY6vrom@49Q7;kj6o3Curw4JKWV6y3M+ER03(1RE4 z=@}&Z-Q{Y1hj8?)W2>pfE(C=>ymxermVqV@?jR^S=(J4$_2>u`88{(HoHO}w!5D$A zzy0$}J>;cMp?gAV8Swo9RRo-j@KTRq` zYB5{|=ws+ka#;5TQWQ1SuBox^{u|FjQ9dZ=+74>Y<%V7lK3J)hb1~MV(8cur1A8eR zNXZ@ksgn>Jd%|eW&TK|VQBE#a?HcKEo>SchM+xp1Qavd3dPJ1k0V+_QJ*9UjygLfe zt^iGxY^xQQgKek<=b9P?pN4i~jZcMwyudP=oc z7w@4E!`kmw6Y&5@{$sV)_qd1SRg>1hWU8|?wkO1=NP{j|0~kAgj!0Nv)l%r4>PKIE zn@;uHuRsQPto$Or7RGh+ep5}*(EmA*Yh1Y&8|%?$Cp@(C*R_7PwbGaw%KG$sfYENkU zL#!V`Vr@Fd^r2)|ue%;?rxr*y<}^qiXq(ojDhfGL987uNsb7o%Q10R?~>6rRE#XaZMMB5UmO}c z#KeUqG+Fi?^65I1J1HC~4&08=GFqz2>2B?01|@17d55B*l($Bmns->zhgFbO!HCS2 zMM?&-{43>D?Q;G)deVxsVqOK4@v=iidDQDVS`zj;ri5?8QK#oovha2riKv_>VOVzn z8LwDMycc>I9lZi6Ky7HYbBrkAYj+Jl*Fb!ELYZ2HlL-iYEMMevZQn|* zV!Q&w=eWryD~6MwV`yz1W$6DNBRXWW~BJt z=^QlHJQF7)=rl35clQVH>&qe1aQ?<$JAiOCW%fbiJ+?69|M5hwy9e|RgadD9Q+XeE zcbmw~z&T@5x8dsbs>ankatdlwxcPBjMya}OQ-UB?p~&(hf^2*D7ZU-fVqNYd%(fTf zuyqxws|coqdS$fhXyMM=U+Mu!&u`Bd)d_ZEExVM1JLY##Q}irj-Gie*p3bVFetkgPYW@wUHbq`6NbsCYXfv9d7dJILXhgG zIlOI@@B&VrMmAd4Y6Fmz=S(f>zZRB%4auqd{~3IHvdx;;!%=jt39z+c1YiEJgtxK( zlShxLNSk|txXmFKLS$tz>eIn;^fpxdHvhuD+5uuz+hh5^8g*buXSG*BwA-Ls~oF|wM2(Boxce1IQ3nIl=+NdHr1BK z5hBn}+Me3=9y_)~$2Z}q$+)Mz4sjc$RJ*;^qCQ8#+JeVh+7#dgti1Ci`6|KbJya|hHIxz85==GR zL;vp}`Fe@jJaIh&u8)4Ai5(WkNM6?72&97Ttqu6iJ^bY1@3Z9|?ygF(4GfcU=Rv_e zCA}prqeP;$zd*@w^vd^Qh69sGRVys_uxdcDPO^y^LGr38(O&vZtjR;QuDT2))q`!| 
zzY0fw&Q_Fnvx|KMtd*woH5WRi<^IoUo;o$-`O|6!a5H~;X1;0}vxWx)^xEpm038u) zMhGR|dqFDI+HX@cloA^8yBa0K$4FuC@X}{Bd|N{Z|1RLkjHF*c z(T62@n(n{zVw~?@!%5g~L5*>S{=Y%|v6{242lKy5S8KDw2xM((sjM21>%=HUhd!r8 zr)B^P%JZ>vz}ujdN7tT)YzI=b(m|AC=R4bax|!3Xtp$5Y`-yc;n+4!I+@Sy-L8dMi;st73U4)_&49wB+nU zB+YEk7oOVM&u`wvdI$Z~M2}zpch*;m-wp1p7)Ijew zI-0JhK_Rold-wkI&~&1Dr%+-?fK;imdJW}cFqxan)bgwrD$zNOR+L#FMX8M}l@^Y~ zXZbbO;x8h|=wLN-%l)5cH2}U|L5Oh7@jGuCt7vi@_~@z0{e_Uor`9Q&GtP@hDUn@@ z=JEUzii`{cReWKp`b3^rLOxdpdQ~{tm7}<0Z7ca28ademmCLlq`ku}y#oPG^caGZ;?H9pRT@w@r1e2(9Eqti_^w3>UhUc|S8c`tPI=$4| zw)+T5u~4_nALK&`@2*zy5tyodzN{=rb5g$UzszUtGQgT}RKhU^AwTJ{F(8dRg(Eua z$8D$RS-yVwTdMYRVeF}SKK&ZNUR|}w8qJFku++AVe|qR80;&8dQ{Y$qf40@C3VwqS zaZmLdu&>d=oEAVC-@vJE2df7g`o{+F`!xbv2Zq(Je$C#Q+2eW?LU7=umd4)d{~tYW z?@xUlMj2ejJ9Vm_Fze>|8}eb@pJ1aKp;YU-so5Sg+=LWPPS^35%^*h0+eq7_+>-B| zRj-D31PWtEd6BO^X6v_93z~M1C%76G{#tns6b?~Q$CO}H43-y!zcMh77&~*RZ`xA^C3QOfrt+DhmQc9xZ z*3+@?o-#NOJBxy|18uE*9_S`{?FqnY$(cE-l6_ElE(@np@8KnyY*zBU6OniISpj?6 z6U-NY6y!whhFk=tGF2Nrm-@f~>ZtH}1G5;7$g&0!< zzJv(wXUdDtnlRD03hS#U0WQX9|DFPolj+P^CuoV}8CsMznfFkhBdAOt9$^0ce=hac z3jB-wx5_?V3Z@#=qs4)=SwQ?70QemzQxzM=(rtB;?XcEfNhe$~7U`M8$ByLMnBN@|?Zj!qqn;M)a? znxk$;3WsZdwa#*ENg9;J_0h$02{wX~f}MBDqZCi@g>t-Bh} zX_S=xXt|y`(!3t{8tBDPqDn{xeQGJ3Z_=( zF?F?1qP6)ojJk#t4k)_%!RrVS+L%icnIqmn!^Pe`%)d{k_>EqRY;(~QDsog~+-}mFxw*Sa>qrOby_mPPFoCQ7HK7a;m zjP1<|W?xO@qr_4hr^d7bPzuRSWvzC8i4YYpm)oed{{N4RXl3#Qp$zm=N$@EUiP&bCF4e*Dm5_hV@NiWS!$+45i|O+M7>2Vbe=&b^x6{^yFM*ZF zNmc%GAN9NKCH5=93xg*8YXATD+SKP7oU+t8Iow}IkeIa?oqi&Z)*-~ItTz+++f$IY z5`jIOYiqT)6G5$1n@cTUAf)t$mzpQizJnBHw0)bqVEV>m-F@AX;XPD!zuvK9>_Ccd z8`5IzgQVaEyS6U*2<)0)jp^ec1I-#tI(^S8~$s zEtIrc+Qp-}-aOpX_SYb`-PqZXgsjXqN8=kol4y(UoAQ?{x@|bN86@qt#iYN7k|W03 zou!fdUu~`h<*g9e)jK@8dO)fRRi(EJ6Vl}14)2acbb7X|8%U~EWtw@TL{rwf88?_} zRnN05)oR5!Qfk3*`A$HxYBzo-6F$86mzcJ2Qs$x3fpF%H67}JwjQL{(30b+TDnA3^ zvu1T|`Zo(pn41s1yKtB~Tq!MrVG2(t7OJi<=WD`P`-`>UyV6H-fpxL1ysq|9X4@ln z7hn>5`1h-&x!A`Sz21g5)X)FZ`}a<$w@oJ)KhFIb?pct>$d#?+p zhVE%k>fC_Iz{up>spgpfW(fG!ewPW}>c4Pakr~{EyUzWbwRf^AV@D)8vZ`6wo&FOi zek5@hj-YkFznSJ%t*&R=!^z3drE5vG^gY1HkBJF&N%yzEF^b!gNL z;GGabZQZJ?7L07x{#wl$#^7jvX2!DMhtefbattZ-Y+Nia>lPKkYfBX z^=MVFYf6Zt#$XpvaIOs_s*6BM)fSd6<-=Kn?aTc?->WUUD_|P?ZY8{3D7c#VCu(pm zn2e0X%dZeIe5U?G$RCw!>3{GJGhi`-^;+T*?h+i`y~>~SH6s5;IO!do&AQ%e6mqJih05>^sE{YyL64z-YQXMp zw>(&KkKKNd)p}Uu8nm)vpc8m49}fqv-H#`a4S{W+osKtx7=HM-w3VBLBNSWRR^vCr zX)nB6nD>>oAVn_ittWPj5M94xfVmY)shqBCr?Ix7V5V;3Rs+8slw`pgT~sbRLSiW% zsm~~8XFfujsPWV;Fnqb{L5^c^C%bORalHTFK#c+>K;ob%u_pl>?it@(Uel(bq?L7q zBl%ZW2c~q=5N>P5zUG=UedIVWm`!Ap$+FA&{?j@tx|q1G!e{$#8SWa}dH}sz^%bO& zg%?L^KClW#md?;FkpBe)oYvE9GQS9=l+9Iul3YSblE&}n7H28`GE#K?-6f`T1?-Zn zLA<`Y|9`61cCUfjnV-=dUr%sM?{q^jIq6b!_!|W$`|F&dUK$$oP;Hi~@V67g_@Q|H z0#b`yLSo_Ju8fA?^=&@2jdA`wfVlHKkfgUZsX-=uu8Ge6WW{|9dsz zSO-ePR;~=z0}--TuO1K~*0&PD!ltF9|tYZGykaVMP7AYK+J+-V4W~y4d zP3U2YF|)kQ`TS3NkYS6Ud>{T+y{SF|Nb6`VY-|;dF|RqqzAb3M_*_A@L#cf28TpF7 z11ahnEq87^!8C~Nw3Aw_Si2Czh9k!!jlr_mX8`Ws@uW~I##N0bz$n~RoaRn%5(RfB zPN{~|`IJu1kiiisRlQttNb6$+^4Yzc-R7&d?=~wOt_J=I#GOZsuxr6RW3ELsoGmbQ znJq&}zMQH0WCZ~yv(==oCd^j=qZN)2rpgiZMX*z2yKGsN=@MFsRqNc9z~vo zC%bo@9wS8gr{`*@`z0*5vo@-wvIZr$!!_D?lCP<^92SZFv?sXgd>#kS5K^R%1_vCT zBN5($>yvt9AaXXtY+1bMDWkT=YrE=YKFHfCuL9=WuGS+1rGIY9srz*hf525S-z4IJ zb~JEk{eX>QbwH@*G3$`9kyDxLfozx8eA+AC87#krOlW&gYlnx0JL9}j6Sxg<*V5HB zCDPc465*Vxc6?J{o+)O`U}|SwRFvQrgf78^c#Sw+r8s z068qrY7M4^W&Qr*5H-w zbGVtf%b)2;ZV5*$uEP57Hk@1z)tNsXGJ?e1&#wiyJ5YFbg43CRGUvN!DZgvFor7bf zh;F$yZtjE15ig++fUbs~%l)56eF9zkqr*m0oIQv4vUTCGm}|>xFpf^Rm?iBeP^smQ zGjqYJe!pW>^miCJ*ztAL%UETL&l7`WCTbG=H5kQwd-hhW7s%M1bZ~%~12{R?9=8#e 
z*XW0ATriyh4_9@#rj~SHBZgD&-qX;V9-pE^z4kxT65ey@0OhU=n78+!&jzBh(|Xdk zpor+atIpM=45L)hxb~?5-v%&RTJd(9@ye9LtjNpVJ-r|}~I$<%?BIxzk* zmK;A0V6v$ff_3LT<_`cHk6{Y*B!WM`46xeq zo}L__pZ9*&D_u{7!@o+*L5#Rhk%+T-!)t3q&+_r;xiW(108Yonb(x^Mb8wN7D0LjJGOFrHVS-sFSxbH~boIP3l+ot4$8 z|8+o&#vv0Gk%^;3TzvMhaWE{*t=7XpRlH%eu2HFttbDhjCmIA8h_-TXM2sdr*O6|5 zlJry^6WE+TPdWl%mu?G`a!u-Em^@503pIM!Dy%$CIy3@j2@g-~<@A< za_W|zQu-rkm@qvl=X4+Ei}#%hY#tJ4`V_c?rP$dXf7G_#=ix|sRt~intoAMMwTahPo#wx0+_7v8dOJHi)GQjmE~57W;FuJE@*MwdWh=Y*f(_fhjE z@0A`;w1d%C;j|jvTEn#v%B@w_HLfE>t{w_H+ct}D^dzgl4zrQWO{7v*TtDA}DEn|5 zXWj-Kga5%6kR97VL_Jq@1N|>J9Lr4wW$Kvlp($3+Q2Y{j;t?wQQ zhtb-lZ!3q7k&>6y3YPFC99i$~P9v|O#5Nok=$tS->AyO&v}C~d6vm;mV=8P_-)9J{ ze^!dm1=FwZKV2I%-6HeX$l=9PnQ^@PA|FjL-fG~YD3a5K5lwYWTY8#H4JHLOQW-%@F{bOF z-PRsX)yb-DJ^ZNVQ`a2|FAuXauE!> zv(0nBrF{3ZKl}3xU05OOWk-F&mHhKwIl{XtEX9eoAaf1qN_?#*ao6GS&li8T`EDe> z2?f?}0x68MK6?*O>)7-6iPpf~?*BeBSFc1f@18Q{>|0gUJ1Ctk%yrAgcag#cG3p%d z1vA0y$J~b%#)*V>SR5jr!?nxzNSLC3z-z~2!LYVi!(!t45+%t_>$i3Buw>>nEYK4u zV&^t=uIw5?x(90I;h8WMd6@4`O+}xBUH9m!oZCS)w)h%Ra@FENFY<+f<>4izd>ln*?J2Ndl>o?R<(zN)XMsJLq2u}8Y3G4GJKyAZ4yk){9bi6n?Vv;I&LJt zML25X#r9M?ATffJ60DfPk-^rU;L)MtYg-aHSqq2Tg(Cr%SK3*e9X*A2on{$3QIg3i zGk|s(au-sHv0H*;P{dkY4eN$ug>8!2)t3p>WaS)48;DQ#lzq-bx85Bk%c=ymscej%|OI`u03=-3nYD471~nEJW2$2x)zBRL1|IOiROlTIhZa?lKj5{ zCvy+8Ru{60kWyA7dm+*L!<83dnb&gx#_@?-?$_~k8H(n+N5Xu5B{4kkTqCKgP?%L! znq6E&k(_;aI=c?g@v(Z6`i5{fKB+?#4kStKCSr;0n-v*O09qIiN zC9Zqb%xK{-hJTnM}Ngr>i^p8pO9_U)1#i==5D) zWC`VE{?^s1l&^rWdpz!Hzd^vm$y)V#EsVRRlg+8Y8))Mn`J*z`GuIGZ|zL_?UppvAgb}rVX?5&^zPW{pZ8yAkvo>kXdAHO|+pR2>d zJA_j~<4nGqzMW{~S+>xf`PqenVXt!OHLC3%L%?wPj%DS5!l^1%r6%$<89FP`8luUb z!c9Oc?=%X2E%UTK=LncWmJ*5#gn41vpXCpNLKmTd zwi`do2()s>DU%h!6jlq@DRvbtmFI-R9p$wD0!p~ayQ&vqd9|08FA2*_;p#&F<;1iE zk-_(s{>weNV4pAFtII0agkz`9#Ix%jep*fW4KP}os@YxjST{o&te=!#Zz18Nxfs4J zjPKf9I8)V%Wuv5Ou&%?S2XoNo4r)p>*_)jvXuz?iN3JZwIne090Nmq5ySX32`p8c6z+_!Q$eNI`$}?iQw=e~5hT&$D$1zs3ZMv+X?PO9cFyK=|%ezH6sAzUjYf_k*Db z_*l8NsJ8hHQfkTpE~GUSL;qaM)oTv=Z{e=0*sM9zmO$5|$;vyPTUj`e(wLeVwhqH- zoXfFlSGhJICX0HvT(TPxl58C7*I<7WS_G;k{0D1)TgQ%)wDaW57Ffpn4jtCCi~uCn z9k|;Hk?(m`YFq!+0V4-JI-GR9N~V@D_MW0;TWF?^5^c4W?OotN-+bn^gF<6ym7n(9 zz-b(!8MmHTy0E-Y7(Gl1N~aw`*OX6#DS)AaA|8PweS65K+dEeu_fbGrJOf8mnzY(R znMIIqb)>rgd58!WTisvGw{0P$o}F0ExBMF~Lhk9-(Jn}|T>0U+XYSo;p?`6r?^7YvMDMl_F4b(Dk z&5CaHl)ZHlTbegf!dm%BAY;kiLL=Ds&Ejqgc9LAGW1wHaIV2Oes@aS6&7D5t+Ra*P zy$gp|!<<_DxrY!{uaqmt`=H`4eOguUK_8g}s-%w)Qm{Nn<@?7-C5pv1MEw%N;_gW% zE|^l9QlG4`*^{2)eX2G9dy10G$Lk8~GmyBOqv_}UU+2&J>uV5SvrAPyUjS*+&Yt^r z;p-)8n42xPGc}ccg(Sg=w#z0QepJ33K}YHAscrGRfy2%2%4O)EZy*2YM~)DzOW1)* z{kbp^mhUEq2@&2xAcD?u^e~isTE~Hl4JdfeLGO+I-`4!wd#syK$m}4sZWatj)6F%^ z7I+}<_Ou*3M~X=`XYN}Avogil1CKuZ8xQ|F>)8$_J#TmzL+pTY+BIxDq2jAOxv?uz z9eb(u5nVZqtC{y0w_xhbDKBOy@I>OWBg0^g+$5aJ+U_&77GWAK!t}@sQ|~{q>V}h# zVQF&2S7t{@Gl{?Q+45X1*UaXBS$IsOMsxFh0&Blt6?qYkn2$N+UkhlE3OJzraOB(xiS)V04(FV_vKJNc@A82v6B(_r65mAUbF4;34o`=*pa2PG^-X^{(If>8taWf*%M9RFIP zzDd+km1AhQrLkwv-l-{e4*9U&MQgpmdbq1)wZlT-Z=rq8R&#)RCD za&oB_i6;6e)3v}e2`0gbHo=?D-!{gY3!EeQ+j97srCQei7!Jc{&mGI#7c)Jnyt^;d zByBc{v?$dG^fAgQa=XX5h(rh<*52!~u=;Rru}y(jpfF@uuM1g)Q^Li1C*wjsY(63` z=0mrE>jcE5{B!mba=6@oq-kUFAo+jw=W1qp6^Ih&jaBp**ZRnAelhnRo~-7Gk@ELXnQBOKuLSVT1vNpv>U&o_xYmx%XEUp16`Xm=Td|&9f5A9|L~EIdwnD; zi8T(rkD&1T%+#wsJ^)G0Q%O2?;mFC{mXIIkzlVQSE%{OyQ^r@AS_}BefB5jX*?Is2 zCYGl_sz*JRl#|8J(8A|LCQ#22Ql?Q1eGReJ-D9jPRhifKe@kD%-^$+~ zY1wLsJPeY<$##8j1CUlN@9|`^y%AOM*jH_)Zi2!eKNxfM`j(2d8Bw0LU|OPE`Z)F8 zKrNc$Yb0?rCa!3+aWLhuN>Od_HUykZ?=7b=+xvel4A+QlM^ETpdftf;<$l__(OqyF z0w-%57h@>VoeRqCx*~J$V>tuR+O!E26=q`eAWIkS^ra876lP?P6x|;AP-hQyt%b3x 
zeL!XZ7>#afAABakeFllMf=YATTRf~Ah|G)vYR98wwumU>g*s-nEF59f{8maU`BQ(u zyoF`3szzT(yv|Lu+2_Ul_wIKXw_xq>-}FOmbc#OZ1o)|a;?(~ z?a6?PTwMe;)N8FCFXsQ$0phfY0kh!^VXz!sH%C-p6j0 z??wK!{#MP&OQ;*F)B~~ID)1{*{Ovh3p&J*D@KfDi=fAe1`9@eq<+PgKhW>?bIjuKU zG$HGHQaNRAi?HjFD9rPB3jY=$fsfK!4MPf|FX3s(wAP$oO`_ru>d!r3LP64X)nF~OMl%F#U4JdQ$XXB`?Y$xMI|e7_wnwaNao za%NCd;c1kV(lB3)BYli|FQLT#F-p{5tD=l>rvK!afgSsX(-Jr_W$(2H3iD`6HF4-8 z)}8Qoe$fzjS-7h@2beY_Y4^Fh2P#$(%Gyj&Sa^VDK(lZW2%k=8sLGe%&S=`MP3zPz zBStE!chkug6gkvd|LJzu;VJ^g9{!eQ_L?w}wPm)y!_@n)-JNPD^lzZZl=iZOb2FIo z>NYi#PXcgzG2wRpZnN{!!56T?+Rv}S*qtDe-`S}{5AwQ2bv^f>$k^ni+{WBT=$hYF z5~)2v>KfescKahVr5UX=T#tn#A)98>{IbX6&ENGJOtzM_E!sTEhv#bi^;9?m@3xIM zn5;cRjxf)zFgH4Qj-ouy>sDj}72)P?Vwu_<7^$B|=zo`-Z$;!B}t(LB0B> z|I^$^`~4a&&HS#izDY#q+JT{ev8ziOn{~ocsJ9{X=Ky>7MUCuRf)U{InTNk6^lyy*v7JCXebRcY-O-iK7mBGkt_8<1?qJ z>!9oyO8E8Mr5NLI_}y1K1Lf;dD}bnk>fE1U{6K84{DVzHNs%+*bH&yX6yp4hqCOT( zu}14T>Y4t_z0Rc1LSfF*R*i$^5mKK1e&QmMv{v^rdBN0M7d5+`s};1auC{i16;6`1 zl16@=4C;MDmWBmOm$C~yZ^|($eG5$nW9+vqffO#|h z)BbGvuZ2W_?Q&){UQhCgDFFVS1}uA_S%(i0U3jkn)N=&dsQv9)3;P;Wk&Z8Mf5qw( zThioG1EH4)ir20>)Yq@z)U*>+Q%%!1D3Pv#dq}U5QaPuX`I~$aPYbO8LmOI!+WmcP zfFN1U&y^d5^+0Os?wZ5C1xD)*Z&y_r?*Bj7PR@W`tv~l!>yS61L|s$1MZ3G0oBC*L zKmLkiD!o4n{`8SmeYQZ+pq7F7M&NL9szyv(K^&^?q`58N;r-fL-JXbpr%iT%VaE<# zJ4?3{h2hkNgk6a{-@UCj21-=jW7y+BzgSWpQL@=Rf!y)6P+j#TjB5{k7uVBJxNMFR zj^xjp+0e$KVlJ(AATwvU`VO=2~}RB#6=oVIrX?t;;fSrtj%gXw!#@V;P7VASOH0fev5stP^o|5?$= zJciPs8p)QU&FbuX8kswNSVCDtOS57$*-tEcef@bxB?Nt9=Ru6VoU45S^$}mbNqq^?1a9kQ2(8ADO2g7y$ zaO6{7L{!z>Y(p*iv=PSUK8mMN=1*HhD|Kv16e}s})CfQn_KQzxdbdLIDChHScC`&A z#ucN@?fJ_QwY2X$;MA$TwUMjSM+nc^qONlVyUSZ~i9|wq~ZZ?;J zPXwyp@4T~>^@C;ob(e+pLsN{ekseXdbgybDm0(nmKTTttZw z%JaH~7T;XzV>u?+oLgQ_Dhm3mZ?pcOG6i+xDx{ppRm^LG5%GRQ75QC9z^Dswj9pkk z_Z&L7Z~y!39Zbg7B)V5EZuK-xh$ZQg@$Dor`N1@E7fubTWtD0fYFpzDDh+W-hlRU9 z#H*pe-90##^Uga=zE41BeFFb&`~N{tC4zeUwT(R=q2j$=o_Q>ccg}KJ@BbyJ$cK+G zdcovpO|W|T#N>s;2}b=qIIxtQ4t<6q*`uGSz0ZZCY-d%~V8#6!DI%;@1D5pyL0(fP zb}t1xk>yDwx_X6{@=YFM^H4h1cJeRv=XIhS))`RA8;G>Zu>rgPml`w2t8T9o4y(=M zDUGd1N!juc<6Av;h?|8C!(qQpI5oe^8<23jLMVtq4S~>K&*(->snXTHURlYa2wEj-9i2;pEqR>&b6NK6Q__Z7J-` zr>?Se4l?zQRP7?6^*T+4jNk;2H?oHm<4h%~(8rNfwn6?NkKOkEHgQ`I{k5e^&8 zu`U&ToPW=jm+TqgsOu-DV6%`Umg#+7uuHuyv@gP`>Qz%~o3p0q%cv2?SdFe$0;k-l ztgZi5FxoHU;QzN4G%g^9pL$BMob6piQSusEv2~zp1U1{A8-rTE>uDT;oXb|jl|C6> zL6?pb&{dRhY{y3ba}5q}Q+1sAI;f-)tU>yXMAo5I!_WHRNO#YC^ICW-U)Kn}mLG0| zB>G9I{ELK*P1t(E2lgLi>cVoU3vB+m2T74!+>ou3-A72lSyCRSL{ad1j#M8HbS^OJ zTIIs$gL(L4f217yYj@~N5GO}Y5%5|PaKq|vvveTkUp`Z({hs#tn8wSfzQ+z%(eU$p z?Eba#e+{Jk9)hYR=ofwLW6ibt%S5Poz4a1XnXeEc&+2c#>7#$z?(M!#bamU(OHJtB zAbe{LmF3XC+!F8vb}cTigK)ol;FxzkocgBgu*Q|a)LW<|_VBmM*oJ}dJUiDOHs64N zrDaXM^f&f@*A9M}J3<7+uTc%@;Z!vquC#Ww1HA=}bVuI)IhHP*>g&k=XWaGI39+q+ z(T3q8gAY8brdHFx9mIicnzD`^5N^FvBj(P;H1hv*b#8fAViM3Ns@E9A@P6LP-nd`{ z)$aeQNfRjHLl;sFel6QhA}Wkah8d7f!}2;XI`us^4=9C|5Y{dnhTAo(nS3W-55eGZ z7VZ?!{WCu26F-kL=-R<#-g(}yPGK1_JbriXe0!;O1x1c~YVB&Z|M!E_8ak@DfRg6y zyY@HQoc&@*ql3FLVqHQCD~oZ}av4FQ4}V`(zmo9LI$C^HxT{M?Gl{!~LhkL&52l{D zBk%qcLr=Jky>AMq0yrw9?6=^E=9lXl$8As|<^0~CU-W;r3HTi_MJSKn?exT5Bue|y zpJeQU;b?H#_p`>3rCB4+I?RK9oRf#>c3sqf&1sAA%p31 z-|!Ob6lV?GgNp1PDOx&vj`u32kAMfuQmWag~Ao730)J>fub$!{wd?k5&jR%*hq4Fxy+v(?;sdp@@(7hZS3a%wwz zJNs|Xe&&Fv*t_zrlGNLNV^DEkhE*zHm5}2=ZOfx;DCaGn+vnYgiSg%Ds;L7Y$4Am@s3H++nfN!(pU z!@*Gp;7R3LJ{E4QU(d&*2QYU-SSH6*JH2<}nOe0m+pWY~c=Q{LI`PcjlN4rO3|_w;hUdn5_Z%1`Qi^+NOpxuR-eh(axV^y~3%5dl%~% z?a;r{O3d#8$Ky-YRjflv89tp8VLh1i3GoiUFh-Pbp<(CDT-!Jth9v)K)u#=@N<7z= zkTwD-@%gz$b1l_t6OzPh*QQ#I&4Kjs!)o}pfKlIZJDZ}9Aard_=SgB~ALV?!0&GK& z`@*b-ZM$%{e$$?@>Q?4=AmhV^_|o9?|4yW|Dp>+(TkyLOBbyWL1%NS_Hs*|k#`9Ny 
z|9=7wr{&0?dTxX&czd~O8c4w%4=@&0>5m{`^L!QaW8sua6V={dnn8)oC+c~?*&d#& zmSP@EX66W0Zx{RjkJSduGMMzHZSU0q{*^?ir+n(klT}c`J{;W3yZ~d?EX0WVB0!mR zFs&Sypl)Q)HY=OyT}DhD@Gxl|(v{|wq+!tMtE)f?yx$Sc)&*WeQs8s7esWzn1t_P! zKEBb%w`miWZzAEzjfdaVhv(yCbyn|oA9wkj6IXRk`HTGhZrvTflkY~Srw=pt04`=3 z(>;LtKAdLgf`f7A@uic-;niq9K#jhg_=X?Bv@lp*fBc6&sx!u)wzZ!xQ6sKVoUG*& z!W*lzU{64W`0CHsF}SCR>HJ+;#xp2Bm-NfWd5q~!Nj%RddBE>$AcAr+uf|v}5Te@S zN3(JJ5(SfA{aP)P>9|4ccto`-#y3zDwa>t}78zJS8lH@XG57`;nB>vgzS08xtAo17 zCo}U0lSJMesZGK4Xfjz`YKtFlK}n-Kl&KF#QfnOBxYDj2v|9ul5W~B>jIADRL`m`X zo@?)lZ9-5<-u^{7^<638UlSx9Br>OUWdro zlXaN65ir^6!cCxKv>tZ91+v~B{Xh#K*i|^aWJ~l5r0{7uv7Nv{NW0}4c?}Ya`MZdb zqJxbL{2nZe6<0NCns+~c*Br4W`T&js*Z!(@`S-L4D6%Q9tJzn0jDY=xqd)mI-9JDn z5C5*(u$DiJRq*}<2s51H%`P~FqAz=jm>OqgsyY(J_AFnO;{csM5CM!=gYk8sE3xeX z)E8icup2AQkHt%*uEMEKRp3{B5@%}{kIg9sUBO+T#u;GBk#Ot93UJm)LfmehY`Y&^(2dv1_X&x2fDa#{0k=V z2r1uOj67sgB(FT^= zh3P7z?H&1J^zi#8?ZPS7oOKQE{A$d!3o(4H)DU|NOxkK^T#}3XXx3TeZX@s}TE=Y@ z>3xyqsN@!MbfC%2sk+-pkpO4L)w_LxfWv`z;XD2RPUckwzl)&wzi1`ko?seVCnGp) zuT8}^y}jRmROhVN?*oLi8>bA^H3Kj;DL@?=dko349m15um*0N9(iYX$^8et%{u8h( zg~j`}+wc^LeB#;p86a_&L+fDQb0~G79M0G1{pf27Wnyg?zD!P`^dl2h(>C_-RGIcBFlyb;iKB|W z8KG-IyQK}q+JZ=I=i3_HNKz)P;B4J`nGsI)vk-{$*&drVsS)#u%ZI+szyHr-m=Z0+ zk>pB7B`b+c4cdbK&r=WoX0;X<5UTE-Yo`=0_J8xsUDkam-;UM6r^^Yyv-PL61HuvI zvFhVn=Xn(k(_>yVZ1`H@AFWQ{y0C0W?65KO4Jh^T;87FIn;`ianu&P}=#|u+a=D%O zd{MMU_lrKt(PK8-+ji3(G#ShrBa_!%h%84&-evT{k>#{KXMKMDGu~n5`3hTq#!>x8 z5aoY3#=`rLvTFDxT=vQ$_4JANDcwF1jIf=YuT#xW5wi1sbT+;GV9o29e^Q}al{Dn= z{d2^wX{e)!rQ5Ijc=lq5v0Ym~GeNSbHelk@Le2r~S<3Ow9@TjvL> zOWS~w99>C`vz(1c;dr9e(M_-l;HKN}m4$2u)B2k!Yk|6zw=Kw+IQ?0bV+2UOz_)wd zbvkM*T58pVioXp?t@^Z(6nq7#s{&daaaIw?v6^BA52RS%o2%93 zi}0Z5HNRb2&brJcWMy*Tp>4Wc29u_zEo*(@3IfreoI6%0)2>2E^ejh_x_`K&Q&(rM z_n&&^tCqNKAVg2owX$`y|Mru1=I$0OWghfO0=FT&r|N>UpPYkh2OR+Zri zLRaPM3Se!(JVh#@w;LtT09=gL+THW~aa1*C?D?a;N&P}t9<{55*_Ti{(K_bjd`{b5 zd6hV7&+fhY^>5=?V0N6K>FRYKr)tBSpt_qw+e81xKvDbJHOO^6o;p{xb3L5iYndzD z>dfCl!IiVUbr5zKQYup}y4et5>+3J*&w*5dwZB$#(oHatbPEFa%1P(u#9x`OG1Qg- z-7sN|09^|+-?k~YB1E+=tTW!$?r%dJh@@WMwR)j;Z%0*~ocwBV2N*s$SW*6+Fpd`& z>b=%oiOVWlw>kz!ZJr;bXT=>yiU3C4cB}e3fkJn#tg-a|-xEhaZMO%eQOfrJb~!F$ z>tG79w?^2^9YOZ{8ZR^b=jBJ`n2Ixtl2+~T+{tcAIFCl+Q<@ztFG5l*8JMNua2Y|0 znG>!guKF-~fT8EFWy|ag9q5)qDYAbK^CEJr&w`T<9#lGS0PEoMm#Iyg++G1ANz)rq zufj5RhF|K}fQZg12yM1O%k_Lt5Y18jjll9yU4p#{%INb)zpjrLmXa-Lf7}KlaXYBL zQ9}Fzg#Z-l;h$yY5Qdjs{A&kqz)09Rm=e%Egh8#~^LF6Cs*hI3yk+wMA*G>xgWyMf z1lLTeN}rJg1Qt+jc*aI*8( zr0k!=DbxN5W)DRP-nllK2Z_M)OwQ)A{{C}5snz0F!RSf)#hHu1CY_bf%MUK{X;)y@mzD_3r9m zH-KFc+pK6~k4;+$aZ{pCc8c2!My6~3u-lvB>)Xwr<(qzsf2ocmz{)!$xXrW#_ z(ZfT@arw;W%DBD%m=0^_Vg~{atr|BDb_SBlul~sNZx@)FriQnX&=`sYPnhGiW`4Z? z;L}=op6EX~anv|h9iK$0O7b_D%XGpc@BA@#pRjQSlNU~r%voC>j}W=-DZ`uvQ^HYI zVIInKz;drc8+#40$}#;SI@UBw&XU8+a8li4p9WzCL8|A@oNZ2^R{Q^t=2pjr{9jd~ zcJD8OUFCHMY_5{LgcOa~RjtuMdnNjE5@OaXSxdZv5UCqn`|K**6{m*w*TAkNs5(@& z^*RzMwGH+g{ol6gv>g(-i4;9(#M+^gTPV!PBwr1<4Wy)Xj7R)05M*c|+)8=}3_l0k z62;v<{`t9u8V}z?AmF3kpr^$9P})K2AWj9XTpskPsE#%UAHnMR*rb*~P=5O1*6w`? zO0GLVvlc|G;|l*IQ5C#JeF`f0o_dJzS;9;8a>a9D<@?@Ty$SGjzB$}2z`g)eoDbXZ z=p~%$aX>Tm3W_jlKqT335`F4}?$-NjIKmvbUwDIpiTPP|ZD?~#>|Ab5t%Fk6_@F#F ze{AWDcWk7&KCzT(RPo<}()XtFwYF~0hjFC`(xX|u-aWI zsO7dDh+*}sfA=eEj>Z5eY2_x=K!-e(c^4YNepsFqGQmfP+$Rqk2S8zDygivTk$+W! 
z8m~;|V*-2Ma+Pt@bRT_pJA`#45#H;58U+M3;9L{t3=n?ibxtZhY0dU2=sy3hb^%V& z?C4k5Uxu}an4<01iPcroS@PQfGLtg6x~Jj>u70T`cvo%dZVYh z-e5j!Z=B+9qK30^=oZvfgLZ5CHk>xb&P#?tUm#(2iUVw8gg*Y+=1K4_98nk~)IQ8T zge3o+U(h5#5#xCi+}0YPNbSS+s^BAl0loe6Z)*#L!->;}H8SY2Q&`HqmJgTCS1L~c zGI7Sboz!^>hKsfaph-w%{-2FEo+q+8G*JnEo&OwB!S0Lx%Q`ad-dw+|Vv}rZ^EQB~s3+Skr&hx^CQh}I<>6uzNc{F7>V(N=C^G$_>Ao>QBHLDOM|(!V zWY7CkySTI!MfN7h>d&^smh|2ll5g*!j*{_%sdJo6SUs(YzXkx6Mi+jlE#4&@tt{q{ z*;r3--m1>dj3cBjTm1DNMfF(|Xjm{dW*d_Mf12AcrVoVOWmA`0`#*vb)0nEY~VWl0|2KKyO&nkKbT^kEiFp-Y9h2Z z5x{GGgqkZ>Lwp?tlZOs}pVbQ!f_YMRhx8@_fxh25pj&|YVGvTS>uo6V|45IxaJ0^u zev$8K{luAbRsZkgdskxV^ezxyC+Z5oy*{#|I&Gg1A@#q8C~f8KK_90U5h>vj0*3Oz za@JYVVFX8rS|NnlYjdtwp8B%J&*;8D=u>XAE zaTZ%7UjqbCHl@10fC%mvM>Ie$g}X-7T?BhZ)W!NyqpTC>>Wgn+0%$&X{sZO@%FG#M zqx@ng#}+bzd^Zct-*9TbtfLgqk5jo;U!mC@7*w6}x_hq(g7`IIh}McV*j zrN)nK;JOh`R)@M3?@fu%?MrRYZBBe&ue5jBw!o34*LS)b_egT)IMxV2SO+)#ANxBUVGMGvN0mDh@qp^xD&j6U)nnzT$&P>Fx?SL&$Z^Ze)1Q=7hg4JyKCi zv_0x?P}13|O6_71CBH`0c$_illlL1aWpP%Jz=!@=W3B2#>1!RB6!IKk>%-S0`o;;! zgVey>yKme#uZDa$aB6QJQ{51__B-5zvY(A$szmurm*b}V&nP~swrMk%I=Fo9Of8Oz zvL#UtIQPAva~T2U-@hBN6^>lmGO!P}p+qBx+ihi}?LEl`eI4x43M3slTj3>{op97L zR%PGSL+5j}17OK5rZNuE*-~5Kod8m%ZVPo1Asp?~-dBG)jY5y3)w><(qaUlSMGZk8 zy;g5)&3LAdLhdDkS%mP$g|RlE|6iX~qohRy>7SnCfp1wDrw;6tJ*_~hVm+R`Dj03r z?Dv>kTgAA5NLDD8aC3q9ViMrSqt;O_L0uJTm)9;HI6FUnQC}Jccv0%XhckncQ&25u}B*cTSDXO50?gDD4q8pF7z7$kJRd+R% zq4?8%=@GWcgv^i`MuR+LhEcG>DBK7~*v1}gV-Fgz#DYb&#DWD26tQ5zf&~i}Eb@8I z_x-(nOSL%P=Xc)o=Q-zj&cEk8=W)JvNLwT9lOSDbn&w@Cl9daW>BhQqsCKP?O6QDCvsBB{eI- z@Y*h#2Cvorsy@oYUud+|psGx=qiRjDCO`jP=TK!W7>C zk9Xj3z>SOA)VQ12u97J5y@2e~-S2*3Bw%|FP_y>*p)u+an38=;$r={gQ>T~# zJwZ*4Y7ShYu!MrUPCHmm5ZtWSJ8w;2p>=;QA+k0aapSz+2X$tEydZ|>}IeRKbT$J>loTVM?H!Ef(1 zs6)xl9(8eM8-k*4FCQl(zw~Jxl#If-V82kSj{&JC$9^ZX`XBE)T}x@sEiGI4^E{fF5;O1lU&52{Zy&NsM2*f~6S z{$;U`{7GE}IG3L#|H)jfLC(W!R5r)v_kCRNryAJ7u(Ga#)DtV0`mfDwDWS`-&N^M< z-AA^hZriC>^W(u^CEWdwwTn{omFqpFj#ui4=>|%A>sTF>-UKxRFR5f&SRtLWP*Jym zG#IOfI<2~c(6OjqEk0Q*m%C_Tw2LqcY_v)DFGaIa*lzmYN0SMKM#d8!8W|zr!dN<$ zCt4pvouOa7ig>Q~+wdgO=IYHq(hVtP9&>*Rq*v$e%Cr1<;><%^rO$u(@k~uxUck{G zwWM^mrJ zI#e1TtKRqDlyB9_obdm{frzaCnCwH<%6Pp(FpczedB0l;CN(p$cHFWG1rsn>ldcBT zp!^qM@S1#U@cfIiuv?p$a>Rjk0pGMiXg#2X%e6yn&Nk%BGtKn~i3fA=P)1JnbZEZP zVw=!YntDXDbmL}}FqUjFd5B`(g4#);{h2l|+KQy4JgC>X!8WM0Rb6lz5mq6${q((2 zK=pU*+T->c4Upe>A~kUs2g3W&n$tHY=#yxYH2_!n4nUHf`3i>*3ghYCog_ZNkA8== zCx!{0L0wp0bDeXmc`%hTT|HB!Tu21B9NS88F;F|Z+Dy>F6z6bVY-*#&d9+mAvEcE= zGOxPOlN@@+((YnUDr;sOCih6`&0jPdvCFWUocc;JuJpfs(yoMDg)xkYwRXK0Fl!hf z^L@-yXKO}q15OcZLd`w_hi{@u*;P1Ak46$Zcp{yMM>3$ z>+XMb;(L80r$2T{?9}&>>ba}7czA#mF(P`kqfv!;gs2ev_fQ3o`&1^{gZJk0eJW*v z)KXdMNv@kXjMDX`DvxU~13vobANl1K z5Weqn=0fQ6b|kz4)%#rtRUYRY2;pU+19sR`(w=+sF7daEjvaj-Z38daMlH*7ZM~ zt~VXlgR&UjZqY6%3vV~!&PG5{#&!+w*fX(%WCyG2XEt@oHJl=tdzQZ6f*_B*b#}N_ zINhooe4K4d_ig;!{OB*Aq_J1H$q12x--04{LsWp~q)ObKSUeDE;grU5#-iXk9LDURN~#yX z=)e8k@b|L+-GvFSnv?GS_n(p2*N`HZ9;x@6fC)n&(GG~Bdj|~GdVV$X>1Xc|mBnb8 zZO>&J2ApIDVv}DhEk{a4hShfklvdwfI~Oa#jFW9k^L|WESD~gQ86=f9Umd~^Zqe5C z$xKy2*Mb<=T!gO^mY+ADYUo=ZR0~DL-2jDkQ=Y&k?!ji6He#bW-#f*_(qW*AYXB3sF|JKJwcER-g zcBb4+<;Kx8(69O<`T^hYVNdD-^4A+82b0B=4dibZ6{#Aq&$dR7{U4v7%p?NEU>;2DMP~z+jIE-_m z!Ai1}*v+2S7Ol+**)6m*mYY^CU^Lg_w|nByyyx)Zk?anl;+<~q>D&cUno~2?koS7L z^dHJL<32pd{@B#F?TW+$v{Z?iN=*YE^^t7j{OR}Y#`I$}tSgJFCxYS5_$UPsWX4i^ z*H5A3KXj_@*gb<4?)z)C8T7pW@v~hub$9`5yIq58D_ZDf;@3Kb{O5OCXv%f=<|gws z67IV2`$ymZTWTQC<-2yS_7+sTUo!17(4(ca`;_NBB+GJ|WSiw;>7`{K^(?9BO6$tY z(ZZ4=Y({Eck04*xmVZ%m^Oc}#@-q8XTPLl8b+Y{vlazpPdgLC0*SNAKKX3Tpzo7$! 
z(?53C;J*$`gS3sGW|6r*(f|GrN)-rW;E+FhppVQQZ|ALU6IwWacB(yE1*c;>m1)PL zTTn7^dn>YYI;G=oMJ~api~QS!Q>bQbG6JUh6S=lJn%|AdZE`RM%G2}ON7{9eaabYq zuH+n_Ekq>%SgqUg^)nsYCjA?Fj1b71pr<^{O+yhxR*ERjJ2kch1 zxBQSYaza&W*P15WMpTTsFG6m z^V5XGNC`cHQfPb2ZB_6XMK%P1qzk8SEu1cGvII+rPXgd65S|^Y3HviJDV=INQN$f3 zlJ^P8djV+(XYQvplv2G!kkN(>8%VmagunmiOMhdaPfT5A2g@}|;Z1&Z*iRtd2H5DP zZr%aXV5CH}6FyOnU6=i6(CW3!QqScGUFkGL)nx@r>Na7Ygh<1xV+d?3{8fPpF<&=~ zSA%%F9Rt_&zcg>$Yr&*EVc@MHX&s6}ee|2GMb-<42m4iI1C%_iD&)BluHL{^qCvg? zk=H$uH^Y+os^%YCgrgJ36V5o@R1tn0s6tOK!kzw||OgYX_f`8(_DWxqBkE}vhN#PS}0J$FoPSHPuT zE|&GrN}$SlidI@BteiW#z4O(HSN9qTdVW*NiFy%kEtoQnI~lGWsC7Lh*X@Zr3bxIa z`;uy_4RCtM53gfvM3cR9OfoN=&NyWh-VAm9HtxK)Afy?aNHJKmm4=c!5Go{iNfK+zZ@n*Nbm|xxO#JF<-`m^uO z1L1u6U@0$+7f=Rrb&^@D!d*mCv8fs!FA0ZlqqQS`8I-T1Qz>6byoZ0TTU<@}yVBIx zgi9b;6j-TU51NX;xdAD_jyB(E4SN$sk~5~Rq#Q_TOuAzhcpC|8u}(`W!ku7!$#noq z^e$ZT;nmoh8Q<%F{=^EEj0d;7nKi9`0C$a1YvbC{d4yy{+F=d;STOvVJO1VA>i@x{As2V@f0`CPSQ;txK;2T?hmeTobIqXOG zlGJ?#p!5&#OJJpN`cc^dtJ*4rt~HuR6!BLh$#eTmnI;fyu+n0~L9Br=Ph&W@Ce|oY zU##Pw^}>pGMrYX|DBbN86z^kgF}D#(x~%@1(cz}RDZYeic-Rce#!0MVZUIuvV{9;$ zX)A*M#sIM94BMcH&xz)Yp;$12mXf)->6tf@jFNd_JtENDDv@YJ*_>_8d&dDK($7YV zyOaF~<$bJsF#Q0cI(_#WBwDa*XoeQ;80Zj^g2q!;t+A&2H25?fT#K7UjOr;X$#!j1 zo#s)~Nf?(HFBg)Q(v(+l5f%|ryi=!*#pizbov&yubH4w5M~x^Kz@%sI4VNy$gUyPm zI<9C}2`-_Ace^!jSw$0hv{bA%u&i4MK2q2@Oem@!TtykMvo7Gbt*>i{xU+A95)jl8 zc2#rT5Kd7(J6;3Q%^vGyhS*y@&dVvc;pApQ*@}4wB|YGa6Q|1-?=C{hU1m(t8~6HT z7(?p=_j~M47FF{A4g-(YOz%{(<*O5oH2NAeg_=H59`4ryB_AJB1%j#iE><0S;QBOr39^A;4Cx9tq3WYt)fotZ9`L*J$tAF zK{c&A`zLFgY!psSch|k>a?C!4lx(de+ha=OC`pqc1zY6OB9myDbkwHu0VsWdhyR*C zHVdPJsA22)(7s8-J**tN^toxlwApYSn9l~f4c#`A=D`$!AAmNKTR@Q=Teh|#-+#@{ zk_O0+M`xNAJP(E44jszb@CAevW9i@3;Bv9Y+Kz3+CAgEa8QGWMqeP~~I68N-?d@La zqdB*4BjQyQ4L-%@&d?9Yg?ZFMo6B5>JEl=?$J)Ku8;N7GgGt){CY+pY<4o6L?iPw9 zE$#h$JJD&ruTRuO@D7Y=Ckz~QpZ_kH9GHOF*edg>duUx1F~Yv4d~`Tx*k%@g`WxOq^t8?c%kuC>QaVKu8|dFcoUA6)}thDedf@%9fNH?IYE zV7>o$F!gS})!}R7_#TvkOx6_tJ}9Yy3&@(OJqStsnRA_Mo<~Tj&%mq6V-)HCjFrF> zLFN3Socb;aM>uRD)kVSL@l(W9mI!sP6F=)|^hZ0)H$y4HtoO6Vn; zVtisg(CtvX3Z<1bheEGW(g(gWj=zDFjhQKhknr3QYu0zdxNyX-T%Cly2fOxbkM}P7 z^#MNCJg6;)!!%b%%@#m;*iR`9x)MsAznGaGn2D@H49A>}Hm6mqlZHlPQf>7Brd#FV zkj_u(TI5dF{#`BU*1-ebWpZiufR-wbS?+IulGD-dZrnzcbo{TUzNx;t2>}-#-Zy4! 
zPWW`au(_rG)c$7M!rO|H?6{-w0B+sS--f30`dJ(IM?fuqT)Lxz$>=L?X)tCCK}JWb z0mu6v%K?jYCK06L)DREM7y>ojjvCPq=F{Ev;@%-)73A5V5}FROrmWMP*~B~Yt;QmE04u}=|d#A(;9o&o7o ze4LcaA74C2i_Fx*s@vvzftUi8ch)`>P+VoHzHlT%1L!L1E zX3V*QH3DMw&CYfHCQuvmO+>o$lRh|FjkyI>oHEX|FaWx);|xR}--eLHI~OITV@L9d zCeedOfz+^`kZNTf+VYL}u{J4ym{y)^29b7?CuJ#=BRpAtw?Km+9 z(}dPZo#W%{Xn1(|mo|KEgrr1fMocJF;${+ZMAxPzx4?nakr@^Mw|mMsp({;z2L-ol zPWJsDR8!nd8hNgm?1O13W3js4hgJM|^ZW7u$naBpKqj*Er`kS3PJ{ELAJXHV)NU5l zgzyPc7}YH!>(eE~Zs=g&S_h6sezbH5V_tQrX9(ffNL?|14l2@QyI%4FNZ!S{CfqNP zRKbJLx$^i5CP4Lig4AEKdx60)&n`%b;pi6tn3q#?fNdxe`uqDb9}!M-PU~;kRvPWU;O)2R=P?v| za^iD}Ku}F`F@r7$)W^5iM%MvQ(myYT92Cx8L5-YMn&Rmp)PXiCb7M;;V?QE(?Ao)7 z0+~%3?YwkeI4w3dQ^UYQew(Os$VK5Wxeo56at~tss-hvLtn_(hHWY# zzbN<#(~?V2@;UzS|Ds?o!(oe2t20ix(#J6kp}4O0DeSczy9VKlgU;VC1KaDM!u{~3 z!3_i~{A4dBkf^fWQ{I1WfoUKU>A9mC<#s+6p}hJS)9-}fVXGPM=;7jBgnO#a=8d= zH=&*)rE`zeBIX&0=SOB*j?bawz(sl6;(39hk4zm?)0c^>rXT#+nBVP zEB6M{L0qEs)7t=72{rmVAnAXuE7dgkJxYqRd*}cm|L+C`-su3Q*-$P=lA29$Nv{AB z{nOf}TL~&XU0RFeRZvoI?{wqOYLxW4f1J6?8U)qhn1PrJC#{{eU$rinpm!K6}8!bo-_LNdy^5fMjG5Ud%Yn>Is=%sYXEwg9pzXUvB+>ej@x68nZ2iaWuu8Q~gOn-KSOr&HnEIoW|E(;y&~s zQl#8ALOtoxH0_~$D3`K&BH(ZsbE@1d&GtXnNvCliPBBXVFyt;1}K$;BgtB*Wc78|@zV zQMwJ(C;7hF#w;cL@UOMbQ(bin<_CW9_p4 zevl^C*2ay294Sq~Zk1B4wf_n<`7|K`SArRaYQwMFtyqPasw`ctnbzujqZhj{U{vp6 zbF#G-tfE$LBkK?@jqcdDgQNqgrzs{&a=&jtQtsNLI$;@3;t}v^e3ubnlW>@DyzZNC z2Bj?D&ver*kd$X1{w4lw1-f2lNY(FEcUvF5p6VWf!k|+#ZIgX8KhMP9#u$`-aa6CU zoziiX)OvSYh)=@uV-6<2107&BW<&>4Ek4@g!Qc>*TKwoo9}{w6op@*$0T)amX59W( z{dt6x+kP9LStYOwXsS6>&X|celwy4C0cZlAUrR6P3Fo0CIK21sUAoo&(~ zZT1Wbe~z(gX#3I6^Le-Asp;Vbd$LG;OAPui@0mlCYLV zZxF(iPiq)`3n~gzp+fHxyX_#fDdhY7Ogs*LK>jxm5_29?{8)~V#LmcO4^IS6+vzfk*a}eL>v+Y26#z zARH~zIys4z;YLImI4<>*+x68-h$F~o9cC2`v{E5jJlKnO4`D< z+oGLQ%{zitO`4|+Mj`n+E3^wS1}NolZPAS<8h-AfKPI6rp&!P5Gz~Vmk4+Qg^NHWn zUk~*kINNa#r+UV8K9J-nEick6LKu6vUcD;D&m*Oh+SwwMLO@Zd?|#=XxR~(aT2-7A zPLW))F}1I4^Yi(7L?d<9vKLUo>{H$d>h`QICYpi_>@F_#(b_>e_j)?XWkfljE>2z% zPH%N>$ZuESoRqaDnzQBDmS@+{@pI3Pi3yqkNc&9In0X`c6f2Y3ySNFa_V$mmPP>I9 z$9?5H=(ccm!9Bw}Xapc^Y46h?CdeUx8o z53iH~ltON=@$5-{JmsMeWn6+3=#w)0eVSjKKh`4RSrDyUP5T^@$oFNe_g8cPsO)7d zPWXW|fmWWHrdROqRX)%J@uC9`M9gK=df@d966yKqw}`%=y0Sj2+a-h_*7L@DkEibu zk_FqRI-ji@JInruLB-m|-Q}P}N(kDEKP%u)X6tT5H6L!QL`|v@?K%Vu+%+~v3G_eOO(~_c0U;YP<@31tQ>rcC zjmVhcOv+RqvlVL-68lvM{#R5M$1ICBJ-7w?wbz zGYofawrswxAxQa?97mN1Tu+p;%)omRdIKSBG!Tk#^M^E!*-A12Ku}bB`y~7}kfJg` zX7WeoQ6dvZ=exiv#tcJgw%-hiqpmVRiL{5y1unK@jP~U%*1O=epvrA zqAr-ia|==K6xQXplfOsO1=ZMd)r4GFM)XeYZES?nsJ4`Cvtsn7L|&*)y;(S%eSrO4$I0rsx`Wf_P^#y$+12Gv!>V=j3Xtx@ftoS z^Sv&xqlCIg9~0Pv5H3@cGCl<0D0`_KiZ_FwX~Zz(i_E!ZlfZ;cK8epmsdH-fzy78Mu^WjzX?;h$fe7T#Fxd{W zvOA~MZzm!pWRvj@Flb}aayK!#9;pjG_aIyz*;;NXpXb8L(*WyX+;eG6CmCIQ&=4l3sV zyJG%(psU;Qy1-bCwd{XtDS!8ox!`i)`ZU{(D>v4-W< zNa?HdnR2f|kQ(0$WmmZtQUebO5NlzHl^H@!pVx!wm9@21%3=dbCKCLJ7$IX7v=KEq zJp6x}spzJj;4$GEoHirKvP@58xh1IPF3PtRk|_<*_9?an>H>>(pLGNb7a7th(ExOXtxq3;jrg}4~!iL`k%`RjHJUUxwOhb;UNUta6Kh7-G6$l z#^_lP*SQ=6=7BWWnVH${X@Ui`q~&T)ZLBULC>P@d0-mqULa52}KjzPLn@c-eT*V~p$01npq zn*%WObvX4se!50Wc|S}1A?n|-o%lxvZj4^QnY3|#(#+Fa_kNjZnEWY^CPr{L<#+MC zb{1YEgozWx-_e#xZ_r8{7u$WiKEja0JByzZ^+cdoD2ev~_Sifo;#(Rw9{wVm@=!Qn zOfhJK$$Ov3y5`EBaLkFHj8`F~8%>_5bF|gL{gLjP{&$x_af_TwxQ?Vvn!w)`2NBxyWMUX1;2v9xwL}gy$Ao z8h>zE{C3izvfH!9cfb^KSkJqgAM4by7UB26FwyWz&hNt=rv`VRAN1ePm_t5-!nbCB z(G>k-v^4xsdAE21YQ)owUWk9b{=K~T_7qBX!_8^QvmP`3mi==$O#h}Ws+&4|fhfJ~ zVZHn&~HtV8Se0ccK!1MRG8J!(qg!Z`oK8+xqDQr`$C@2Tm$_p+&v!B2wu z%jRHC4IB)Rnk}%R*LE1m4{(K7*Vxv7`Dv^52%J{_tkwjhJv>r#zA-R4x2+(zjmA;J zrHR~?nC!o3FH#)|f+dEefQ-s`FP}HkPy29kpCq{nZFlYy_;QGOJg!B#X(oL6nj&UEY 
z<5F9%+=TW2Z*=)vf?@gBb4EtZf4h&YW0Ax=a2PXd_o~~$x{D}Vn~pSdJ%4mB1X{%o zL+x$e51}3+EGt~2e;;Ezr?g7-2qByp+hqm-rJm;dd8g`0|Ao8BWmU3-Q1aDgPEUdG z^@~zr&${2v*7WN+5GK@7hXt9&e}NVzJ;UdYw2ksx{O*iQjK8eyhpk7L3~zFwHD`RX`ISGO`hpX?*JJlP)ZJAk6LLv73P zAP|1;Hn@|1SZ#-laI8ZRrV(WJZS{s(VM&)` zuXe8gW~x~loQKncce_8)yi#013d3EtsTy7EzZq^zflF}etP}0rMW8REq|T@6u;B`r zMD4WE3l`Z|k??cQp0P?@OZ?BNj2S)odVW;KQL~8~5dQpjYrdNS{I2N7xB7T{KO^%H z&U{_hNAKj9qaIm@?&g=Na=vvh;gjVF;J$EbrWCcTdw|gC&s+qqN#P^JRBzr}FeO@# zlg8<~i2S7gl`WP#<^&u?dFPtT{HH)T@x>f<@hrdXYis@I0dBpm zsoIeD3MEOk`$wx|h^HVd5;`~aq2uZ(+njq+?DKjh5_R7d=lyJD-?xp?K^oePwbRHKjU-k}jR6;akr;kC&Jog7{Q~X^QrBB}G+1bhg{x@QHJicTH6YB{UdrTpk}wUi9LS?) zH;{DRPii)GvyVN#e;)N$ z2y4P|A5lA>(R&^UhK0tH=HdJiipHr+O`<-AsxppQNI#%yh^!4aA1}cjL(G8Mjm@V> zIf;~>XBxlf`FrWlsDeJKGI*s%($A5wMjvW@^97g%Kf!ZQ(;b1mMD8ZUo_`n|3cW&A zgTrF8U{vP-QhKzNZM_J55J4$+?Jl$0Ljm^jhs z=~5#3W}o>EjR2>;2xHr#zJel$!*vt+s<5Vi_=|Qw@fxITyJ{-iPI;~)VA?;_t~Ufz zuLd4$?taceYq5%!)t=4KzcYQD1Lkv ze7f$!)>!&Hk?SQa(*Pia@E~E$g^B?Kh!f@3b9vx|dev%S1sHj)S2$Nf;dO0{=NiH) zwA66I5}i!~_26 z8im6>Nzc}Fa;%SWs$Jq9?-LjPPEVMjQrQLez@my4PjFPE>f^#M3JXO2(MW(50kT;UqtA zYp1w;4Mp-(wS>DatTkua%Of}P!>Q`*H-(d~30V2$xRoEbpLRZQTR3U9v$wls9HryO zUoYJ!-*8%Tyfy*ufwEcp<0b3B`vBhTsC^Nl-G51$>#>jEq}`mJ*&ceF_Ss2hdY_EVVn0RUi5LiRE2{tgRO^hcls5q zynLV6gz+_md$xaxxp2zzN$q*RO+>9)6aRM*mbfxnu3kxcKJ?flhY^_(I$!DbYA65!uABh%u6)+GocSP&{$7-Z>k`pJ7@pSra4QFc*(}LE&r3%*~ zq>XH0_-Y-T(w}^ew^eIb}$V+bM8pW_Ps%2={8gjJ%n(I`2AA^&9+m1AtBVf=_?MzJeKf1zD zLUjNk963I7mVRr;_6AW z7imXP1GA~>vgm!rlb1d4M+iZ)K(q2z6r{F?l@DX zTf%C}YFbm@22v6i4w^;IokZQg&tBVI;qbNUMcVV*z8xckT{tW_UL1M=N|lR!)qez} zk(%SkIDdGY7(9-P{sc(MP6m`?3GPxWQFscbf;N)v3)W`xGqhB&I`wl%ej{xs{{@gP z#N&A-{}Lg|^Cc#|SAB%htY2Q|^Ez!I?Evn4cW@(77Y;+FPUd08cPJTa828tdJEuqQ zk(Ica$dTk_ztz%scwc3f3#atn;IT(nrUfg|Qbhf>l=Vu4bdh>?@kE_Lt?Eg+8KdB< zds6c`Ry}nMQuWk7e)uOuKfnt256C^B9=|=lgU}179;LPFjB*1?_&1D08=-LMQ*uP+ z`}q4D6;#*QoEWw0Yy%8;&+EA13^4&>$|q4egGkrFb$xY55l=IIAiW_ zT~-?oHTyc`2Rvh`hc(jxHtnuKYF0P}(aG*GT(iuWM@vN~XO7GsFFmil7EseeC#Fc$ zBABde;YcIE;qHFZyYo;Q*EJtz6FR_!e2;g3QSMnUf(pT=iaHPui)!5Jjub8xYzT;*|v3i15mKBSd`v`q+v9;7=+vKpP79H$kMhGE;=P+IsNtXa6Atk)VPdOrjaX0oOJAfK0~ z0Lmf1)@Hv;t90MvKKjDRZ_A_N6NK>AL$MlZ2~HsmD7r=I@26;Kj^Wx-eg-PwltGei z0CbtpZEU^}OtXCWtJFe|%4G6Y(CNd?6!|rj1b5agls90?@LAh*e+#EMoOZP7-8&Rn z?i|`RW)9FtKR#94ILo%R=6Lv*D!g1+8SJwaD_20Nj9HSM#hOK}L`!C8o7v?mILhMF zDTZ~_0D{(?udTc_!fBo(T!%E%fHM3nUA=&!L)Fq{QBX^KRaR{0`VXCO z4cWW6u1gLNQ6lyaC+4vNS%5QQIFx2!C=?+@SprZanM@V z&X-UWV`vBQ7F578@u@!gtdD!vZg5S)pQEGz!%TIX9(sX#25ydy;y zj0~IZL+L8brCrVJrDS01wCE`HZkCda~RS5c6o9wR^Rw?g=?IZd?I!Y`Y)+XYQ z%GJrb{x`Painbn36FX8O^I-VDyAFi(ij62?fK%{#x=GL5gr)^*%D7ojAw0=p!Y`Z> z9y!5X4{=9GL0kZ2sIje&@JSA6Mnb4PLxUwaKt8rz+l|{C=dYp{5i5stjRfg@176W{KU+KIYNo_V7=R)QfEk z5DrhZz3xQ-1NI*v-GZs~gq&(UkC0+1i7s}bk1)0Pb@^Yuh>)_K;vGD8N|$Ji8X3)evnv8e;R-Nj}mL& z&~F>ag;he8u=NR$4p(lBO6M=-=ep)Wz=7!ZdbX`@HkYD(hMHc|JcU0872)`ds=N^H z(wjQ_O{tidD6+FAm-wsx%g;*`UxR6i?)J6B-=Kwe^DJew!`mcagwk=|0XoH=Y<<4( zzxOM}TC1OB+tWsi6@7VtqdKy-0w|vHkW$Zmt^_5ewQE#Zl^@%rnH9}yP@PPc$a8+8 zQ-59$tgZ#|r1PLS7#-H3s%AO7CDj3^=4+w70Z4@$PuGo%jR@%slwORvHz9>>&a&F* zzd1=f{6#q|+tMddccJlQYoA0-McQ`2Hl#Gl>Eono1Wbx;{PfLeAHf+5*#;Gu&pKcn zNRj@DV~utUJDJ}f{z=Uh4git-Pc`X5C|T{SP0mAL*J5mv)OZ>psg>sn4*v*wJ~5SS z6Nz~+vO~Z6Vxh-aqGK$=WN9bvpX+}jOJwjoEOWJLUtR#yg@&C8TF3kPVxnlUp)wP` z)Tc1fI^pI1le)B8;{eG<3%g8^+s4>c6jj!-N-C~FxMg$4MD2P&3~FuwdWw6vhH&9< zV!JnI^{QJ4>S5!S5r0i-P4 zCrIHDAp6rbE<8?n$DX|eTR42N>))MiFQH`~(x!(w3V4bro2?&`>wJH<_ST*Yrz)P9 zu2*4RAf(YpszG1&zki*x=vN3)6_#4m<>~hF^=s5*S?9?1*h_!BK~(0cy8ZoDSh9wn z|6IFV@AA{oo=L-Y!XtGSv}|~gPT9zv3^$h}V#M~gF}|XY;dhbRvzaRq@xRNPl*4J} zeYMikIac=+@71+Uoi!-h$zoQ|Ez=U4+EcZ_ 
z-rxs(J)-a%lZI9w*=6+Z)3D9e{26WTe;>w?El^r+l+VG;4Y&5Ry110>n{8;txxckk z{VM_wYA~=Jt&(?yNa57+b9~-o{8&%2Pr*(YX*{VY+vnA{Cc!X>Y_#VJ4xp&VuYEx7 z1tYa}Ihx?Z_&@h;b=PSiEj^ewVZv-;l(DWEJCGiOTrMufIRA1Tb!3y+c`(Ju8ga%P!WMZer9_5H=>C<9g% z5#)RoNXb4gOJ&lJkdhy-gV*a|mq~3>x2eI6{+q*d?d_eLeWX3T0Nz3yNQ22~wy16+ zmIT+}bw@aLZib+ka<~8FYdHEl6H*W3WS?bFKpaPMGs@`zqDGJ6l| zuD|7ts8LN?3^@L{sMN=T86YMLO)N6XU zt9F*wf?fJ;4Y3YZj`2Na0||co89B(#?jTASIZ=nrv;;y@-TEta?`^tIr_9k=i2F9?Uf zx{#^8>@Fgu*yTm3Hb5_-q?9|{2cRJO7)~DA#P|wI+W(8%db$c~;M3+>*97rNIm%Gy zI;0qLN3rLIF!tiLE^-q{7HoO_cneOsbOTE$jdU9+4HqW>MZ1G0-{!9hybDV5;UCw0 z`W}$AQ@Ks36~3j4r0=7re&%0o((?c%Dc7hI%auoH%I!i#4Y!XW$C&KdNC8RBd zmJ-ngoH~Mgnjcknv1TpLz~p7)aHev9j*>3Gtgf2x1wt|(mtO9=zC;T*2(Cf$Rg$q! zFu0HLy8r5wEw%W6dDGMECDm4nZtxbZx@o+HzUyP~6lz$gUETL+UE{XXlVv+vRS%b) z!E)hr>r+~~bC$LOF*O(~t3v7lL2GZ{O)&@#^pwt7=I0`;MpcM~(+_LWxdzfmoIm<$ zEs)$fev{)mI1DSxhHB#VeT>Ny)wedl=^1Rja;;_~Qdn4b_i}-96PjF%meSbVf5ZBY zeT9s+Af*;3=SVCqfsl^+m5#a%s*ZZh*CPr4=tm!!I0&bLluuo{YNl&rs51NRyK;^< z{>xDesxk1LM8%|C|Abx;l+)be!<&TZV2}o?jm<+)s$!^IoNX66rqLo?x+Zl1N@>5j z?|gpIx9n=t1YmdmFin^jJ5KMwVKo2XphGVxY!~3;9A|C}lybg!E zw!m!XlzkUP(Nk@r8bf`LdJ|PY=+Co|ZXtF3z*w=^1oAeTrdgOPv#~n>)_qGa-4#qd zhHYuk7BD6cs&GG_4NVS@T2ufLgR`?{e({O@2(i;pb>vw(_i@r_7Y7vQ2`IONRu4-* zL~cUv*QY(HnYdRMe3tL0tQu-=_B^l@da1-0U>d4kCqQ_a-|3pkx@7Pwzfbf>xUUgZ zeY)LZdjqJi>vXjyp&0V<(x2z__FbT~97t0NV1(g-eWYbOTSELbDb;cyU7`&4i*+kd zbeLV$1y>4p-Q(ZXR`n_vTR*SYiZsCL{%0ynnb+jgv$c?4E3CNeous)AP=Hc87`r|{ zedw`o3IIp}>Pd=S!zKwR;f(hfOABv8NGY<(yt$7spTpZNeHu(k|DqfxY(*J#JsP6x zpxgR{#(6cP3WAM`c9(V7Q8=Q>y`xD2n1Vd~MO)gBBc!ZtEv+h(C~B!^n^YVC)N;H| z*lB{G#S_ix?I8$PPkim8Y2ks^aYcN_hLQNQsB8YK|GLH++Wu5^-LBa4eo+qgKeame zGnyd;QMb>uS@I%+zWKFYcTO;6tjFGKkKlYCn`6fs2QDC_JCu*yi;$8rO*1tR4yQ-k zA=Bj^^FpG5ufP$>@Bf*#*x%M6?^U#Lskv=5M?g@RpU|(@`|r61pzUw;c;9gAeW3_+91u3o%(g`j6StySFDfEF`_tZw=SQty*) z>$UOfZGQFb_=J50NYXowPd7%8egrA)YcCHi`()6?Co7lbV9HM!xBa#iD7ZBLO^Ls7 zx`R8xVy}YJMrY#bc6B1y(#%2Y8n_CbYrH_DX?tm!b^XU&RMhdu`b3^@-aR$|>60H& zup1$T`1r$(q+d7)@L$x$B*T9n zdHht#eInal0cHVZ>IR2-DES^~PeL!i$=3;L6?!q>e)qfe*wVRt+j%iR-^X2?I#)x> z1q5|s9n$8*7eUNyZ*pJCPbQ0=RyOtTKW)3x1U$bl{TWrm^581ib&u|#?ixx8>UMw> zuJ1O;oZ&oM#4-3iMBAVnb+!McYw6-5$4U!HuWx2vURa%<}^)x z52c!^d~5$Rk`1O5pP4=oZ5T6mPknP(nI84O9&R^KAH&I^JgYY4`~*oU59}Tz*23L9 zqudj>1?W?>^xgFvOah;QU2`lnH@DCG-x(qq%t6W7dmzceDVxcw-(CgVn9Tg=b^qy} z8iU^S(f2jmwzqH!Tf6to#m76Ou){3X(IJsWNs$d?W-!&EmW>W1Xk>_BLtTezFWs(y zlZLnU+Tq~J#2@oM4-T#hVvk(LH>)A_Ib1$E*9eCdUM)7RCn5o;+xY8xG~*SO^-wa+ zLu4Cz?6C;*kc~ZN7D1nE%GVs$v}rx0SLpvrumuRmr@v{Mc58keZWia;x(~}wI)R2# zw`SIj7o#Yq&fUl`w0+muuF2mf-N@1Ol}s$v$T(^$c&4_8Cc#uQ!Yd7+n?)mZaePRyBNj;cy54T&Y7kj+m89HNopGYQJxvzb>kL8vJk-Y*d z{@kPgo%9P2wn40XY|iN3*ATlhb6nDz<~ovUMu*l&djo+uZl|wtsM_wnD^u;OJV3|mHo*QB zr2wYocA5lLQ6Bfn)k1_x$6P%V_XIWda~acy4W29^DP`6sO8FFS!^w&263-wUGf*4q zg$Et%c=skd?SP5_3`sQbOCVifc!$G_S712s#Z<}R>mKiI^Ts!c?jA>D_1pYRX4Ty+ z`yH&b3`5y{h9B?q1p#fA{L6NQ4Sz#({wsO`3>&=6*fyM2pk$djxM9*J6)TZdtFdyG zaMu#^u1U}j2w{>}8@%a;v<4{^nesRQJ<)$NHgmQdoURMOIoh!lbbbHDvDWoAz}UpY zpPAyu04fxVHv!4c{jNM1usNjfud>~~%ydiDwjhPG?O=XuQh0c)cG$N8sbp;#G^5E9 z))8b=)x#$5qk_rfQ+q+px^xUFys~NERMdDMNv&gIKluZRzqEFsc>pP_uuM?%gK!Gg z-jqB9R)_goHK&Ec*~yc#r84@sZVlQJ0F%@t<188jPAU1`%|6j0Qdi2eGtHyPIg}*Y z?q68}AY`~GXTW)@?gE-lteZT%ZV}Lba`eP}-GI1+kc@0o)ds%F0#ek0t)uyA-hmbH z3Zh1%yp--LprU5Aj%KKe{I;*XHgX+EF`4`x*GFEeqoMO zHFyb0IIsS_0#Y`+<&LZT{u(KbcHFvwT|3$vv@mg^&V&{3ZBjYggyvoUiHSn(Rf+H( zty7v7PRGi<3+l2@2l;*CIZbzoXa*!qw6nrhlWhPn{bT5JD}kW05u)Z+C8{d^>}S00 zt?r{fwDevBg@OCNDTTKd?s^hWzAQ(n0;E)}X5HWaL3Nh(eKdRaS=VgnNk@!mP5*7| zzbp$Vw`SC3Qy+~bPn^7NMyifHR)mh>2P20vx{XCK`vwv4@VlMzv{)yv&MZuKnTdI~F4Yr@_Y2`oJ 
zR9j)rqa{hVz;hsb0ZD0RTyi%tzX+v33pN)C$Rz|hnuROfWk8N!N&SkTf==!;tGX&2 z={sDWkFEu_!&>Gg*TIzSB(vc>fN=vY0j1ep!7@VPCR4F1dC zY$W=D6E$d)zPpJyZ3=WxP-RTRE6Mx)XCHoOqv-*t$lhKc_X*FO(a|3#n$3(qs*~3z z{Z|v;kl7_rl5@vT)EM>@(j96V-V9Nn!Kp~+QJKIa;TUlh{{upyG`S-17C~a))=c_c|9QECE%T=LDB<(h%&ZBEB$j

[GIT binary patch payload (base85-encoded binary file data) omitted — not human-readable.]
zb7j8@qS_|P5Az)1l-;j&FM!|Ag{eDjploL6p`>n_G{2sYh*+*@@7G_@(Q$ve5FL@* z#xC=JGvQnWmEo3Z{ug&&Y;u&58iAJlQ1dUm6p?CeZxhxOw7QGmXW7mu(M2($8T9en z50d*DJXk-W~qPp0{7UEY1;ju@&vH3AZ;Y-GdA*|1?) z^;;T|JV#Ke883;{U@{8G*fO@naxjhzA0C;rpGu{GB+otYNV1( zO=^|b2uBumNykFj(yvWy6T(e(!RB>cjQOT#$Yc_f`q|-1BI-qGH|uybjr6ynOH^Y6 zbPC}V!MIj^c4r4{nC0(FsYvWAOQD<YBDo3r7$&J9}o>&d$W^~wg!Cz?vw*XYIT2kO! z6WcvF>K9A5q3I~=I91**7=e0h&mvWZcYv_d&PbUB?+i)4uA`@2Ak}zt;AU&+ejKPh z-g}UdN8iG18$<7f$f_<^yRZAv0jSB^&A1x;K8RYbvs$P=gpL$q zcWOh+!@%B1u&Zf_k90s?B8j0#yU$AJbjYZh=^-GVR@)AIOfYTQ4Z-DSL`Q$TOJwcb z)*6JK2$1cOH~)fi0Z1_|_1bROQ(ZdTZp?KxBOdP3sXJxW$fnF`5OE{G3qMY>pt3dS;Xt(P46+Z0D-(B}fW8 zRyMYY?WWq7Vby^b+kGIfbZNMU(HE~K4J-9!)7ooY8U`AB=G7Qqhv`2r|Al&cLs)v; zeQObS6cwIK_;Py-oz^y5w{n)+{Wd5}DdInnrzZDKhq;HLx6pkTmg0=H%lqgT5Lq$D z)I$D!RNBIX30<%Lpi9KtwLMGjhcKK@m#^_h!h~1%2~#OT%=xodf#~f5JkWGOy8zje z{M0Yx-&)E)Fg74@u+br1wedMB9I#W@;f5~&DXsadjv!JgKv|yajB~p7YbF2RE0Fk4*wOdkl(C=aCG`6v+TkYW4 z4~ZBr#2O+c{gZ!M9)VW;noIFdkzst#%z?ZHe+;O)DmQ8tiTutT#bzx7oGLpOM6$yr z`QsAaSu@-5!dTFEmLH2`k~smGx^!K3Teh8;XgdGIUYGD?F?|v+Wt?%VcBvPDW$|;W zI0Z@k|7x$xSr{i!BseqMJ6duY8pmv+s`T{yDE;earlfQR8VBBDud1q1_sm3}*;8G2 z1wzg<2Pev}Q2dpENRmgfdmNkb>?~lf=`xMi%7Ge@WbBBO|2YW!%JpC!v_2P^I3=gbMwYwnnU-ax6;cTa5W$;ZHo<|n(VJh^h+*TNp1UG;rpTyY_s?PPloej~Q&2AmLYE zfAc@mBarEEQ*NYhZkF>PtguWG*+RbDY+<{YFA3yV)ok}6(=0e1)u3}A-_3C-raK@~ zH;gdEaU(h{))4mhM}J#Jrki0A(e7zCjEZ$j7t8iNOIe)`xD}Rs>P->AZGcFp4w{#1 zDEZwEOnyfnEnmDl(4?gy)Ua`9_v_?LrFj=RrQTTAVpRfngTjo#g+Dx%0uo_5YfvlD zx~Wpkd*LNPSNm7A`??R^`N+0MxkvZpll17!{BgdheZQ^x}!&@%MSYtTC4iwb9py(KdrNa#Xy1XwSF@~xpY1c ziO3xZ@=}*R^#(%1wFBBF&7){lyiWp|i+=`7w<(9i>guZCbD+{M z{cSC?zCfgkNXS%<=P$d2mVV<(aZQ85kZ-PTSN?pB(75+3wl}iA$xnG~?%Vvdbf|4@ zeTT}Rzorh#e2+}|_zFzbc>6=56YlRU*nVtV>uUgA}op5dY2B> zUJgA8s$=vMs?Gswle=qWbgpo3)Li<_fu`M@2Qma~r0AS4nC8vlpI&wWnn;XmF8L`%^Fn~6 z9EhVr98HG-}OEYB-Ofb1b%-o>IZlAiJiuk7j(jcV=cCWm3WqikvQ7uovfuzRd2ia*Joygh^J?<{t>*U|JKBZrKDZg3 zj^g-I<{dSa0w{&*eI|7)FIlot%z7wE;YbNU0 z*OTWm5Np z)ajpC4Lu+jDHyD4?e!oaU7d|*_ujUf`cN2*x>y>P*JPb?yw|JO+1R`7a%9QXQ zMWsnv3fjT)$sv&HTCgiogZyJiCB>Mr@^OSVKQOw5HX)oQxUF3#^JGVlx%7lC(a}f$ zy1Zczqm$Lvyn~zoV|u7G24-CDt#Rp&h7Y?dGHND_}Fbq=M$#G9?duHNaSBqD3HrzeoZEg z*SC|9Rb6{QiBTdS;kD`K?{?(UZ}j8$LVBydvGG1KRe$t8#kJ=B0FoNp-EO)4u#4ex zj7%u{0x;WMo>s>vBD6K5$=x<;*ilBP=UD^#VlZL(O^X>-?_hbp7iW>(TFTKw8hsf38Zt z6;9FCwU$A#SIs6v(bc%+_u`6il#Af_kksQ-z=;bq*QRI1NO zkfdFg2Gvk6?UR8i8^b(p`xLaib~~t2TjHl8W6!~lo`P*g7Cq=RKh%`GmiCZOlz_&| z^ngr?_K%DvXQETUzCM-*6uCR|LOLr$xN=I)XLaAHO6;yer=<^MulAe{Si8{PH$NB9 zQ(NkW6J5JXbRJX*4B&rNddvC9$bs_RihRM3NS#>IkS>JuMh^|C-TiVAs5i_W=HPH^ z^=M`YD4ukh@+o+L_5e`W?kMY}q3wfY{#lJq$}{CESo~cEN{6Ko)=2>j^uxmB{@R3t z3?!j7?a^h{E&(Q4)2ezKLZrzI4_--y2-$D9sG7_O za#<#B^$>p)(yOEc6bDpT+*a(g-n#{+ZLO*Y{;m>E)zpi>w=UAgdA_E2Z$*Sp?VIsQ>Zi{7YO-q8#?)odemInI{+zwwQw5?c0#b% zSKD^GgcW{E?`D`;q-^Uw3Hm~hXuHk}h@{)ce@;e=9f0l9!}bAEfAn`(&dU1$D9qU~ zmfOMZo4$k1CHF=ER?a^A5(?g^>F-bRvi z-;3&X^-@jP6?+-o=LhB4TU*BWcb|+8(Hu~bAD5|eu6+=Y95zz_SRT3v{aIT*43O69 zOPwx!L^!RCgAto|sh*E^2`pOo&=6MIdiHdg zyaEi9Ho1FSX0O6h83vjfyI%uD8uQ#joa$kQ0AYM?zq)y&``OOF#fc_xG(WorlhPoJ zSv!jDvZc3?Vb;x2_WNTkcn6y1&6u;dp0j>8Nwh-&?+I56b@_^4-tXetq};n(3;v)( z_SD28(+?pj*G7|8wqQSkMQQ_m>!=LKRH#>{6zLPR-hHFn#b^wO$e{Mi)aGZ2?dZfz zJ1qBkV!KR4^Zf!5?(5)b9qaiLlDvkip1;c1lhcd)n-k;L=+p%xrjfL{I)9UI43ce} z{1(~k6ML;NZCn7zd~AFZWkHxk7=~(}_6L-va?Kym6JA^{_*(CxPMXcwAz(hVtmO+K z9L6T+C>lQtIqnd6wYg)Fsp+~$$KsEYkdFuYOc=)tr<_i_Z>x520wj9UMDId^HgcT^ zmP_;Rv|2hTq`op-6ZFZ*8J_}5Vb{zxSK?Cv$!cn+Z9=W^G)QmIVOpatbhM|L zq0i20ao5~>d6V zfOA3B7R$qjj*!@Mlo}dBh&Las72*YmlxT9V_p<2*0Q~C(Q5-KqFV}>hFGi+GSU_yf~wZZ#4gWO}7OaTzMP?T^*BACL}E*Hq@&<$*2|kATvI*8);J 
z>gv{}&aMZAE4FC508%cxKxr34s8os{r^;A73`wP|-&>V5($UU^V0sjt`toAgwWlDFL+!OTHDDX4)KY5lj{I6zhi>n=;OqqTl)59f z|Nov*z%pYOG%_zvJP4rY&%&}rTA6cxs14F)h@bD$`2DNE7NFYg(&M!&yokU=oov`A znCkwWLxcxV+QrU#yW^m6B;qoX+BUio&||KS!QUb~%-sx)oM?;tsvF({QR-L|BkNm1 zsm}2>a@>ZNcD?)^{qptzWLq~fY7%$ki=O)r^`T3_Gd*hjE_9l8djpybeRMYv`+ap_ z?;hdEe*biB;3@XK`T6WW{v#F#;c&z^)mU~vI`V8*@A6)#jTc`%;1|ML>7xe|92%!M z2&Mrp)-KS)A(@$*Rp$|;)a%eA|4&*2n)G>TTW=gf5IF-wIVV1bNsN{9 znfz|aZoGB6f(ijqz~z0R=a9*#nbJ$z&x6t|JyL@d)lZGsFZCRHLccuuP1^1 zGcW%qY6L=Cb6q0+rG7mM$bhhX@t8|}aQYTps^g=5!((fAMWGjZ%X+BAkVpZHi0jNv9`8v z4Q9T|4-0jz!Pmk{J;>72Y@OerF;_KH^Z2*Di_O`{e6BkFd9jIK&0Br zlX!nSv-^|(eYvDVW$gVaGUG`-f;QdraX$u%wZ6@zj{FRfN~q&c^oabZu^FL{LnI3u z(X)H#!Ze8v+sI2JjPNI1Lcl+b!@;TzeSybYQCVqg%AVGmsc?`40U-IE{5-+T9>hSOHPs zrQg)*Z)Kv+?V&N8C7kkA_SWXgw<^EiwBl?^MEAQ{w(dqzzH?y_*xZh?fS!jACzsbE z?EH{d*v75CbpbLpz+2F!N(So-p<$65Kzn`WBFOUKIO|TK@>)$RyVx(uuk4N#3$(%V z4_DDE1c_?7NzyC=XfKnF7-`#=CGy!nXI~IZI&M@{jsbKv(B><;w)@a~Rn;lh2QVuw z9JG|6P9(a{NW?=3T}?hK{){GpiUgR_C=&URL`xo{sAS6+W*8qwXOycZ-p)yH0Yyf- z#J=U;e^ru^VU3Y z0ZbVlr}BL>*)>tba9;boc!2O9$-_0ZcpX;JQg>RL0i z7P@x|C!0Nc+a(Wo0m4ZwpX!B=yAyqIXo!N4=q?EPcZRHgq#kt9*YBNbkHXyt!N82; z0yKogufsoOx26~bQ0{Hhw%e%(z38)L-h2p=b~n9`R31heC#uQLxTQV4=Mi8kj-Nfb zJc^E7>UrJ>;Sj7>h~Kh=)3_dkDDznDXFV>QT*hm`_5@Ob-CpaHXd)R@UAgk{sQ}dn zdG^WjFd)ghm9d@tdm5t9zy5XYUOj_I`lB^`KAYcO{p*+ioM9ov)J&b!dLEJLslloq z7ph8q0h-j=YugJ>M<6}dzQYvg-+P-doDnYiJ$sU;Fa)E;7^h+%D34qeHy!x4%|DM$3O{g)7hL1qJ*Ffgf60yJqlXq|#h9PB9RyP9ELx)5P1e$O#GGpCL0V)t-%^y(;5# zV1%_nd8!|Nku-GgdLit~q@isL)pY(9GUm1)FP>_B?rT4!JmYiiHNkHHsr&NrRp{oN z_bs#=*=iBk+e-cpe)@6$gKX;HrsdP$w+)_JP+3HM@7FM?lkm`>NkGym#O-Yb{p6)4 zFOOtUBZM<>RLg7U2#*1EpZs0b6IB96l6Ct}-8yh=$hM&@ljD#*rv3^eNAe`no42V? z!^ZJ`O^Me!38__`08kH``)Cw`+GpRuz-V&`KBQ0AfAG2kjI73)LICr-Nz?sO*cEhHE6^ZCo6S{=3hO~E~77u43 zdM)C|gB+`Xim}0m=Ln|Zb5*2AI0+|ZhN;%Dg9+xDFYZ%(3b}2Fp*ZrBj3&vIhRfJ2>n14$! zmjRTKpANb8Cxx-LI-)})GQoT2rpD*B0HPWl{8yVdP$E!a(mg!0HRoOqNx}N3cWrOY zWGJ7Q?8?7r7!@IHtSgm9y7;t*+=@R6!p}`Ntvr`D5rW5@%(oy^VY^uSD&e%Jb<=a@ zg>!X&w6vf(2;*|BZaJbjpwf#5esAueMZkMa{4Xg9Z33IjmuuXsW*nIc#do=OVYi`r zO*rSdcK~oY*4zemCN5F!Ha_h_MKbLcb6?Lw(!=YlR?YlPlYruZJ1pJ1=Q6wildM%Q zc`YIorG|b(^FCCPUD%uH;Q%B8=l7T9d=L~7UtQna*u|LhIB2t7-3*dwPa(bqmFZ#a zt{S9o?Pz{VddqF-WIDipU+x>XLsFJfdG|FCok2QbXG357w)6jHYk z|08|F50S*k4n61rq+-wPY*{~uk_o3$9rB?N+ou=TP&5!Ju#GQg18rkKB*dSnEyy25 zhNXInQy)6C{JS&JME)2u@^Jvnt<^K_KE21GN!FryZ*_(zAXT|_Fqa*o`+ZwYCz5@t z_j|oIP0v6j*ES!qKXf0As)90QJk$GNs$FINZ1;iA)qE)dX)IhI_qjZX(gBKye*NoJ zv<+l~7ADDhe3d?dNFKIyK{DC1C7 z@*H0QrFpH3$JbEz#{KI_r|g#$3q(r0%drlO90g!)(yXyvURs*E z`v@rGfx)pgqjlNO$K7}3{LHa~wvi|`mU^p|6#`9a6W14WpLOxKISAA0@^eu5o3ERO zs2UyM9{!q;zU+WC+MQPX6+n`2{&ne8Un3%mqxY0K;TyEdE6d%tf+=a8#-|g|pT2{o z<_DR0E5Gj{ng)Ybn?Qd+YOG6#+Tp>UEG=_4FwGuJhX6!U_1H@*!7-pD$?35nKZArP zXLB%oEIJ*9o?weFs@!op5+do3 zw1wHp`Fg{i;k`AKoPtyYRj_#rr2&<+>`U%z4iAuCzqL24Nwq2KbZ9EvL)^12p(`uh z89><%&CI&H|I98%UDMxtk8;KDfqXM!%3BF6AwJsPayToAn5Oi^Rftpyql=45YBh5X zOnEjo$5pwWi;jZd6t!|rD~p-!O9HgUu%R~kF9k#@cDR>s(pe3R0A0M~O2VA%zYHq-SN~JJ z%-xSrp)PiA3dcZxvmA67W-TJDH9eI}{CY_E*yF4+UM~k|8w}E{6kT+8dt6_OjbTVy z%f?N^w2MSuvHC1_3Sp8jYYf4TqtZ~WXzo5+&|$oGIObgkT!Z6P(B$qltQsP&1}KTE z-DGx6es%i0G%16?wSb6Z(s0YVT%TN*PYkz+Nkp=#2yR1c7T+mom|?P8PtibkVtBev zQte10JgDeII}yo-#zf9dH`)d44PNY*xtV>o!?<`j7g^fCJS=GwxpN2HDZ5yc_0(9i zBrL*|Q_oiQ`w(G|+DjiefKGGa|B`-vu!~~dteiJ?^h`5*+>DMOb%SE#mLGwc=F)p> z2ke_@&Ga@v3S>vD+$yx|+hN$cvYjWn1ED4Km%-%D?rTQQa#pzu5FUG{? 
zCZ~ITj8lH&<#>5-mrkken%jMVaN*39lUB7wc|T0X){|66URBoz9b&Q2i~%3!qlw-houiAguy=B@D&}KI31z;y z9()qYk?1n%eu`4s{TwRlM(EFwscq(?e_KWb))-pD=kOGVQ|{7#wXURiba73(byRaU zJ7QnLQ?R|>il}o%UqRI8LOpKzwQw5Q9`{x;#?@%}4Nx6U&b7;@61%-H^qp|pPrJ41 zdt@Xu&;LQnKXgFv`S_o_+{$Xst0Uk)MTHf|Z9HVmn*x&B+vpCC_TI(MK=PpI{_>nZ zpnM=xRNaS7qZH`lVDeo$REzQ&x2`ba;ReqW%M`+ zS?wSXJDiN_wI{a9=FxQuDD!!qOI1}h`=0t^%yPLX!JP&Qvm5PT$4%jM7-3o|pGP4< zq;+3$Lp7i?J9^AM2D1iwId@KyI-D6e_ zY!oHwxG1pNFANI8rf^LHW+ff<~1GGyUSZj z*TV4ZhTt0eu0!>t{xl`qyWY*+0!n5F+qwBE1Wx3$xD{jDx-aynEOVr~11i;eB}&!r z>^@n)fkM3t897uJtO04ZOJvWq`K0GMGY^&R_L;hQYN1PGP1*bxJ91muhxQ>;b(>j0 zRK)>6@>pAnIF=4Vs&WqPDDTc2^Zm^1&St5&Ip2?RDzG5L(JcTqv$2jT-I^~awzpC4 zwtVSYn&wn+JJNFLmp^CN$S))_UsZM|GA(f({fDm6(KJ*=xEtMbvaCH%kF+t*)eLw~ z7m-S%Qtm~kve)oAY}bI^2U0vUe=fbFZ9oWXOPggMKtvo2dNt2I2uN$lRe5z0)I+cg zyfqZg@qHkrhdZ!zPQHURX+HuiVbnuUj|wMglYf~+4gtcQvv0P&2=6gaN?v`J#(_?g z8QZI}p6Gt=qk>X7(30e?;&Fh|`kn&FZ()bL4+|^TgtOjs3`A;mO|j?OXCTRHeS5X) zS#)nUX^$DT&h#8GJde&a&)??(G8mh-D0@LTX|hAMYj2Mv`lJO~-9bXb07QBenclMd zC79-%vNCfYC_aKoC`(C@n?v zD6$&u-@3|&T7e3yZ~jG=z;8oRe2<0V^c{3K?HxR^HhLEpk+kSCtdr&?~^ z#nSQcWZ|yJUF}Tk2_00Y_&Mp&B|s_HJmUr6BtYih+D%$0g{5Ynlfh&|R@;8}S8N>J zXKr39o#a%2+;UTX_LWWpksl>rflu$gZ~gKNWD3I4QI~$n;!IdtBd-(miY^XAdFdj! zTM6rRP(yw1j_0$0GMbsLOQTnHUon2vNnFgG6KwtB;`|u@i+1PRxiAHCp6~uTk8vI{ z@-7#0+oJ6N_4!aKyQ5Re7a-Dq*zph7QThuZ+Q7mbCti94DrK@fFvr(Kd@(F#np*k| zFxDV`J)U&{jgrOQ6~TcfH24Xr4@QD zBqiEL5nSHXTMtztK4?aS%Mr=Auk@3lE)7ew<`O*&Q9!O!)z3yy%fm&P$Vbs(Vr;st zNXEMeP9PYkwsiDZvy5GZHiiwD(Y#D1 zQL^nB6|E+w0FrHYNNp4D@#uT11UkK7C&$iR<(s|}lsXt&^m>5pbQQA;7$L8(>tEV! z=Cd$E*B|@|gN9&FyuBAZ7doVlWYuJ{2#Gi_T6cl$1B4gzjRtZ6of;W&YH9gl--A$< zFjKZFvbYghBDc<0tv92R=IGK3H5K0iNO`omxY67SN;XFyFB|J^y%bzKDuvrolJ{Cr zGe_KkRQR@c;jxCSdG7>j6idG;6YE`wM?S1corDRslnR$ zdJa&-7pbvdX<;8EfiN{&P z{Cp|pC!m-6%C^1Rr>o(gm`|JuX4*e}G_OzJulE|9T zK}MzX5fRl!n-5Mg%H@JygiXUDT!=`MS>tslsb16p{pGh#%K+emKXmylUy`3k8Jlt| z%%%BxXt2Eouo^|EPC4a|f&obbJNh?uWW7HLRF~Mhd)~-90O+;ug}E9`t88n5%YD6u zLp8h}lt%H3bACa+2wCDOX$>KJV`VMXw31;6!OqPS{)n(LFqM@4KZ@!JrA8vxi!frq z!bMqcdYkxLfU+E4pEGV(Aybrpy|;X$uI}j3xjh7X4LVHkDG$|h5xO=h)H8RrTD=ab zg7fCtWcT?XQ!Gsbjp1$O3bHNVnR=RM#*X~7u4e6>2`~MvxowxQa&dB~Rs1X}b-B*n zzl1X10X;vG1wgt5H}d)AGcb!?wBD=2`v58ECd)6n1sdB{b~bVjA_!$|IRInlMr2R$ zdyPa2c{8L_@-0$(r&?}FBKzCX#9I-m0vfR@C>8iNn8J7_vs%FIsPM3TuKeilK+Cpi z$N#{BflRiw+g$a1R}c%Vn%AmM?}lJ|UAqDE9z-gK4y#{c{a#R{v8LJq&&NvVK4@5D z0lnqTzo=pUeweCZ@}gS^N6LoSvdujR2rJ9BxQ8I&eZqDJFo%0*TS)_MIah>=iJKGICQXgwv$iUoG(# zOFuoFG`O9#U9G2+1_l`SJPcNneH0{WR!OT!1o5X9 z_SQ7?0w5A35C=(WEX< zB6&5GSFVfY*An*zYC$se+1zH7`UDYqDvT>Bs%w7=ON!$gHV)D(x;TRz))%MCM*KM}?P+Mv z_9Nv2AfH_M-R?*D5+a`g^Rj>?fi>{Tns%4VsAhVE3PrpC6gSORS zuC~nqBCpZGG6|iJNZ#IIXs^Cq@Z%R|)Yj-O>>^D#r&Th)C}}wAgt!=yGSYa|!6jXE zjuJ}AN3YDgN3ThNUsdw12Ca~U+!;n!0BNy^y|i&%LxTYfi=27sE<(r zwZd9YZPHZy_4$c?in1Ua&UU)%j<$e~%xKPpf!Se@>a{&yI?@O#UB~LW+Wu&l!dUaI z9`ES>iQ1dm($V9C<(PdHIwfeAIz3x!=c{2!bhAN3p4R}<-q%$fT#LlQp7;n}m)JbM zw=rRfJqb`ny6+uIp)??)V%7?4W_GWpU2EGNa236wy+gGV5hgBI&wkyN zU+JpynC*V8y?;Gr9*}l%^jX`iEanRkT=qHRV!g81eXymrd)Otq4@^v$pglm;(fVp4 z2ZgaRL2IWvbWv+{QrEuS+#%a-Ce#>n3&gN->A$qsA(-M>otdl3q*2AY4KDpPeMES> za1E?wfmfV6^3$rbZ=x{>%i1-wtWkF%(je;0JXHbRd*Y>B(cPx&y2ZaU{r{ysk0i2g zgq8W;?z?{ewUh;Ps`-X?C4Kd)`@1+>+o<(GzHbh=4+;}rol3*rL#Wh+`Mnz9!vH+) zxxU`AdL)E?p^%S;&=y5IgizvF|5{6ZOgL$5nQL3bj{~&rf%aamC7+L<01@Io>c^9U za+q#gbx$F1W8Au0N{91d&55N6)I9YxD2cDrJe2JjK)Nfv&D+k|6?(QqkKW5U|8Skm zrzdpjwJTt%#-9ff*LW>{mmRKN2qs(32F#6nFnk0iU+W}J%~c040#aX2jgk9HXvJ7M zwDdQ2I1p_Rpjf;joc6kGHhA^-kk~g~%g3w#_)jji#eql>>!<A`A6?+Ys(*WG&F znjHd-C!%y;NQv^p0A#dhqMdO32#{jxI<7vWF+egr@{{+J-PUrE_ymZJH9Vrs1ZI5Y 
z`xJ(a(z$*18LE0k8$dn};oPctUxb)ksBz&-gnre(d3?OQ(!N4sSnWpwU!zmBF|FpC zP&~{^I^UuqUoLeA(C<>jV)bVo!keXO^v6P~&A0uW;}EHtA2r(J6JgTZL97diYJ0adbBAI3M1aC> zpcWZ$s0!$`fQ|fx+cb1CNRfsHjr^x1Ol4tF5H6)h@H9buvxy$5ErHXKVZ+7Z)2{VV z^fO>7x?6n8@?JyOnO(&BIv38>j4h$VmS3KsD!{OCc0I>)R>DjF8*Wz#NBp(Gk>ELi zus`d{w6^j(7nI(`;lZGPUPq5k*Gl4i^m3iJ3^q!}1;8I?o1AAW-ReSkG4h8twk{GT zdFMaW^~H!}T!+W)%g_)&k-~VL>uQFQOY{Bqg?YOb!pXRLpM_+NL6;@Me679ug=LV< zssTh&8?i5Deo)%AuoQo!_NS=@fZhOM8=j&ut-LK70O`anV zjeTZ#YS2PtG{0}0@o@~QFo=i z_b?}3i;mbBXuL|T2VIva#*5*0Gt(qGqjF8TH8qw??G!YfaziNxsFd5KL(Nva10rG8 z*Y2(FouKZBucQ`pGd{8H%IDTsb@g*4KMP6SY_zx1PJ>VvfFw6DrJfcNZoXQJ!l{P& zy|i6w0w9ubDOt~{;s8u_AH0Hp`smdGTd=iHv(45Ut)&=2yPXD@$6K|qAZI;x_~cL7quaj&BE?tr};h>yjs z>i!;KISp*u+&`qL+?${F)MIb=31hJydZ7^{o?QECR(${|BUhU3a(|ujJP1f>x%#!e z*oPpg^btFp&As$tkc?-_r{s~OAi=ttSY*#o4KTwA;r3X=wZH8#5=M zlk1KNCK#f6qDy7^K<_BilOW|_Akl`N3UU2nmE!P^k)7MuIjIXBG}5kWSF~q9xE>!c zbU&N$HGgJgcurX3+`nVGB>g-ptZi%eX}!?VJNMMxvPbgerGLDdDj-aH8-~Zm`-bc0 zf|mji2HSWUZeQ*Ix3iWgUI8TeF&kBk2k11uy1k`LN=yi_NPOcoPrs_R*Fot(Rrohh z8Sfa7>KV)G1_q0xaQyV_PJp)nL^j75LN~~-m~(4+jVV6Kgvn*GZr~%k&ml_3#A{(h77+)L@cv*GL7^#9{m99 zRq_qH)Ynh`&*j2Qw~?Fb07=HBx=H7lE{f^Ga7=lC$mVxPmi{XP142m*GqpBt@;EM# z>fE!RGEmdU@gVXZtA=$#KFPgTi4bb4(Ek( zX1|1dJ}PYVMT8f0z~BNciw*&ZSVkRrZuhEQ1d23`MYVlcC!8+^mNa`liUECim4T#%h6=L3@3hLnZB`H^=qEm0~k*E91dH5_5M~JC;JZ z0EA6`C}parUgV1_<2QXds@H#A^g3TVG_;HED=ut?Qqmd*DTiI5!*Bit4I_zoXRI3b zXcDm!XbXwjdbD2wOe<0LHuuprV9RA|8xW1_tDwDBKjAWHW(Yu}#JJM-?XL+!v7`RA z&U7s#70ew|qr46sQ=jc=owVmWF$rF-NoN7*938+9xN346AR=O1iMh$BL6+Y)!TbU!!#>O%Ie_tZji-#CH zfJ&jqERQPcK}eePY`u1HBQhzm&G`9dw3JrfOv4Zi4-CpnPqIV+bosMaF<{;%oGLEs zzkQ(6v2O<|R1GSW3`9~MF-_7l&|$^Y-6pKNAoBXv2t$Nml5-4&xxI}__W()E8OvIK z+}nMxDvZ;V4M>Wzy>yTJL(=lvhVKJN%$;ppP5vwEvDmh_GevpT-p{kbNtr(w}sb>6nqJ=N>M&-evP`^`Jg z3YHYFZ|5wZL#b5P=1)2-oG*aFV!dONrQQ)(Wc|xu{xjN0k|C!_1|i?P z6rhaO+N(0W48TFTV!o1Zm!93#4g|c4)DW5_O+Pfuz6QafjnVq@^+d<+L@7ycApbuL zu4bh^3QzN#)kTczZvi4jhBBk|+vpUdf3IynnlLp4rWlSDG)vICNG*Ct9U6JBi_WKs zG>ftKVaa2EohxOc=pr%s)cp-*rGRCSFha0|(39%hkHBf~>nGe#gu#y?avQ1!`H66t z9c$wH6rBb-J-c)6hMWuf45$e&JzH04evZJp^K9*Ah%b;5`^7IT8@?1yR;653=3nK@ z740h5uM^d8l#Yb|K4!ykSxy3)@e{ z!TK6!ekv@fu?@rPYDsWRewwUq`7>dK*wXGWJvQ;`Xhf~@kIQd2Ozc+C$0H(={-qbR zgA>pZ{-n$K$?wDtDC4z0aS|Xk{Nw4ylVP|T8Tu8SBH@{>Qa@EVsk^_Egib>z_2yc_ zG|>S&sw=Te0D9ek8;?8jeRU=*f-vTBNNF{_0+i_{Uch#HH}%a4vtw-MlcWG`>y5wl3?gckra1gybztw@&@^b7rx zjy=Ak9%s^oE&`Fw#1xC+#lmSbHt*WX=@N*FD!q>efk>~bjiG6cE+-yW!*QtNZ|S2z zpn5`dgPI)r1ME|~Stbksl1+K2RCiemk&UZA$#^{?wQ_meFTEU%!=>L^LkxAlt#7wJ z4u`ff%JG2_bj04QOWy7)C1MnsV)Itv|D`6zL2{(h(b={j!jMMP6qBp+MLn`zYEjwN zuLfab_0P|>TOpig;5QAwoQbZ5k%Hxa9jm+!l|l^njgiV^N6$Aq#Z+Ry{6Ei@dc6%5 zwlPpQEjD#!2Q+Ng{a_8+na^lg<#n+OMbtIxlot&{1R%M3h=_@M9<7uc`%Cj&K=cO9 zcE`xU-n_5~$N0p;(tp*!_MtS<&9ikfi?Wau*jF^Hg9sv+t^J7`6Hld&jStchkST$- z(q4QkncR{OMh4f>6ofHb3sWjM9r89ngg9zk&*cKQ!xW$#S(?IkN9eh&Wl_=u??gu= zbew%lzroa9T{3Up#!}fH%)Gn9_RiPzSJ~Z@PpKwl>1)uvAmZuURom$I<#z=tw@RMH zv3~l@^60tLOKj6Gzuz27h!UGNAu5q2$F%nyg4Y<+4FoDoGvn`6pwUNnNA+< zJ}I9Qs~jIH*C8LCs~1R@o?XMV4YA4f+HQwN%O+9i9oZhmHl(5boq7{CIteJ z*6`+Elt!dho~J`VdqeY{8ytD5<5x#uhQRGw)QbsTeKpNNP($0h-EjD_aK=pcgHRlh zB;o4rI)cD}fnFZ7wtFj)<^bVXSH02*RV(6l>oO&xi^@RRR?lxh!Y4;s-W1V6k3v(N zwT=>E@+|;%&N=6oGzj6aGhUm5?;s-^Zv0&3ariDMiN@t3E%&{oac)_B-WQG_j{av) z739V%STc2gcWJU80(wo$p>{9nsWew5z{bpjm$ z*&M5jN{#`L2IGTYf7ZnwvR%tyflf1z6GQ9o$90jIEDY(#qkAPUH`c$WUqGaJ<%Y3A zx&=bPmJYQCs!u`@K)qRavY__hmIC^OaJb%6zFDUtlkWz{342F7PfMIn@BWAX=P&iH z(~&7j9URnR&gg*Ix)qs90Z83XADFIm*e5_40m?u_l|c8}kf%PK;8JgA!Ft{9&)a!S zH_A~h^5x*}o?+)4v|?>2#f*A^TE6E;OZ;t{O6uL)!<3DDsi7QwsD_2}(aCf0fE!Ai 
z>HLB&&IThtdj*<2*45qGN^((1Neq?5FGi+d`~cIaF9Bg^jLAo_E=_#TY_&mcH8Q34 zx`FBFGBnB0T(AE7^P%0-YU2Z4O#UHqTAN>Ge5L7kJxaZf+Dx*`frywcXLa*w(;RacVI}<8bPGgl)&j)yVEFO%CWKej@jzOz&I$iy0G+{vZQPQWWH^6Q8H!J zu=4?z(b5w}&Z~v-we)z|zOU(0xYEd_U0mA%wdBM3bpWhr614(kFCp+rSQ_B)1RZV) znKsR%tT~= zWdKsRzS%vkr`!Tb4l8do`rL|4_p22bU*#G(ZUbVejOqL|l{Q-*Cl4h7N2mPuFe05}ajqUOe*~Qb>&dHj z%H>gzL2Jed%0mf`VD~XWY?!?X1S0aQcFsApoR^*iMnNv zwk>F$2c>1UmucF_@B&OPI{O!D{fMySv0L-+i>R>cVc0tO@Df0AM@H*d^UH{&)q53D zk9-A2kWQl*(xmWe=)JpoUPG^;<@JkS*mHSZSYBmOBFQ&Uy%G0+R@ge{Lk&L)O%a-{ zrWDt=Kq>P%bu#2_}QwMq%tv(nP4Dd()uG}D4C4dUJbz=FM z&-VdoHnif_Q$K)&gYGTR4?!vTCI;acdq09H4626ik5S7;3s*dq!MzqfpMb;D<#T0x z`4ruFdh@P2@No2nntDD9Xt>=~joHoIzWf}RLeJM_!e1c6!Fah;E&WEGU&1ud^Dd=f z2-W~RsVRRgtja9a2>BaC^#K=G9({p^f$TMNcU!ef0lx#O2Iiw3ELO`4oA03#94QCN zAA}riw zdQFbWo`OnM)tElB>_BuXSc#V&ll*DIDe=g}F8ikR4oKLT$-|kgshj~-1HZh4+L6yU z)^@>)gon#rVWqGN$N_X}2C~=O+$&rKTm^|tZaVvq{uvWSKJFX7f`XBcwdbb8o`+2B zaVpA%SLcH==(J&w81;k;fLg$t|MpGCU@t_ZOIuNu9q*zp70*_Nhl@dJZ0_vhPZ83J zs+|$K6d9fe%Xm&2t0Bt$FKjekCP-`tYdGu|j%0s)x=C^a;FNH@IdrbgCpT4T*C%}5 z3Qsd$E*viU#|9`BA(#HPT=eJ_$Z+AkvGzFR2&BfQcG1#k_j4cT%W7=n`Q9RdLPw=Q zMtE*jilON$5ZV(BZ-| zhADuN0-_>*{R_&2p!D>mGEFU_6pafq)giyk*W*U45~#>wh_R&Y)Exw+iA?XBtj)+9 z0lnf*?4Ot+o|`*l%>)C04u4CKddj8_LJ-oe5FBjgtgO~=Lt%bHo$f1F<=fFU67^md zxg(M5^fT$)i3$fASn+=sARIW(*&KK8hV&XguYhJ#xd#;aTsd8u<-O?0y(X_*?Q$Os zQx1bvJ>HMP#CA)&1r7Fr?t?X(hP~BLx2r!0tKqZlz&wNqUv0yR-q3wv4y(I8l=Kl; z+HJeq?9ndDo*U}ci}Ij5)M1m;?Sz*iK9z;d5sT1f-b8cpD28m25 zPj*SUl)C03<#-B)&E|vCX3@idh_5Un^~B@T5Dj8=UH1QsFoyYr%+@0G*?g@fuBGQ0 zzn<&9-dd)1a(li*SVY@RdM_l6nzUN^DsSm5cGq$eiEH z?Yo_k^_9fiZLv@)!mC|EjLdC`^cp1NaQEH@@xBg~` z??X-Tp;-Li%gl{fi8kr{6qJNFa{Sbse0U5j3HKWZ%VzU42w^aWnE#JO;H(-EE{;QG zR?^kx7O26^{C|8RX;mfe6HrNUkkzKll_z${{+jg^@uV)HQXJ}9`qptz28Y3(-!2AE zfnmpsmm2@6U4%EI=;|-0!IB>an(x)=kW|8M+d1VFOkQUI(@~rz+%d~{W_uPVXZk@A zHMH>o3I{5+&pIdjmFTck8;H9dbfI~4aR%Bu9IMc2$&&{S`R8;$*Yh1z4|ECCxemsR z?#C-zkNf&87;)1D!6QRZ~8Ya_|^gQx0h$fHZE`pn9OyCj%f;`)LU~RO!AQd0iyG4~B4w(h*vc>h$bdw-!nN^MXBma2Le}x2 zri*m3>K^1}2g6J=CR~>Y zSJo4Flfsy+t$&81Qb+(Yy2>9-Bt%{9>re+GRWP8&cB05?w}#^@o4UJsZtH>;_G`T5N| z>lFWuh=^l;${ckwG6J2ntxfwt<7Umc0^KT{yfzH48QMJ1Y;(7D-><9p2yPE)Ik2H` zi0J~EYP6+Nd!cs%Qik6BbEI|`tYp5}$7Yf6hOzZ57Q$&{9*xq&iQryPxNQfl?n6qx zotBbGnM;;!l{K?t~`WFS=Z`abPjYRHQUq3AL$T=n1wRgJqii` zHsSU-&P<+%U_B$+;bunjI6}EOJO)p0KX+}& zo>1+w$2ad{#Yhy-R*uj*5NRA9_^X-jH9#7e8*=td?`ns^U+-eMvb%)#1|V`e`d`Y_ za1^bAhc@&xVF*VgbykxxP4&DDQ#gi}w#0Y`nd0{?xG%L#mhXa+;`Jm;+X!u-iNDwT z=xU&Kgzf`jh2$TGl@AeW*rZ$2`$vg(ux5sjg_EQcY5M6W=(H`T7MkPWr;zYbXLZ_D z7oT;JC?c(CeGZY^TpPx}KqO7B81ncMO@1z*oyb9yuK+Ub-(Uuhrmul_nPJ`U5#A= zkePVxvbS4x!85ve6V}Rx-ZKLbqoqU%ZbcV^RlCfWD*=jYLTPrOlFwNn!d=&|o39d1 zL(xjwXnsyUz&XT4(YQGmL_QlCkSPs_@Vc<{ObPgWfaJE-Nx2JzOY|H~aCad}&AoYh zy*^1#$d5hWy^HgseAWyPQxlNps7rW3?5|;UePX*9v1E36e%W0G7!sB!7e>*B5n;r2O}XuQ z1eDUR`G2W;AE2y{bl)@AvMkH;T9)PJS}DuQIv&S(Jzm#!UDt9v0RhnnPj$@25#2AMd;}|r?7(}8LpzC;Cg05qT$8`~SUr>o-S(cTuQmOqs z{q46iOPR{|`M&+@U-#49Pe1+lr}_7E2&Ifc74EUEQ)UK^#X^7Tb`hLcH1`-;YhDaP zj!Wo-mjD$jt;;Or(hzz@Y4Pi}$h!3SC`3M#lC-uw+f9e9g0bSGbK@0-NO~o`-v(R-WM!4f*RxE&oW^Ak7rW7e|Km8o zDL2L0&Ic}o@S{6B$ip;=Ki>Y|Fak={M@e&}cwb};CBf|LY_y%B7&-}TYzC5Pq>>5>8FIWbyhd_mXRm%3> z<0I%9@*ih=Bu^qL@r>r^lTRUI#O#vwe_WkA4Xv7pTW$V0J^~bpDs~_204vM3;$`i# z2?jN%%yS=1KZhjy%q=t4ENx-&LU<_7ZmNK3)oSkFx#fhS(-S3g8*;k z;+8nTXn@>g*b>PVMZof?71LEo{FpumpFlxhO?@@Pt`==aRFd2rC5gg#T!$p;3ID<@ z@(qRO>3D1TH$!;)$OGrPY&oO@QrcVTzI=EqJdKvhu;`)jsoJ6Ik~{pu${xd4Gt>(62ZZ!+wxCscVpnOBt3imMtDUX|j;+}wI32YSOHZT+7DXf|&vreb9Z|g@ml}5ZR zn?6G%H~0|Ti$(5!LRN;k+*bb3&rqZyX<>RIUc$FA6{XOtnIAU*hM&`z3c*E?@Q0<7 
zmf|5GkYYEZeAe8?!4SSCB$DVMh)Nz))b<%$tjEy6t>ID+cF>{WVQ`kwc_}yuV3~&3 z(wJgE`lU`=fsG7I?m9~#9ztMj#m!AH!05dUgBeU&s2AXr?-e#iw5Ucykufe>#2So2 zKu>nffh)Zikx5fxQWCy_@Z?{Q#Vr6Up1B2uR%37rpj0+!)E`*FL4fs1&*k=Ktj0tb zwI$pkY9Dv@6k7A26hf}d^YT+>qsNOk);;UsMZI%I)&Vt;nRVU=0^s;Y1)QIgBXHG?53Q ztA^G~85O!y<~1xy4o#y&cX!?;_T?~ z9tEec+|=n_M7=RDS0~l?DFCt0Sx-v$BPsz3V70Oq4z1Du;8%e4zAP#QZUwR;bXUKj z$wMK;tBFZ*xE9DhOQ#vRyUk*U;nd<671-s@O537CN>9TnFPx*}u?A#K zw=OWB!M2STxvQ8O`9aO-w^WQKB&DSyizxzDKOhu$GB%pr(EKliK-e9d#%YF-qAF{01U%<=7=AH^EB0?cm0vrCxUEcX#UU7FfE(-r9xB_!v-1 zcpkS|+u)pM?&z6<6=c2>EDdp~oVCs_2xXo-)t!~@331r1C3Nw&;1h+!^w8c=b030l zQMtB+bw9{OOg!LVgcz)(3iJ`jhma_&0#E1>&}s-{6#Tr$NJLEsN&cT8sB6L`@uU`@ za4__UUH3(G_)(9%ljlU5>`43EvgHURUatUEq zt&I<1)^gH;!kGYpy8eF~wkImk0vct3+xa;Of_gsF#%+_qDxPd6(3p-uwuZ(G1p1hY zq=aSA_4);kX^>KXTp_#On3+%!dWP%vuund`KfAen>Sc_h+fWNW*;n`qW*=))uN561% z7qyPK3f* zDYYELi$F9h!O1gx2nlRt=b3d9UUxe@HXx`w=L?mtvbZL*QDH8BV5^^-5GWaTY~qjE z(RaKD{m!ja2vrEa`A}bM=)4sM-KW{>wh33*_h+iX_cmvAnb~WBG zreOasA!0{8YZbz_wOxA;Hvthvw$$0Y;6eR{N6Tk8a1anFXWN>9p6V2mSx2&c7(uVL zDO7j3P;#3@od63!jMpMRTJhMAc(IhcRjyeMo&|{kDEYcJC5_u z)+cua#+e)5dke-_me?B1Z7B7w*7uhB5BlOEz*E21WKjn?!K!!8`na!lb-^i*jAOAs zJ;=P??fJ7C+VKxyRG4XY0gNrCk>WmB6vcKABEBzy;~5#a2ndQXhvUL*qeRwQ=P(D6 zMV4_BLKu8x22qcZQRJLtT~+)9QAL-hy(~{5cw0f9-68X%0^2I0mC7?jbhm;9)SvWA zxouG;jXxtQCFModB%Zq#Q|($Tnl-TjFjC}NXQfH*KqwUUx@{pE1SSIZ%WJ3fU}Tbk z38YyJ(T~&yCYz}(Ly@JYL@NO|Djx2kd#QR5tXMXt*8uJgZyoSu8 zs^<|@9~LZ3c-2;dN5QC?`41qQ7G8jq8|})!i2FNrmG0tN6$a?rfH4JRFKDSWEd$023u!Mckzb6A_hW z42auNG6_Z{nxO^D$$?-ygU!253D2}CXp$spIBXTp$;EN*e)_gNtdR^F`4W(Q&oOM=AtGKAMHyXkFP5TW8BsbLeDnn?gs zG`7=$*#-n9kjpl33lLS^vfY`U`TChVJzN)(B9ZNDsDxPuL@78e4Hp1Og|H@e9ThK5 z*CQy=4R10Ly%KZX=l>Q#cw50ztX0azFcb4ZR)1+axCG(n!gA*6 zsEx~Elz9v^N8_B4&W2PbU2m2nk=;%1`zU#c%0)VpSotArg3Yq7LR5O!a43sB7y5_o zs{w4tDMD28nNM&xg_pp1D;9H_TuUy6*K2GwyiCzhF;?1qVFjX+ol|b#&2@8e>mc=R zZqT*tZV1F&83eCB)x1lg}5<`3wIcy=osN?~ZW z+CIa$4G}XESH|}r^EIlfRtVC_B`Ek0A(D*$QO;!^)*o$$t+KE1UZe}`Zy7i>8|aRFJ1()RP8?cOuVGL4wB~KTnV76F*lBHaaa1EW+?-f zH_pk!CLa~1&Erdem4mfKn08zV{m3LE>la{j%A|An9?3~4)dD+kcM3^qvF)6Gh;_@NWYu;uP!#QO%IPa^YX3U2(!vs%E6f{#u`dMM1l>&-DV&83n=tDT!B;rm zV#N6do&|(royT5VW*v}8E{=z7f2M1}qY%Y}D|Sh@*mdeDR|qzU=|YyiR@x`*`3{a6 zMGw5UB{oeK)njJh%d^RKAF{H^%(dV4eqUkOvcgBFP;h^U;uX7=_+fa2EzrEl8s{T8 z)q5rjON(tsip|bHhNkwshJZ?+ga^~|`M82Ree@KLTYJOfP#Chnox0Y!{tTf!*p48K7is2-a;yKjCD^V3U{b*#(KY}KMB){0-HWqG zcpMO_9?Y=XSId}>fKtISW0P%r--o~uUxuxA4b=lwI3~>b53@Q9iuSmO7B(D+jhp;y zyPs@?_!NeRBY`SrJ7tNJ5oGSvrE-lzRz+cn+s;(I06|2kk_JB-fjltxXs6`HAfYuj zBrwZ@JAtUECv;WT;z%GXM=Z+Y(GuA>s94Lj!67vXcLI_~IRj|kzyu`KloeLzCI)e? 
zxr>t!6(=jms0qDKhT^rF?xw7!Ap7RP<|j+_)bJ2TFxiGnHm5#b1!E?qLX>tL5ORCpajR+n&xaoX(0{6fM0=3kyR9P$HpP%(=E~i>;W+KES9v2Q>P5imW1GOZc&ODmqJN@wxvw7{mbB# z3cEwzie@SZ+z6w-F3q+9*$Ed4eiN*WPi)!5)s;$U-u8z-F#S>mRDK_!f_k2Qe*2KITU-fLU1V*BQgr623eKu6{}p^5h-TdW6h5{!sD6wxvn?a z1!J|Qz_wfL4nHSkm2yi5j)Z=`w%Z2jdl8gz_TAZ~Gy5RIu68@o7dG(i=NIY^#*2G# zZDkLC0xD*~V^>TcfHQN1e|Qk6oL_Sv=QxB+!a00ndoSw3!*&G`m6p^VhC^>F3-At( zfR*d?Vl3%VM5WZW?9d*MK}c_}6|fG8V(Rs@`$X@F5ZpdHLewWANDO^GyDBJ#Q^+)Y z`aSo7Ak8nFhUA@0**4iMO#`B@@O=A^UY2+o6_0(_%FXgNAuAc|Z*q&b=fYF3<+J8T zH-$?9N7k*!BB9Na+?ENP2qdW~ftrOEfvkqgEoNnG%wuW-NHpG@cF= zM*>;a%)Z5H7qd}A+8|X&Z4%V>=DRh>c4(61ZbD<8A_OWgVv#m>tJ@7|N;8d&-vm-N zDfUH^4zMzfkHD}LPa-h%!F&ZBUjjziU@eZ%@Q{r=P{FY}_sfBaPIzi8tB5qct`L)5 z=3&a~fuU5GJ#(9Dqlhv6P?g;D1XN6g<i9zDim~VeAjlV2D6$ewY6l|6vd-wk{Dnb*;)+)8 z3!;S&hVuO!cmHcw&Je|f8n{t_$A%)SIdM|mZi*ZRAw|sLavqy07!G4b%%VPY;R_SWHxv#3=v`&T~9L(krH@yfjfda9!WXMNRG1;>=PhVYRo~}ZXi?f zL`a!&uGP!8ug6)RH3^mq$Q5h6WikS9Nl%0U{ZbuKa3->HHmB5GrLrhILj80O5-n0Yg8zUW%_y6>qj7 zaJ4c|LSJetRv{6~U3>xC~tI65)9n3-L0* zN^u4gQ0`;WWl&OXLO~8iRDn#1tz<8#S&elPZC8((whf3x%=eh^F!YnH@jKT7QRTMQ zRwbw-l?wc;U;PTN0_>Gh&SD2{1q3E9`M1nZgeTo;D8?DpFiK})4o9eLWK|O)Fjo#@ zJA~E1^_Zm{U{qxN8Ncj8QkIh4G}`X)C>6{mE%%7<_bTwdy%0X_9Idnk$bB#tBU4jU zlKn`&Y34JWRk_qck^{RQ@BmP6%;1h7JPBmqT$qnWjfWu6DoteKOXKk+5PcoD!=JqG zFbuhAeFuExm#0}8#}XZdATf3>jnp4g&|ljMW<8k3#+e}UeIh(pT*x_7dH0=r&yN}-d$m>ulpgb z2u~!@BhZ>}L7@5yw{mwIK`P;-|Ot!pt(Uj}d|CK4F);;4?rd zop{c;F}z}Ic!(1rW+}%(kZ|7@TafWEp^}qq<0imK18MFR?!yDo1UqKeZUP_X)K7J9lu#qNEZfhc`5 z!QuMonSsG+T2)TOvtZ18as4K4gq|nGKr9A`l1!%^>6<`P8;SgB;Urz_%A7`>Ozft%4u3qjg2R7KovZnIoF!yQYb0it*k9WL^sSe zl8xmED&4gu?tTemZ*Z|j<|x1XF^Y~puELwSSg|kfPqKpdY6bYI3;xr^H?|($jYfL%{jZZ zPO;4Vr^yq4&$)sPAq2LQC0v@)MktcPTDm#H6v8GwLQ>-G=xJqmgdUzDCn2iz2qETh z_=?K4H4v<6n7P~rL2E5r?{-O5BPpE=oHeLHMj1?=xl;_=5mEnwxb#=>FAz{;qM4Rm zVCC6nef-szfy-T=%ZD;-8_pg$-bQ*-7vW)q5b`?gmY;or0A`1&BB$Zh2(uWqnETZLLxdO4nZwhFKxJU~@|Dfzu-g)XH!vEq zRf2O!DoC!FU%}i3vQ%j8&P`D?L+Pb-*UXEU5?kTaW?il0x*F7f36l3FCfHSumlYA6 zuT80+v^^A8pwvWAyBX)J5PJDqcdD)pnOc;(FnxiYDsD$qC208)uOkGGLz6fi3LKZ2 znQliyZ-$?3eG``>$YsS=!*3yb)#b3W!QC`+8%~*xv#DvT3wNNXteFcLjCBf^TG!SE zwi?YJ-UfnHZkHt5V^J8HA2(y%hp324@>z|x_uq#iGj_s@#P=(F3f~U0&0i10<16y{ z^$}uh2B@f_%PvXYCg!B3Yj56)>Q5uClh2RAw#}*)e)X%P#yQQ6H5M-dM z@tS#xcF zFcPYG-G>~4sK{++4$cL#%AvSOs|I)%aMEWE2Ec8l2v6BGJ)Q+J@8fhfJ;6u>DSN_y z=boeI5f!6ZJgtU}f$Q9C0pSmF^_>F`Ti; zN@MOi+Y^s#5kkbf`$Wbgt9oaY<=Pb=6Cg-Lo|VT$Fd1M(gfIy~Z^Uqgsk_Ors_xmu zJ_SkdpR?ZPYNrNSXYr;5nH`t>J{=jQVDZ^*&m4qAF5-5xYBLe(JYP$~Z2?Jz=$vA%Jh1)hI0`T%2)4v*O8tsEq! 
z$GQaDdunAe^%Z)J3 zY*)g_mM7RIEL-};A%G;M*~PY5ZmWa}?lZswQR+4_iya@h2lUtmlfk0=plE#6}E0SqObpM55*p2%lArmsBABis_mS7 zw;5m`gx2*-9XklUp0@wQfQMV)9>qH`%VuXK7tf%9Vp(R^#}EkD%RJ` zKAb>Qc31NCF?xPvWe|IA_4gEnRh^aPjHgfQSFP>akh}pxuSjMTNRsWnjgY)TT@W3A z2*b^*EuY&wAN@ZhdP8ZpCCJr>3s4%_aJ7bgo-*8G*sKV-iqaXtRz#(7J{JH`y)QwK z37f%nFQYBWosPZ9NMcJY&xAA`Jj z-n@BCJ_MP0BXs#wkn{Lh621Yl-oP??1)0V@xEZsgsq_~EkW^c)ZI+4{p1U(i1Nns- z%K!mo50b#(0Tm5KQ2yyD^K7|yh<;wiA1(rd()mD|(d85b@?q!0-KO8E$Wjl!8uc9gO;Zds zGxzBbnCURGU*`3eGIL>F@C;Z~K=%TZgFOAgPa7I1h8W>2Q zGoc4;$JRBjoxz{*KhtRWIZZ#4?t-G%t+3Pm&y2%KRVI>Bm5|G|B4~6G48_9*aySxT zrO@ry(*)5HNYxf*ED5wU1Y(%Pj$~)z6n{cL#pSr=uA=ZTS}2g?R{}?w z@~vUtNtHqnlI`dW2G2$Z-uEj=+K15YCa%J64lH^88@_15EW1Y>p5 z+4gQWa1$blj7zthsVWhqJNtkxt_89xAG5i(4)cXAFm4T@u!-2!ZOBR|8!5{2{|7UI z)#1VRTE^*(7#!U98d#-1iH2--Wjl472#5Qy+g=F0M7v>_e}M7J+k98sHc4pT;C^`J&b|W702-@}>j6t{VmIMi zx(|ea1$@TPF4#0Xc@UNs_Ur$Q0|J!ZIA4h=t_yMkGd(yV$X3HPI2&?A50;bNqah%j zC~+P`MjuSsaq!<<#Ciy&(bij;?_mK07X;SKiu>5`$q>Zf81Ig?ol+1Ml_{aq2$Xr( zySw;@3xX&W(Igm!;eWu8m)|f(Ytn=4V6IiZ%V0~$-p#-H4a+50B;v!n?}J=b$hZ;=LmOCW49goZWO|WX z8@-NzMzKuhTi-yEA+STCxJPzW^Cl$kP0dQhp8%_hW)-d1x!GIc8FuAz#@jXi+mI9_ zV~eT~D$V>+mjop8!#E_d(nHVx{#ph;2jYBqA}*f4`@f$STiyQUZC0*2I+9%}qju+twy0>_~QdRgn=hW^6Yry^ZF z8U{ys#wEU^6A7=x(z`KXX?R(%mlMND)S;aCX0f}_{&5azhR{3~llxiqx# zFuc@lF|x761Z2f)+f?1G%tQqt-5duX+DSoX{|<#S8CkEH=dQZPv4Bxp7ufwbQ-jDQ zwD=T3EVNrvriaJb81B5k84AEDGpMr8>hIJ5S+5s1n|0@G^dRwlz>f!56k)_kZa%Pci z({TBY~&{;q+T=WV#|e#^hWH7YJMZTo*`eIwK%wB{x9w zc5V))v^F9TamH`?q8CojCPXyO(RZ9DThOe8@C9?;O>BQ_s5xHq7|fLXg<3 zqE-BQK*F`R>`MBB2&yOC*~pKFkd-V(%q!i2m^xAHoLq=+=tjebLnuc0>TQld#Rk%x z%dc>q5Q#equd=u6R^3WAUPU0eo@Pb9PDIv2qHy=Qb36(pt59yr?<4}La@Bx=yfv>= zP}1@myN~@eBKb(<6uaxW8}tOl=}++EQb0*>-0N;@T}==)vaB`cSAf-?ez%;JjPNv; zRGZA2!_#qvZX;GJgo*~I)24pO>?Js2Z21zic3IdhEnfjk%W3Y4(5uKwiY-Bslr{)8 zVCJy+%smSjdVEbejrBU1#KqaM1$+x66%iM#(AufeH=$4#7b5U`hXmVZ#Y6=HDN2sZ zV522idHnTDcoIln9c(2bXeR_Mz6@*8rKe`j+=Sy0!m&%rUKWK=X4nvm3)QFRHjw}O zK;B@x=Bai4;mOtP1>on05F{qAh3T?KVCC2r((Ib!$00a7o9sTDCkn!uc6{$5^GTk< zsL#Y|X<>RW{(-r%N zV*yDyq?Due(cy{00u+N|0U=G5aSsX~k71qyikdMI2YGA=o`EGGzvCd3d=l$sL&n2U z8ROki{Rs%lz1u%IF@$~vSJ5`^O@h(u+(lhxgC;|%Sly*ouD6{6C!>v;S$I~xp9-rQ zT|$eShNPmf>&bB}LOi&*jQMQjGy@I=xx;lX;+cAA4p)b`J&&`Hm7h6y7Pd2fHVoS0 zMm)RZSoCc#zFP?`(fn=%6#tq2~VJmF1n6xi~Q9a2kq$_6o z296N?VtC(jPFc&b3G5|szF%az(#F=Nu%)o7jP`vnIzO|V%OKHoGH08ZlEAw_4g)Rg zl=YTuXwsIGPs_^DBjla_pA_W%!y~q}o_N+$o${eofw{HcUbPBB`Nu6OG|!|EhWd5! zeQU5q2vSDet2i1VFayAIwti3w26Z85kb`}&E49*30bd>kHf1#l1BafGw-TW2=my;@m3^hGK*&J=V!M;l7bZPYFA67 zubpQjfEq+z$r&~~;el&`Ohf$L%eWQ#34JbcgN0qlWOM;;5?7D#?CubN%E0UQ=-IS* zJc#fUqrWXU5Xee<0wYzM48(^BTpR;bZIr2nL-WP%D$oN6((GD3vC0b$2Eu0AD4RnN zQdm*0jX3L&)zsOhg)6}`hZV%{PGc7YGCIYtv}$!UM8GJTal|oX)rDkUY1XzLMv1W7 zvWf5ngx-;E^SgGw=_C}XX16R&@RS~4oWWPU%nv&ap*X4Yn0+QAU`Tes+Boi<;vJ31 z(vH1@UX_$J!I94R@&b3~REVRnxZE8HyZ}SG=7eusHA8r>Imes@-6_fI zqI9#XG`~(nl}{&Jk7yZXYYDo$Ol(4a$^j&B#KOIi~(>hLTHlh4!GQhk)^?- z@EJij{m{zEytU}owB*iaJapsfE{tTf)XY!n7u61s%?78p5W9*W^$v`H<P+o(2^Sfl}?Puxp6j(W9Y(i|1oJ%nwqth6V0*25E&D z2^4$Y~RvoI2Dn3 zK(?Oex|}i4yZ}=`HF*(1SzE>NHrur^7Df@rn_DstQ7K4EV3Gn!>d&wbhO72Y_$3Ja z;6xsy{oYfMNl|W*O*>CTP)?ZLVcF7hH%$RrKDI&Z zbO~ARml+|nT}aLpEE=8(r}wqg(Zrdg2!y#-RFK&a%2XoO>}4cg-%@ARI$lCrTzV#R z69~!+hd7-jObSd%`GyVF8zt+3BpN1u1OhpmW8bT?N!k=-3brJVRHTCC0_+{0 zkg777=1Auw>XmD4vf5qmx(be0mfC6kLNE!mJ9sGDA|$m2@VA^&4Z`<&{YI7khmZ%^S8JXNK#C`bsg=9=#}W%_CaA? 
zM^<6+<)yW2Xr4F11KHMWZYn@Vu5Ge#Q+XXQyzDn)$I;2%3Za+SMYgvUnuk_4`YK)of|a}Wq4gk9&eZI1%}P`Oh|~6p^db2K^SuzOd-q|e zdl&zRBKn1yaKmu`s|u}c`4Z##D9GG%Ps$!66Vt2KMQAV16Bu;GWj@8$jXp(`?$_F- z$jnhdSW@kPC94`ps*-a#gYAyi`~;=Mt)~BO!7~3Gn3bir4#>2`tytAG7nP;OQ?|S{ z09I6y4Yrccv^_JJ%H^J~FLYy2m&cs@Bvjpx0EQ%>ujlmoWmNF8U zcEE)|rVxMczKuCNJZ>MUVim}Zrboc)HR*O@1x=3(4Bd|#Q9d6St}V_rcWV@kv|`h3 z&N#dPgeYcjAIm)-Bs^z@_Z4-3if@P-dO#bl|vtbfk*x0 z#855lb_BCw<6)G?#fxm&U;?s?K&)r8_e>1WC0WB|dTtVo5@aJ++8H}(2{;*AIZ4l5 zsn#+DN{x>t>{4!LG&?j^q$|yDC{cGNWm^_0=EmgL-oA!^kjgKrC^ob+Zz&B% z$;jZu12+hvru4vJcv;x9v}D4e64poYT2T2SWVN^)+YyQ*0-3V| zsbU~E_}@IR3^{g38arjm-143^n&lxNYZ640jjW=yYeBKhHa(OBshYq&P<*i(ciZMV z^Pm~3uOfx{K$L08Z$HC~2BI=*_e9wlJA4y3s%?&4$5-To+uaE}nbSa2i)MUOY8PV3 zqbSuQ?uNrMu%cYa?QI;aQsloJ7XU+IgE&QZL-|m+;jw45wTV7B5N~86@Yrm0wx(drswfi z-6v@45lLCGKI@_12s__&a1s$6a2=6-y4^lyPF7Aq`UYyN>b5)gG!$vgwtd76VA5kZ z8MxAKlx%YmZd})dO#0YJ#)6gk0p}oSRoS{#@q&J42Z$T-;iN!NSUIdt`*UxtFr@eu z*Hv6ncx#2!NX*z}2q~A%Q(U`j<#Gj1Wm&v}2|@c*5HlZ;ss-2-_I%^8gh$shEB(4V z8ru#>);N=n#{%ZX*V#Y(i@_0H$(ubtnS^#1Uq9S3L%n3p~gHYPswia5%PcY`O1%RZ>4AskjPQYV>CspZgD& zK)%{bSILV8AS*q14jejz8VIFkA7@tu3_>JQW;1LCcrcO-AVWiQ2bfQ=hlapYs0-Y; zJBEfB;_VWcVfrnN^Vm#tAfO?#fs?{@pd*k~JaO412_FSQ*D<%NYwniT>u_evMB8CL4Lq zQy-6@l;|o}91<9>Ee`d%%grZ3sr2ls3EO*(KLRHu&Ckj&&3ETRZAEf2Jl)Vb`hY1w zB|0^p(a+Qni}OO{frKDQuck8Pfv9xb1TGbO285EwVd+(NN%Kq?wUdR6*=Hef@0v9DGpo`g5J-jBXdiV^(%LzQT65{ zNbC7-)jI__#AlbySS3jfQIr?C9WZGjl-*s())5Pp4k;ze8dF4>P!!SHY|^m^Ealn( zKQpJ82^{dn@TkLoW!LQe5*SiU5a;bM=K*1s17F3qZ7h?>jvzPpxg3!!Fq+o)PO_1u z-@Jl#&er9?_*dJB2zHUvH8Dw@2T$_&1|a?g7_BERwzG4q5TvseS$S@`1cDTpTVq)* zlE`5^H~J_+B%VyGm8Id){4BROx(tGzn_~*@*dm)LZDqOwR(iKS65j%uDptx*8^TW& zOqt+QAQL-Z0>zmSUSJll5-feQ?!|-*1z!afGfDWiZsXBbIMXJR^)^|HE1@`kW1pz6 z29xU)SN&@csD=~9jx{s19Z4ztt#j~pNG4BO72AcZnqr$L?IY^ct=*7H6U!I)iKJ-{ z6tAFa(unsWsHRW&uQ&`ydPP>L?JwQ0rwi=+1uF{N*s&H$Il}WhVIBqZ913Qp-Y&hf z8h;Q99WAiAq(fl2N?5qu(rl(FDY80PZ&lrA=-g4A!;o@2SxT{MZs#aA?++JCUA+C+6_MIh+Y(l}wzC+3_b7fXg2> z4ffpaSlaE_yBkLgfTI4+MXl!{OgoJ96Y-yjSFc=Z7{r>ka;XCLl&IeG- zh9XEuX5g}@OT%EKqink+!0hI5DCMrKrH-_YKvs2QX2F)$T}Lt!mRGuQGk)pwh{_NP zK<2)Uf*=jQn>h(50<0K0K8e0Y>scHyZi>K((9b0-ySb=eL{=)-a%#7>cwh#E(qF{fnGjN zf1fY@1ccsg4w%h!FclHF_5%#EoiW@6C+$*K@hA`}uMAs_(gun>Fuuv>W|O$R$f^l+ zXjZvx{%W5BahZjV2JKfgD9ZdTn*ytaA>Zu($D;t7kK&G+9z;+AGScSZP9W-a_y{zc zI%MU8b2EFjU+yrR{0P_KaH7yv@FO9nR5LY4Lrin=1#u&gRZZA~YO_uCfmlm(M~Nr& z1Qu2<_DN(_-ncoL?!LTJFv>i`G-id|ZJVbd$=V;rVWbR$BToGks99c$^Wyw%Y}MxDBwpyX9jXLq>Z2h@_I+46G47M3PEa zj<(M05rk@50>gezDDbMsaK5E+9MsKxJW(vD1L6`vG3xLh@at5bn?OliL99XKTe?OQ5*vhlp}4d@skb;!quyog+U0? 
z%Y1i3&tPO}E}bKtrhp+Zs;+bd6@GqdC?x4jvF%8N85Ux(>3N(6Wa%ZPe4X|GBNTu$ zP}-X`QgQ;r5H~t|9+~)TBHrrsC`6@#MNoIU*$Xh<{j?2wv1@nfu}AX@*|)P#I1dOh zi-py2rz&0yTpEj6#k5RZ2sm??R<*uC{ThcvwOx_VjP!UgNzYtgk~@#(S0*ALNE&5T zvYMn#L{w=q$Hjagkxhb9Z_TY<{%DvDF($(kZPJ2F{0OjWcM<*1R7u=}YV+{Zki5>R zRo0HC!;lE>=ub;bx0T5m5LA&xG|QPl@{1!#YGxs*aMtpv2R_kZWjY&@6l2xVCQbvQ zQXH2)FJoTnT&5rtf;_FYlhg@_ss!vE=JiR)D!!F`0+8ZMh9D=&rn>pzNAhoD{}c%6 zJPWU}#C_v36;A2mXfIxmy8tN-QVB_Cy3i#x9ZG7kvniQD`lm^_4@k1mX><~d#F;zb zb&H{`wYeUcbMJCUWqL)+XXGmzS*c62 zFKU`a$bmwU*aGLz!8)HWHvk7+*~QQCb$ruSMzsWDUMMGioa%3RL#;B1-UIT(Vx z#_bC{a|l9}oBn+Iyh?J+*Gz+09A=zaYy!#AS>T1n2WTr-ccCx!Coqg`WS*N zMLF9OZ9zm^y6P1ui-}%5h!DqG+k40hPr@h>tW~2<<4(b$l686fF&BZPbg+Vr!WxjJ zB3st*w|F%|lI{gmhbEvZ+3MI8uIIudOiktH*lHW@1SA?gtYGF!gEox#MWu;din!c`R4Rb%K?k*UFigKysCJC8_H{(7TywH3J!iP|aRt z+eZ5kmBGYy=ut=X?kff?0HxQDpwg_!T1iLp5Lt?L?u^@S_2?IFD?1FnX>Y{SfK$v) zziduGbx$Dm_O*5(?kS>D7sm(PDU=@r!EF|9b|O45cRgD92~nCWMCnq9VA}sz;c!Ue#I%iS2TI~DE~~JEkQ5I~v(`^AGXbTf<+Xf*ry+4(5f==Q@S%vP zd%4YQ4g*Ug3*2|ChU+)J<%e&f$0)Y3lg6^BfvC!XPwHIq=L3^tA1@*5QF@%OCo>%p z9$#yF-9{r)l#|@o*2f^Ilx*Gwopr;Z$ z);$fZBqq(X?;cOrZ~uMbM0^M^Iiq9Zp_%%DSpTO9tBcx(3?A~INWpAa5JmH{5G^NO z2u~ki*2<0aW5#xLzw3{MChEs*eYMKx>FkOiHktsy`StLYlTP#GmED@s0E)~Me zEfbo|41f@gBU@+^-L()SlRP1cGhe8JzFCOwvQUV*^dcd~TqQyi7{m+xGus}8rqJIC zQ3uuuq0S9L^XOZJGU-@_sD+h6{|jBI5QVW-=-2d|LUZUfg_v@#5wdA~Ax3jMgl5t^ z3jH@aMxjJ{MWLzmh(iBHHz@Q6X6A*`==X%u88izmpraF_LLCxHpkEV8p<5G5WnNtf zzw;=FMsITf$3!k<`0L3NbXy{+geQfVkT@l@kq$}-i_sv27u+b6Om`&ozcQK?`W0Od z$lk}4NVA8Gh+6gg|3Ob9#Jc8Xp>cFDLUUOw2ieo#FiaKsD!qshp5t|)Y4jXI{~OEU zLNn+rgr?I+fQZIc#&3IAMMt3D|983pPj03U5IKtuKxh$`Uq9O}(LRyCV?-%LC(`eK zK8uO>5G(#jzcU_xEYcPTo`{@}0rw$KVY)?*XP_wbpE1`$zr{-XfK6EG|JSwaf5Ae7 zEJIr`&m#XTrdgz2ATU_u|BN*TVOF{--^UdD0AFH>1#Mr&aL;~?4fZVKv5`P*$YN%J zg_dD&K`yR8VQ&@GK2JPa0bXX@=E*ZyS;3Z%F|a`Q*dA=FXgj(x9>^R134<#1ub5K( z+(HT``H*ii0`u(W*i8jpg1z*=zm2){zt>?YJ^KcRQbCtvB|W)~k(Lj78Y>BO$*RRp zineoP@je9lPKbs05L@Z#gBVK>k6vpj$DS z{`ZftnS#w!qa~i*jn(w*Uoe}3*2gaQ0rq1!1+Tq@<@8}G$vn^Q!*=?xWOJ3E-K|~d z*mIB!1A(gaJu$>xEC1_vv z-RfZtrqqZ109)$AQpsurFQUEeo;`p$_3#k(RIueJ2Gv6<3O({esP?_#x0VQB9k9B>eX# ztjuEtX`$13B6y9QJ{4>xjvocvNyRe{u@gTDwr<7%3)&w0=Q_Q*X&(c?nBecp(?G$C zl>eYu2+Y!8(ap4#A%g8Be`qWO#WzfJ3vU_jA*OePU^4+m3bw;O?*kme4g<++cO?eq z1>xws6!2&tgz7Rzu!BUuDA)!ZD|nUnjT3CAmW>x|Ig2G0yhz)dDA>wllLYNx%w!Ky z(Glv#bfBx8So0YkSCRag3eti>nkD!>_H(vE^zNZXz3f9&VvYq{ zDar)Fizq)yu!DCedx&c119>5{;+SL6&7>n$baWp<(nNoc$w?QyMuo}rfl6Nx73cU3K6Zvf@~ArLWZjan<;@B!B*0}-Lv>NJAkePs0q77e~&%dE%+TT z+2bLGWv>E9-=o*vC%S{GvS08b39VI#NG&=$Al!47&<8!g6HOgb@Na1gb)vhrVhs-q z_kV~nJK}@T5giq5LutnpB2v$b>V>0wN$d%a-$839g?qmNp90h1FH#*&3pUd>8Wf`M zW0Ke?9Id8PZStY3G17uAvx#}?4Y{dQsC}N%Kf(Ji@-agAG}?M zrS?I#P&{1<(nbmQh>q-{W=2JSM`zaO={M+I?)%VRW2`-0M@k-ge4O_`@^~lB^Ra@r zkmDz!dm3n}PldbolKmfrqkGBcGX?*iqN6*Y$9fu*IZkwU74IA`9Qho#VS;ea`H!*P z!hQ94J(Gl^dr|ddFjcsNHZjEq*+ONQD%@K|kZHm_jU;TkaO5q@cLtb8Td_PdeUNWR z%`9Pe4c=^zw~?Bc6}*K)iWl9x3soohAbTmLB;n|nn1W>Czc=i}ii3I6zf;yJKF9&y zlqwwAi49IukPggUx?meMGgI)d*xf}6+}UsvGcNjj4B`^eJ&hz~sc^KK(paV-E#z~# z;9uXzjw?jBsU^pU{)ocJ^Ze%bu;dE<9c^)y=*T^RE7P*e?L!6s}+u%An6B$`_5xB4tjpqVGKIhDk&+b6CHgIJsuW*@G*7ai02Pt zu8#`$9;C4!6OPnU?dpYlzM)W02=~87GEWM3Rbp38`S8`0a-=1$;?#-{!dZaRpH1t zlv$f_^ey;y;rk8b>ALXUEu`@Vm^6MzbGRwGvjIQ2L%3_-8(4PX$meAHw#PfE?svdE z`aS(&XN;!mb$NV%>e(aQy_x2CyDN$u1*%bPL-G<_$^I$sseX!p}41c8jVgDY#~iE6yjbb z^voEh@Ma13o+X#Fg(Kf!-Ch=s?!hp|3wKvxm=b*W4^Tyt53`;4lZ7KSbWQVxJI_!j zQ-u4@5r3+1_hzz}=EF3YeHV_rfx;n91f9|b;d^glv^EMyKcIeW@?olRek+B0kI_V` zeE2sAzg4*J3tqJi%&XcOF!Z838^}nFaQ9{&-R^^*{{Txbe9v0TF0j>XGP_%Jv=V*q z@xkk{Y@V!Gc#{I&*M`$3mKFkqPb3pj-O(fxQgw%I=e1-z=5srRNDMUT~lGe~C++Rb9-}iVc8tE7A-a<(| 
z1e2U^sk)B@J7_zPeTdCipC`hRYC49e!qL;D_($Qs8j9qZ57UUn{YkjzEUEcfxZfrX zp6hmozI!t%8vr&XU^WJd?mUC(8su@^2@Jh(^m7tFM7ZZXm1wANcNGd81}2(I1RpNC z|3ezg2+^&mY^3N&9cKA?kE?0EqZH(?G@TbbU5_msExLm)c8p*%NqkWudd|=)#`@6P zsh{J7duwRZY`L8{4hLX5`(Bnfv{ zQkRl_m^ZM>^M!klQFVKhUlw3e~J>R5snmsHDR9yd`}>V23!XykgqAN#V#& z?9nNYcTm$$3*X&B@*6zgK=?+Ft4Tu>nB@EwQ+ZCXjn4dnLUeDT#G8e?_EMi(g?m4z zm0l9={}2OxIToH2UjY;2MKW?#baV@iy-m1x2PNL_gP_Xmg5U4L$}2?oW?p(z_--Rf z?-1_aPt&<2+;@C4W?uM*cWF9zgrjev%1)2>&@j4$`wx(U9?#cOb)&+a$H9HV_cmi= z?)&h2iL>AH*6}?Q?)!%B<&o##C(VzAdmB;r6XD1gxHM0}D506=`=jWdvy|X7;piTm zr=NuT>QTtg!jU&<`Ooz@J0Cqs)(3d}HF_QB@jjj%B;46RM>#l#u~0*V?{1+z3>A*N zfpr}weD4D);Bb%Mq?||iFdvYuk-|M^dDrt`lK&r+#3<2y$EhqY_~2)VaX|yy=^n*XqH%}MtX(adz1!<;&&J=9tJ+l-dQcne)ExL``|FY;F>qp~-AACtI zNbtdTp{^w1=sT!7+4EnJ-1)-2^%QrCa8D!Ar-D)A_mo(g=q{T+NEeP&lg3Qp-bVVG zMV|i}Q?gjNvw@^75$-xjwwL-a^%TJ};qEQen&rYh4J1F?huQHFw%&)SB>8#5_ui$l z<_mWoCT**Pd(EO2f=TiO^6&sl8uM&a&Cn$9NS=pJ%XDSWpPV_fC=TCCYt;okSDliP&ve`&VfhiN398jp`t zw%ftHw)rbez39H<$1(N7-BmT%dg1>4D0YupV1++!1L2ZcNBB*G!#u1d^hopAp_D&}F~?y9db_F&V_0c^eK-j8vMj|oRUq3zUr z{w!s9LOA*^9p_2Ue?xhm0<)g}pRn|veG3!WAl&~hHLX#&=M1qldEOe&IpMo!>D?~~ zch=EeHw)kUn9Q~c_kBe?mxQ~`L|zt-G*KPUxh_hzigE#clzDgWETopsdSJDxv9UFZ~!zD2=x3HSe*Cf_66 zWpynIM&Z|ieWE)Xs3Z4_{+Cwon(J7u{cr%0~I%UtxA%5bmxb z_-NtIPpB?qz&!drt?fn8ks2)dSm9n9tB><|52kUv4^I9ji0(U%nkEWI&eJbX^1=T? zOP#FXS19i(KFAx?p{c@M+o<)^JpV^p$#mfdJE?Osg!>PIX9{;V&^gVDg~!{N4JPq_ zrJcU)gC8LJc;V=AY(j$Y-7}Ol7PCnB-k-3+i-r4+qrWA>cel{HF9q|e?`XiwME8D9@a4kYn|W=v zf^=YTazx)XcO=i#nAdzyn~%CmII;_E7YaW(2w&vGY{BxE2=|z;TN>jj0aDSO>O5nza|*_X#Q3DEweMwQrMf*B(q^rGhu_#n^jDyWgr1kviyYqQ9fJsgBVo zrbf8$Jce_-aP%vx{0`x+_fYmO;qEOcd$-3`RD(Ujorfv3y~6$b>ag{~(YMg*evccd zm9=0us;r|T9MBJu9n{N%!dSt1kGTQ*g^$r72R8lDlZ9l)=@t%$1o=Gig4G5sO+k6_hy6vhJC))l=IPxa^&%)iC&CW-ioqu41r2)csx6l*@g2~iPTF4;LJvL}sp4*JX& zeE6+sYP1i(lV`_(Nm@HT(~F|JHdArO3imXS`fgv|=EEGIWiJPl z%nr&eTktZbB1a**ZQ(IbxbHkQGhevpEHSR~VXA2Rg~;meD^Hw#a1xM zX~yJi6Kpw$u~&$`BP6)S8nAH5AROyqkAQZzX;pm5C@U((_Pud#wkfYG|`?k4==f3iq8S0hfH3?S#4P`O~z`E5e;k6x3B8 zreO=l9!!G2CBf~2{}*lNx({)Jl-&^iVLMgmrh;7I!4A>gHZO5YbTeMyZPAf;h~tddHyaUzz1V$aLRNS9^@b{=T&-KdIe@2u8 zgd-nOmIHJ`Cz81!+M~FNp5{m=-l!IPx}$86(`=gscCe@cl1oMPr4dHKbr1 zn8aLx9xwXt0XmEc!Vh*~c_u2z-v~WPbl;~WV6t$hZDX3^`Qw<@sh&TB377^Z_Gakm zqN6ngpW%bllgODKAI7nqCES0S3Nl;x!5OUS%ffx<&S2<;qaTxm1mVuQudwvO-BsXZ zFt2T+z0UWLa!QGXphcyMZl;LSL`QZIVY=vx^!S;g@4ZK5S|r?Q>z<2!@Lhkw$}4y? zm1C)=|48s<9)C=bLgB~< z^fN`m-PUWD2>-C3Fr~uL&oR$s!o53)vm%C%W9Egsw@__12tPPL8E+K6_Z}5-6PV;& z#Wq!n?mG@$C4Bz`g|pT3jpT5f$2MtRE!>~Vj;mA8w;vK@#w`uLWg!{LW zmEAu4m%Mk6aMxSZxxK=jHp9J7xTg^{><7z{rw^~y58a#j;ec@DYiiv=;k&lzcF6NP zNkm1;4{9IHnMtpQ6or;m9AU$0vmEHj>Pf!oBY^WH=?< zcZw98j$!IFTm{^4CJ{vF{5f1>m41oP5XO0!F_nR?cv5WQy2qQc!()tGwE@1bej z7mn`x6Q*9c?^CS6L*d95SdB+u(+u_Ov8OlVJUtPP?nA*(h3{^m=zkRMswK*2F`mxx zC*huRRGXiL9~`BnKG)}>Z^Jwc0NTFQufM?33-{RM?I7XkPMod59=}g=hWPN+6xvYX zPP1Xdg!|v4(1r_lZ}}8c@5Aq;K8*yEt2P?{^P(Rdr`C@W-9ah5;OTeF)C>2VrT&lc z_zXSni^83C)TXh*-8O+VPPo^$6O0#*n0GP3hi@SK#8`NWZITc31$eS>m(7?=5x!^3 zqEm%CZQW*?$A>AO=^ocpiDn4**an-KVD_iI* z9@|EgB;kJB!kaAI`GIz{WclD zNVxX{tmIMEhfTs=?~>h0kKd*hRQWJ9ylSg(-znnR=6TyRT@5B_S1A7)(Ra_%n6?Xd zTjSdy++RoZyF6csrgr~-J)M7SRmZm9jl-pgB1$Qxlv0WiLJ>j)Arv7*c?h8>LQ#ZJ z8X<&ob0b2zJW3JqLln_G+Ze~#1`ILAHpUoZjN=ev9Ak`sfrvv0*QH!8;Sz3y_CDwA zAIImIP)d0SMYNyg?@<2wu+H-AnOSSTd(Pf7bHwR<^0Cr!BR*A*Z4g(By>2d5D^3>R zQ|Ihwxo*8Uae!oKa9m7q8^!6<2wammK1>%j!&G?)m28nYIKVJy6=ymatmnnaLI!A? 
z+;7vWcA2AI#&Cx?T|n-1ioGWEuuJaO8BE#X$MIqM{w~a=?vpt8 zWRCl({C#nF2r+-)?4?}lq2m@Z;TM=ceMlcXa^^3Hjweo6QF=lgHc(D9TK6x{)NE z<$P=v)@;YNh;0r`jjtmvbA{g%pLz1goI|hP6lafdrTKFCkx(s=dH7ojU+7$p)AftQ zDO-27Snl7G-b-Xo78aoQ;$VQ%mx_nmDQ1~CDx&!1V!xhZRyeyYY+MOb-|wi*Dw$JP z$i>y-^l841yd(DaHKO<8(AMs+asE9d)O+GYCB?rFQ{-*_bgl3KskKfXq0LA?5NFF8 zkbB2<@Op6~ADQ0ZxDGAc=-5^qeycu$eSDwaNW(~xSU{a5r_7r zdaL6OQf`|#`kFdy7yG?*><)3JomTCHY1LoQfjpTLwOn?WIP4(GyTx%E>AgpsEFdO( zVe1gSC4Vk+qJ(>4zRZK%C<>hi2AW;zxE}W^arO#Bxmx_|c`j8ePE>IxtP=;ePO4rU*~`ubacpyp zM#uSdag#VaMn5%+y(X^JA`ZS_+_gHkdDMAv)ImMl9Jk`%?tIz^XNNdhKpuBGyS)~pEzSr9|pwfTC(e!IIxF0*BuvgvmbOmg~-MYacHmSZ;BHq z=;)Fe; zdn_I)pjA)A(Ex(>)Y+>MlxO1f8J+_@7ss6x|3d87lNc|Z-4^)#CXOr_UOBEHNpglq zC4QumGIPa=DtN3oILvdqpI!nnSDdak z!591X2>MNN{OuPAzT+}3yFlzUb9Y+^bJ@SqX^UhI+PI4?cJAk}FA*m#wr`6=`&zzK zoN1?3%f!AdtzQmX%o$iKWX{^Ipq0-36sfsNE`LRgR)f3}`gh2^9DG}A@vb=4PXO15 z16!o{o;d2`n(vFfHfD@##qkBAv(EYNr|us(|7OJYLs*XvDp7p?V^PLugYXu2qK$G- z+XMfP9DhZ9KNcrWk;k8igM4~;lQ?Q)$ZQs8kM*JVV&5LJY=ybf_sHrtnZqJ#yj`5! zM~!zlciSC9s1f1JBs2~3f<7!0Mt?-1K^=e-|~Lyr3hRfXe&1oNmkxsL``iqp26 zxJsNUyMo}0;}$AkD^B&|Qzs6}@u`QY!F}YRLFQxu{n04S_7lJ+vDZv3o8|sJiqIl+ zaFE+yt2nujN$7c)G@F9h%lQ^#w_OgtZIpHWA}zQ>fxSo6Q;=PxZjdFI82fa!Bl-5 zN-t}s439hFR4-|J7q)=WE%#*hnux%C$IaC1fjF$X(XKlq&O6=EDeke{i(vDdCYd>WUi-Z02dsZCUgOtbO;cm)#;_P*(@>Az; zvz=$+^cOVix%06_2`|K1dmsPuwGVfL-yBzy2Crb+avLSi$&BJ(2w<-8e{#{WAT#`= zy{jE3^L4Uzyv#ub?Vcdc+M}n5;@EcjO>#bm8KRS&-R{p*V7n$bRru#Kh`c;}+v7gn zdE4UD8RDptnZr!+u-;S~6>nIAf0k=8B^ejGK8dSN)FQyy?u9Hi-xY@?D9jq?f0Cj3p0ihCe;*!AOxpyw z);Z7ABvNO1a7@JGD+Xy?DqJ5quB4I%#Xw)hfV5XM%!eB{zIYh17l+?4ySOG!+d{|d z;^7XGeGs-xVE%UFwVCMO6bJiBoLl19zN8O{M{K#uZRcZ4-|vVMw(saJOp({gvwP0{ z5Q%nQ9JkR;55#HP?)OlfIp2xgi^F`1f8=~>iLwV%o=&amwByJrhUgDDydNZK4ZafEwl{so72;&^v*Prpu-3I;yanMedO@yrm z+_Wai?Afl7$uj>$9j7=Cnlx1&*=qWCnmDq(#nZ*%l`6#Ev2DhgDbCnyv{~Y$t@)ZQ z4hB%LInKY1OU)HWmuSd5n2HVdA@(w-u3(<;+-+&d0&%vHHZ2r;_MCB%*zYHY7r*vD zjo6D5$4G;>#mPddv(#|`DYQ)N*{X}>;*n}*nJdJBt>#}T_Ir`VRpRI}chuG5jJ+~> z$N9I@Rqr~VM&^xc#Hj)5@E%MPf9gldS&xhiund+~1 z{9ME zz9k%m`O_h?vQp-3HSSg7MAdn8UK})#2DRd(Z62)?dsm1}y*RA3IlbeP2uLGr4L^&{ zgT&x>sJti_jd!jm=;~H+wzdldl?;lAC@$d8xQn>cY$a;6ep?}=)Z`E z?OpOC=hJ)&jTeU}IuLnrU{mpwIDM4ph0dpeh{Z6ya0gi%mN~xEj>?PuUJ~N5IFV1? 
zo`{2fy5y-i+kj3zgGcLJNr<1j4~}!)7vgk2Is8%_*-Yd&=hML`ekBg+EL{ym8{VnQ)DVssDW@G!tY_RHHi+#mPdNJ4u|i1rU?P=^8Y2ia68G zY-OrAtb?bCgKjiyx??+6VTSWRLVIV5(~ZnTX2De9I-#EJ%twjD9C5OcZksC(ZT-zW zapp?|TVlBC~&$FDP$|!&7L_ zQt_|$u4|b%?qoi=#v6%N}PxL4WTRr|r~?&&9vm2BUm&qMF-d zq4W8Yx)s6n&KpR*^Qh!bRsxUq{1FnfR3Bt)i+Z^@v{lZB#7TRPSs@OJzeViDiBg2W z(%IV>lT~8R_VQPY!>`EXT5&W$Q|e&LIuf=Xq-Osbsh7h@6%}X{`#m@}iNhjBRkJv< z$E+>Rzq|##f9+2noEN7}p4(t9bcfN?E^}mi8#=^sC-v$S`@IZ-F0t2!V0DYbiw97A zadeqW^}!VRH%cFH?w636YvN=9#a|a^ZEyFWIMs*D-+=kkhXnAZb1x)-x5RNP#SFo8 z$G<@EWla}wp*!Lcd;EV_oTx(l?};NjGv>bYZzdiO#HlN!(?iGo#P}C+;ux`c1nUMt z&^&z*+CH*`IDL|WQsPVlIUR~e>`8g-e2y|^hQ--tlrAgwd+D^t;^6ER6yN!up~Ig# z`v5nlXU^VRkKjAIZC-fcxQBSYgz1v0$UQ{=4HBJKa>zDONY3L?hp}iq#8Vruo6C$9 zCu+E7j1z|?6g?iMkXtCi1esGdznLfwipjD`;;01OpDg#E!70w&R>V#fXWPk$Y2v`n z8JR9l)F2Ksoc&7@dZsvS&n0GwQ??v>wm3dVb>=w#gS2TbOfCP!L~Wk%XT0BZ9)~G$ zzT;vB(E@Q;M+g==`#Ge0kvP79^eh&q2gtG|;$fR;zb#IbQ07u`bdmAB45kJ*xJNIS zIjEvGD;(Qvot2JHG@|t4#25Il7ROidc?TwXGthZi{eJFzYsBG!tLVJg>ww=E2lkqJ zEo_0JH`dAQ+l2Z9=iWq5ekhLmxz((PNAtOcAaBqIaX+cD@%0C%P9ygEVE7_K^JAC` zJRn&5u=`P*`s>fc9N4#^zbys|Lwc>O>saYrX`{~(wXK!TCG>AQW1kost4%2N-;>2;vX%?rhkk&1*^)i=jl{v1V zh3CbYv&e3nI66(QwL6~@l)pp#tDVr*30pBFdzW)RMr*sp348wD14|fD-adWc*~wP} z|NjGq&^3J!RJEb^j%^F^pg26l6>f-Q+g5qg`CO(CZ#h0lCJi|*AiTH5zRgPRh|{*t z;I88a?Dt?AFvz9uJF~6tcmR)9`V_hJ(D^V4_(k{=E|26Mln@C|Jlsx~B*f{{XmHB; zw9~>+oVEAKu{gX+*ABys=zoLSJ8KDP_gEa+caJCHxP{5)Q?Xxlv5^fo59osj&bz-mS8u?>3o-m|kob{zRAUkVkCC=I#`ytC7|`anQtY-sSiT(~jMaD;Upv z#9=q3?-fTU2=nL8r|2rGFU}sL{|cSmw!;-U-p{p)#i4DwDRF$BG%AH@$aPY`T;}u% zQuB~F_=ZfVa4tx}QD^>=D^-g9Ub>@79ND9&YH`{&i`6HE8-8%@_N2&@N6KpvE1}UQ>+b zi?g=WuS*;?FmAiW@j)geJ>sB%`%9nmX(YD>9QQKkyCx28>T_M3E#_K-V(+X;zBu@j zp?A}99d)<`)A#q7Mh?lGC??Nti^BueD85{V_{+O8duM6kJ#l2u`0k4{wsP} z=gFwzhxSx9R~*?&$+0js`$zO1qJJOaJYEh#9hI3Nj>{N!6UC9K*(9-_&rqA}?3LK3 zh*Lcz>r}DVOy#FJe>?eYx;SW~GBd>4eeg_iYJj0UOB^3$kjxf)wtjYwI6Oet&lM-^ z_2fKfFJPFy3DcCj2+Mq#J===Dz_BeLSt!n)Bcm5N`(^rdu{bQ_?zzNqC#v)|JbL#z zj7l%n2f-2AwG1{%;BS{Z|2{^>3UTxm@~~1oZ0{FV$^A#Fvsz})1nnKMUr%-3mCIjo zStE0BfhfNxPM;$y-xnusL&jQhqK1xL2W#TRfcZclWNksoxJ&p~(tEc&B3lcyN1Se8fbA8B`E9V6T`?j&Z zQk=FU)T_k7Mf#&!oVB%PweV=D$`7LU{D*S>L^$i^leSGu4bI<=5Ns6t^<1_|9NKI4 zX2-`UzeViXN{Ci*GQSSFclM4JVjg8qwm*wza_5@*iR z6i=M$!#^SRZSi1A9NLPT5T^K_$(PufYjGbI2Nl#ZD^8aql#k{9BYO2jX1|`B&r`>C z=E^h2SLu-F;#3cL@j{%a;dc2_9N1xbzlqbfboUiZZHCaCod2bEQ^#D8M>=uqH^{ws zxPx2HIB~L&QpSrTJIZDPY+XjLP82@m-a1Jh=@SHGvN*7%FH^+XR)RTI>^0G=)5Niz z+%nzy+q1D5;_x^;)A31kV3s&4YDVwHaVJ%n>|o8s^Yc|0HH zhBh9#mo<@3yB9j|9@1lxI9r3(E*3{8NXsQKe>oGem({Zy^HOoDpHMCnhj!Nba&c^% z&{l|}E+loO_}4y?Zxu{W-et(HmN__PVlVc-rAhCK{XQ~ijX3`5B5Lpa?bi0bvmc=f zYsJX|#@9N>gSguLHlg)mKc5meIPRnd8^v)OUGtGR{WW*3kKs{KIzkVB zq7MRFgTG1a+lI8w;*_1pyG5KmM)bEjw)gwn#9lx7v|Sw8{-_<|xR1WyDbCpJAWs~e zrV_ivBSrY{7KgSNeUCV4-{mNe(l2D zxI~<^BeF}KPc@^b-0=xS_0Vho67*gioS@&1ihu3nZ!4YMmOWRAGq&iwTAZ+#C$(bF z4iTwy{@ql$UL0N^x((tu9}Q^~r@HCRCb4Jhj+&i+88?U)amJ3#ZxttPJN|ibU<(u5 z#8DI7+%ER(N%xM|{Zo-xnoxL^eEid=Y_u=6vkU zsSkWR@9{Tr(w;WFa`t}WmGgA8iT@tWhtT|lZG{~x_UoCVjuVHr?_|6fr$>MAW-7`h(H6ue)osT8nG;v^u{!AB#RV4Ea$1P00W{NX*Q0*+S ze~Nm}hKc##z&Xz49G9Id_L?bXo;cM{y1eOp?Dsw9JG*V?Ss+f=5V?hp&(M@b;;w-AnrrXK$lxS33VnWNVdUn?J7> z58EpKcbxyX477K}Q6;zEHR6#P+VY+_S%Ce0vDZxQuNC{Y!(<&ySA9?XKX7JyzW$*& zs$~SN7YBXZi#LelOEhJp*fW*>NSwBn10Re1eD0y2h(}8MP=3diG<~!4u}#%m#ECM( zwH2m1_xaOpGJCBQzg--(Gkw`1&Qx=$opS$)4$PA|VWsa9M;FNU-Qw^JE!iXXnz+(l zabPph&mGq>{PM-o0fuy;v)d1~6p7Qda4{^gAyG@5(_tb}3RC@`5S4P7M=ELgA#u`n z2~{|E+s=1X9CgvGO2-9^n=0pT8&#^EzpZAjg(?0PO{|kSbBe3hi^C3vP=h$Ox2BEa z#3g*1oPRTV+AI!iQ(22Rw47}f`!>xv4|AzOMp>K8aVt5}E>7EfqYiOchro2gqdihX z-MaV>e>+6acFQNQyiia-~p?l81nK{^f=ifn&KY%TuH2tB> 
z@i~h8#kts5oJZo&PE_>7UN_ZDIG+}(nG(k~K@Oe$2uT||{d^C=#S zy~`xS6LG4Cu6+tqwYzBFGno_i#rnB8V}~=m5QkM1^U|?h>Njz0KWXv`rkLy8g>wG) zsMJU2k=$HyY(IxJ*74Ot$i6sJ@fEV~>=zjl6U0F^MNWiAlfhnZP2xY4KS=LPc0N6f z?kVD^lN_8Xm)lHprpfGCe5Q-Tc2j+E+{wtADfgdf$Sj%tdirvjSTuRxK8%Y%Amvai*TYz3qJL zyUJ3Smfz-D%bdB5GM9_}dUS1tIMqXQS33JC@@17cIETKh7RQ&!!FR-&3d(uc`Pk-z zHR8}_pYMr>+bd9g=hMdBW34!DW&W^EoIS@~;sdc~zy9-~I9*HK){8T(y~w_Jq>Mh_ zC=Ln{$d4S`>#C2%zTHJX5vOa&l1<{!o`G%_$2CNKi#TEHSGJ0y^R#!HINeOYZHFz# zao^$GYw7)+V((jA@|=D2j5KjjNz->bw$u3ch@+N+=)O2SMkakO_F8FHzBsv$n{J^v zuy;a5j@ubS#p1BM5ZM<;bws^XoTzO;_nl8C*F7Xo+l!G3an@c^9Tf-P&^?vnL=j`R zN}RH-nbj~&zfU&Q%Iw!uOx5%JQ7C(q=hHW zT8bo`&oPuF<=76%3&olJs8B2p_oG_F;*nZLbXFX=Y{@VcrfNSi2&Ty#l%Xlpoy!3t zF+=RxLEAIM{s96#%ds6>Hd`Ei!JTK0<1e|?TzFKw&yv6M_>UzO&3e=M*so;F7sp5V z+Xdo`oiV=9`COz9i^M?*Lt?Sx0rYN(IJO!4+v03B(OL?RUiT>dyi6ZN_BC_4*f048 z;TMnCq;aJqN2N5&QPTK<_&4r}b;Z$pXgrdoYdsj(N-bGJDM^ z=UT^IxU3V$XGql##OYcZ@S!+it83SbqaL(jgY&;c95;%CVp8uTacGb4KNkDF_oQ6VM8A*on>@g)Qm9N}1zU@}x@UK~key=D>Dt)rvD^ zlwK$H>e$WIfm1s&c|KYN?z!v#=+`p<~;u`-?bphB6<)H07^E#gp0V;MSiI2ewHn%_v`r!;@U= zH*w0&4SNMs{qGRloc}Yb{hqCi&lLv;Y1UYAY$pJZbKFlVj~Dy-^zsC8sv9ktC=Tp+ zs7Ye)GRZbsoGdi$7iVn=##HghA#VQD#PLxRf5)d0=NYi&EO+6VG6#pd(0-YRC~>yT ziSvx%Iq+z@+YjZ=mCsLf&OG_V?c9~#lzEVl&zIS|Krsu%eh>Y+P#hk|eUal!jEKeJ zgiQ&TIC~GG__jDxMx&OBy$7wMN>XH%bI)f$uXbEU zw!b6J*q6?Co&SM0WM3RrQqA|o{uORM@537Oe6Us@1oedozc?PX7$N=ifj*yTpNQ zBj|Q)=gaqq<8pesPwcf2#{qHlHNAdKoVK-=*Bv)7Dh9=wuc*%r=if z$j-bSD-KUH*2jr6&8Yi$XTQXyCWzxA#B3t04Tt;Kb|a(?1ASL_XqtX##C{( z)?{D&YcFk?E>7B0^cmvdEK)cV)<8hvXSomTfc4qpbO9MW$MG2|G1u`od0Oy8D? z13S`Yxi~t3j;;{L_Ol%;#pzmVyh`lbQlHf@m->Ooy(6>NL@&JS*j5j%5eIGfyyw^w z`+afR-W08cDdrZVY@N(r2h)cSoVz`o{7{^*RW9qrVHq8@K^z}s#BGE}JHLjAe#Cz$ z^LwTwAIm4zgW`Q64$dG*o5a5TqTOa^Kg^$Qft4l5#8&=8IX}^Z+vMYQFc!Cq{d#0~ zhd5;?FYgpb2MJ1^IJU)(yTsW>2G?$}pHFY@5hv_t1NMrCZEE&8JX#0)rkAe|f(mXk zg^ulq-ipNW1$>Id*^{Jy2|W78XPH`*>VwEu7L~vL;0yBkkUsG2344V&z6u`|XO9z^ zO0nNZa#V>!TR2iJjyf6Rwc@lrj;RxS9fYIa`IL|a4dS?j5H~u$RFC#Mwmrqo;*7ln zX%YMR__T^cdoXfd99j6<#6dCL-Y(8m60r`k_cddu)A_WLDqZ5pzJqs*6SlCq$FUs; z(kD*!l4b+qOas%DYvQn){<$uW4)eEz;FM zsDtR;5ogTjt~g;oN_$V7vOTu<#i1Ro^#CR-@A4hwq0C+vvHrzzKm16XvfmZ(#KGxO z@7QY>@s_S0U=#R+>jyh0p& zLyxQ!r)x;GRpJr*?W@&d&t3$+<9vDs=A9m2be)t&P2?cC)&anQzX zeV5~Qs-hFq0n?HnsKQa1ecQxR>0Iob<|=Vy^Z#mbY@5Yv#hKGc zR-O1)`z5}5c(ezr`Jh1`_@}5(qd0qvIyX74X0$Xrwqt2p#G$PiY!ycZ@OhYe4wBhz zGJBV~Q@4xLcED|iI8#skJDpDl+0!LX7LcCZFvSd=L-l2j%5dou$MuYm0ddM6_+AqS z_QMm`#i9Mo;-EOPDbo#@B7bJ!+?3gCXYAe*2bZu9iNhlV=(ad+Bp>fM|7N7*t~hmt z9=shS~NX(cK*vTi*lBY&HHqB-haCC+dux@`uw5` zzu$4K=s*7VoSc<^mp^80$(Y=VG2ZOzF=HCWOl}!7rhUwq?lEJojmfz=X3U*2WAbyy zl;n=7$jzzF9n+Ybb1gULX6~3fxw#K=|L^O6V-mT)o1G~5V)k(E@0S0=_P<~8H200= zpRIVAJ9hb7pRFi-<9Ew<{9)6Ik~eZH-pD!mMh^cP-pFZvBWK2x3FH5`>C^3bf6V)D OQ*x${nVqvZ=l=oIP5B%E literal 0 HcmV?d00001 diff --git a/tokenizers/mistral/tokenizer_config.json b/tokenizers/mistral/tokenizer_config.json new file mode 100644 index 0000000..2b3ca46 --- /dev/null +++ b/tokenizers/mistral/tokenizer_config.json @@ -0,0 +1,106 @@ +{ + "add_bos_token": true, + "add_eos_token": false, + "added_tokens_decoder": { + "0": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "1": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "2": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32000": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": 
diff --git a/utils/__init__.py b/utils/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/utils/basic.py b/utils/basic.py
new file mode 100644
index 0000000..b482c2a
--- /dev/null
+++ b/utils/basic.py
@@ -0,0 +1,309 @@
+import numpy as np
+import io
+import os
+import json
+import logging
+import random
+import time
+from collections import defaultdict, deque
+import datetime
+from pathlib import Path
+from typing import List, Union
+import itertools
+
+import torch
+import torch.distributed as dist
+from .dist import is_dist_avail_and_initialized
+
+
+logger = logging.getLogger(__name__)
+
+
+def average_dicts(dicts):
+    """Average a dict of per-medium loss dicts key-wise; a key that appears
+    in only some media is averaged over the media that contain it."""
+    res = {}
+    counter = {}
+    for medium, medium_dict in dicts.items():
+        for loss_key, loss_value in medium_dict.items():
+            if loss_key not in res:
+                res[loss_key] = loss_value
+                counter[loss_key] = 1
+            else:
+                res[loss_key] += loss_value
+                counter[loss_key] += 1
+    for k in res:
+        res[k] = res[k] / counter[k]
+    return res
+
+
+class SmoothedValue(object):
+    """Track a series of values and provide access to smoothed values over a
+    window or the global series average.
+    """
+
+    def __init__(self, window=20, fmt=None):
+        if fmt is None:
+            fmt = "{median:.4f} ({global_avg:.4f})"
+        self.deque = deque(maxlen=window)
+        self.total = 0.0
+        self.count = 0
+        self.fmt = fmt
+
+    def update(self, value, n=1):
+        self.deque.append(value)
+        self.count += n
+        self.total += value * n
+
+    def synchronize_between_processes(self):
+        """
+        Warning: does not synchronize the deque!
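+        Only `count` and `total` are all-reduced across ranks, so after this
+        call `global_avg` is consistent across processes, while the windowed
+        statistics (median/avg/max/value) remain local to each rank.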
+ """ + if not is_dist_avail_and_initialized(): + return + t = torch.tensor([self.count, self.total], + dtype=torch.float64, device='cuda') + dist.barrier() + dist.all_reduce(t) + t = t.tolist() + self.count = int(t[0]) + self.total = t[1] + + @property + def median(self): + d = torch.tensor(list(self.deque)) + return d.median().item() + + @property + def avg(self): + d = torch.tensor(list(self.deque), dtype=torch.float32) + return d.mean().item() + + @property + def global_avg(self): + return self.total / self.count + + @property + def max(self): + return max(self.deque) + + @property + def value(self): + return self.deque[-1] + + def __str__(self): + return self.fmt.format( + median=self.median, + avg=self.avg, + global_avg=self.global_avg, + max=self.max, + value=self.value) + + +class MetricLogger(object): + def __init__(self, delimiter="\t"): + self.meters = defaultdict(SmoothedValue) + self.delimiter = delimiter + + def update(self, **kwargs): + for k, v in kwargs.items(): + if isinstance(v, torch.Tensor): + v = v.item() + assert isinstance(v, (float, int)) + self.meters[k].update(v) + + def __getattr__(self, attr): + if attr in self.meters: + return self.meters[attr] + if attr in self.__dict__: + return self.__dict__[attr] + raise AttributeError("'{}' object has no attribute '{}'".format( + type(self).__name__, attr)) + + def __str__(self): + loss_str = [] + for name, meter in self.meters.items(): + if meter.count == 0: # skip empty meter + loss_str.append( + "{}: {}".format(name, "No data") + ) + else: + loss_str.append( + "{}: {}".format(name, str(meter)) + ) + return self.delimiter.join(loss_str) + + def global_avg(self): + loss_str = [] + for name, meter in self.meters.items(): + if meter.count == 0: + loss_str.append( + "{}: {}".format(name, "No data") + ) + else: + loss_str.append( + "{}: {:.4f}".format(name, meter.global_avg) + ) + return self.delimiter.join(loss_str) + + def get_global_avg_dict(self, prefix=""): + """include a separator (e.g., `/`, or "_") at the end of `prefix`""" + d = {f"{prefix}{k}": m.global_avg if m.count > 0 else 0. 
for k, m in self.meters.items()} + return d + + def synchronize_between_processes(self): + for meter in self.meters.values(): + meter.synchronize_between_processes() + + def add_meter(self, name, meter): + self.meters[name] = meter + + def log_every(self, iterable, log_freq, header=None): + i = 0 + if not header: + header = '' + start_time = time.time() + end = time.time() + iter_time = SmoothedValue(fmt='{avg:.4f}') + data_time = SmoothedValue(fmt='{avg:.4f}') + space_fmt = ':' + str(len(str(len(iterable)))) + 'd' + log_msg = [ + header, + '[{0' + space_fmt + '}/{1}]', + 'eta: {eta}\n', + '{meters}\n', + 'time: {time}', + 'data: {data}' + ] + if torch.cuda.is_available(): + log_msg.append('max mem: {memory:.0f} res mem: {res_mem:.0f}') + log_msg = self.delimiter.join(log_msg) + MB = 1024.0 * 1024.0 + for obj in iterable: + data_time.update(time.time() - end) + yield obj + iter_time.update(time.time() - end) + if i % log_freq == 0 or i == len(iterable) - 1: + eta_seconds = iter_time.global_avg * (len(iterable) - i) + eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) + if torch.cuda.is_available(): + logger.info(log_msg.format( + i, len(iterable), eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time), + memory=torch.cuda.max_memory_allocated() / MB, + res_mem=torch.cuda.max_memory_reserved() / MB, + )) + else: + logger.info(log_msg.format( + i, len(iterable), eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time))) + i += 1 + end = time.time() + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + logger.info('{} Total time: {} ({:.4f} s / it)'.format( + header, total_time_str, total_time / len(iterable))) + + +class AttrDict(dict): + def __init__(self, *args, **kwargs): + super(AttrDict, self).__init__(*args, **kwargs) + self.__dict__ = self + + +def compute_acc(logits, label, reduction='mean'): + ret = (torch.argmax(logits, dim=1) == label).float() + if reduction == 'none': + return ret.detach() + elif reduction == 'mean': + return ret.mean().item() + + +def compute_n_params(model, return_str=True): + tot = 0 + for p in model.parameters(): + w = 1 + for x in p.shape: + w *= x + tot += w + if return_str: + if tot >= 1e6: + return '{:.1f}M'.format(tot / 1e6) + else: + return '{:.1f}K'.format(tot / 1e3) + else: + return tot + + +def setup_seed(seed): + torch.manual_seed(seed) + np.random.seed(seed) + random.seed(seed) + + +def remove_files_if_exist(file_paths): + for fp in file_paths: + if os.path.isfile(fp): + os.remove(fp) + + +def save_json(data, filename, save_pretty=False, sort_keys=False): + with open(filename, "w") as f: + if save_pretty: + f.write(json.dumps(data, indent=4, sort_keys=sort_keys)) + else: + json.dump(data, f) + + +def load_json(filename): + with open(filename, "r") as f: + return json.load(f) + + +def flat_list_of_lists(l): + """flatten a list of lists [[1,2], [3,4]] to [1,2,3,4]""" + return [item for sublist in l for item in sublist] + + +def find_files_by_suffix_recursively(root: str, suffix: Union[str, List[str]]): + """ + Args: + root: path to the directory to start search files + suffix: any str as suffix, or can match multiple such strings + when input is List[str]. + Example 1, e.g., suffix: `.jpg` or [`.jpg`, `.png`] + Example 2, e.g., use a `*` in the `suffix`: `START*.jpg.`. 
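+        Example 3 (an illustrative sketch, paths are made up):
+        find_files_by_suffix_recursively("logs", [".log", ".txt"]) returns a
+        flat list of pathlib.Path objects collected via Path("logs").rglob.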
+ """ + if isinstance(suffix, str): + suffix = [suffix, ] + filepaths = flat_list_of_lists( + [list(Path(root).rglob(f"*{e}")) for e in suffix]) + return filepaths + + +def match_key_and_shape(state_dict1, state_dict2): + keys1 = set(state_dict1.keys()) + keys2 = set(state_dict2.keys()) + print(f"keys1 - keys2: {keys1 - keys2}") + print(f"keys2 - keys1: {keys2 - keys1}") + + mismatch = 0 + for k in list(keys1): + if state_dict1[k].shape != state_dict2[k].shape: + print( + f"k={k}, state_dict1[k].shape={state_dict1[k].shape}, state_dict2[k].shape={state_dict2[k].shape}") + mismatch += 1 + print(f"mismatch {mismatch}") + + +def merge_dicts(list_dicts): + merged_dict = list_dicts[0].copy() + for i in range(1, len(list_dicts)): + merged_dict.update(list_dicts[i]) + return merged_dict diff --git a/utils/dist.py b/utils/dist.py new file mode 100644 index 0000000..f8d92e2 --- /dev/null +++ b/utils/dist.py @@ -0,0 +1,25 @@ +import torch.distributed as dist + + +def is_dist_avail_and_initialized(): + if not dist.is_available(): + return False + if not dist.is_initialized(): + return False + return True + + +def get_world_size(): + if not is_dist_avail_and_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank(): + if not is_dist_avail_and_initialized(): + return 0 + return dist.get_rank() + + +def is_main_process(): + return get_rank() == 0 \ No newline at end of file diff --git a/utils/easydict.py b/utils/easydict.py new file mode 100644 index 0000000..241aca4 --- /dev/null +++ b/utils/easydict.py @@ -0,0 +1,149 @@ +class EasyDict(dict): + """ + Get attributes + + >>> d = EasyDict({'foo':3}) + >>> d['foo'] + 3 + >>> d.foo + 3 + >>> d.bar + Traceback (most recent call last): + ... + AttributeError: 'EasyDict' object has no attribute 'bar' + + Works recursively + + >>> d = EasyDict({'foo':3, 'bar':{'x':1, 'y':2}}) + >>> isinstance(d.bar, dict) + True + >>> d.bar.x + 1 + + Bullet-proof + + >>> EasyDict({}) + {} + >>> EasyDict(d={}) + {} + >>> EasyDict(None) + {} + >>> d = {'a': 1} + >>> EasyDict(**d) + {'a': 1} + + Set attributes + + >>> d = EasyDict() + >>> d.foo = 3 + >>> d.foo + 3 + >>> d.bar = {'prop': 'value'} + >>> d.bar.prop + 'value' + >>> d + {'foo': 3, 'bar': {'prop': 'value'}} + >>> d.bar.prop = 'newer' + >>> d.bar.prop + 'newer' + + + Values extraction + + >>> d = EasyDict({'foo':0, 'bar':[{'x':1, 'y':2}, {'x':3, 'y':4}]}) + >>> isinstance(d.bar, list) + True + >>> from operator import attrgetter + >>> map(attrgetter('x'), d.bar) + [1, 3] + >>> map(attrgetter('y'), d.bar) + [2, 4] + >>> d = EasyDict() + >>> d.keys() + [] + >>> d = EasyDict(foo=3, bar=dict(x=1, y=2)) + >>> d.foo + 3 + >>> d.bar.x + 1 + + Still like a dict though + + >>> o = EasyDict({'clean':True}) + >>> o.items() + [('clean', True)] + + And like a class + + >>> class Flower(EasyDict): + ... power = 1 + ... + >>> f = Flower() + >>> f.power + 1 + >>> f = Flower({'height': 12}) + >>> f.height + 12 + >>> f['power'] + 1 + >>> sorted(f.keys()) + ['height', 'power'] + + update and pop items + >>> d = EasyDict(a=1, b='2') + >>> e = EasyDict(c=3.0, a=9.0) + >>> d.update(e) + >>> d.c + 3.0 + >>> d['c'] + 3.0 + >>> d.get('c') + 3.0 + >>> d.update(a=4, b=4) + >>> d.b + 4 + >>> d.pop('a') + 4 + >>> d.a + Traceback (most recent call last): + ... 
+    AttributeError: 'EasyDict' object has no attribute 'a'
+    """
+
+    def __init__(self, d=None, **kwargs):
+        if d is None:
+            d = {}
+        if kwargs:
+            d.update(**kwargs)
+        for k, v in d.items():
+            setattr(self, k, v)
+        # Class attributes
+        for k in self.__class__.__dict__.keys():
+            if not (k.startswith("__") and k.endswith("__")) and k not in ("update", "pop"):
+                setattr(self, k, getattr(self, k))
+
+    def __setattr__(self, name, value):
+        if isinstance(value, (list, tuple)):
+            value = [self.__class__(x) if isinstance(x, dict) else x for x in value]
+        elif isinstance(value, dict) and not isinstance(value, self.__class__):
+            value = self.__class__(value)
+        super(EasyDict, self).__setattr__(name, value)
+        super(EasyDict, self).__setitem__(name, value)
+
+    __setitem__ = __setattr__
+
+    def update(self, e=None, **f):
+        d = e or dict()
+        d.update(f)
+        for k in d:
+            setattr(self, k, d[k])
+
+    def pop(self, k, d=None):
+        if hasattr(self, k):
+            delattr(self, k)
+        return super(EasyDict, self).pop(k, d)
+
+
+if __name__ == "__main__":
+    import doctest
+    doctest.testmod()
diff --git a/utils/init.py b/utils/init.py
new file mode 100644
index 0000000..cad40fa
--- /dev/null
+++ b/utils/init.py
@@ -0,0 +1,154 @@
+import os
+import torch
+import random
+import pyhocon
+import datetime
+import json
+import subprocess
+import itertools
+import glob
+import glog as log
+import sys
+import re
+from os import path as osp
+import numpy as np
+
+
+# def load_runner(config, tokenizer, vocab_size):
+#     if config['task'] == 'avsd':
+#         return AVSDRunner(config, tokenizer, vocab_size)
+#     if config['task'] == 'simmc':
+#         return SIMMCRunner(config, tokenizer, vocab_size)
+#     elif config['task'] == 'nextqa':
+#         return NEXTQARunner(config, tokenizer, vocab_size)
+#     else:
+#         raise ValueError
+
+
+
+
+def set_random_seed(random_seed):
+    torch.manual_seed(random_seed)
+    torch.cuda.manual_seed(random_seed)
+    random.seed(random_seed)
+    np.random.seed(random_seed)
+
+
+def copy_file_to_log(log_dir):
+    dirs_to_cp = ['.', 'config', 'datasets', 'runners', 'models']
+    files_to_cp = ['*.py', '*.json', '*.sh', '*.conf']
+    for dir_name in dirs_to_cp:
+        dir_name = osp.join(log_dir, 'code', dir_name)
+        if not osp.exists(dir_name):
+            os.makedirs(dir_name)
+    for dir_name, file_name in itertools.product(dirs_to_cp, files_to_cp):
+        filename = osp.join(dir_name, file_name)
+        if len(glob.glob(filename)) > 0:
+            os.system(f'cp {filename} {osp.join(log_dir, "code", dir_name)}')
+    log.info(f'Files copied to {osp.join(log_dir, "code")}')
+
+
+def set_log_file(fname, file_only=False):
+    # if fname already exists, find all log files under the log dir,
+    # and name the current log file with a new number
+    if osp.exists(fname):
+        prefix, suffix = osp.splitext(fname)
+        log_files = glob.glob(prefix + '*' + suffix)
+        count = 0
+        for log_file in log_files:
+            num = re.search(r'(\d+)', log_file)
+            if num is not None:
+                num = int(num.group(0))
+                count = max(num, count)
+        fname = fname.replace(suffix, str(count + 1) + suffix)
+    # set log file
+    # simple tricks for duplicating the logging destination in the logging module, such as:
+    # logging.getLogger().addHandler(logging.FileHandler(filename))
+    # do NOT work well here, because Python traceback messages (not sent via the logging
+    # module) would not reach the file. The following solution (copied from:
+    # https://stackoverflow.com/questions/616645) is a little more complicated, but it
+    # simulates exactly the "tee" command in a linux shell, and it redirects everything.
+    if file_only:
+        # we only output messages to file, and stdout/stderr receives nothing.
+ # this feature is designed for executing the script via ssh: + # since ssh has a windowing kind of flow control, i.e., if the controller does not read data from a + # ssh channel and its buffer fills up, the execution machine will not be able to write anything into the + # channel and the process will be set to sleeping (S) status until someone reads all data from the channel. + # this is not desired since we do not want to read stdout/stderr from the controller machine. + # so, here we use a simple solution: disable output to stdout/stderr and only output messages to log file. + log.logger.handlers[0].stream = log.handler.stream = sys.stdout = sys.stderr = f = open(fname, 'w', buffering=1) + else: + # we output messages to both file and stdout/stderr + tee = subprocess.Popen(['tee', fname], stdin=subprocess.PIPE) + os.dup2(tee.stdin.fileno(), sys.stdout.fileno()) + os.dup2(tee.stdin.fileno(), sys.stderr.fileno()) + + +def set_training_steps(config, num_samples): + if config['parallel'] and config['dp_type'] == 'dp': + config['num_iter_per_epoch'] = int(np.ceil(num_samples / config['batch_size'])) + else: + config['num_iter_per_epoch'] = int(np.ceil(num_samples / (config['batch_size'] * config['num_gpus']))) + if 'train_steps' not in config: + config['train_steps'] = config['num_iter_per_epoch'] * config['num_epochs'] + if 'warmup_steps' not in config: + config['warmup_steps'] = int(config['train_steps'] * config['warmup_ratio']) + return config + + +def initialize_from_env(model, mode, stage, eval_dir, tag=''): + + if mode in ['train', 'debug']: + path_config = f"config/{model}_{stage}.conf" + config = pyhocon.ConfigFactory.parse_file(path_config)[stage] + else: + path_config = os.path.join(eval_dir, f'{model}_{stage}.conf') + config = pyhocon.ConfigFactory.parse_file(path_config)[stage] + config['log_dir'] = eval_dir + + if "CUDA_VISIBLE_DEVICES" in os.environ: + config['num_gpus'] = len(os.environ["CUDA_VISIBLE_DEVICES"].split(',')) + # multi-gpu setting + if config['num_gpus'] > 1: + os.environ['MASTER_ADDR'] = 'localhost' + os.environ["MASTER_PORT"] = str(config['master_port']) + else: + config['num_gpus'] = 1 + + model += '-' + config.llm_name.replace('/', '_') + + if mode == 'debug': + model += '_debug' + + if tag: + model += '-' + tag + if mode != 'generate': + config["log_dir"] = os.path.join(config["log_dir"], model) + if not os.path.exists(config["log_dir"]): + os.makedirs(config["log_dir"]) + # copy the config file + os.system(f'cp {path_config} {config["log_dir"]}') + + config['timestamp'] = datetime.datetime.now().strftime('%m%d-%H%M%S') + + config['expert_config'] = config['bert_config_{}'.format(config['expert_size'])] + config['expert_config_json'] = json.load(open(config['expert_config'], 'r')) + + config['beit_config_json'] = json.load(open(config['beit_config'], 'r')) + + + config['model'] = model + config['stage'] = stage + config['loss_dict'] = {k:v for k,v in zip(config['loss_names'], config['loss_weights'])} + + return config + + +def set_training_steps(config, num_samples, batch_sizes): + config['num_iter_per_epoch'] = sum([int(np.ceil(num_sample / (bs * config['accum_grad_every'] * config['num_gpus']))) for num_sample, bs in zip(num_samples, batch_sizes)]) + if 'num_training_steps' not in config: + config['num_training_steps'] = config['num_iter_per_epoch'] * config['epochs'] + if 'num_warmup_steps' not in config: + config['num_warmup_steps'] = int(config['num_iter_per_epoch'] * config.get('warmup_epochs', 1.0)) + + # config['num_warmup_steps'] = 
int(config['num_training_steps'] * config['warmup_ratio'])
+    return config
\ No newline at end of file
diff --git a/utils/logger.py b/utils/logger.py
new file mode 100644
index 0000000..544f559
--- /dev/null
+++ b/utils/logger.py
@@ -0,0 +1,286 @@
+# from MMF: https://github.com/facebookresearch/mmf/blob/master/mmf/utils/logger.py
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import functools
+import logging
+import os
+import sys
+import time
+import wandb
+
+from .dist import get_rank, is_main_process
+from termcolor import colored
+
+
+def log_dict_to_wandb(log_dict, step, prefix=""):
+    """include a separator (e.g. `/`) at the end of `prefix`"""
+    if not is_main_process():
+        return
+
+    log_dict = {f"{prefix}{k}": v for k, v in log_dict.items()}
+    wandb.log(log_dict, step)
+
+
+def setup_wandb(config):
+    if not (config.wandb_enabled and is_main_process()):
+        return
+
+    run = wandb.init(
+        config=config,
+        project=config.wandb_project,
+        # entity=config.wandb.entity,
+        mode=config.wandb_mode,
+        # name=os.path.basename(config.output_dir),
+        reinit=True
+    )
+    wandb.define_metric('train/webvid/step')
+    wandb.define_metric('train/webvid/*', 'train/webvid/step')
+
+    wandb.define_metric('train/cc3m/step')
+    wandb.define_metric('train/cc3m/*', 'train/cc3m/step')
+
+    wandb.define_metric('train/other/step')
+    wandb.define_metric('train/other/*', 'train/other/step')
+
+    wandb.define_metric('val/msrvtt/step')
+    wandb.define_metric('val/msrvtt/*', 'val/msrvtt/step')
+
+    wandb.define_metric('train/champagne/step')
+    wandb.define_metric('train/champagne/*', 'train/champagne/step')
+
+    wandb.define_metric('train/visdial/step')
+    wandb.define_metric('train/visdial/*', 'train/visdial/step')
+
+    wandb.define_metric('train/avsd/step')
+    wandb.define_metric('train/avsd/*', 'train/avsd/step')
+
+    wandb.define_metric('train/nextqa/step')
+    wandb.define_metric('train/nextqa/*', 'train/nextqa/step')
+
+    return run
+
+
+def setup_output_folder(save_dir: str, folder_only: bool = False):
+    """Sets up and returns the output file where the logs will be placed
+    based on the configuration passed. Usually "save_dir/logs/log_<timestamp>.txt".
+    If env.log_dir is passed, logs will be directly saved in this folder.
+    Args:
+        folder_only (bool, optional): If the folder should be returned and not the file.
+            Defaults to False.
+    Returns:
+        str: folder or file path depending on the folder_only flag
+    """
+    log_filename = "train_"
+    log_filename += time.strftime("%Y_%m_%dT%H_%M_%S")
+    log_filename += ".log"
+
+    log_folder = os.path.join(save_dir, "logs")
+
+    if not os.path.exists(log_folder):
+        os.makedirs(log_folder)
+
+    if folder_only:
+        return log_folder
+
+    log_filename = os.path.join(log_folder, log_filename)
+
+    return log_filename
+
+
+def setup_logger(
+    output: str = None,
+    color: bool = True,
+    name: str = "mmf",
+    disable: bool = False,
+    clear_handlers=True,
+    *args,
+    **kwargs,
+):
+    """
+    Initialize the MMF logger and set its verbosity level to "INFO".
+    Outside libraries shouldn't call this in case they have set their
+    own logging handlers and setup. If they do, and don't want to
+    clear handlers, pass the clear_handlers option.
+    The initial version of this function was taken from D2 and adapted
+    for MMF.
+    Args:
+        output (str): a file name or a directory to save log.
+            If it ends with ".txt" or ".log", it is assumed to be a file name.
+            Default: Saved to file <save_dir/logs/log_[timestamp].txt>
+        color (bool): If false, won't log colored logs. Default: true
+        name (str): the root module name of this logger. Defaults to "mmf".
+ disable: do not use + clear_handlers (bool): If false, won't clear existing handlers. + Returns: + logging.Logger: a logger + """ + if disable: + return None + logger = logging.getLogger(name) + logger.propagate = False + + logging.captureWarnings(True) + warnings_logger = logging.getLogger("py.warnings") + + plain_formatter = logging.Formatter( + "%(asctime)s | %(levelname)s | %(name)s : %(message)s", + datefmt="%Y-%m-%dT%H:%M:%S", + ) + + distributed_rank = get_rank() + handlers = [] + + logging_level = logging.INFO + # logging_level = logging.DEBUG + + if distributed_rank == 0: + logger.setLevel(logging_level) + ch = logging.StreamHandler(stream=sys.stdout) + ch.setLevel(logging_level) + if color: + formatter = ColorfulFormatter( + colored("%(asctime)s | %(name)s: ", "green") + "%(message)s", + datefmt="%Y-%m-%dT%H:%M:%S", + ) + else: + formatter = plain_formatter + ch.setFormatter(formatter) + logger.addHandler(ch) + warnings_logger.addHandler(ch) + handlers.append(ch) + + # file logging: all workers + if output is None: + output = setup_output_folder() + + if output is not None: + if output.endswith(".txt") or output.endswith(".log"): + filename = output + else: + filename = os.path.join(output, "train.log") + if distributed_rank > 0: + filename = filename + f".rank{distributed_rank}" + os.makedirs(os.path.dirname(filename), exist_ok=True) + + fh = logging.StreamHandler(_cached_log_stream(filename)) + fh.setLevel(logging_level) + fh.setFormatter(plain_formatter) + logger.addHandler(fh) + warnings_logger.addHandler(fh) + handlers.append(fh) + + # Slurm/FB output, only log the main process + # save_dir = get_mmf_env(key="save_dir") + if "train.log" not in filename and distributed_rank == 0: + filename = os.path.join(output, "train.log") + sh = logging.StreamHandler(_cached_log_stream(filename)) + sh.setLevel(logging_level) + sh.setFormatter(plain_formatter) + logger.addHandler(sh) + warnings_logger.addHandler(sh) + handlers.append(sh) + + logger.info(f"Logging to: {filename}") + + # Remove existing handlers to add MMF specific handlers + if clear_handlers: + for handler in logging.root.handlers[:]: + logging.root.removeHandler(handler) + # Now, add our handlers. + logging.basicConfig(level=logging_level, handlers=handlers) + + return logger + + +def setup_very_basic_config(color=True): + plain_formatter = logging.Formatter( + "%(asctime)s | %(levelname)s | %(name)s : %(message)s", + datefmt="%Y-%m-%dT%H:%M:%S", + ) + ch = logging.StreamHandler(stream=sys.stdout) + ch.setLevel(logging.INFO) + if color: + formatter = ColorfulFormatter( + colored("%(asctime)s | %(name)s: ", "green") + "%(message)s", + datefmt="%Y-%m-%dT%H:%M:%S", + ) + else: + formatter = plain_formatter + ch.setFormatter(formatter) + # Setup a minimal configuration for logging in case something tries to + # log a message even before logging is setup by MMF. + logging.basicConfig(level=logging.INFO, handlers=[ch]) + + +# cache the opened file object, so that different calls to `setup_logger` +# with the same file name can safely write to the same file. 
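+# The stream is opened in append mode ("a"), so a re-run or a second
+# setup_logger call keeps earlier log lines instead of truncating the file.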
+@functools.lru_cache(maxsize=None)
+def _cached_log_stream(filename):
+    return open(filename, "a")
+
+
+# ColorfulFormatter is adopted from Detectron2 and adapted for MMF
+class ColorfulFormatter(logging.Formatter):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+    def formatMessage(self, record):
+        log = super().formatMessage(record)
+        if record.levelno == logging.WARNING:
+            prefix = colored("WARNING", "red", attrs=["blink"])
+        elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL:
+            prefix = colored("ERROR", "red", attrs=["blink", "underline"])
+        else:
+            return log
+        return prefix + " " + log
+
+
+class TensorboardLogger:
+    def __init__(self, log_folder="./logs", iteration=0):
+        # This would handle the warning of missing tensorboard
+        from torch.utils.tensorboard import SummaryWriter
+
+        self.summary_writer = None
+        self._is_master = is_main_process()
+        # self.timer = Timer()
+        self.log_folder = log_folder
+
+        if self._is_master:
+            # current_time = self.timer.get_time_hhmmss(None, format=self.time_format)
+            current_time = time.strftime("%Y-%m-%dT%H:%M:%S")
+            # self.timer.get_time_hhmmss(None, format=self.time_format)
+            tensorboard_folder = os.path.join(
+                self.log_folder, f"tensorboard_{current_time}"
+            )
+            self.summary_writer = SummaryWriter(tensorboard_folder)
+
+    def __del__(self):
+        if getattr(self, "summary_writer", None) is not None:
+            self.summary_writer.close()
+
+    def _should_log_tensorboard(self):
+        if self.summary_writer is None or not self._is_master:
+            return False
+        else:
+            return True
+
+    def add_scalar(self, key, value, iteration):
+        if not self._should_log_tensorboard():
+            return
+
+        self.summary_writer.add_scalar(key, value, iteration)
+
+    def add_scalars(self, scalar_dict, iteration):
+        if not self._should_log_tensorboard():
+            return
+
+        for key, val in scalar_dict.items():
+            self.summary_writer.add_scalar(key, val, iteration)
+
+    def add_histogram_for_model(self, model, iteration):
+        if not self._should_log_tensorboard():
+            return
+
+        for name, param in model.named_parameters():
+            np_param = param.clone().cpu().data.numpy()
+            self.summary_writer.add_histogram(name, np_param, iteration)
diff --git a/utils/metrcis.py b/utils/metrcis.py
new file mode 100644
index 0000000..8f4c132
--- /dev/null
+++ b/utils/metrcis.py
@@ -0,0 +1,174 @@
+"""
+A Metric observes the output of a certain model, for example in the form of
+logits or scores, and accumulates a particular metric with reference to some
+provided targets. In the context of VisDial, we use Recall (@ 1, 5, 10), Mean
+Rank, Mean Reciprocal Rank (MRR) and Normalized Discounted Cumulative Gain
+(NDCG).
+
+Each ``Metric`` must at least implement three methods:
+    - ``observe``, update the accumulated metric with currently observed
+      outputs and targets.
+    - ``retrieve``, return the accumulated metric and optionally reset the
+      internally accumulated metric (this is commonly done between two epochs
+      after validation).
+    - ``reset``, explicitly reset the internally accumulated metric.
+
+Caveat: if you wish to implement your own class of Metric, make sure you call
+``detach`` on output tensors (like logits), else it will cause memory leaks.
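+
+Typical usage (a sketch, not tied to a specific eval script): call
+``observe(scores, targets)`` once per validation batch and
+``retrieve(reset=True)`` once at the end of the epoch to obtain a dict
+such as {"r@1": ..., "r@5": ..., "r@10": ..., "mean": ..., "mrr": ...}.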
+""" +import torch + + +def scores_to_ranks(scores: torch.Tensor): + """Convert model output scores into ranks.""" + batch_size, num_rounds, num_options = scores.size() + scores = scores.view(-1, num_options) + + # sort in descending order - largest score gets highest rank + sorted_ranks, ranked_idx = scores.sort(1, descending=True) + + # i-th position in ranked_idx specifies which score shall take this + # position but we want i-th position to have rank of score at that + # position, do this conversion + ranks = ranked_idx.clone().fill_(0) + for i in range(ranked_idx.size(0)): + for j in range(num_options): + ranks[i][ranked_idx[i][j]] = j + # convert from 0-99 ranks to 1-100 ranks + ranks += 1 + ranks = ranks.view(batch_size, num_rounds, num_options) + return ranks + + +class SparseGTMetrics(object): + """ + A class to accumulate all metrics with sparse ground truth annotations. + These include Recall (@ 1, 5, 10), Mean Rank and Mean Reciprocal Rank. + """ + + def __init__(self): + self._rank_list = [] + + def observe( + self, predicted_scores: torch.Tensor, target_ranks: torch.Tensor + ): + predicted_scores = predicted_scores.detach() + + # shape: (batch_size, num_rounds, num_options) + predicted_ranks = scores_to_ranks(predicted_scores) + batch_size, num_rounds, num_options = predicted_ranks.size() + + # collapse batch dimension + predicted_ranks = predicted_ranks.view( + batch_size * num_rounds, num_options + ) + + # shape: (batch_size * num_rounds, ) + target_ranks = target_ranks.view(batch_size * num_rounds).long() + + # shape: (batch_size * num_rounds, ) + predicted_gt_ranks = predicted_ranks[ + torch.arange(batch_size * num_rounds), target_ranks + ] + self._rank_list.extend(list(predicted_gt_ranks.cpu().numpy())) + + def retrieve(self, reset: bool = True): + num_examples = len(self._rank_list) + if num_examples > 0: + # convert to numpy array for easy calculation. + __rank_list = torch.tensor(self._rank_list).float() + metrics = { + "r@1": torch.mean((__rank_list <= 1).float()).item(), + "r@5": torch.mean((__rank_list <= 5).float()).item(), + "r@10": torch.mean((__rank_list <= 10).float()).item(), + "mean": torch.mean(__rank_list).item(), + "mrr": torch.mean(__rank_list.reciprocal()).item(), + } + else: + metrics = {} + + if reset: + self.reset() + return metrics + + def reset(self): + self._rank_list = [] + + +class NDCG(object): + def __init__(self): + self._ndcg_numerator = 0.0 + self._ndcg_denominator = 0.0 + + def observe( + self, predicted_scores: torch.Tensor, target_relevance: torch.Tensor + ): + """ + Observe model output scores and target ground truth relevance and + accumulate NDCG metric. + + Parameters + ---------- + predicted_scores: torch.Tensor + A tensor of shape (batch_size, num_options), because dense + annotations are available for 1 randomly picked round out of 10. + target_relevance: torch.Tensor + A tensor of shape same as predicted scores, indicating ground truth + relevance of each answer option for a particular round. 
+ """ + predicted_scores = predicted_scores.detach() + + # shape: (batch_size, 1, num_options) + predicted_scores = predicted_scores.unsqueeze(1) + predicted_ranks = scores_to_ranks(predicted_scores) + + # shape: (batch_size, num_options) + predicted_ranks = predicted_ranks.squeeze(1) + batch_size, num_options = predicted_ranks.size() + + k = torch.sum(target_relevance != 0, dim=-1) + + # shape: (batch_size, num_options) + _, rankings = torch.sort(predicted_ranks, dim=-1) + # Sort relevance in descending order so highest relevance gets top rnk. + _, best_rankings = torch.sort( + target_relevance, dim=-1, descending=True + ) + + # shape: (batch_size, ) + batch_ndcg = [] + for batch_index in range(batch_size): + + num_relevant = k[batch_index] + dcg = self._dcg( + rankings[batch_index][:num_relevant], + target_relevance[batch_index], + ) + best_dcg = self._dcg( + best_rankings[batch_index][:num_relevant], + target_relevance[batch_index], + ) + batch_ndcg.append(dcg / best_dcg) + + self._ndcg_denominator += batch_size + self._ndcg_numerator += sum(batch_ndcg) + + def _dcg(self, rankings: torch.Tensor, relevance: torch.Tensor): + sorted_relevance = relevance[rankings].cpu().float() + discounts = torch.log2(torch.arange(len(rankings)).float() + 2) + return torch.sum(sorted_relevance / discounts, dim=-1) + + def retrieve(self, reset: bool = True): + if self._ndcg_denominator > 0: + metrics = { + "ndcg": float(self._ndcg_numerator / self._ndcg_denominator) + } + else: + metrics = {} + + if reset: + self.reset() + return metrics + + def reset(self): + self._ndcg_numerator = 0.0 + self._ndcg_denominator = 0.0 \ No newline at end of file diff --git a/utils/optimizer.py b/utils/optimizer.py new file mode 100644 index 0000000..53937a2 --- /dev/null +++ b/utils/optimizer.py @@ -0,0 +1,35 @@ +""" Optimizer Factory w/ Custom Weight Decay +Hacked together by / Copyright 2020 Ross Wightman +""" +import re +import torch +from torch import optim as optim +from utils.dist import is_main_process +import glog as logger +# from transformers import create_optimizer +# from transformers import AdamW +# import math + + +def create_optimizer(config, model): + lr_scale = config.get('lr_layer_decay', 1) + weight_decay = config.get('weight_decay', 0.01) + + optim_params = model.get_optimizer_params(weight_decay, lr_scale) + + num_parameters = 0 + for p_group in optim_params: + for p in p_group['params']: + num_parameters += p.data.nelement() + logger.info('number of trainable parameters: {}'.format(num_parameters)) + + lr = config.get('lr', 1e-4) + betas = config.get('opt_betas', [0.9, 0.999]) + + optimizer = torch.optim.AdamW( + optim_params, + lr=float(lr), + betas=betas + ) + + return optimizer diff --git a/utils/scheduler.py b/utils/scheduler.py new file mode 100644 index 0000000..7df4d2a --- /dev/null +++ b/utils/scheduler.py @@ -0,0 +1,240 @@ +""" Scheduler Factory +Hacked together by / Copyright 2020 Ross Wightman +""" +from torch.optim import Optimizer +import math +from torch.optim.lr_scheduler import LambdaLR, _LRScheduler +import math + + +# class LinearWarmupStepLRScheduler: +# def __init__( +# self, +# optimizer, +# max_epoch, +# min_lr, +# init_lr, +# decay_rate=1, +# warmup_start_lr=-1, +# warmup_steps=0, +# **kwargs +# ): +# self.optimizer = optimizer + +# self.max_epoch = max_epoch +# self.min_lr = min_lr + +# self.decay_rate = decay_rate + +# self.init_lr = init_lr +# self.warmup_steps = warmup_steps +# self.warmup_start_lr = warmup_start_lr if warmup_start_lr >= 0 else init_lr + +# def step(self, 
cur_epoch, cur_step): +# if cur_epoch == 0: +# warmup_lr_schedule( +# step=cur_step, +# optimizer=self.optimizer, +# max_step=self.warmup_steps, +# init_lr=self.warmup_start_lr, +# max_lr=self.init_lr, +# ) +# else: +# step_lr_schedule( +# epoch=cur_epoch, +# optimizer=self.optimizer, +# init_lr=self.init_lr, +# min_lr=self.min_lr, +# decay_rate=self.decay_rate, +# ) + + +# class LinearWarmupCosineLRScheduler: +# def __init__( +# self, +# optimizer, +# max_epoch, +# min_lr, +# init_lr, +# warmup_steps=0, +# warmup_start_lr=-1, +# **kwargs +# ): +# self.optimizer = optimizer + +# self.max_epoch = max_epoch +# self.min_lr = min_lr + +# self.init_lr = init_lr +# self.warmup_steps = warmup_steps +# self.warmup_start_lr = warmup_start_lr if warmup_start_lr >= 0 else init_lr + +# def step(self, cur_epoch, cur_step): +# # assuming the warmup iters less than one epoch +# if cur_epoch == 0: +# warmup_lr_schedule( +# step=cur_step, +# optimizer=self.optimizer, +# max_step=self.warmup_steps, +# init_lr=self.warmup_start_lr, +# max_lr=self.init_lr, +# ) +# else: +# cosine_lr_schedule( +# epoch=cur_epoch, +# optimizer=self.optimizer, +# max_epoch=self.max_epoch, +# init_lr=self.init_lr, +# min_lr=self.min_lr, +# ) + + +# class ConstantLRScheduler: +# def __init__(self, optimizer, init_lr, warmup_start_lr=-1, warmup_steps=0, **kwargs): +# self.optimizer = optimizer +# self.lr = init_lr +# self.warmup_start_lr = warmup_start_lr if warmup_start_lr >= 0 else init_lr +# self.warmup_steps = warmup_steps + +# def step(self, cur_epoch, cur_step): +# if cur_epoch == 0: +# warmup_lr_schedule( +# step=cur_step, +# optimizer=self.optimizer, +# max_step=self.warmup_steps, +# init_lr=self.warmup_start_lr, +# max_lr=self.lr, +# ) +# else: +# for param_group in self.optimizer.param_groups: +# param_group["lr"] = self.lr + + +# schedulers = { +# 'constant_lr': ConstantLRScheduler, +# 'linear_warmup_cosine_lr': LinearWarmupCosineLRScheduler, +# 'linear_warmup_step_lr': LinearWarmupStepLRScheduler +# } + + +# def cosine_lr_schedule(optimizer, epoch, max_epoch, init_lr, min_lr): +# """Decay the learning rate""" +# lr = (init_lr - min_lr) * 0.5 * ( +# 1.0 + math.cos(math.pi * epoch / max_epoch) +# ) + min_lr +# for param_group in optimizer.param_groups: +# param_group["lr"] = lr + + +# def warmup_lr_schedule(optimizer, step, max_step, init_lr, max_lr): +# """Warmup the learning rate""" +# lr = min(max_lr, init_lr + (max_lr - init_lr) * step / max(max_step, 1)) +# for param_group in optimizer.param_groups: +# param_group["lr"] = lr + + +# def step_lr_schedule(optimizer, epoch, init_lr, min_lr, decay_rate): +# """Decay the learning rate""" +# lr = max(min_lr, init_lr * (decay_rate**epoch)) +# for param_group in optimizer.param_groups: +# param_group["lr"] = lr + + +# def create_scheduler(config, optimizer): +# scheduler_cls = schedulers[config.get('scheduler', 'constant_lr')] +# max_epoch = config.epochs +# min_lr = config.min_lr +# init_lr = config.lr +# warmup_start_lr = config.get('warmup_lr', -1) +# warmup_steps = config.get('warmup_steps', 0) + +# scheduler = scheduler_cls( +# optimizer=optimizer, +# max_epoch=max_epoch, +# min_lr=min_lr, +# init_lr=init_lr, +# decay_rate=None, +# warmup_start_lr=warmup_start_lr, +# warmup_steps=warmup_steps +# ) + +# return scheduler + + + +class WarmupLinearScheduleNonZero(_LRScheduler): + """ Linear warmup and then linear decay. + Linearly increases learning rate from 0 to max_lr over `warmup_steps` training steps. 
+        Then linearly decays the learning rate to min_lr over the remaining `t_total - warmup_steps` steps.
+    """
+    def __init__(self, optimizer, warmup_steps, t_total, min_lr=1e-5, last_epoch=-1):
+        self.warmup_steps = warmup_steps
+        self.t_total = t_total
+        self.min_lr = min_lr
+        super(WarmupLinearScheduleNonZero, self).__init__(optimizer, last_epoch=last_epoch)
+
+    def get_lr(self):
+        step = self.last_epoch
+        if step < self.warmup_steps:
+            lr_factor = float(step) / float(max(1, self.warmup_steps))
+        else:
+            lr_factor = max(0, float(self.t_total - step) / float(max(1.0, self.t_total - self.warmup_steps)))
+
+        return [base_lr * lr_factor if (base_lr * lr_factor) > self.min_lr else self.min_lr for base_lr in self.base_lrs]
+
+
+def create_scheduler(config, optimizer):
+    lr_scheduler = None
+    if config['scheduler'] == 'cosine':
+        lr_scheduler = get_cosine_schedule_with_warmup(
+            optimizer,
+            num_warmup_steps=config['num_warmup_steps'],
+            num_training_steps=config['num_training_steps'],
+            num_cycles=0.5,
+            min_lr_multi=config['min_lr_multi']
+        )
+    elif config['scheduler'] == 'linear':
+        lr_scheduler = WarmupLinearScheduleNonZero(
+            optimizer,
+            config['num_warmup_steps'],
+            config['num_training_steps'],
+            min_lr=config['min_lr']
+        )
+    return lr_scheduler
+
+
+def get_cosine_schedule_with_warmup(
+        optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int,
+        num_cycles: float = 0.5, min_lr_multi: float = 0., last_epoch: int = -1
+):
+    """
+    Modified from https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/optimization.py
+
+    Create a schedule with a learning rate that decreases following the values of the cosine function between the
+    initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the
+    initial lr set in the optimizer.
+    Args:
+        optimizer ([`~torch.optim.Optimizer`]):
+            The optimizer for which to schedule the learning rate.
+        num_warmup_steps (`int`):
+            The number of steps for the warmup phase.
+        num_training_steps (`int`):
+            The total number of training steps.
+        num_cycles (`float`, *optional*, defaults to 0.5):
+            The number of waves in the cosine schedule (the default is to just decrease from the max value to 0
+            following a half-cosine).
+        min_lr_multi (`float`, *optional*, defaults to 0):
+            The minimum learning rate multiplier. Thus the minimum learning rate is base_lr * min_lr_multi.
+        last_epoch (`int`, *optional*, defaults to -1):
+            The index of the last epoch when resuming training.
+    Return:
+        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
+    """
+
+    def lr_lambda(current_step):
+        if current_step < num_warmup_steps:
+            return max(min_lr_multi, float(current_step) / float(max(1, num_warmup_steps)))
+        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
+        return max(min_lr_multi, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
+
+    return LambdaLR(optimizer, lr_lambda, last_epoch)
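+
+
+if __name__ == "__main__":
+    # Minimal smoke test for the cosine schedule above; an illustrative
+    # sketch only, where the dummy parameter and the step counts are
+    # arbitrary and do not come from any project config.
+    import torch
+    param = torch.nn.Parameter(torch.zeros(1))
+    optimizer = torch.optim.AdamW([param], lr=1e-4)
+    scheduler = get_cosine_schedule_with_warmup(
+        optimizer, num_warmup_steps=10, num_training_steps=100, min_lr_multi=0.01)
+    for _ in range(100):
+        optimizer.step()
+        scheduler.step()
+    # after warmup the lr follows a half-cosine down to base_lr * min_lr_multi
+    print(optimizer.param_groups[0]['lr'])  # ~1e-06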

zL+y6yYZRW5zG~;8(ls5(;wX;e1DwJcw9R4vBl^^K;>r9EZR}XqHh4x)p3hFt$Foc! zO>f=mnfd*kcrM@)E+e)LQZw}9(>qtF|3_Nqhkg^?i`DDl0_i{d;rPGrwH9^LBWU_ve_W4OR`iJG$ z5Z4bwoFA0^4&MO!6My^d+UHx92?rk8=-JKM=i2uT3$*_iYyXqY?0}Yg+W$z?TYI$2 zKg%wDhF_?-#$r4sVlw*7=bK9Q$Pd%WUVHgv|HwyxUWC^WeR(?A6ise{|)mV%5cn57G z+3l08KQQi*9p82Sk=>p>$&Nq9ZeKMZG_GUEqj?s)eKEVe(E0<@*zpqvgdTGud%mi^ ze^!0pW&Va|y{S5cjpB}L7{)aNw$QhI*ZK<93D`m3g}z^ZBJ};z`dJg63VR&y#eN(_ z<%37pR%~(a*k%?VlmDj=RZ*v+UoC>|Zix|Ch0C{ZE=~^}o&R&=16O z3TJQ*ac)2g7wDbF!8?Wy376<2(7llT&nC{<|LoVx!phmfSIO(Bq~9cOqk>+uK)M#Q zqkjHWxGO9gm`KxM_e$volrJFp9T5a$iYdBl6^opY5B*iS!*?s3Y886UP^r2DY2O4J~UzOQ{MG?8g^ zp!Wy9H?qgNn@7px_yDJH25~Qr&C(qA;wX7;fIT+!nF`N7Yh8P?_cu?4%Vdv!=&NMu zZ?sdsW^H@=P29#^+{Z)odtdt(gV9&V|Nr;>?dLHh3~?Oi{|zHYU=&6pu9-TP9FK{Z zjLL=b|2$v|9v0U?+NupM){{6PtcQO?EgOJp5oX{ z>|5J(|2F9V zM;xPmT|93f`n{FDsGk42Z>4Zxm@h2n=eCY5JxjfZo5?J?Q`$$@q#sSrX+{e@by59)Lj8|8CvvZN_TwN9BOT9Sn^)31 z-FpXW=t*>s6u09Hy0B$<9DmgQS>hcb`VKTXFYZ5FQQ^83(uERzm>n?=dYlXRIGs8B zcc*WpO#R>Po6sJ}`TvmhC$;-G8E5{cJQL&m##7-ac^n_$6x!Cw|Hbkc*(slQotL+e zrFS1u|7Y0$;&0l@{+AD17RZ~6<$to(arFN=Bc75^YP*vc@Yw(768-V|S(oWo(Y;UH zyZr|l*UxGQ((ik}?zfwW{y(?LyU6+f_{MAV`-S_CA0l~Hxpq-GH$0}(e8mIu8wR7V zZeSQf4#NnHLiTshhtcF%OvGgLZ2Cl)O7?!kbJymK&wUG;i&Ek%lPKgOFvs6 zzni;%PyL_{$&C-4SB5vLAJF%+5Q?1l8fN2lyn*?M`<;}0>-o_0YvW&zgb$MYeeLd<6*_KEwPzRvuhVg&nioS7}?d@qcv8F@BG(g~sn^8NZ)v{GM#M zW&Zy?^Z$>S>yMUp~g4;X5EA5Cc1N7;guKFjoCT*@$~+)M&%k-q7dh_zfhleVBjmXs6J#B~OLdc<*ms2>Z!{=;3#HIEKB=2Z5|X5`Ev*4kFW0uk%flSscZ2e1KCp zgLCNno6+F{+4HZX!zHr!`O)Drc^x-#8=F^nZ{BtJ73nC}e@@;NUh-`JP)U!zi}xMZ zZdMK`ZxUy!hi8l(PGkJOA)PZX*b$ejP1IwJc2uXpWLlq#RMeb4@!^nU-S z%*0@{sjG7H|9Adb{pYxQtNKrzAM^iL2d0p|q5iw#_mk%5oi#u2w7!5F`~%qLsR#NG zu4x~jO@Dm*IPHr``VYw5y8UF>*GbEp^zu=tcr(d_u^RMu365bKH>6*NWEf3`9 z&*=|*@l*D<;_D!;rD!`Q|DTlq$06N8tlfaz zIvTs>DPiSo;vM8JRL@`o6;N1Ck`Mj=8r1v0<^T6h zeL{Y_j+?lRySR^RU@N?8?3do}XYvCEV+e*}1V*7;`{ytYDSLk=554L9;~4F?u^111 zfMN6QU~ZWD)A(KW0c*gOnWH_9eO$;sW>+RrkA`CQEnB^509$^N@$DIGSLD`lbN-X{ z0gmvGM^`)B{Q#S)jDI}axVg~!|Ne6_reZqMjpFe8OZ3ib?EiiGA->Bl@ZEQ_&$Gy! z7uPNO0yw99gMA1~w3W=0sARLhCTun;PP2cVTbrMJvpxl=-Y@sv=v*j%=}7q!3iKe*>O$DtN=Bi$F0`R;R) zdGg}jf>P|jF6_Zx>_=OH{uj@=!?~SgSD|Mk&&M&I?vwf-7I;64rIXCPpJ|@0^B%vS z=FR!xpm+|W21%s%+5f@bjM=aD51sT5dg*uBtzFvmJ;JRAn7L>FN7rtdqwYDDZ{1k^ zg!D>lk2b|PP;u6GgIZx7j*|+|8MqxCfZ$qk4yNKQ?Z5_z1zxQ)}g7hpN z?_V7EgY5Z&^*=vp4M5}kIe&=L$`6cp{X|U0R7}T9C_>+N`7C_F{E^|>OOChx@_&Ze z9!O`ln&eqN=1CL2zu|Iu`jt$UXJds6+sPW@i2ekWtt zdhxu2jo5_lML8?GyhW=O?rc(7%_j4M=ttX!DU7 z$62yl8*slk58^N?QG;aEeJ4$1)g;e%n&*o+4qtcL|00Qc#Qy)Z^E!~lQKYB1hZ%hO z=$*r*A0N<9p?j6Kgmy<}o%C-oFF;uA|361wK&9|Yt944z#-yHos~b?F2m^2|7QgGc;0RoTOf`-X3>pp?yK~hFNY@Wl2OhZjowf84`a!m z-wzDq$-bbCM^466Ovg(o!fTj~*YO5=f5x65d#a4jl70VfZghcf3(-DWBk<1RO1{M}fgXJ;q91xyXH7Pz$tum{@)qLZO&;&$DCK#|F5wB zUkMpx=e`oUXTA~|66U9;%ujDLKYge9>HEx2M|!*Y=_vPGod0r8To-T&l>>_OT@{7P z^x8GkUp~Wf%)_F%*7)0tNHVuVZU;D0fR9F z!*Ev~89|Oh)ndYU|RiPebh|Fz_Lyo0s^^{;yLznuTKUH>1NCdmJ2Q8%TKrnl-Bi2Z*X#j^#a z*n#vy_JzK^UG&bm>QC&U??v|m`3xC!VZX40IE+g4Jneary;ZM-B)M5TeA5(d>A!s? 
zmyer-W&hYelS zRq{G+;x-S;Y^oOWd1|(xSfY|@u@8`ZB48{-)!w8H*-{*ZBWY5b*VJzAE zrM$3dg0g71^3r>sC@jwZi#`<5cVM#Psdzm9Z#w<){J)pzMfm?a|1a&H$Ga}q{^rZ` z8htii#~b)h=l|_iN1+@4JM;hMi|0)&!ds|Z@o4_vQu^chf6M7B@#pjZRtsy^cd(XR zkMyligm=h|NU`fW{1*4O+2XhqSvG$+x{hh5kZmjUU!Y^V{tI-eOEZW*cDebBL$wbI zv=7w5&CY94=cbURw=PkC&QZTF_(=OHwky=Z$u;U(%r5tBf297{;U0EjPrR3JvmMIV zsPvrw-H!6wcrrV*bBCUczNOng7S;|JeM$M*eyD-m z6Sq;h;nDoRyY$ENZ|>6{;?L**ZC)jRqhE#RJV3w4*Pjl9=|j*L*VflQ=y~28L&v>; z`a~E(jza0bu)nRX_uu*djdt!>jK@Sw##BtlOZb@o-wDt4yyuE6z559JE2aF)DE~a4 zX8ig5k0NozzJu4u*~mWsbaafWg z`0dOa zd(r)oEsj&pORus1KgzGURyxOVHC_01%FJIQGde78xZz8?;sAN5;aJo32m!BVjqr_{|Cwch4TM2`TxB9 zuPxPxCg(LPcc1M4MsHnU{NNkr5?G%h&h3xwkF+?W-_K@kmN<6N;W+l;XUU^cw^vwf zfAsx^-;Cqj`UC3!Q}kwK%^5OP%zt{FHZ}bME}>(d@)=$1g`9s1*?ohJi6~@MQiyJ!hwq!(1~0qc9p{F&+~!8EseeKj_o% zAUpN%caa&#S+ZN-L~i~6Y3fJyVw`K&B3-%p_tMsy7w6w=8)#2$PH9_VsyHhTS^r=7 zbb77!L|p&B09oNUP18s_g`0I>M6bXy_3gy=Sf6n_$*$+wBY@{;MK1aX70_|_d zZy-Cvb4S-K?}4mxU)98%3hqOOV>)#j8TCB%AsJx~Ay*D6iq{lUHYUJZB z^iq7xe=N`X_`-H!5B4JW-Lgma)8lu1kUWg+=Zxi%Ni?DKU-%Vhf2Zjk$Rf_a@BOU( zw&*>-$_vNIz8Cqilc#V7=Wqd+a2c`x?J9X4H*p(}``_-;JLj=Y=IFbiKScETj{DwZ zX0jj1s!9Jv|D!s)UVh5HT@dRJN}1{LzY@uYKtM*PsmG|2IsWqfq*s=kwKqFq%FV<1rE0KRzEO zlYRd_F!cS__glu#$MJN$gd#kizx*1#R-TV@uxHa>NBtdji{rTWd7Oh?waWbSb)!R# zG3;7n+jV8erH%b3>Gj6BN4Vw<%*UHpgtxF1%h9&o_&+-Kng4HWrpwrV2H9fc|LctZ z8}DvBY5bgQzG(cPOgT=It7 zy(V%m_M_6bQc6~(JfE%N*(n~f&a+8)F3AVtaqkVn8_T2*%@xv@aqk~_zq#*Yp6>&3 zEv7#&f1Afi&nJyR`LDzAe~}&|j%ogr-nmBlSNK1A6QWPyll>2tc#gu#$qq7$%4MU& zQSvw{T$@`LL0lg=K84)=2dA|&{BPXk+lcTP4YIr zKV@RLPxk%t{|OJtA1;|3_7wbe*nNIt_`#fs;m7BVm#rHX%JPku9hn?{T3|eF=7_Lw z&WP}n{7;8}bWaC|ekL3+ZukBMZHGhlraWLgs_cdEv+d@%f7hCrpLAbu+Z*#w=D`0e zADMUgglvBpTJruBn)B{^*7rkW-XHmn+zs_TcSADo4H142poMA$pem?3^N z?=zun#;CBrc(i$N`ZdKp44ah|n-=7S_ZR9{^gU#M#U>Fx3ZpR=<1rC^|2#TOCVM_- zj|j5&ZT>~%ODMu?=v<+Fwov}2zmDz#Wfn3c)%9zXIXA57EPOuR#3H2KPfGY(^zwZB z|9Fo}=_|1umFo8j;j8Jj+l^th^NWb%|Li@0k)C1~P^>|Vref>!By0#QM zunT*z7yEG#ADdq=K>P*b_upA`qrsR&BbqiCYd{PAqw^EW{nugfRH6pSs28)phify? 
zYZLAbX?h3Zo&ZV5(MPflSz$+U93S9G{h;nU<@gLPAh-XpaivT2%eac`sM^W@XP@@} zdF}t0FK=o8Bd#$K*Po8<|C`Rcjk~yyhv-+MPD0yBmAn(=pQ-<^$%^{M|*rP^lo$^gQlIHo3>6v%5(I7;+jgW z+E*>NJV(@MW7RJ39_N@xiR4A^?N}UN@gC2b+vpyLU>HVV6h>n##-sVPdBo&o#QpE4 zlIdH%&wK0$`s4LSUZNM_&(|Nx`9G=yH!V^AqcUaQjq_edMdnf67uO%j&A+d+cOCts z>yP}W`wus2|Dzsp{k=EDH6L$c5#GX5EJxcE^*=fg``Z6v{@-cse>5ScwPmR1k2G4* zV{Ct=czR=B#jBqRtLbt4acvCyg|RX69c)COae&|Jf7ufCZ}K4^cVHLxpt;@LD>7BB z{V$L2rys;@52UDBUH&Od#h_TM`5Cj4HB8YH9Mm>(*Z8ONp9mOUE#&;DFn&2ij2 zEs4Zd_CG#0|8E&v+BqpQ%?{7fbMyb6)dpivce3}pl+77-c$PihO;#;r|LQxaK`rW# zKoa#hDvsm$0H<&U=Wqd+(3Z#kM+Z6!#GfzzY2q)8@%#SWOCx)~iLKu}m;Jwh{lD1# z(OYM-|0kG3AfBtZj+>|)!~Pe3n_kP_uhE}zmwq47cOb3}m>}aGau0>|tCbfq7=541 z4@1ad7=d1P+9v_%9+YJ+0DHWz4^N9owRqpCB3M^e0a9pF};@`p@-v z2j{BgBXSE$u>+ePSnqF;_c1brU5@u4`#16qc@S~_!C`XSAC)2c0xIb>NTLbN(wHVY zki}8NwO!&GXUFNC`dK^HS=Wtz3f-}902yt7F7$l+Q{jy8-ZpboeqsK=ulRgAzJNUj*N4?)!ox&g{{dXlVHpEe9qFQ93p zdSa-0!MQ1<{eSBT{p{y*|zzq{~UKYXKWD$H~DKO-;-qmfRC17qo(H(ZPH z^ofXVhAc8y#3g*PuyXa*RB}2huYDrCL>8ffUQ_*OUjJ*3XCt}KeIvKez@bls*M+@- z`FInH@D`S0IUbL%t)$0qWHq@K*?-Fm>&epJ@=Id(zeC@MEinu`(AUsE>>}g%?H;oC z>;1!Caz75@Fsk-_SpVmH9|Pq70`H5=?LRS1{r_D)Mb4{15=}^>16i~kk$=#EPIMuI zEV_|f|9_nPKjuUKKYH|SeO&(>6~}Qj?__6_ao+DK@(j|#&XE_;c}+R6S=}LjccHsS z9U|p6Q?ac!J>Wl5|{6BWKi)Y4;_aM^X&(f39B4w2iQGmig- zKZV=$ySR^s=$BBoVlalFH<$xM_WU+4j37Hw{C+a#BGN}AuI<~6t_n6jIo|i(?3}TF z8;|sEe#zuyq?}WwF04k)LidK;x@7Cz^BVR4DrtKALCF66Kf+Y!Psd9r!fTj~wtLn; zK(3!>jy%0s-kvFsqq~^>KV1D^p#CSDPpJRTD_fDKx1#s6?0;>5xW8HNGXp>7Klr-% z-@tq06dY=Oh+L zqjTb3OG#7}KMd8IANuWKs6`z+D}iMDL+|ckcvC#@U?aAm6g#jBd(f787}_%r^?&?_ z{*V6%UC1DdZZw>D7#h)Z@uBt)8%~@lq<23It!N(lFzgl2ejLPMqz5@C@1eOYzPo&J zqlTVDxBJZ^GvJ{%s(6K!FAyKufy%jlCy%0Hmh0w77fL?=R5&gyIYYWo=e)$shvv5` z|JA`;(CofGa8Am7rq4bMr|6~MQ17Bc{_K=5yX4OdnROiZcRb^qbGU#@xQw1(JP23G z-tYY>TqkehHu_rr6z-Du@euuz^8Wke1KUVKN>Hiz){)YShj;a5pv59QHsQpjo_7^2vH{^x!;+TlZC~todrjiw>AB4)Y z4?@Xro(eM^XZhg7KET=mz8&9kf=oKDCpXWL&u8j`8N{Al%>R7kQ(=~C=Ad$@JpBuP zga4*(&R0hxk*AI>U`LWk*EIC-4?+{-T)q}|d5VuuZhi1Vd;1LFU*Nh9veR{4WX5rp z?9PNR*Z<7J0xZN;WyWH136`PimiECt|6T1KP}lArkfhgF_|KXB!V2fD!Wyi@WB-E< z^vC{t#q`bSo})}~O>9GV$v<0#ZO2aRM&I|I3T5Oz96;~4^lgwmf9u(h)yV$cfY3;m ze4E{(4U_ZlzNcLytW=$`?W+SqJH78!-#OXy^@rgI8R!2UBTwKYPNP}F27Q0%AJ&mo_w;`Z@*hJz|4HJUA?|VhgI-T=a85BcV=Eqy zlSTVsyW_Si?9UtCG1>V*{wFhzvt;)|`QLMFG>+1QX0#yg%N+L@Y(>>+^`H7E=FM7p zD(1}unMD0G`Rtqh!fyXlhJ84IL#RMC+Q!MBlRRItQ-19tbL(@-bKPi|D}RrXzlAsZ zy@gCUKTWp!ujZ|MaDJss5m%f8*hr>@rOH6h?d!E&Ir2&4_3w-GntM`KWXLXgd+YB9 zhv~k_@{{hv`4t!aMjk<>uw)FzYxN2Kl0qcWhLjAg({zn#WeoJZfUo(dPq9$Z0h z%TwW6Jl6icK}KJz&Fg$`#Ub2sdf}MA~Bkw0u z;T^SmH}Y+kKFE3b7>W}8AH&I!C_vw@^THUi=L-YFI5LinMEidNeG;Z%8p^fhXOM-6 zbN^=>D46{LO6t7J6zL`+o}iAM3=`XD!4HWHB~lE4E`NcB2gYZ~%wUTcv-5 z?D<}PXg=ck(GzGy3h86gc$!~6z4HX$e8(C35p?TM$O_B2u8WLw_m2rbfs;6m$^rTx zh7Jj5>2Xd#jpOt5Xp7db`MCeVrg`3lvTw^0<=-)7yKg2%rcdfWcTLX!?^)m83HAZH z&U-${(z~7axL&Rvq%GVo{(SiY3DhCZ`Mn~ZYq)`1xPyDhzG%&h3G)6(c@J%Q@~8Zl zjd>7peqWsP8`u7iYkX7Acq(`ESKs$k$ZPa1lLN>>$VXg*FsUp^ zC==>XOOJc??DSm@_4{y)L;=QN9425AreGRopb#_hq<>akoaMON*vlMp9{P*{Eg(yN z)GsU~7h{{YPicQ^iiPLG635HX^Mkywg6!QC!YXnN)*+Sw8^~A&q*vvIV)|xmMXr3l zC-2LX8RXW7%;bgbe%p!N$o_IQSJIJDE6E(^6^{5}-JF%s!z>ezJyLjk>0UEAULj-ii3_i)b!8FhP{zaQHl-{1GA zFu`wAFb%o(hi5Z`UVF{E>ET}x_Xl{NJ$?33+bnTKTfwtExN1=NN9lb(-`bBi28I3R z=KOReA=Y`yh|u;a+Thkr1(Hp_2wu&d|k@crRK!uO`#4mx!GC33r+l*A+=k&!-3{}dn>S@Z9Lia+}O;XmRlsOsg zA9Aa6w__)E<4O6^9zvPpwz=&8#q9r8`qu}s2}V8|d&|!?_t(phsWYJWqK{b}Dx}$YXvxfk`-klQ@mDIFF0yK~=)~04eLZ zqjsF}??V4uU_9E`as4{u-}%PBCmH|tTeGq7mNH}42aHRTt&ZDL`~xz^r^!y?U0aRa zFR(s<`TgBwb3sA4;(xB;25uqk{FLkO&^u4K_S)!hkNyDN8;qA5L(Y(0r;YzlQ0^@( 
z@J*=)m4TJ_Mu)s+?JrdLy(VGY-hB*m9QO}PPIKRd(joo?S!K+*nqK4HY8!>SCUIW+ zPCD<{=#cNap~(3^e=9Exr;kJd#-MGna`uS&6`klp=7jS7mNp(5q@_{ZIsX7<@Du+p z^+T)Unn|APw1O~BToaIVTwnIQ@6bLB;|lce7^AUA!z8~=LFLebFpZpn3X@Wr7mN;t z^qH82IY^K6{4kf^Np{Q_9p=#&pu5MtsfRM|tt)SISSV~UmZ01|5X;Dlvjyz`g0Ol3L6>|H z^W0hC=W!7|NEgd1$Jia;9-#fp{*jhAH>Z}Ylcu;#;FsFp z*Bsx%9kf-;|9H~>x6r++TeHaZ|DF8E_@DDyw6Ri1(_0VObLjJY9lpr#={xfGbG~zN zK0s;Pqke#rFMT59wfLSf2w8KD^U1zn^Nsiw|A{;O!cfP z|DSR63HaFjgIW6jmasqZcpdRNdIFEv5x?PHCb@13reOvOF%z@UrcQ512Re~k|E5tL zhi){eryA8$O=R-}^*@<%oIbDqSC>Wq!8ziYi+Nap$Nqy0>5uEr#q=fUR-eYTbu(nw zln>YcOXxS9mlARHij^y4(2=1W4aKs6FbOJ@p=^v-I}=8$80JG#~PSz#G; zc|KL`ANoIxbf3tr%U9_6PjjErm~qWV{fmz{K86!GjW%t`cI4KVk*}Y$FO^H(3zR?O z+{Yx(pNwlNwa`-qp8s&q-#1;l$#eO)e&MXR%K3eqCoA}YR^lSP2Uk#YgMa81J|EHd zgG{3Staa?P;jj6<90&Hz53}eV~n1D%`g6Kn=8{bntO>>-W zcq+^ww|zD$r$K-q9d88N`S!cJkkQHGLNpAP%T12}{}dA)+{`J8Wr z>@}CG^atM8pPmklj#Jpg_KSXRTiAc)7kvxDD#Tyue=_tVIEJ|PM=k2MYyTtZ{_5T5 zN$;T0?BG^z!M(4M1~z#Ya{ecChbWx92gLOUM>;S24F5ynHS;_-lzdKKj_~9( z&#lmNoas5DY6hDQH47h|S10ZSlDoYhl&WvGm4qKkvKlS*L!HHnM%zhy6C5O&2}0&U4H2jLWXyGlQX?H)XnoAQRp6LVix9LF6LnY7NU7xeppN{!7{8sdX4o5Hsps@ z^iFkUhcUx7^mXXgzRnJP)W+^oe{K*~K0H4ZlbcaFEkA4}x1*wv?~3}d_5nYh-#ry} z3yWi3_3Fnu^<#ppTBZI^s3R+!mk~}T94E>8V!nFp=*_ntt^b&{hHsfT_Fp41 zo!X~c-QNNCM`j(zwIs_E1Hw+fpTudLMWwI`oTtaR^EEREhKuwb)H_Z(PLOr;28Jub zuHgo5;fZ}TBZNDS@8JP*{%@bv$I~jGVG#0Br4Fu6>7U4a=wC2M-~JT#Kk6s(yRUjG z40Yacj6?y(U>qjkzx999H_$ka{V%TOLiRtI8qWSNVE^kYoFtAOe%DjT-aiZo)5sYp z#7xY>9L&W$EWko6M&Iu|bGGi5N%FpSVoduI;c@MN)Di6vdZ)U-LmOcQJ67?&%crv*3m1c7=Osq_fM}~;`t997>eneQ9sY~U*Y*9uGO$rSaU`{ z61fv;<@#>246zRCkk308=Y@Ta4&P;6Jm*_mABC7xK^dw=Mbo%i)mg zDo~9C8j(Uf+7`1nwX^@r^*7FH|6VlyO*XG$|8CGuaDJNJN*4GwGU7Rc*p@v;R!rf~ zf)n(UIE|VD`E$7Y%a=b>+60a4bM1hf{V_=WH!_5?&O47DWS@CCTp>$;@p9PaySqle zfm^tPdw78VZvJ0ho9`8akdL7lj*%$9$ID;&>%8ZBO#VWw>;Gc@-=CGgJwMksB%a>C z^<8XwIg~vAGWkjvCu{;HVG5>U1`06~(f5BAxw+VPwkh^EP>lo{kwV+7SMXFdQRG_3>`&&j#m`k6B1z3p1Sb}9(fmK+8 zIIn0OS*1*=mWCSl|78D@N&d@m^pV-%oMLRoR&2*klr;1YyUDiG(tlq1-CrlVc1!;O z=`WN1?b0u!2U4-He@J@<~3{PERVd!OHL z{~qVJA8YNGvMjFueL~uC4Y5ab?y*9T6id-ke>*tfAS zy!{0G@(lZu?DSig`Yg_Si}T=mh5N+!pHc@d(f^L$SpQ!*ssACxw#?g;quHFUIfQ1m zWy|&A@QB`EonEq6XP>@Wat6A+m;AcGTMNQW;n~P;QZERncglZcf?eJ;)BAOwE$%b6 zsZw9PYh$(T{5Z^Y&U{qjD=fkiEW--)sZaW)Ie<9UG)=~Ivt_O`Uj0)V{V&>|kF-D4 zM_pvMa1Yt*xO#T}D(S32cCq|zUAo4L+JAS8!#Z&-@@Vtb;;@0f32nlyQ`Es^a<;KE z;CID@ljKoe5vz`Om&e?9)uc;B@4O$OOh+Bs-H!vDVLT_5B>yZ`uC@}qS6kt<^J zkUcSOmG(0|u4yo^M7xB3H6A0JJtglPlZVLb;%?$L?qcMe@dM-#9$|Qlwnq%F<{zD0 z>cjAU5Pdq&W4wBQsLuTUIzIm8pM-tKOTtU>Ba8nuypF$5{V2R8hqwJnC>Zp8V>Ct* z>ZHf~T6NZgEGhLJe-ui^H^|>($?<3uH<2tuoCDG9-cxJ*Z!45-!f}tM#3pvzPWC5z z{=@!v-RwAadR+hiscTPh{xrIk~#hP?fd{8 zxA_O|^AC_+!rj~1k?3_^=gUvSLg_5VQY=T$T*t5{SJDT^vcK8ltLbZzW1D7$Gy2Wb zWZwSn_<{Y+c3$tey#0OFbFsl&R(%>ai>oz$qE)!R+3$Haq-$+U)Q%oY-3#j+a)3pC0;A_{sQr;aKJTa1=k@G%buI zt(6dp!hyPiP~|$?umdBH3qmzHWQ^->GM4i-Q5wrq8{#)hOmv$le9{=c!I-zxoozOP>#^1nN`4{2P$>AfLbigD_u{){|E{S6OFpi#aKdO_{sWy45;^V@tzE*Z5X>r>i7ypIs5YhCROPEdPJ! 
z8UMxM;&4OU9o$1tLy7ipNq9gXv?t!cb9--0H~(+4_4_7z&arWPLrF;Ep=0XrmW0RT z6Ex}{eMY`OgY%lD(~>C(uY}*Ab(M0B&6GS<5>g21P*a3aD8U$v!vu_^3&JFFX!oaK zGC91wAWS8vqa3r)={?LL=b-`%(Bpl^K8D5g{Q4gov=8*N9N){`3%?dr{B z6{4LzFj~EjZNfW{RiEaNMh3UMlZE1|u^Tn0#X%fKLf?JfKhVuT@Kl}aoD^E=ZIATt zH?i+=ehK>?U2E9)=%M$bZUo?qi+n(lg7943WQb?j^tMXi%E!v++zqh0Ir23cL)kWVQ^w(apPar*w3%GiN`KEiesQ(ey-+CsU7sy)c{}tJ@%J(z(qwt14xJ%yL{*gp| zr^xxXvUtCKwFN6vmMg(BQmg>BM&RB;mYjEhBi|cP` zc7y|W%EEqY^`G?n`}Ma@(+976f2gCMLyn$B25Fr4|95WEj*jiLRocyaw42Y^Cy0z= zNCu7=W2GIO-ye9NaY&byLE2+!?c?rdq5ZqPEL?HiHQc~0G;S#i4I9eB9eT?VegC*ee}Fc6D^f_}p}5Z5pNGff6ZGge ze@4DQxBl~iv-?HNoJcO3rwn%W}%ocMnuu5VT(E-^*_Jbq_? zf&%^XXh#RmqZ3`l`o2dSf1q!_cdqdVrN$qOcOPZ$gG>u&9vOe|)c6Ck{@52`l4;hEC_lq!w-r|^MjH6FLoAX+S>>=>dUILgTu5;$+VKO-taoodnvK(<7 z#DHfU#4O=C$nMlG-=m#=)_ovx)AQZ+d_%?`Ai2W$1GJ(I^PE$G1z3p1Sc>IXiN5>t zzxpjdzU+u}Pig<-y??iTw)cbc=tS2``Two8!oS8xqCa0??# zzX*}I{?;Agdw77yc)$ME6Z-r0w`!-#yARaAx8;Al-+#03SQ-1+{vc<|E*}?XD71X$Fkqa zS>o!oMdpz65d8!dyQIEOHGOk0Fp>6p$YcGzQeK)HOSE?zBeExOOTclkZq zrn3fr&))!2<1^r^%9 zF)-S7iltj7{};&r$jxQ{%jX~Z{~Y%l_5TLxOXDC8qh5RNDB18Ge~Y^LIQ=9}<2S~~ zbg&%}=f~Ii?Hsbo?pk9k67(e6(S`hYd3__%-t7}k;{pz_7h?Z=Tod?;@HO1PE%a2% zqqsvK)ZQK#TNv)qA0XE+Z+FP!WV)a*JQNql2R$Z-`DdPx&+r1T@CM<6?;4{}f-x9} zk)`a1Rs4VQaI-dY3x2!*BWs`LV$Mt_+_|BzYj#-$83M{}vEJjw}|59?KO8aBF`n&0) zuv~cmX5#{^Be;^@xQAaAtLbr0WAloS!dm)z#5LtwpKD{1$*(;BKKB0+b~2gY|Bs$x z`?q$pliBQZoU;*|QH5>TfokkVUrPVJV+WiwNT&4}XZEns$sGH>y@~xV{=DBi$u7so zwf*zs|H-)jUyXEXaS(^mQ|XxEPs36AAbWmbD|?cD61f%n-?y;;H@FrV^KRV#FW(PD zZ`}21IPJJPG<@Z`W_Yf-`roZxe@_9a-EBR^2?wR}x{QoudtbYE3g$-x;#F#!#5%v z=l@+IuizSPp#J$s;TCxZ_wWFX-^u?&ABBhXmYwoH9@C%TL;H>YFR8ps%uxTJx$@Wk zXa4lqx8J&poq$^BL?7FG|G+chTHnJ9@)i27s^1ao=s~3E8D!O&Ikb;v*Wmm_?E`et zyAjv;??q?#$Kj21%$W~yd|vo(*c-R47{3sZ%sqm|x-44S@K``*I`Isf01xp z8*NmSJ3=TShqO2AuNH(c^l{j?ZC98;HeNLLO#CEz3)y_RAWWuDMO*`qYPNy7%neq;7jZ+K?T5Tj!0{>qHm5`?~pi&#iBRJ~nkf z2Jk!k`}tN6O7k#|Vx#}+IC&DMQHO-KMiZLVeJzjpDBl`GMz*5ub_nMj^TYAQ3E?E# zkzL7eT%#^~@=@p$PU8YD;R>?mU=Hv2I$Wa<_3a5a$XmFBdw76{c#J1_hLHk2hbJ zfB6+A3-5ctcTJDuhLRofFNR)etN3kr?H8e(9C=X~W|8}?vwOgN%i-N$hdIJSH3eZF zS%C!@dH9E6A=$aTAS@=AVmVeKuC3C&fgMO6l*i-RU~B2?k((&r`#nRZrxb*Z;x?lS z+pq)G*o~1(-+0-s{pRaXOAbF1E>%wL)ZW+=!a;F|aTN8hr5i?tIOq4I@M$!jQYIye zLmfSi{cY}-?~lp%O>9fD)$bqbfAvcXk~7o|E7TFI)PKv=o6e2>X6MM`s2{So;i;l< zc4ASed(JQEoPNYQdhoJ3>b|;)%p&(({Wn+rU#b2lJ4@C7VL9#w7G9*t3oGR zI`4n8zF^N;YstAz65SpA29Nj+`1;$0yO5>FHGtAD`SIz0|GGN--J@#jN$v`#r|b^D zc)mOQ?S?)2@3x1NcejO~4{ZxS+qEN{*t#Pe$4^sR>>0N?9NV=y94-4V!?#nmhQECD zUxhwtr?G$8=5XM|&QN=5OW23MYS2AhqrK|~>@*MV{PRhn<04-Xe^Hk_ z^;kOHLlgL7CWa^UXLx~E_%J^6ZDDvLTtB`zgv*}$Ap22UbrijEn*N`;#i4}WvV{+2 zLva{GABVOz@{qU`lG}^J1aXs4TT~JzlT$HN#f~M1pZwRMoScO@7)g8_=8+XxfQ4w{ z-%jl1qe;2WDe0b-Za*JQgX=U|Qn#a;~TaK_s!OSi2?15xR+v@ZIJ2KZpp9@$o4Jle|#AKG+)~YJ!7>&u)ofGUs@Wf zrL`L)=RVdC=Y2-~8ovPF#*nz7dBzBtmvB)0VI0MAoWyB#&Xga?I0xe#nLrOcNw#C~ zk$XhuzI#Nxw=9bMwgX-0LmC%w30IJq%|2#7HW#pu@nL*A`|`v1|7q-7c5ElvHHGbt z9`vH`GXDTOI?jO~WK*ZfjBwUk0XcLweHyO0&JEnc9rWCk#@$cDJ$l?TaNzBy;Q>8c z&SrCdHm2d2v}2;(S^xCYkheouXuqBKG(2?dV>Fy`-~H};A3NTa?HXU%tHkh zpidjF|F$}YJwHgMg)=ME1<0XY8XY)4&Hv+n?3&@-x=xSl_3}?Fl*VE#MZNN7IoZ%q z%7(Dd-#ugM`0rPXYyM7IgrqS8DSH66qV2x%EQ{1f%hX4F0QJ7NwekF~?0bv#^!)li zK|MTEJv`0#SSf#zt!Nu>&4D#+fPH@7>v#Ni{@W;x&8WgQ>_9bkqXvDO*!Nqt5y-(k z+5}`qI7{a4^Zz%n0aE+}WT*C07rpz8dgcWGz&dfGJ)?9E;xLY)r&J!rae97zU3T(G z`f23cZ&rTJAg-@lC+-{)NFw$-wv%!1-!3vgE}UHv{Q}XB?h}{B1;p_Oab3uvB zu01`l#<%g34`hjNgN%E`=I!5AY(&JhZj;^W|BU*dY!klZ+&2jSz3(5RP=YZShrTWP z*YUoeKRr>sjBKfTTAyS4ZS}u4!FjUNab3Ig&m*qS)4M_cyWb{AXA&l3DjF-Io_j39yv}O3;kwUWIV*@;We_MTjo3x)-xb7YQ?{?{9j^p<~knj1U^6LZ4*KU~M 
z{)|h=mbt&Xp8o{@#~Ifk|GDR%Cyfd$z(Op>QY^J&pY(poc61@mMe8He zxPVKzf@`>eTj-SM?vU@t_ur#Gz(Wi~8~u^|`BwgXDt~I5=kVBXPw)&c@Ct7buK3=N zP$o3tL-{~Yp%rat|4RN|CVwxImsiPiyZry?nJ9mwZ-(*#1M^)UX?lKrBEQA4Jo$c~ zvC6I~@767BAV;}w2^wd76vmJf&~lc4g6}WY#P;v!pE$ukfm(eC*~Mkyz()Q5Pt4=x z!9_ouwD|{IH?N4;TT*|(S`qwdlL8OsE7CE%9(|;+Q z^U{lJ-FHpVf9X1Lj(RWZ&wd_`O7A#M;xrl?J~!|2^H4{Red5ihJ`d;U3A8;Rj%ydRBCcH!|EsumR`fk(OA5j(zxUAJkm0K5 zSH47_)8I7o2p9Xmj33L=b2I(-vwZ_7a@;7yu@_mkehGafVeA7r4!P;G+4x_C3G|_P zpNC20WK6|$lw%g^uN#L(HfUSUBP-A-ZUMOvac`Zt7f?#OIEgm-Deke;qz#%d2BD?E z@1y-rCMWt`o4S==TU8tuJAWybV};f(63DCLw=nwGJ`a7$f9G4Huw_nXqq-S zx@Q=JFRpjIHaHUQvFWC3qoq{2I-ZSO%r-7yBV)Det;Kq5M%+8Firj`B=yQC3nfE-y zd&Y0??~q@AeUx{J`yZj&BbCv&<%75>Fzd3~mt`vuAX^o^q*iF`;Hii%Tf6J`_ z^6c|)PT^*;N#A(#>0rXsUjr#?mPI&O9d%Mnlrzeo>@C{t` z-(PgEWc}UZuu*&x?dU>dvHG{n*nN77y0@9#o~BRztp9aJ`)`Tyfvb!UTw$!1y8pxa#M87tv;#WTv16n&4ihj5aSVU= zv5&)K`XCv{@K2>rNA4}(1~N!Hu3TLG9^(+mIcU7d#w9DzaNE1Ps{SSy2*>$<@Ap+s zIxf|(uKkXG4$ZHP|3i|V!u$3AYqZh!zSX|D7{Wqn#5KPl#c|ZX)y^gxo|^y1Ry|Fx!#Ol73t}0OL@Xm>+0cd`jxR_! zE{S$zSC)i6a{mSP^;Bg7{Q@rG3a;S>V&6lPeBO)}B<1~-av;C{^88r;mWEr7yMudp zfQNXDC+M3g|I2S5`hVrC{Q58c%Q(&>zy9(H`Cs1eoFV^9vwNESkKXZYdt(ltNoUwt z!WZN##5ovm$nZbP&sCp=A~L)FvoMM*!5EBa3ydRc#```d`aWjr`?H+{ey{6VZ2-iRzchxh9QCf2ckx4OnA_HRs+ObNG=ZT+8x z&CaR9Htaw(cB2Nh=rhN>-yDR2Rj#u|y2c)4%tgo=dyv~;{J~Z8)^YwWUjw@6-RN;# z-2d;OG~zt=9vqJ8Y76Gyr+%T%@zN`ko^-OvIp?Tjj$^=YgQM*qkoqi~6wZ(Txnf@z zdZV}o)X`g(c{ZG*C(uTZa}ZL9a}W~SJwKY!g5(+B-#*_T+Pan1Px$Pe*N!gqA&m>T z6vG>QfB3Nf6g`71a%flfcQp9^&iek)Mek1e{*L+n(D}MFT#?Q-+`ujL6ssrIjd$pS z8~BaX%lGIHklUiYD=xE>{fmd<>W#sFOg=&5SH=^QFVL_;y}C@kRqwtMeuMnHx|#B^ zawCPkPlHKaaX&kAFv}Dm*OcwpKaX9zyfBo|$6y>LAg=p8i5xa>elj@~(@~DjV|@2y zcfa=kp29GPJ`WX$YhDfF{W>IBWoop66N}{kRqhilo7~qP_tl|%K^)%{`~Md>e<2oQ zDVAd;RwKXu(^l_)lls3@ojTF`N0y!|_Wp13|KY>_A3MGOJ>EYV=U)v~%d>OL{qN%k zFwTFiG}mJzHlqsLP>tQF!N@~%|BVf(rPrTfx9A(%%MTXUf~-Al?ACVu|J@&jgMQDi z|Kr*AFZMpK6@?Gue>`^*dHc6PJ;l~)|#)LC^rz3u+TVTUvh<0y{fBu=9a=a66j zcPIOQ4?jJb-pand%)Y+Pz9!qH(;@Bi>bkf;P?!2N)~!A3)W7QG&S~mE=_JvPF7!-s z%v|jc`rsDz?*?@({Q`2U)W67V61P?RLtMSzuaMW!DE?e1 zLcX8FbrMbNe>A(lmJadPJ^yU^4{fNqs;!A{xyc_mKU=L$iih;a_!r0Du8{w59&xRh z-yVM(_auBGo&5a&dj;Vcy{E%BvgqURfriv?kVxJj6dshEy(%)%VZLj@LKAr@mP zmLt2(`goxt3|-Psy-;7-;ZHuSgA%{V5k9be3;jMuo5JzKcmA`me|cp%ppR{>@W?sy zMxtEsQP@asMisVU2dYt9Q4w~NHBrYw4C{|POve82&J~5>DE&B2;xu}8MITUMsG~=l ze_%GBI6Z;)eyNS+2LfttZ-`ltnjxd=7wL)m=jKKofFQS znj8MEa$flRx%0wGY4+UyGIU@5GF+l}-uyDWA;bUVdqnQ;mm%x-jNj83qz`=eWfPXt9v;=oGq>lzx=Ku)W56@V;nmU6EF!K7srKmeP{W8vOVKM_ig=RnC!Qy_;$*) z@Ru{Dh3WKi?7uuC9N1ePYA4PJ`|wu zMRUVXC(aG~AL}pXll|G6`Qhgq=7r-g=Y$jVzdc(KezB-BR5*75_U=(mqp*!@2u=c>>qpAm;Q+T^P{lPIg7Cr%drxxu@>vm=eYjy)<3}DH0vK! 
zT94qZcmDFrkbC-NXy0S~gRiWAfKGIwdztkQ9M?PdN1=XiMc62v&8WgQH14Yi4JRtX z4tfi{xxXS*(|4ooP=&Thg}#%DkVNejem83c)cCCyJ=PF7NFGKveIQjC2CV^cRQNcu zXDUN(inc>TWyqYc{)0J{O=Z>}m}32bO6w0yv;M$L>kp7^YouE$e`w>zw!ul~pGF-L zF|9tfzx>*-Ju)y+{~OZUDj8&5BVMOnS{-vM!+CTTSB5Uv>ORZ%M_hle-h9WTwDR-& zlq>D@#+2tr7ro`V=l5;((bH(##P@jD^WSvdP(`>Pu5-pm;SzZTaqQDI@&@7_W&^j~ z*L`F3gzq4`OSypbW%o(m6ZZgd52c4>!(3za$R}u|KO%D%j*0r@FGFIc^?$eOd)@S| z4>oDMe9CzFR>^W)0wUe?B3d1<(>>tBde9{;T`XuzJpZjASt*(h<_0z@b=F)d# z)^h5$_S^bk@9TRdI~~`xOWlqhdhZ7H?|k*IG4;)-e!c#0Kl>l8ciI29KM9jvV=Csj zkLhGNX2me(p-&s4Ut3~8f5ITr`W!O&u>U}c{m-v(9-aHx|7Y|+Xs7gOuk>ygr!7z+ zodsBk#prQQ-R^xUeNf#x;9i&0S0b)ym|f!;S9n%!jn(4nwMW*H>k;=3+(>RlgL9j; zJ>tCnD&cKtEz|yZshu%JJ7c`(?`Qwx!~S2D%I0bQ+nMTVa))z%=wGQ8-i;b$x0zSC z*Z80#{2%-HKTh#~9OC~_pJdf1Ips|II_(eT=6Q6Y3*FxtUv$&>BINyt4IhR4dU(^{ z<%@&TK8&~rSls(-aEkxGNQHN-|$GyMK(Ocw~xZh8L zoAkb>5oT*KF#Jm+b4eaV|gm`YHSR1bg}n`}&Y|<=F83``G74qJ8fB^xiVa z(sS5*Mtz9&t`*zZ8_CV6!Zz%WeN)t{ZV1=%!`vbdJdSJm6@@$Wdw76{NccvYd^62t%QXK@H7smVjOZyx*=pDs2K$?C5xt+=-WG;)trI?meij-x@vYF`pkeDrA^aQfKV@7GISQ>q@;c(0A1O3{ z<^7|?Z)1=+rT#^8lX?ot{&#-qR(c!8Ip&A;&jjH~n2hWW?XQ&jzgr#2Z!k!vg)@BZ zS>(`umi>?O=tLLdyttkQ^}p*bbl=ej5Z90GqW4ttwM%db~3%^y+TcjO*01M~~5%<(-MM~Ont=;_m`>l@IvG5q*y7ckF=kKJ0&x(ne@-pL^JUZ$1BI&wt(XlU?^cKiMPP zOLjihw=bP4Y{L%p6gy_@$Dx{@pZ_@DymWdEetZ67t+;ys(?Rku8o#oh0a@$2Z&>BI z_IR#E?ngOvQhe(=&n28BQ+`jJV*g(BU7!U?q~zaL`MM2z_0`wlv~$0GZH&NC^Y8ud zb;9S+cUk}Xbv8CRcwf7Kp3%pjeW{&r=GXo6`smN26J3aVfApXiolCS0q?1HDy3ix8 z8-4V_ebPpnegQdp7MT;`a7kQ!sWCI;H8j$1khjo4k9+>K;EwP;kqS?>#+m-0+PM9+Hpo1S9jzpN;Z`I+J`A^;gE9sLS5lC$*t0yb&M%7kOh$ zSr|p`zu$W4Gha$%;#9}PPa;(H^tVLgw{%;Jd)6c#_zxsCl>d5ZY&%Q;!{9FCs*Y$s2*8iQ- z|J|?u8$I-1{rsKu_0rjh%{cD;^^`dVRlFh>Lygm-%FplClKFq(}rJaYgwqOSNcHV9If1l?+;`zzWGoGL97VaT?wZ%H0 zejZLr=QQeY4n5+!k)RJQ@%&%8FM2z2!dc-AnO-41arI5sIUv($q+cK}p@H69?72#f zV-UWER^c|`k;lE_^Y*v;CGK(8a!i@gt?am| z47sW-xo9k!bCPIB7xMmu4*wVWg!?wIzcGM8Hg$Rv`&%DCmdur@{|_mPz@Q}3k7KUp2Zsdfs!WlAsO!*Z)4UNwp5)=xt-T9BO0wnZ!2aLzrAbKYc3#dMTo7UrPuJLCH? zz!o0dXIwnHIm0&2BG=7UT4ntCI^)mDPRDhTXsTaiN2HCBr2B5$7ai5cl}Y zp?#+3pX>SEZ+?G-@t%L8=O69)QQ!2b4TR;>8ubGo&ptka#+2vBLGdlm-P?EOFHG0J zFo9p}zURN``5$?H@rNDL`BWW19!DJed6GPhxXx$3UwW(WS@;~X^xQiCeY8R831j%1 zjO%ZnBL5?a6yh46ZAdsLiFS11mt*1b;Wp8jjw@>F<6ooEdMu!9{i^Z!G^9l!t8{67qToszCKBTx8_ezxVa@bmkhg?oM*v7Y}=uKs@b>7L&Y zr{-^O4ZCSVdKBf-XKdZd3~h^>JnJ-=S>P5lFCXMcC#JUY>(j_XF8!{2)`jv-V( zVqll{0rKnbO;h)dRR@=<|Cbbpsjf2}<(P%WW#*q@4!vcc`VaHy6=+izwj!=wmc#;a z3$YkWu^cP08fy{X)n3H%a=mcuGpfaAdKD6v)&I9$197k4Hz_qg(=pqy z-`wj1llbH5)!2<1eAs{IkoG^)r{pVS={dB^XC2Dg^JM3D{BO$TST^_2d--KL*YSNw z=O7NFHtn}Fj@ikM7ar{Q|8*3E=w121m$N87)dUbEtef590`oBZ{kLE7kCdU4U z!lqOIp|H8&MPWE9M+FTE+OMSH=M!j4rVh3A)z6n1X^e+tJ*ZvrM^ zGNxiW$}tQ1^?&CS@hkH0KmIt(5jPJNSb&isLwm)2PEaB#^IveOKRNq~vX3 z=PLIsz8%>G+Gk|X9@p6QePJJcaOU@g1IUlhOZp~Dzc0)de_xmx|9xQ^^(Wl3<1XM5 zuAnjVtHOr{=55a;W+6`K=i>* zA{(#1Eo{JKdW-rcj=h~u&*~p<6OLnVQ)FDjvu26!bCv%CasFSq;~L0D%%bP#|BYjV z&(IE^rhHMxeDE*GXLB4g4;5H|g;HVSV>ocotsr=7D?JBMB+ZX0%>8oLqghZ-`@1*#O?|cP4h~xi;#}tPb(iwVI6kd@Z z+SlsrH^Sk6_l|x&{-B6HVvge|@&JGAe(m9U_gW%62IDXRjq20}Orp2!@@(7L$@Hm+ zYX-NzHpbw&xdz%RiDRB0&B)LH>-YQ(oY;`aNVUpnWd*GcC4Kbo|UPQB~@h-)G(md;Wv$4c}}(}tRDYymy?eGE8e zEqy(5Tlw-gz3cl(?=sdvT)k)AOje=M@!QC_262A;f!|^uNY*!5ExvV%=klK?D?R62 z&wg1Q(4p;5HY3{pNwV#we2(4Dsfp>~AP(awj^iYv?caA%-nuJ~kJ2z(&8@Q5~ljzM)coZp;SIw7q3p&elwb_T zVFD&$GNxiW$}tNg*R;={u<5M>G)H)Nf;C9U3M{}vbUrc{gsiRoEG#9L|zHJM*0k9_Ffztyk5_t9rI`uZ%a zl*W4OKdC>?oYDOHe~Z}J%h=gu8X0<)%*DSHT#!6+u6YW&#-^VIM2TK#HV4S zv_?GRX0i&~umjcDjjZ=yL)PLT4rACjilgL^H7DvHSsRdk5~opzMsW=<`7Y>jPC+w! 
zJV8$)&Z%#m|IWveBtPuGx5D$|!}`9IL^oG$bsjUW2gF2wko?0*bk&~?*8?0;mD zL;G0IkMrn6*K^O09`vHqyC_y6&_8$)1bsf5&xp zn`0}TahQNf=$YskZHvkDLH9L)sr2cHdunI>p7DF!Yr9-rTmx?wIS+B~+X`|48dfRO zc6l!BU#yPxEOAY-Hf2kaO!@s^+W%{zbQWVNmLu*3SaXUUn9mn+?2+;xALjqum$$U7 zKCJ)f`8qP5f3Ii7hy53|+v56>e~5d&&JlkU$8i#k#_BZ~ z|96_6U;qDkaj2u8Lz}uNj-gLECW)FA+J9@5e;fEBv>6hPc|ZR?Nzc#!TdDkhs(qmD zO(U+`lfB9Qzpj1Ym@f1ojSIMhkp)HJ3OO{3kDnYqUKDPSw{Qpd@Bq>FeMmmW6FfuX zTkH4Y1-(VTL$kKVEBYI>=?7?Cr~Vh0^jpF`G`W{%#CZb0vH$KFRF|n0^d6XK?JhGCecjIoh_decP`5@Ayjl ze^K-`X#bPl%e4Q=UdK(BMmc67?%O+ue80cOJbDEdAiqC{@*sUi-rFY+IyOhfIlc@1 zz8DSi^-{9_wl!(Vm1w26-GA4nNRiD?<;R=+^)IX|;`hXC{f}ry3zEl-0Yod>jN4o5 znDy9*&8WgQ>_9d8GR8k`GX80+@lRx0J)9x4!a1^CTQ|SH+I8P&6Z^2=7{CT&bj}(B zh?4if>20{;ib%kao<_Hf>_@!=w1h$dPNFfxLiAG2VR7E9B6P zf^dz@kH2^^Da8H%ZV1;a+isC}a1RgA=$mS|YdtA?%SGSTYwIJ^pCG^fpJz@X>6lt^ z-;S4uADQ2eXTmS=3U81Y{e|^utbt$lMQ9oSMM#!f`(FQkE85UL_6y_7z6j^}Pdl%E z;aR>2-50;`eSg9KXD(@~D0^_HXA^b7URU z?Y;KUtLXddw3CW_1L}mql>fZjfA8IA$+#9n;*|W4=2H2;*nd*yzajJUPv2>sTNT69>Z;^MCFV&lG-`NIjG3>W?bRo8X`^Ypd;1crwLf_Z7WxiwK zYq)^}U2*;IqP+j_j_^G^z{q3c;K|2$g5fvDz?1R&e?h*&8-)MMH-pX%`u52ZjKMg( zpZ_y~9&P>sOrlRlZb;dnOvoVif4BFmpK*S)_k*s9-cPajgI@2dkIe6{w$=OHO<-WC_gsIY~Pv% z1lCws5Ov>8c1%%NOkawT65lDg605N`#_jmr_&4+aVjcXIvP_w|Ui|y{KO5;S>s%Z8 z{eLR>?1y|?Z+&A(BC(GRc!d2+wwzM`smtQJxUCs>Vu$g0(%6hDY{L#zV>fEhr;hAT zss9@Ee~(tbPgK7nzy43L`u(Q*59iT|-`*cEt_fHxor5@xqv#pU{w?5ppbw6hHcrw{ zBezT4x?LSxtgc0!xS_hDaE=^at36WcUCi~pRq}0!OQIdM>)DJ|^29TJY)91t;zmwd z|NqD*p>~wIHL%+b8`B`Z@sRezUe7{rIiYQU3-n89<2z{OJ4o%*e!vxRo!@;Nu8}v; zL%&7dK{x%q|L>mg1N@Hvk3HUm{5-`Bg+bKW)Y?tp7S>JJf|G&%s3ICttili|LC8$4Zojx4z=PEj+sB9}E9}!^h$8<`#!D zi~b~>-ts5m7po@fqx(FZT4UVPB5T{M4`Gt@hnIdCCX@9YABA7uwjN^lC!y|;Z>L~h zXdnHhw&h=h^GE(d-@(5MT?JFkE&Pj+omCQ=uKsyQT>SIUVxC&_(7)!t_|KJ5e-_$a ze;HZ}D#F=`e3voZt)JRoE!u^@7tUE<@gMg6erP=Pd;WLh@)z0%ZS=n_{7cl%{>Q?7 zl^Zz&;xc!fT=ZPEgJ({E$_7=qX zrT#PDV1@VvScnmQPK(Jn|8FT7*Z*EluEc7rMcn&yJ-HE^QH7o}?59&7g>Ce~L+qyv z8-ZSp{Q6}pwBgoh%N^7HpHUiii?2Z~4x(|Lc?4@p!(n>M5`Dva*xU4zXgg)?w`10T zBaCmfwf}t+`;7@W!0$84@g*38ahQNf=-c;$ z`9EjWX~LN!o{!9Be%=08=be9~{rgtA_f-4$zV@%SL9gGV{Ww`VQ!yRo=uy{qpI}qd z8ONE!Cbt}KU z`Z&)0NzoJ1X+raS?SCZcDYTx^{y!1IO6RP`TCB%LeA^_?cyBf6JLUOP+W!sO|64u( zPS3x`^OHH@cF)`K((|M9vgh~g-JZLr+w-#rhN`u}+10U~JzT-gmVOnou|f2p=lX~?`R4RQb19j?=bKBUoTEOM*^ zF3{uLwdNV-+0(Be?g`L3Q5`i_U8UZ-Chi7q;SPT2zrQE^01uI0f0MuPG5rai;RO;6 zzuw<3!~VXiPQ9ycMJv7SqIQIK&@0EiLHNJ5`7jD47z1;$L;r36KL&l{u}zmb;r~D5 z|0mnkCgvJvC!GnHgvsbxqy4eM*lGIUS#|g_{Q&fG z0PGmF}E)ayRR3Vy;3txp$A}-X6j%dK}|2kF1Elt2eSMt-WxxAS@6bwuazB zaxs=-{~hn=g*o$QD#InuF-P2TtVHAOO8&pfu$tcT+T85tm0>M?J=!K}SB?ELq#TpP zMsb~IzYLqnD)gkj4BN;Z=1{|Nh0gmGAt63LzCeFX zlHOxpeD}+W&`uvTcYdJ5TylCJa%!- z-G5Avd-4v<|0q16KSOR0-ybr&v>(S>S3+F<*~;*We1paY1>QsDG#XOA9mlpTv!gTndsf3|ps8Q}LqHy3z z5xa=}|D655$bW!D1N&25mS6v-U!6ivp;aB!hFWPH$4Q(<9nK+vB>J`+|Bw9se)rkT zm)ZY(>RB@Pmi;fS4t3ml{L}ik>faUWU&Qrq+NIMK(?S|OGt|H8xC`{b?dspH+8^{Q z$g#z<>gLQE_3sAl4{>q*n;YaUG&=SUc@GVn-nCs?)RPZ{AEI@<=ent#G0k(98Bbm6 z-Kjs~_xITEPw))2_xPL0{Wbh|lg;m@zd?B9+r}uAU<`(L6oqkQToZ2sISG?771L3U zS(t-)80jhs732aW*!&;HUo1~MP_ZK>DF_vOER$?{QqHnAG&rTR%Lk=QM z&y*_rXUqR+zbpUayyH4I$^YuAZn6ix>Y%-uLOyVP@A`704?M0-zh1go`L9PEyqR8w zL3Qr{V&7i6gYUmdIY4gnTjLh}?fg)+?AQ7u+LPk;zv54c^EICH6?`YJYY!*MSQf-J z!Kxjz8#SoKK^(@gcYTzM^YD(7ac!WJ}3UVj3O^|8ORzYHh_79ppcU&tcNsnWTTh}P3luu|r!Ff(9m(i#hzXwYEA1_{=vk;ED+R_OcPSpB`f_jCUEex7yPYwx|*UiW9M zP1+1|{c#vPz??x3I`=C%V~{j*Q;*G`|9_UWnIVl}5c3dp;NJ;dpq`c36Y|~K=J)6S zpCZ2Z;WV6qkKr>o57Q?(0}82*|G!2~+Bh3wNvM2_Sw7-B1a<9m5poGEg=O&l|G$=F zZt3OSU?t{j;rnxoop#<8e$ap4&Nw5UaYZ8I3TQJj&VZBLYYo?}h4rujZiP*-1ze0l 
z+_Q``kRHY<^ZNgcmj_{9|NkuG66hjK_XWl!ql`<&8K)roM{qmL8UAYjt(W=>?j%li zZ$K|&oI%E9o@nBRjHmqNF8qeVa1UYqJCb`vu}1(mEBDIGrkrv{g{nJ$KW;n0ZfESC z$a}#<*dKwm-MklLq1GhclQDj`o-@~Qe*&I@r=c&6`{VvBW=|RSH(V;uV}1dKX1G7^ z;p3iOcoDY)pn(jSToNqV$cg35pCG4Omq;P96e^(x%wU5?6+gyx@WcNVFgu|Q)V&U@ zNB!{qwGzGqGPY2@IoHvJ*%eOulRg8Hr2kpU|BTwN#~o!sL z@1b1X86bVT{pBQXr(pj4kbR5`&;xiM`)L^5$hZK!yC{RuQG||=5#1mQbrAHNpzJ~) zsQ>74U1WSQLm3_+o}*kh&UMH^>_f;i#Q8CN2IoO{AWXhQehp9Y?KVbGzA1O=8CXBe zUtVDR0PXNC;pA_WC0GR0J2`I(xfGVcWQNFcMor4DT-#_YJ@iknlwvFGri(OZlb$S4o@>!r1Yv!Q66; z=bB}nAF~bS=O6a*oGCo#37#FdZX@kEvIEq9MGw*i^ZyTGtUrG)A@)~b{=bCV=%?WB zV-Da5Zm+>G9EX!|3f_n5#y~lZoVYbm&LAiGr{l-S&)__K3D$7_zZd;oA7Xxf4>!js z&rhG@Tg)vNc>c4D@jfOW*3zaQCSAvQ{t2ER_y2YO0eR6zyzR)r&zSq;+WGyD*Ya&* zUJA=#Ik-|;e<2SCg2@AF3P(*f%3_C-m{nT$(Y^;gD?d1 z>uUzuKOOBK^L;(Zl%RVtxYJsY}$d??kHl%Z;>w z>KWwCKHeYM>g4??7j2Y}cD0-y{%IHeFZkj7#|!koVE+H>Q|R!YqHjUFmu9E^g9F5^fee^F!nqE}3FZd!kdvF( zBY=KDq3XuBREn&Gd3`_9CgaYP^zm53#mx*h7~0D_fNzv%gYWkPHR9I_R{kOW0>h9_`lcRPGNr^PD3yC<-Qd> zcMoZNiarqbWbBQs|2KvHU-|_b(Jx?azYU#&_K0hkANl{nr~E(Hr()-f6>*O-|HmA+ z2fRhh|G^;4pMOmpoy6G%-Ov++ZUKD%f4%wm`OnStL*YVFNSb;NFZVpoGe)sK5Xt|} ziu}bA8ZPEw)~htDL`RrOFXkhB)1&Owr+=uvlm6e=_Hk~2dd7e2X>{U9s{uQ9-q(Qt zkb(XC;KR)e)=<`bzrSCINI_zxIO0hE*i}+Q_?BeSe=lLwbYQN!eFy0ePTbV93aZDK zN!3)S82zqdj(Mrn?Yl;5Q^LeF6DBo&S4+uMv=oQjEJaCCQV10@AyR%JM9OsQ*}E2v zE&y}jrOcsSxL(o?>m;LWo#-akOJ?{@l9h6kv~u4vo+D@X&5|2+IsZLlKU~-ql7C^D z6og+n|6WG?i+5ioB}L1nluRz0Ss~>!E2Scp^BGS2OUWgyf9>X3$)_5=)AS{jcW~(! zORHZ1eF^HjG{$nA6FSV8p$|V3alVQ?3a^0>N1FW>#fv$B@2RSc^|amBDc&OLyG}K~ zsqbi0T-2%a`?0ZGh@*%4ws(a34*IEg=g(uAqCPY+_lwj$^e2Bi;j+G^KjpVroS4sY zk4|J>(eK1yL+|E1{dtzU4ugIf;ldV6?h=s|bkBYUZo&@%&YGfE{N&G=T}g}O4dh99 z560m%u`HOTaY)CmcKy8QWd5l_aXm; zyb*aVVd9XFA^#KkIC2|)&%k!<{PsZeGh_{tP%;u1iY|@+5keMj*%m+1JN+bw_2^t)CXe|CgWXSl zN?ZlV!U;bqI=4`Y`O!oP^BtvxDciMxGUg{0jJqpO_=%Brw~F?+n)mg= zQwQecWs1eUK87{-3Bbg7@&oC?i{96dqsG z%+S3R+SMUEen;Rh@F5(-|1xk{=o^ri{Xo;9c>_PXrke3U@)Wm$K;`G!L+r0wA&M$aRnWW>pRgE?PLFEKVf?=at6&g_JzjLcRb90^pH(4oEtfU zjtPFPNaxW&smI+K7$|1$VTnbrAUseSu-lOKP|myI8LGMdcI0lT9S@YcjRCBaD`FZA zl;7f}2OH14VYR9XPTPjLfV}%0zJNmVTp|A#Ksn!kDfV{C;&1TFg$k&LW)=MY$#-`7 zWqjW-9PlG$q+^3Jd@b@caw$9wk8Dsz@>KIiWn|e#W%#5jZ&QY!zg8JJeuFZ+0rMK% zmuyi+p1wmFUVeu%a;qxil#y3%QHJl@f;m~PUKqTBIIik z%E+4QmEl&nbCWV$8L5oCwpkfoxkedTx<(n!_%YZ27=PToxVIAKYwmw~lQQzcCgld+ z;VaT-58rq;j9j~pYj0GVaa&J%4cmBLGvPN~ue=_BebZ*XAL3gYtBgExyE05-4#u$- zcy7J&4*nkM)Bw`R9k*I}lQ>w4)7WuG4MNj}{0-j?-4XuxEShA@?LPizXS_*q+Ly?y z*jtXSR{Zflg8y~M<4C5(G*Orn!0NYLG2^y^c*CJCVTsi4T7rJa61f3;b$o=t4e;)Ze zV5&ehbh3JJp{)$?uWsJqBv{N9JRkDJ2KQ_vIlO{5n;JzWuFef;KB@!J8GW-)1E} zaI=yTwpr1IZ&orxHY-`7o0aUS&5AyHvyv0BS;>vutmMURRt#~QmHdsHm4X<)N7}}s zvzJM_dcR?wEllK)YJMNxpegh(Vt6i|w?K8{+0(bur= z=3Kjk%S3&@#-`0mlXJ7enHypyj2d5q8g?h+_g?P59=!q8&RSnL`W-#!cf5(cF9`h) zQ2!t9qOx>tBW*S@c8I!6-o}3r-I3u(I9qZG-Hwlm--j;2F}1Cyezj39HLI181C+T^ z%6UMPGV&Q^<|)EW5$-T;!YJo6Kt(5KL-Ky@#Muq~pr(Nb`7YEY{#f}7a!7Tj0UM^z zsD1~3!2T!IZA|3P$iJ%Q6RZQM@&e(8f290PHB*rffBYlmQ`JlwH0|YGR5$WgowM)X z5zJUFSbCYyQRjACyG25;?2TTK5eP;}$yMU5^-{|Wm;eE;+Mf5Ef z@lV}FQj$b{#r}n|@I_Lt<66wajGc{fi=>LaUNzU(aJ`A^hws5}DLSTm7mJ0yQa#cY zxKM1J0lY8kzeraw`uSt_{sSB&*%LvckBn3l>a45bCfEuO!LOmvxmb#}2crKKC?$IWr4-6Am+uXfijY95 zj8We|P^ysC!R)CBMHh~?uNGMsM7!7&B$ip)$&0j;ezc3UjdsF0kPTedh-|{%jBE*` zUDXHi{_6S6{0n$CNILw2q%)QCm6K_wxxObc2;E8AX~Oq&y$k7{h*Sn}^MLn6q~ePU zVt-B$`&fe{eM6vFUgST@od@M5-p7X7eh+;d%zua60|7Eln|p;H?aMs@vJ!bMtO16g zn)N{UQqv`@<$TLO7cnyo)AXHTedqi_`3Uo;FmRgmuVLOLn|YT;{_BKW_u+$L9S)F- zn7@H^36Oswb*!h@ucbaFuI&2*BnWvaTm|jXJlhd;L?W2;iCQR318UehN;|+9cJD&@ z5E)K8^Pli2{2ub*ZNhy9frMQJbTKqu-c37xqmn*#qoSFm9L(IP%;!6KpE0qRaS-1< 
z>75JNpoe^2kle+!c@vZaLy#1^Z&XUQ-k=m3Z&ZqoULxh3`%#g6gHn1lSjtYUQL3<4 zkF8TGk;asT@^h|z25M$6kyK8ULm9c*+`k$%jr=_yLBvm@MA7p;xAoI%yncq3c z{LVq?OFSt3;EF#eZWxF|l$FZzJU50)Zb!9#I z)(Y+5e;rK~_#$5vZ|@_@A2(7KH?tLM9d2+8#$W=@!9|#bpd0CTKr-l{4D8Sgqo7GS zBxz}fL<{MrLy}=QB)ZN+k_lOmJ#k3%kOR51ha_+6kQmM#l6+*r_#r8@ACe;HAt^?d z_zp=avJ889(m|<+J}8x;2gS&HR-HX4)lf5Ykg@qesl{A3bx_RV2gMS3Q0l=NcTj9d zJJ&go4G9OOaqmHCBCcj+3+dX5bYgEqw&ULsd64}Vq(8Eo>wA#Bq;p^V@1=hqXlH_-CZcq42Qdsn`QWUjciqrN>$;f{8i|?1Rv->Fr`=#PU zyi#e|FUGx(DpeQuOZARNm6{X##kA)osZD!WsT8 z!%-LoWAbA88oA~s>M_^}kHHi06lgztNz$KsSq@;1yKLW z0VzRNgB6DF+^&qsc4he6dl`4!r?lYK59eSOLMRvcPzBA<)p|g>?>xX-F7qkyI=lfr z&mNH8fCChWg_34kC|YEC6T0wj`oEA#$3F|Q&n^@_z-_P}UIHy-LOv8j9}K`d@E&{sAHr9#;#S%oSPgeWCgeaf z48W`KAuNbtUIiY6U%_wTCEyGgO(x_(JviW1cpct?ci=<#1U`o^;2W5O+S~XRpjNj; zK0##?dms~Y7G%%(iym??=RzKS2FR!3EnxkvkO5T@|7$5` zov{Q;<1Z!uFD3u!8$)HvrR4vm_kB|=qkPUjsfn3M{9b`flXdn%=kPg@X2j4W@2ET>~A_;db~5+zpSzlkf~2 z1TADjJ`_VaR6#AY!5`sucn98t58y-i1U`o^U=9}0XSftDhZUf4GyVtdG3NguV~qVl zkcl}9vS%3UK@R3z$ivS7`Irl!(9QfG6f+M{0;L;8%3u|7uZDGS3&hcP*-KlL2$5@e zpC=SCsvTwYi_$ntL3?I_q-O_6#>uNiSGqznm$P2MHQ7g2iC&gUP9x{dwT4KZtQAAg zjgs$LCk5Up{tv<2^b4GW$GC5jd139d?5pQGBmJnsyKtv3rG1)vG4A%@KFt_i)v4BI zphLzOa*__>B%Q+{{3h9_@ds5hPX3cBS%>?xdJS{kde^cL2vj`+*ls{JH;S?E-+ z;yZX(4Uc)AGyd`y)$StvV{pH}{8bHq59TikgZXc&{Z`D(_p5umKUMAUcbM!6kbfZm z3IBrWX9L8aG+PMMj|9l{&H%Xt^Odjyd?e%K*8%b>awGOtz*2{1vL-+lMT+KspbgaP z-a`glk9kXgL?Jan_bHhxe?OmATGDZFry{rGZVaK_3MP+RNmDw?hNo$>4l9y-0Lj{5 zZ4_=ThWu;xqN&zk@cgn`H$ zqklHo%J?rTNaoLHWQ|vw#~ASA0MRlR^jq%XI?pw%2OE>vqqPT_!n!eWt7i3ngLiUI z{A-!GoBv+z|MvG`Uy1)R{8?8Syq~zCYMii8cXWYFF2T)<{|x?5v0m{d{ul6PeWng- z$B?6h3m{A+^Wzc3`6A}H@7%O^5gvE--KJ}Jra;1nE)aEpXjL-Lie1aKs{bU^;#L<- zynf$3oBWf)OT0&-q^%7k8u4F!Wap!#>Mxb zEyZp?j}x;;gmg!TlJ%t8edtYK*FMDDdjRE^__`@$#!lw`pQnxHefu{N&cd43t@!gS zEzDu5_UXse`=K+ilyE$|a~ap^c&==9G|xT#5as`QSOaSbOPsYn!t&1Z`N#VC{PlM3aVu`5 zzmfScH6NzGB`o!l`u=K~7Lxwz{$7UL^Zu3CYr|D_c=YBX*UhJ$T92vUj`|I?;XnNf zaS)$&IdN~|9;AVhdbf&t&Zw5j7tmEETwNsTM7+kcxb-jxMjci)!ZV-Zddjx;l)uap zE{(iV|FLt;4eGNHj_1k1O}iGIl%<~rfrJEsd7!YY(;h+6$vDK zH?nJ-vHvmlXdPy67We2ws%Ja6z%#-eHuwnRg(1QZ5;llygJB2kfe;9V>18kaPs=_2 z6Q_3gPi$&bCLc@jpX}M`@5HYYdch6z|Mz5FPCHKfpG4jxb^FMBKlI_~Yh_0={~5#l ze_WvC(g(|9p3Oi%EFW_L-&0{Pb-j=FA6c@4bEbFm4WJKGj;z35nauZqzDpId8hZ`y zrg*-C?f&$C{ptUrTLATk(PbHDKMZ|pJAG>hvSAkeRQlXa(0qbKJ;s#5BkBi3;i4z01tfs->&cV@O-4fVK@rMU<6LUD2%~4Ou+eT6`4Yw zg>!HLE<(-4+r%_`o6KM?DpX_^*#z5QyDG`!fy7g?9k~Nsowtb_c46KP@sI$!2)hTl z7j|QghXmLQiI4>QAQ@6171BTldN4o{l!0338j*>_bslpP(uCatcJRX<2t_Zb|JM4k z5`=m3)M_Od83LgY2H{|54rw>@15L=o;DmC_C?Ia4ICYl zOOX_J%?cpNHP^#>xCJ)B?Qj>|3p-&#^P+N|zT@PwXB4ewkxZB`QzjN&!Tm2)CYP^L z9wyAK{@m}@A1l)vu2D8)zEia?xm>v$8OgIn0bMlBgAk2*Bhaj99*6y)g>1-&Qm6u| zGtE;FL)chI!u%Nbio?7Ws2(*`4H~K;%}Y=TPeL&~2UG)^lb~S@JS~BCc{lxE+M|pV zbTS|la~5RBFA%+j@r<4EjBlajAq}**`N%K0{xdiY#X-xYgniqk7g&FXa;SjH6SRR) z6}wETku|LAo1iw8wmy6rbv1JoP#wn{1(-0`Km_lg_UDS|&lH|mASIEMaVW#D6e_}* z;{g2yp7HDg$wlVOED%E{x?0!^nifdDn|H%qbB=dIYVp^d;2rVLLS~MsXD6sTTRnqSb_z2#E0Ngr|U&Ct5i=h^?2Ks;|O*03XKvSq` zM$+xj$6k!A#qIJ< z)J5y^uQqe&s>xWqvmec-SCi-(LBxmYs$&Cn?yx4FtV9rlmDg~n} zq)>OY6z$-=^KF+&32U~c$TIBZ$cj|{!G|NQUd?}RFB22gLLHb7UnUl)2P@dz zmxvu4O_xZ+2?U5V`fhn_v`|A)R<)_=hj z!v234pda|Z>BoObTKogPfmslW>$NZu!1s>*ZMYQst#BVa4Tqo{C|;Vc;4;Eq&Nsew z5%n|NfjM>!^C@73*T8)}Yc}v248w7_c`bDzltUE^!Jl9fF2W4V!obO|q<}Ap@ zPY*eX%>O}N3iE%EkGTK}@hgI2`b#Cz^nYUL|4^5gQ>RxTE0Y+jCI4Ih30Z^P6vH@; zab6wLJc6FyDEd#-vsR>ShW-yY&eH#Z#xd$R>P>YIdJDA1qB{-q|M7^UZrny4x#PRD zMY|KI8?pcJf00z`*6s9vV1EB!D*DlT(HGg}5gRn{Z8_o>iJNdP1}Pro0O5T|uRB;a689kXPV60H z!R&1amM-kQ$R7Ork$u=V5$+!N1$2QIiogQLA?;i+m07T)&vHfy=rCtM7I9`qET?P* 
zOC?mZ#$UyHz9}Ub9m`;;OAKZ_87xJ5&hAJHmJ(wyYc#A$VlPEjqy|el@-*MYwcN)5 z!|(wt;@gadpTK$S-@p>gVXzS%fJb0I+zY>g=RgB_PzqM~0BWEM8o&o9;Y0AdfxZ@8 z4L^lq_?v3x`%Zv8uoun~_9onKhkM{U_!;~X4#M+L4zI#7aKPIz4n44(^0OE|#T<$J z0{Jcc2=kqAFFXW~!*AgbWI`?2p$q;DqwqKI(xtzW;e0_yXy5KX+W_T7}yj-SV31-d!UckH)8sU8) z&8M%uLRbpZ@OvK3rNoD7i{=z&)U-4am>1&rYvdB-e&nsl|A1!9eSj(p7-GF;O{sqXwq&O)` zj+5flI4MDvVlTtJ9RCVrCE<-RarCX@q#9X+-GqBB{&h(6xj3<0jFbA=II$va*zLGG z@NYmi622*hedJu$-yPY5y%+bskT~fF*Ty(;!vJ`| z8^V9*VGxEu!~fmWcy}%Do*sOMWQ5)!y6`(B6PblQJLV42$K4@0$Xx7sxEt`#M;2f& zOuj>kI7^@yS%SS3_p-1%q&(;jsesDZJH!Z8n5&@%KNHkqt^@PT9b$oc%vP}BX9ox7 z255}ELzxDkd{osn_d;}Q4>;W%+J{ZJ21ezJ% zAGF)!Bpov1<3tCUn6n@|h5Uyc%(;+^hVmqYx_3xV+Z+xG-I~}l3(Pt73npRo_5lc>xYmo>~0%pD?tS?8BRjBn9wom<$S-8x-pyi2+AR6Vg4E5Dy=^o47MmppnTtIGcOCwj$Smxd zz|Dfbrf8Ar*lQ9tiD`=O=^Wn^vJ$&-+a}fzH%T?J)V+!I!%eIoZesm#lN54Y5!V+Z zOZ;Nx73!c<@By5M{nSsZsV|w5)-X)gJcQYV{4@CA2zA^0v`sJ57QF#M*l&ir;g|3d z6v4Z+Z|5P1u&W^k?tx#xbC3q5@C~2_r&$g+z-_P%9)O?26OagHPzwn>z_U~&ol7~EfC+;xFAupap+YxzeR^SK#^Rc(lo&!TBZ z;bV9g{foMc7cd z)#&SfCO+FV>pGvyknNnfWP3%#E=$Dm8tdh3g7}d57xE5)+~3J0 zO)G1M&NEj?+sBtn`x7D3cz&@otqhXpudkGrZG-^<+d7VgyrJBZ_Uu8HN^|Hi$OFrRb3T&}sCcvOYYG|$-nJca(#InF#pT4&LJMymhraUdIZp#PlA{=`)DUzx*c zf&ce>h#$^(V0=6Of2ow!^8Nnb{&VQRjIuv*oc)Q&AI_g;PH2$0hQLDl7|4e?(&|^F z>FuQB0O|7)TtgbI!tD-Z8~%Z$VHzBThw+~yUB4m?@8z0D$%`1`Ity2k&JA31ko^1! zVaoA)iErf%_z2G8=ivI!xo(pD8Y28(;D2y`j%xVCZ{eK7Z|H-WXr?UQ!%*L+AJ;!vTjs_B1>MjCz0e2!p#Gb~4eGx+Jm3W%48qVc_D+xnPm-P=!WVE8`4dfgX-T^W zu)hu!q}c_^%`V*UAq}5~3exzGr28AB<4dId3c`)yR*wuOPa;YGjc_?(?u7>k*9_~3 z_fqb;nfrW7yj_IfO1yc*c?;n_B%gnZdmY4b{Q&+K@E;@27YXw!_Xx%PN%#uxCC)*3 zlsLapzYnf|hieiD{{ZogBVR{8N0=P^2k~p>-fyYz$@QJF*GWeRX%|WwLND_dJc0&K!NR!OT*NtgO~LFF<=g~!FgonP(tzE8Y{I`0_xb-2Bb_PK z$@^HlNo0N&Sro?jJcRLiG~;t*SsdeYsK8zh#&E{>P@Tm19_GjQY3NQRemA~PBYsse zHl6!viC@S4Gdh_cz@CZJKh7``F*cZ&)O)&drf@K|P^GDL|kL1fIu!3}| z$L&|7V=(D{6KQr4IR$?w{jVVXzareNm=_Rk3i~I7Z6;kWBhU5_E{3wEhf(}aU|&s~ zAHls`_bYe?4#GEF^EtAZdpt}Wi?QDZ50Ezx!ox5CH{te2*hpNvArT_U*Nw0bBDvRA z*bX`Wu!?uz425CnYaok5sZ)_3Kv@jsKaTQ`tPH07<5v|<`A61ZH(jLskFS*n zNfUFRpWH$k)cg6#!^j80PMnS)KY0}MgM9_Z#FMLokrCN<82i;T#*}VA(44;MkkI6&(a* z7khZSx2;0YoBx=z7r8Hi|CVF#LH4qz_xt~gH{#b6yh>i;S!?0@|BKgie!3ORU;zg- z%+Ssu?NG&D=xXj+i8Sg~Nv(dB)Ikk)6ZYaj_77iVA5{=##SkmyovRpEGR|O)yv%Q< zhJ%HU4L%9ccB=rEO{~R9#^&jM3urg^}59%HcGc=gky8(7k zb^ltJuy5{U4@cw;qW(*>-N@dJqCjyXTP^7D`Jv0ly^E4#rSKv;e%Q;y(D@5N_YYak zK8>o_KrwDaHxOADj;^UC+5qX@Qc4R44Ycn0(3v9UbP3wCg1oa}MZxS<8k3 z%=uuzKM&p@uQpTue*zD~L3jfc%H%U}IpuO4JPuES1DZjhJiZ63D4(}MI%TWy6UsT) zx@jwXl+jeRoWfoxf(XiL9E4ClBOnfpa0br963XmKxP-W_fd7PFLJ71$HMGMt{1@f- zXYdP1hak#wI6O}Idl)j|O5W{5RZ`BLgo98Ie}vcJ6VMna|7Dba6XhS7fnC>2`S(%& zk=fYwhr=bui2ffkPe=L3EgxAhOZ$&33JaIwBGw-TC9Et&<7vED)R9GxP@=vQFsHcB;QxVPvCBN5T1jN;7Y!Qd*K=AgG>1))x7~a9||0(@-xRNp2_3&SeBOZj^a4$RnKW7Yj;ZpJv{svzGzr@jW z@tpTvMqWZ1+zgMxui-aff>*%>K4^wcnB!W1fQKQX7XuAb4$`KW}+>_lG}vWw6sh8)O+ybG+qLp~HhVG;WO zW$6DiNl-#Or8?H%nK&qi3e1&m*58>ps3P9#PS)SKhY7tnRUfX7Ys`BVie>LY&SvLd zz=`btOJ@JszJ=m|2F#7yIUjfjXYg<1eBiC@`AT5#S3Ktfr^7tYy4q3k~kVgFe$`_F>V#k+tG5$%d5i2M(x{D+hOVdVdq zhz|Xt%u&uGLWe7xaC*Y!%nJJ-IA8HB=XlWe=(>VWv@Fv37;$DZlIa$QAkG)Q*>~BpGu_f+Q>`6Nn#~${% zqW9Do&z{c&#=yHpT6S^H$PWIGvz`Cr#8LiZDgRMBmCop$3detF7Q!6)7>ryDVKDaC zgUU6?NLUSP;YPR>ZUU=?d)s-2BA%s;d-BWH+eP%YjOcBd(Bpz0%pNCtTTSS3aSs>FpGQNzrv7`}o5-1-Jj2i7 zPM})S+yf85W*A$)OSupE5Tx*46MWwrmr}RDY6yptts*xd*TWT15l`BaR%OW21k!vT zX`e_Q>?JRt4r=$12OEE;)FkmtOhmb106gG@UEFWb5Fj6Mzm9SG20H?zYa9Ch3G4?z z_G0fteu{sig|_|NR%v$9*4t^*eYEY!Htg+4a}#}oqOGhQ-YM$;6>RYR{}mcA|BE=5 z^W2X>0K^)u}r~?}`K^t^~3;qnp;WJne%339?hr3`qJPf}F z^e;6%@CqD-5%?Sy@!ebnw?Zszg}Z^@!)qP~jg#_Ed#d$O{*f8jb)(e(~pmL 
z7pebeY5$RV*bTVn<6nR*Bz#dX^*`5_AWN~A?MDAE0sTK@WfJ;-v>8>Y=>H*Wu$yqN z#lH?|b~1+SMgPx7UlM7E(qb9K!r;a1qQygVmJZsz$_@Z zClc;C=m#&nN*sTJa;OGd9O=D{^xlEK0n&`!5=nkUlOM>|RP-%zZ^OSG*@(SqFa5D3 z@&nmJ_}-)Rw?;^BWC!-nNb-a0yOG{m#tOK9e_o*ryL*cKILDjCNe90 z2`{ol(wdftHf)Kc?_#XZ85IV|hXTlsS|a+;C6YtfTx4n75-B78a_(P&Ec9FQ-F_@& z3Dj^u6YjP6*C8vh8>jdlxqmfMlNcarpxqZh{SY7-rU23D0wk0AFAJHCBDx-P4(43^ z@{oomblYYEqyP%_0q7V6(EkmP5-9Bqp#2MA{)_b=XMj|e1&EQw>Z(N6f1oCX^&hB3 z|E-Qa(B@#)f5OqN2xI+c8@d&NtpDsl|0jm^pI-Jjgs}bt&Cn9VeyivJary;tHerCY zhqL}O!un4V>p#hyZ%14`j17Bn??d)ucM*>}j`bg;2fG({AO3^LA?%uq^nYg2(?_P$ zer24a{b&4_30aW6k#QyDKrZCz=>LQ=&J1Uqi7ZS~$EEata4#|ayZ?i|9D9Y2{twcK zy$V^4e+|-vy%t%gqs>jF?M>r*11s3T4i0F5M*1yHI=(jp-y5>k#P?|7dxUoANT&Y- zUG$f_b?pBi36$Q8?Ejx-EPIUdKg0J11GF_Bn4kYU>MtLJP(MRP%|GRO%F2zf2YvJ~2oxsM|KS~qX=h8I)bCsBzd7o^IqJVT>c2TLCeBG! z${b_aIjO;Hg4*~wsROfpPAtwj`XF<(k8`w-b7IHrz^wtuQ~%9Lzi&=lx;g5_xXp0@NZk~Tm3nb1l<(+SOS^g|=*mm-^D=x3(V z&*b{~|5~g0aEx;p_R=pUKRRLlKiFLFJ;V@jxM5I;@{ik;Xgs)TSx;B^uUee_j0%f9)_Pn0vv;D_ztp= zFM|zQAP%>k@FE<7OlSZv`~gr=(0m2ca6k2ODrCSV)bU|(IdG($CK9<34nR7Tz%L*X z>c9a{!hN&{55nIue*XdxXkOtk+x z+J8OmKV;LU>uK9_khuoh|1#Qt!sH_huxAs;fAIdrv$X#gY5&jB{^M6pm{|RH>%lv=R|1$o^yQ9peMX&^WHF7!9j$DImM{Yp6kuk_O>Few!9V($3ZseUG1~Ux9 z$M7lq9fC=VrSLmA3>(z+f(odE4!)yKzNapv`X87cWH0tUWPbwlzk8YgNn)NCss0Px zi~I}MZRR-=K@;efvR?04*|3R1vsKn1W+9g%vT~bZIp~g-754D)<($N2~F#d=7h%T|( zsfTG3?TjlOTf1eH_!?;|o64wnP1L){2l#eghucWE+xc!fW~pa&)VqXx8hekCdK1}4 z*nZqxz0|Wl>RqGhYX!r(fQ0uW~W)n!kq0kEBYd{ zl4CL}x%lVdHb4JIT~V-&_mAcM>0cCYWRC!pMx*~S%KP))72~`=>1l+jbM${m&l=Li zM7q_IMs=9YLG*v1o_>$j5B-;L`ahT*xHZr}X(Y}jXyzX3e+*iq(0_rpNc3L_+p&lK z4|FBb|4F3(lT807mHy8@`ahUm+v)$n0MF{#O8+N;{txCs+*JJ+O()|+&@ygKcQZb; zGd^VOnn~M{g*n?uI|Dgov^z!285@{8rv1sMpHV#PiaLdl&I^6RwAOed-12i)Jn&lJR{Qa}N`I|L6Gr zr}+M7{Nxnh)i?}Y%v2g!2m287-{4Kk=NNQT)+hKhQmIq^xtcP0Ql|``)+ztOoEAkH zUPIdg2Jn(LJ{aVkhm2lyfdj=21A6*f&_f#Zj&@4lc&GFerlYesiD z0)JRbodg#k`+CvnxKAQP;TH{KPni||L!cSk$v2AR=pjuGWWi_DgVQ|Qm&nS!q<2V{ ze2aN(gIOt!EmF#MkRRK~j|B27k^F+dly^?;^D;Y&z(V4uGY4=K& z!7JI$5=B4dW&NvA$?f$@UPz&0DDz5wbcYlitx*ckdZp;_Yf`+mP${`!Qc6d?Qbzdl z39nS_C{!x<7AnTnLZ!;>rTi~u{EPm-Z?XKH=dEMH)9gh5Ka%smI0MYe8DO^H#q|Hs ztqwfN8Y;TkM#jI;9L4%Kw4!(Igf`6W&=HURzv+Z@m7Spf`;PQv1VOe4O_`F2zs+rBDXtPyv--962skM~_SO_;IP380Gut{EIQx zaKSwK7xq7lN&VQESdljNfZ0dJ#DQ!G8{@(2h>2eeHXakGTNyzyQU2PDu%z<=W4o zeD5i#NIE5@$*0hRIVDvIr}*#XDfv5o=h-0Ex#6!;be?*EYrBp!?uljXd{?bvjzrfU zvtu{w*K8WIVYesn?*Z1lTl^RY?qMx`7yTX@(FVpGt=OGM75SQYvxfcU{6n0pw{t*> zP6bHymw%C><>=kq`WG?15Fmr$%=&fP=Owrhy_1@j2ng+U-}qd`XmR1T;!*D0kzgHi_N`+QOXT33Kn(C)gAkcN+-%XB}w zOlClRrsc{1j`)!dlzu%cTY#xH>vjwdlPY&xc9nmsd;m z*H=lEEMt5`KO8hXUn}vCET&Jfgt0*|b4~CJ><82)GzZ`iXrTxypbAWIE#>D=$ammd z_!nHtIulcF8m8nl%kjSj$uykiUHT_WnPYwr>5}#0&bwI#vTu?B+MUdrt7P~^>fy?0 z(LHvR^yBtAWD9x->zB#70CZ)jw*Lw%Xuj`+pTi4~3!8YAEpR8?1^2-HV59H16Iq|k zIwO5RGyN9}c=rv6uc%K3kwf-Aab0kUJ9$6`jBkhs*|TRrdXomEFJ*xK#DH{!4ba{W z@I4Pmci;g3fE^I$zCOy!fV2~?@q~-@iUHPZ`dF_R5Zk_T#lELpaUdIf1MK&yRxB3= zr2g1|SdR`cA5^87q6at^;0@+O%9TpuHTIS(Rj~t7t?%PJl>zxWf^Q*lfcd0yrI6<+ ziW#84HNgDV0Qy*Oh+(Qv^24f>0?UA8gMMqDRXG14z$^hxK2N_;4RQ=3x&# z8zPN1NsG)@v`seFzZ&Hie78EEO)^cif1%k*wn?YxEjHG_ZIWANle~y*#jr73$wwBP zv(f%Jq-cjtig!0k$yS?`LfM{1DMwbM+N5&3O^leU;%!pB+a@&|ZDNYENo|x(>LP7o z-s2FXkG9(TE*x{A$qKPkKifqg(!~6qT^_;hF)-ksA7qz;Ks)oTcJ!(3 zQXFQNl2AMIpAGT^Vamr&p#Ni+O2Qg1G)Wb*`e>8X%$$&?@ULysDRsD+x7*Ptw@W?o zSU1|mhO~#}D2{kLx+Hp~G1kue>y>8i*WzxH*6ljQ$@8_vosjlkz0#4Ip>%GwOIL~= z{U1B}e|n|&gkI@G_VZk>Jq_YMXP2kBr)R=0Uho}ml0oFNxb-JFq;E7+>5XcZ9^G5g z?QUi);gC*4vveSbB65_$DF=N9y~42{8jrytFCg7+hqx{}&`{}lG&p?Ny`y8ST%v925ni*uj 
zB?T9oC4ZYk499YmJf1W6LXMKd{q)ItB|FJMW7HsN5e=e^ZD8K5K{9qVNG12m+}^7Aol(nG$`L=Akj`<%W&rUq0#sp}zT|Jh1aQ-f3|2TA?X^X^klp7Rq-UlY7m!&{C76UfV)9Fy$$StA^*!7SpRujhLD;fGrB_siuRb9{V#b+hTbf?m^>xZ zZI-N{R>uDtMQ4|zNOE5Fu zXqAdxW~tm~7UQl~<{iyay~8Xu@n$jMUc22ab=%BhK5Q1th?)McS*)XG+KYOzUo^A+ zMgJe!c*4xL(kji!mcUl}|0l&+)+%j@8l@e#jv%vi5>MAwvvlt>OAoR)zCh^Xg zQ(P%#aUac72DqPRrd7N_8pX%+4W`onznG_Jd>SP!%_3U2h4nuR->pH>g&34fWY&m< z_QWdsXp7{;INASfq5iXo!OtoA$bxVS<0*?2?XgI4ibYD2Em9g{kuu!NgDkAiTBI_~ zDn?}0*-@!R*7#Y(G~tw5WZkHf@&73OOQ+PwrzzG6i`e2VVh^>5gLoPuER6px(iD}i zG;^O8r$K3rvhe-}r7g@N?M+VlmuX68l0~|9j7s-WgVK|vQF?b-IDge5{d+Cc{|3c9 zVNeE;o;Ztm_gTd^Ymvbzrwm=NFlSINU5uMLksaqqGk2TxCDlu>p-p;_o*nfva8awc zgW6cXe}_4fdKvW9i_cvz-i!5O#lJqjO)OjMY1_@xK>j&!vnREQ4cQW3kM2UfG+}Ps zR?nENp1F&9=F;o=&s4pXl(k7Q_M)bIr7*N!DvbF`dB{7Qp;nJBy;i9XtCuR=jnVbg zrRhrDo_eWu<|`&&zEXosPi$lEqF&Mx+UTU#OBQ4XY8Bl~n`G>MM{*PDB`2v~^t@j- z_bIrLujI$niy_&dx0`9EcqstK!9pRh9i zvx;dZPpNfVnU^n6%wbmYzd)(yepX{U`XA|vJWFwoGJ&oWz? zS^0j}`z_V`{mS<{b4*oHB^ni|)!0Z3UI|rJRYsT{GspBEwTJHg;->KIoZYjh@)vO* zaU?sXWyE)-jCRrgPv`nL{rMB6f^9N~ z|Ci;{U032Ay;5eGoP~91zYOa4ANauWX^x)(1XD($^Wiu)_J!VOpPu3*-dyCiGN(jS+3e@4>8sn1=m7o>^zPH;mYNx*WFdt!Jcb=d5%-5w>-t2BrPdSv-{vTPx{W zVh&sLi?cif*lp39W_f3BR>B8oc_(95f}{(SX2pML*yg*y`TpP;@f_g$pl_Bwok3|N zc6RZ7@!}b=6E}QrT3#P-w4M02+xGgl9C6O}`d@@?U*~vxz0<}2$!O&F?47m~_d9K` z_Y((+Z*AcJbu{q5J%}3{Y~8zgbcAmw$EuI}fP?5^^b~4AFQPv582aDQ>%XU*M#{NA zYdbMrur-|E_a8YH9q)=bi5r=BeTe^zEGl%+uwafMt^^sjQ!z@GAw&!5a}r{k@NuG7mpQnVC5H@LX_( zq>fy{_rR!R-m2J)$715eD|kok3Yl_EaGhst;sAAI++hwg!7TM1*nb6MoQ!QeSF{Zh zk34vV3=@wLkD{&@X(LzB)&USKyJLZ`5@K?+8yw(}NXTw944Zi7W7!%t+hm zE2Q-!##RngZ0)JM&DV7W9#wuDb#KMy|B&BbI?X(hxzt{(wgv@c+58VT5%*_SP-_4EXFoRIbl6Vib?+xY%(3fQ_+ z0b5Uq`+wi0^r53X6COkT*G|X)8hm+DhK_Lme=uMhc`<|IxOWP4NCV z&wuw$%FOhhn!rGUA=?;-_OforYGP3yc{7O|J)9lIP!U!{p=2zW1H#AJ})yUe%B8A z#ptu!NuBtTO+UCpGWYJF9d_^@_zuZGvIA~-NRwl}?7Km-E2ecL6%Hc`?w)wWA> zYP+E*&SfODFSP%y+Y{r)#@RP<9NRqwEqfU*EhtvYqGu?J{)r zc3EQC$RpRu=)UbTwspIVf0k{YyG~B9d}`-)GJVN)_@Azm+0SjK|6@D%|J%8D-!AdJ z+a>Y9c1a%L{m%!tOZw66k~z3tvdrh0&$F&@^>+Etl#Sj;AE1WNE!;odB90l}e=Xg@ z_}?vz|J}m)&n@D4_ZIP1ZxP@7xA6NV+U^K`R`U4IDRY7e_W?{VQl!s ztun$7*^gFlrT^zv8Bgt$iQG<^e1E4*#kytsWV_77?v&ZoopLqh&+l*L{o%f7`kLZRh^Co%`Q*?tk04|83{~x1IancJ6=MWy!Sv%iCnozD;f* zzu}qdW381LIAnG@UOUY6yUKS*5Y+QwJ|=Nftu9YGEIt{3}9+r)9nHgR6PO%Ain^~m+& z-nWf$m~G^6Kp*VF#jbN_lh{V3P-{CPe8vuzS#n<(j;4sDa>z1z6{Y?Ia_+obKm zZTPRYG5)qqI+^cczMFMDS8wCH^=9e2^w<3Vg`4FVYM8lO?5VqXhPs>QUw;3Bc~@=) z|2w~baf08!_>kYf_=w-X=vtxggx@Q;M?j;1+yH zTks)$McTQ}cf7boI$7Rz?H2BTx&H6{d+8e!;+H8wVQr6VLSL*)17gsp`x5yl2%-_KEe<%LzyS7MzG)dB? 
zKEnStf&UHtX*~7_yph@IP?9iyk`8y(r`VcX5p(w#NUL82^9t|HuE!?~WXO1V1L~ zpTKX526OoT&@j^@X!JDxKQvy$Z;B=_;Q#pu{|_FY8N5ET#B+xiWF8M-4E^c%Z%9>M>E%1l>KgK+=JG50cRL+drZpNLbagifMAAkFgM zBjJ$Qg8l{iuPncuc;E)Uv(Ud{dK>zXbYCI<3-oRDkIa9c_y_2A?qw%P`!v%Gc{DVl zEsF0E;>(D8i91n0nny`AL;6MH*U_8k?@^3ppCWyV`2S-5kLaJ#TU>ws70b`;<#~~^ za;*O{^Pgn--OPUjm00#~h@V1}*ZzV3`^)csjPSc3CpKjE-rwV^MTzY9Y85#s!1Uy*0YCrg||f5Wr~Em6j=Q2bqf=i^d-r{>^) zmKf($`sLr#$Fjyf6u(n*=qvI$>J}mW5E?@n^tWgMEh9(jkMag_!$+K_oQvm~-a!A1 z{wJP;yG#78%tt4rlei0xqmS@AAgCAhp=LY3OLM5kbLyHj#rRzs<~!_vV4Pw@+PdzR ztH|Te(2dmNdgA{D?M7cle~EUo{9)qzI8MGr{55nOu75}TBzhJ-j!v-5LwpPg(xYHTpV;f6M&y#M#U6N1;5=>jhL~ zx`fL6@W-S16HDA*zmC5Ye?3b2@Yka>>oRD(#`yek{Q0N({}dBTGQEU9{{;V^f_Ux% zYxtaDL{{#G{J@Q_7m2oqE*Wx*T*W!KpNth0y|JVDo_-;HeGd_&Z#zoxC7<|tO ze&?czaW2;xIm-Mo=6m15=R@55Nq!dt?@pAsiEY}6J3i!hGKkxlpQLYL>Rx<4#1nh* z^*ljv0nlQ33 z$u~hc^TaXoNMB8#21Sp>A^d)=> z9bJAkK2$W{FFPpSftb z6fHCT3VIziT=HA7f57z*IZ@+h*{`&Xo7hA9dawH}{+Hj1pSBI4AfBuc3e$#>mtT|U z!QayV&3;BL@Bd0#UuM5P!G6B>KT5|Tj*Ca|VeEWGx*y~?xryU~ebC3gIJ%SL1ofX} zzoJ1jgoZz0zoJnzhQ?2FT%bubg{F6MT%cJrhvpw(zoIxwpyYGxSCmE>lzo8X0_89K zCn->e;z8bPrw(Q6P%-iQJnQn@SR^j-oS8q4PYz|5{#J6tX_jYLpCnGPJ|4nn$Nc=s zcky-L!~1||T{O+Q89G0$->KsHbbN_tW8zWLju8*DeuQ|C`61%|3;6!}-j$=o$51co zL*1wcbt3b>6Rh_rh+Cg~SK5f1*|vqeqQp()6()|jxGy0#zDz%L2wd{6c&U^5ZDRMY z_^n~o$g~SNnwIJ3q?5LW?{VorTG#)R90xY~>HbIb8MO6F+}opfSpEg(uV(rz(^aHU zJIg-DvO(fe^q*Pw2UKSIT9)&b)$n&r{~BG#{68`OUzomuX&c}9|H$+wi2nuGa zvHWJ@+tAn1PUb(ux~tG$rvF#szhe2d#6Kf`34MX-KSRw--%8v~{1=M*cGAb0PNV%y z=ZUZ6zF;qD?nL*J=05aWmj45N%nc{-|GtI)_g(sbi5r=BeTe`2BmBR_Ugmu)_p?6m z;bjs$j{h5l`R#!S|EE5B?=_5nUn9-LExaq-O5DbLJ8{R$*GT97W%_@vq5tO^`hTvW z|K}R|fA}Bm=-6ZbhxC`Ok%6Uaxc|KdKg2ck|6C&@#G}lQ5s!a#jZ6?vGCxH;&H5SQ zS?1@6=h;4XFaO{E;5GbC1OMOtqvMi#aNi?Eez`pV-O1 zZhZd_;yTIxM;_$;fc=mBD1d?|+5aevA}IRu0@t+#X@2yAw2)8h2>$;W{QtxqJOgw- z!TVN+dEbh-=VjjiBkp7VDDkm<3-teS|BnXI5E@1!XcUd1@pIh&qd&d>$Mg)EMRVx? 
z^ZtK;_t8)WWgo+rh4QF?il~IjBMVX)SdhGF1KQ;8YP2)cub@@*8|KdtyIAK(Utrv2 z7rGNgnE!L)pP=W_OXwNoKyK8^`ex$8w0Spi7xT|C{dY_cb1o|4qbwUoFS7gy`piD= z^O=8{={WH&<}<|i5!+ezL*k#HXV6Q?#j+ynN12WzAM+uUX8KnwZ({mY;=e_2qBiFL zfp!0gZ2Zo^C(s#|M>)5fIJcXLTi$2?eaQa%)gQ(B|Lqt4D4iS=U99USjrITAkN;83 z?>F5`ei5#%4cueck)y`(gBmaJ{ej#}dytoPKIFgbR}$d32&OoGI8MUE5$5~JV*m}J zAvBCe&?p*1 zwS4ZD?Xp8YFW1Qzx4dIJ3fJ&Asb4xyi+r_j^rFnR_(i++xtL(iiZ&|jk$ z(M#xW&@a#t^h$d%w7rH`}h|TTNIPGGWnfg=X8TYsv~|Oz2OVFl5$NLneJS8Zu#1(S$uc zChX-O>U^;}GfKGLF=E1AuN9gS*L%lJ*gt8)d6V0Q-Q>UF%bBppl)f=sHsOe=+s23~ zchh6iZ+a3Yj1QPFVOpT(F*Q{4hD$p-O&Az9VV|jw9y8_Yc~j5dIi^k6)@Z`c;otRN zoOr*-nmB6O>UTqCtKZF=8vZV4>h`|V`TaR_wA??6OwumYusKFDn}h$YixkcHJnD?z)waiOlLiD-6OmA%I@1__wBO#4%xj=c9RVYt;|fw zLX+iglciPDJ+kLUVSnwpRrc(ZJ-cMjZrQ`ay|U*v*>k(>xkL8slRZ`j7SSHFsYSI{ z_TDJ$SGwT$vi>$JyhAL|y(X2lr*D(nZWMO4g?*b@Z0+sa<@Ote-F>@NDXS@NH!H3E zeuvy~qp;(xhPlHmw)Xu#*>|I`^Q~suXBJ!g|4zB{&b#Ex@)fyD_zCE{(4WiQa`&}z z_ucXrvS0Q;DEr^@wjGwQ%2%J0ugTZ$k*_^_vwV$ZPdzF3$UXPTJwKFteku1Xl~7sk zm9NX!_y0h?A>a6_eB+zHlyBVsn0(`id{e%8zkKr{`R0?t!UJ-^e9(Pz;6XX?kR14d z+$Z0XZ+%t1MR-uYE#JORzWx1sWbj+$Z1pnSAG2`AfN9?*FFTfB!-Bklg=Wx&Qn33aKBE@5*;yknhR&?vd{ulD`(*h2gYuv}Bo7_nV~M~5e^?&=nmqhNdH6^21Np%LJ_qCx`Jwz9`M2_@ z{FVGW`S_RO4rTmzdd_tahNS=89N%@KV#C#5+r^Q5shvi9mk}Q8J zKZWh5kI5l9WIpI;a_AX3^pYIWJWb;t7E+Pu@P0Y`kQ{zk4*y6FKPiU~JuQd% zu-dFQ37%#41vyO1T?}{0GxE%SdFJc#%tP|b!}83}rVY65SE3AEHueNvn;g8LaQva$wIpNnoGi@CLQEFovXGF4q%5RlAuS6z zS;)&mK^BU#P?Cj;EH=oZT^1d(=#<4qS#-&wTNXXC=#@pEEc#_JAd5j+49Q|x79+CQ zEQ>9&*eZ){ve+(*9kSRdi(RtVEsH&}*ei>DvUpS$kI7=cEDp%xpezo_;;<}^$l|Ch zj>+PB1_}4G$BiqvNR=2)3P)pOS7^xCrk6P6qBX6 zEG1+qDN89?N(&DCrK~LFWGOE=@t2CiHx#GUt6_OHDzEm-s{``tl)O48ug=SINS4F0 z9FgUyEH}w=vn;pBa;q%2$#T0acgS+5EO*Irw=DO_a<42OmE~iy+%L-mvOFluL$W+7 z%VV-UF3S_LJSodlvOF!zGqOA@%X6|!5lLB2$#PbfbF!S5<$^31Ww|8FWm&GsN`tJ} zWyK*YPFZP`6_>2IWyK>aURm+UieFX&vJ#Y)kgSAdB_b5!F9S?Q9MZdvJ(m7}t9OjZVEWmHziWMy1dCS+w&R;FZST2^LcWmZ<^WMy7fVzLsK zm4vKhMA+vIqc9Pg3iy>k4R93PP5LvnmXj*klF|Kk&Kd{T~2$?+LE zJ}1ZL<#(F?L#lGU=TR^*KadBZMmIOGkdywNCcxa1ADyy1~Iyz+)m-tfyC0eK@RZ-nHH zu)NVLZ}iEV4f3X4-gL^F9(gk;Z-(T}u)NtNZ|3C9lAQF*$)KE!%E=}<*&-)9X@7wlv6`; zYFJK<$f;2|H72Jf<Da%xsi&B>{GITe#raXFQcQ%O0Ml2chZm6Nwde(jLA zl5dTeV9EqDCP>R$WrE*C`TRO#f~*OO1i$H(-!$k+WGdl$!F|zBY&hBB9jS`+3>W>+JeYrZ>ltDsx(<&%Cadw zX1ghq+my*|%H%d>l1`bVQzq$@*_>3%tU7C>Bem)u$qkY78Ze(vR*uM^^|5S zGessABY`z*N`%(rY9f=OKFt&qv!>`-vz65hRwGzVFeh)%^P%U&)IRoh;x$VvrJiOdbR#7b25=qCRcaBw@-Ss4urNPnbLtrfLM%)^#Z6>I;EQSm{lLIY5#o z5rH*p@=h9>dSp_VB9bQ4q_woZhnTL2*f{W7^=d2VaZN^Mx`Ir?sKBhPVYqnll=Ie7-<(M|xnTRcO z^=YQ^e9X3ala}D)h_q(Tq%djoOr37v+$9KG!RaOwMXaDc9W~RY1CGcFn&q^e57`sU zm|)flDC4yKVqo&MQvv}~4n8KO<8&jfOGHY7`XW;|GiN0fsrlh4~@CZHInN$E7j zI5|`a*n;5W$Rsx#I?aZxZ{&NBV8{d`CLo_ilMlhg$ZX1-wSmdJ(K-gLh>Xmf$;y;S zU?sj7Sc&VEHD7$zabf_gf0?2~}#?Ya7d$*aDV zIit;VJ&8Hp>d=tar>Uzo<(AWKla{~~OJp`N5h+Z>mhSo*t30ObYb;K;Db4+M%mi@) zYg;ll+g{9{?>VO7b=Q}YA2S|PrpF{FxHw&(V^fbQ)WdfS*Ofn=V0`l=#bn_#D@fos zn-SEfsk^`4_I^_*zp0bo)X8tMCHOcpTbep?_L_hU{Z zB4--pj8o1u${Dwu@yHpkoC(UAkemt2nI<{YDQ5=c%&?ppl`|7^W=77;$(fj(NywSJ zoGHkevb@tE@7U!XhrHtwu8v$A-|{w9Fj<=s3XXSWDJ?gDT zQ}rlbkFr)YJ#coKl+%M&G~<=CGvqNt9 z(~WvjA37>$bD^`@xSY*Toy~XEqX9Wv2%XKdu*f#0hO^}qk>hMRLlmq>P4%d~9`)3t z*x3>rm56{RDpQqm)Y`nQVep+r;aeCjPV_VHTWg7?pF*UZKAm z*^vV|Q6q98H}W7a@*zJ8pdbpNFp8ikYC_GZ6}6#mWb*DqM^Qf-K!a!qjiCuNg{ILA znniOcj?!|@MZH{<@1i_c2kJy!h`e0nt^`V=l$>+J?S|V8uNxjWJZ^a0)Za~cZt`=Jmz%uY@v4q23ELpRhGP2YuOF3EgVV1J8?87YO zW!Z;W%FMD4v+Tnx%vs9MQs*q(S@vU=GP9JKrR}pYWZAFT9z-3o)FDgTWMRs(PqMU8 zmVJ_CpJdr5S?ZT%pJdr5*>S`^$ zXO`n2n?YHWLwN*qwuoq>Y#CMLTn_#m{5kk@@aN#q!JmUa2Y(L!9Q-->bMWWj&%vLA 
[... base85 "GIT binary patch" data for tokenizers/flan_t5/spiece.model (791656 bytes); unreadable as text, elided ...]
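The hunk elided above (and the one for tokenizers/llama/tokenizer.model further below) uses git's binary-patch format: the new file's bytes are zlib-deflated and written out as base85 text, one length-prefixed line per up-to-52 payload bytes, under a `literal N` header giving the inflated size. As a point of reference only, a minimal sketch of a decoder for such a hunk, written against git's published format (standard library only; this is not code from the patch):

import zlib

# git's base85 alphabet, in the order used by git's base85.c
B85_ALPHABET = ("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                "abcdefghijklmnopqrstuvwxyz"
                "!#$%&()*+-;<=>?@^_`{|}~")
B85_DECODE = {ch: i for i, ch in enumerate(B85_ALPHABET)}

def decode_literal_hunk(lines, inflated_size):
    """Decode the data lines of a `GIT binary patch` / `literal N` hunk.

    Each line starts with a length character ('A'-'Z' -> 1..26 payload
    bytes, 'a'-'z' -> 27..52) followed by 5-character base85 groups, each
    encoding a 4-byte big-endian word; the concatenated payload is the
    zlib-deflated file content, `inflated_size` bytes once inflated.
    """
    deflated = bytearray()
    for line in lines:
        head, data = line[0], line[1:]
        n = (ord(head) - ord("A") + 1 if head.isupper()
             else ord(head) - ord("a") + 27)
        words = bytearray()
        for i in range(0, len(data), 5):
            acc = 0
            for ch in data[i:i + 5]:
                acc = acc * 85 + B85_DECODE[ch]
            words += acc.to_bytes(4, "big")
        deflated += words[:n]  # the last word of a line may be zero-padded
    content = zlib.decompress(bytes(deflated))
    assert len(content) == inflated_size
    return content

Fed the (elided) data lines above with inflated_size=791656, this would reconstruct spiece.model byte-for-byte; `git apply` performs the same decoding natively when applying the patch.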
z4ImmoG=O=gP$;~)hPQ$`kMNHm`yxCQtmB3Mui>-c(k^)H&quHPY}bv~zirrYnD3AJ z?QlPL-v7Gu@2@)_>^%O@Q?G~owqfUbq#rxiBR&3f_mr<2cTc(7@sxw=)*LK-1;;vA zNY23(X%2?(=U_5;4(8kDU=U=EZUN@#yf;Vx^m8a*#$o=tPg>?$g= zD-X;rPd~dP>+G_6v%A%r-Mz%@7LT(tB?ND6f9H7n^{wqXv}e~&JG*|O*)@>NNd_soX", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "1": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "2": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32000": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32001": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32002": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32003": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32004": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32005": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32006": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32007": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32008": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32009": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32010": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32011": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32012": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32013": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32014": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32015": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32016": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32017": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32018": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32019": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32020": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32021": { + 
"content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32022": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32023": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32024": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32025": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32026": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32027": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32028": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32029": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32030": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32031": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32032": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32033": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32034": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32035": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32036": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32037": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32038": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32039": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32040": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32041": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32042": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32043": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32044": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32045": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32046": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + 
"special": true + }, + "32047": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32048": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32049": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32050": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32051": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32052": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32053": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32054": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32055": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32056": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32057": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32058": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32059": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32060": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32061": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32062": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32063": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32064": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32065": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32066": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32067": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32068": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32069": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32070": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32071": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32072": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": 
false, + "single_word": false, + "special": true + }, + "32073": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32074": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32075": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32076": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32077": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32078": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32079": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32080": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32081": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32082": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32083": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32084": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32085": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32086": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32087": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32088": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32089": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32090": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32091": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32092": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32093": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32094": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32095": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32096": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32097": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32098": { + "content": "", + "lstrip": false, + 
"normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32099": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32100": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32101": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32102": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32103": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32104": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32105": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + }, + "32106": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false, + "special": true + } + }, + "additional_special_tokens": [ + "", + "", + "", + "", + "" + ], + "bos_token": "", + "clean_up_tokenization_spaces": true, + "eos_token": "", + "extra_ids": 100, + "legacy": true, + "mask_token": "", + "model_max_length": 512, + "pad_token": "", + "sp_model_kwargs": {}, + "tokenizer_class": "T5Tokenizer", + "unk_token": "" +} diff --git a/tokenizers/llama/added_tokens.json b/tokenizers/llama/added_tokens.json new file mode 100644 index 0000000..4a9d3f4 --- /dev/null +++ b/tokenizers/llama/added_tokens.json @@ -0,0 +1,9 @@ +{ + "": 32005, + "": 32006, + "": 32001, + "": 32000, + "": 32003, + "": 32004, + "": 32002 +} diff --git a/tokenizers/llama/special_tokens_map.json b/tokenizers/llama/special_tokens_map.json new file mode 100644 index 0000000..2e4806a --- /dev/null +++ b/tokenizers/llama/special_tokens_map.json @@ -0,0 +1,74 @@ +{ + "additional_special_tokens": [ + { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + } + ], + "bos_token": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "eos_token": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "mask_token": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "pad_token": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + }, + "unk_token": { + "content": "", + "lstrip": false, + "normalized": false, + "rstrip": false, + "single_word": false + } +} diff --git a/tokenizers/llama/tokenizer.model b/tokenizers/llama/tokenizer.model new file mode 100644 index 0000000000000000000000000000000000000000..22bccbcb41ec929cf0c9dbe8f41036db82e5e773 GIT binary patch literal 499723 zcma%^36x~lS>GSn7-+l{i^1$=Sy&@mmP^t|8X3ucJu^Mhh#5^;J+d&i>{s1gJyTM5 
zSE-j7jck!8gN*?L#b(T21_Ksg(w5}~OSYfYh7dwXH6er$LMjd+gb>mpgb+eVe*f?P z-W#9CIZ67=ne+Yc_APgL_kMS&x#!M(&aFEe54`gE34bs6?73&pJ>%A`5vR`KCSQoA#J*+GDw8ycgJ&rZ)aja>NV@-P;Yue*j(;ml~_BhtG$FZh8jyLUb zylIc)O?w<~+T(cB9><&ZINr3!@uoeFH|=q}X^-PgdmL}t<9O2^$D8&z-n7T@racy# z_E>1zW1(q}g{D0gn)X;|+GC+d~)uK=bmkD{{J(1Dn>&FRp%sby8`AMho-R!vb@hU$OKYkgzAAETNS?McE5nsl5|wnQ0y6G3 zk!VYaI#sFLQs8hzEUU~TRfdy*TO)i^Nz$k1zIN^fpIOnLNxRb3pv3f!s_>yqcq$56 zQB`!!M*^I!_#6U!V}$o)v)5PQ#{gMXP~={zq85odUjfzLS_S3-qYAtpuw4PMf9UD5 zx%9$jZ?59b5|@VvLy7;ED*OWATO)iR(cb1HlK$-$SeE~eAa@k{jw+)n`MV>$uZ&|S zQM>%UAP36$16Asm0N&|vWqenVo9c`|Qt2NJ|Hmp6J>n-K*@IGl668?r`=_cxPXpfL zW)v0vvsJ_;BHkMrwq%o^5AuS@Ukq|tktry@N1>Br0FEh`j352J#g4thJrr%y%Rb@zc^jl4tkl3%OJJP!Ev2p_1G z{YDt4x9#5cn^jG(#sD*|GN}rFs{*G0zg;D~6vM#pR3=`M>c1PsugPM+7ouGV(SARI zd+O|e5ahNJ&#J5gHHA-CC_4WiMzYNT5q}iqj#T;MN{}l6e^O<*s9yM|l~~J>|IezF zcMx|_rHp$0^C)j1#r~p7wE+0b3Y-A^RU}$|0P?S^+&$Ik-$XVF$#Usagw#V9Sk zKWi_Btw{0Nd*#@3HxbWpM9A>WpxvoHXK(JgbDtYSat!m&+AD{zo?67`Rz+M1_`C|d z2=MvAbEqaU7vzq5&KFck?gczM!q3aR&x!D5H=E~HiEbz6d6p%rq!o03OUC}fD$!kl zFLFfmsxOXGNBSOoNiC~#JikKGPrh`o96Lra9DZ4)=EXtS<^?WA4dKfxPBQmbl$=!H zS2~U4|EeIb{(8u-4suOn?APqged*luK66}!_R#9j-11OHXl88b(LOgig|HWz?%S9RZ;Q%KNM)W>{3_PaSxI5C6-#%_0k}B)E%y= z3cDZhGA9v}Yn?;_zr0FvC!iBz4dTOdT~+>yOmTgc>@$@cYC!t)aP)=Gqz0d6y=80KGDnfYvt zX0NK?#!zykRUsQHa^4!Qsqr7JVq#iyEch;KOgJ8iHlxxOoFOuwh$Jt6IZ190a;R2% zN66Jb7xRIWdvjknH%Bv}%FDs>&b_&t=Q@p6^#6ozW=!&y%92k7oBc=k-vZCmAyqGW z(p@3S;9-K#1bOwfkX;L?;p5enp3_f&?ygXb#EX@&F2=rn`V|s$yOPcwNYIPId75ipiEfmiI+OPnLXxi;YBYjLgHG zH8trh(nKBfoLsGKDaeOZ$;*~B29TAY2T`g0Nb)kZnAM8w^l{L+3N2|=Js-)|q}sqT z#2|8i$grP>HV;I^QhagNoGALqy6I?98&#=i_0bqc%+8tw996b>1n^)5?g4C8dEy(i zRYj>fw_QMt={r?7Ip2%P!G(xh8)(vcF*0HGj{qL3YIHfiD3?ODZB^mJk;P-yn#?J zHk39wwAU|T!f!F3WyL_VJ{p{h&&s!!!YBgT3#Y4)@~xGUJym%Qba9>+R%nY_$lq4w zyOn(3Rz=ODYhI)MM~QoTFzanI*5sxK(Vo7&$|ED@Al49`27E`*!%doKZhEk-=JcH| zDZ-(@eC~_>Zg;QyH&Gh4@^@4!EybYrU18E8)tj5VO|z2ku4EZW77()Rgp}VC^hnJ! 
zH{FZ*#kiD_k6&KWvF7<+%ecXZFo(vp(~nL+4vX(|L8`JG+8?@=zCM`WZ<$mSB&>&< z`u_bu&=-R>v=)^5)c#!X}&wr4&PN}=}Eg9 z#XR$eBiD|qz~o4h{zxz!Xx5RNb{qUrYx*&1ngft(HG1@qSq_N=N$?K0MieIK!B>+Fj(?4m3-?o&b&q!F;4Qcb(~BE zf>F#$ey)n>$+RFu+u76z@!o1AV2ZB#02B9$_$)qVjiXaSTZ&!`AU_{vud+7CO%7zO zUx-BgEl+B4piB@`sb8!lh=v71+ckgk-d9Cw#LGd?oBKR$b@9BXF259|3{^^A(xKDe zuOu2pLjtJ6&fDjU!@Auf{0jEjXrK!HIL4r30~G<{DCNE7%evk zm_>YBo~Zb5s*ZCY%J$<^^TEh#x%zdKU>-H^{5yJFe(WF<#m5`w5I zYF%UDQ^EOKrX)Ee;UiBEKNjHwjU|v)H%71I<5l}&kX3PDw{v~nJ`sSSiU~#zK>Dqa zzHr2o(INn-8_n*MrG_lV^O8QNR{V4%c_Xvz9Fk&2G3@RIJ=9bqhi2)tvr)~sKUHbD z!bBkl$=aJz@>e2eP&Q%^+@n$bs-^ni^kd`zkmHCFLEG;y0kS%Z_R%O2yxv=}I zi35;pGd|}3u*x-kf*25n>2E|+`1Q(a7l)fNYK_au*gvwpnP%mX{H2i; zh9&~OFDBw2S2D%4Jg?zSjN-o;*;b-!Lbv8VpKS9R<)HRYYGDSdgdB_xgNI>~OoPe( zTcQR5M689YWWQ4Gm_FMeo5c3|Etep-L~eQ*^QC|4@C~U9O=laG>!aTeKGP?IkHNMF zuzwbLc4AP6rmp(xxW5zRj)r!uqv_39 zEB(jFMa$)bn*(gYt?18x8gYo95(lu)W%>zyG?M>RSs~^lAe8HC)&6Hy&)Mv|ZRUhv z%U-x`|8vEnx1l-%+Lk9(We0n6Uo`gxFkGz%sGKlLev|yH|0T$)t|1>JMeaSOdCQ-> z_?X;5%lx|z7$tLH)Izf}^Rk68lH624kfmA|Zt zWW}Lj2P}S1#(d?!S4pQICJIE3hlTp&vhKPtjR?IZL?je=J45{3fq5hKPQ{Pn&%n-r_=PZaMWNcmSxZQRN2Ir z<%Umd`04!n(HDq}8j1=?$rLl=XI7bdD@-s_AW;LY;`5T0)1ol#&)w&QaLkx6V+tU{ zT1=1}aUp!p1>oAbt7!25KmR;Kp&Cj2O_chq5LOG^91;eb`s5U*`x-!!&ke!DVU2(( z5cPT*b|A?SV_gA_xptPk@cz7r>FMi}LfepAYV0+-ml6sPvb>Bx7=3<}yrn!QV=#n- zN$;GD15I)U8qIj-N*=0kXb(D{&VK+Vj_Rwi7iF@T3w%LjiAqeN-9|S0G2;VU%)JS@ zoN^Ei_St1MoI~Q&fVqRlI)K@(^yy0lP-BC9iq8p7F{q}{=5+DAyhl0MoaQla0V2~y zntSrxAlKEVOy2r(l0!QA-E`2RMzI=tURCs9L%lKuQ`nNmBa=PcB?Y9#d{HN=XK0HG8B$ki*3X&3>!?J(4-*+kYzi@L(?(V7iFR+V{HGTN;F0}%1*(E z7T*p?s&V?b3M&j~j_8_vag}K$+zlz%x@!@YgDB#@L*#-izcS&$`I3-ms2Nxa?L7CW zRXc{RLMCYPYq5|#KQdga>30h44AXa$!7-O)&Z3Ta`WMLarIBYK??DPpQ3JkJxr1Zn zR6x6^{cj<|msQ$a>_ry_lPLx=NK81?cdvkUrdWo&phBA3q#$=z1kC|tiUm{w?M5}+ zh~>nWSJ}2$v!x(as$z0zieN`bJqTdCdB5r_LZYi#e5WQ4;Y7+IooHR#90icVdKa`9 z{K}xWwb_$GdwRQd-cyU55H#uAzgvJTVmV*@stWhk~7%u7swC3tP@ zc71hl-TQ#JLOa(3pxiNsv_Vlo+g5Kz(66bO;Z0Dh0AqU%u{oq?2(R9$*4G7L=s1We z{MS}lnfGA86l{*iDJTb#awz{x0bRDj*pF>ja5cUQK^%lY5K+BteQk1s)!1z;0Oq)?B9kj3;o30TBmg)ihUFX#;~lRr3Xn`0tL?9=#67RiN(!{f z>NfzJby1z5PPrU=6)%oRX8I!yYalbce@Fc?2ipoGZM7CanQJ5b<5vY3tjYnL0+401 zuZn|4qet*F7mzM*+a8reHi(=GDKv#mKeqp!W7I|eDpNMOs^h@eGkf^rVk2xuT5_Tkg0qqh_sZqVmIpCT?Izmh09Aup~wC7m> zDd9pah_1EOp3s*$H5kSwbPjN)6}cq~ASL44i5=9JSLL51E(JO1R;;>#*yWN~L6#ME zJ{ISl3S(WhV({0^ea-)U0_3TW8N@WIAeZ4_TPD6P^y$Y(1R7JZ*wg$B;DK&b&Tk4r zY?tXzlJWYgoE3d$QXr9hLBePtw9o;Q0@7O8G|3GS^|Duz2hyU@(BKG!{YFooy24($VJW93n29~Sk}0GqwD8!eVS7Pro(8R1C&bZ>%#O-Omc6k zS~Lrv7(oe8$9O6nNdR&UH`U^x(U-MZ7VaiBq>a3_t=U%LWUB+pOZ`T)2F85vW6lQJ6WI73c=i(1l(x2R!1wgGPq zo{zBDNKIZredmzQv!(s$0%(O}rhS# zEG-f3Cz;imXaU4rc3t;8W z$MyhFttO`Fb<}1w<-2MIvA2|ho%3|K-GJ!0pKBpd7+e^{BJ@nyf-O|dkWvsF!cUV! 
zyRw+#M^ka+W_X*rVa@ZZ0Z5hoZLtOg*tGM~3}h+#u|fFii0rS&?tE%Q-G@a$PTGsH zABZ4)WiZLzL2t$A1MMcsq&ED~IU&xts1HQ}M4C~klPp$oo3dXDv~Y_w#vD|({Na!p zQ~@SuZz*OFXFpXQ5Wx}T(IO?tj4sHjlpEr79qJ=d0O`fc6r9`>@{U4YXa%E2?%>`E zbm8DD=pxB$f-RdU`f>nJySL6LoI`6|xQ67A2##wby6C-?AU!#6Q=mkO0JY05CoL$ z;dK9|poelhrKXp|tpRPDZ^u?faT%WvPu=UPcWdn!+YN zy!iS^u-VftbqY}Xk&`5cF1!1WkriZBy*+TB69zZaF>T7Bohj-IRMx$ELh^qLoJ8?wwjaB{=8r)K#GwI8igV1a>rq6`|^O{}wWK|dJdBK$ufIS}3 zb{M3qeZ8E%7LL4v<#W;tIhcvTtfa%&0c}0#?}!L=8O(;g!M+p>&fG;llRYhtAd#2v zcB~$Z*j0`JWI0$Ji`H&B=C3(4THWPIqa*B%U8!VPk_;&jlf^nKhqhf7=wSs2WqWd9 z^(#Z1vlBTbfVh*$nL|30CPxKGz4%$Zp@0r$^ofRe-IT*g2Jo_Fp0@VJO=|sr`5y2Xgh}3|^ zkS1^dQiTh+fOcCLE_r`DS+k8bERxnBsp*t z$M6i2rg8RZ6gf&O8sPn7SjhqAvOudZKvvqP86K>(u=6SfqXnCZm^BADr-r!#1h<`a zT5+-&F_p6dM437s7+*a{B4n&W9JyHfOp>jN|C;;?DG=SXqmV~n2b z;%Mobf{>KXOEGyJwMtassDby1c5FQwQwm9zfp?Aps587s_5!*b!TmA1P~mc21}5Vs zUuSsLk+cJFQAX~BH|Aoc-YEQ2K-Exe(my{GgQg0@;r?#bq;o_0J6Xe94##@hSu z8LVqaO##jnBYy5UCVzaciaKtn^Fy4yC{!KPkFL((=lvK%5Zrv z3TV%MN5k-$%?Ln7i3W0f0|2m)+<%>^@%tF!cT)tD6q`b@w`7IP>D zpaQ5)pz(YL4Z~9lF#ML#tRE}>Hh?tj!(qiTB79q!!B*^~<_3Hz#H9o)+PEzMl)FO@ znwh*GJ0#y)3Az;ymp0gHoK&rY;JNo0mdQ-7?x=&;xmLo^lRDRuwkaK}6D)D2WCo@I z&~PwZ^41XJeoAaZ5{?ltK?<=1{kHws%M4Pcnm^mRE ze$wo1!ASQ*+K{Lb2hLMyD|oMza-s+Hm3N{|`93Dl>IdxlqmCokF2HioZc$z?e4Fj_ zA(FMBvG1yS7^IJx_a7>~Vi`eJk1x5udf> z?RD!rKhXAOn*f-f)yx#qnZxs!0oAnhp~;~w#ECetGy%I{T&b}*D+sZ5G#41L}7#22~WN&QfU#>HYsmq z3Q0LDI^B{%mm}TgFb*&gV3yM#hYg6*&TZ=KVo^t(kqJSz#JO-?e0St~a3}T=+aOr1 zh3CZdP=|UlXlII#LJqX8@!4RBNgcfsu0f!byasM2FVG&7@2N5|iXuoGjEsB-2eLvfD}3XxDJ0L-y7ikwzl8f2F#D? zyD7j|hrMQt;V&c5da#a?-Z=r*L*vk-Mhl<}t3Y z`;I^tjB8uTv&-?CmV3Q4oNkdiV%uCXdl~lOK%YFNO=V z4JiBA4^XEdm|-E9Ioa|+YUKu_HTiKSP8BDr3eyAi>lw6rR9B-_^3LF{A)*b5MmQEQ zR|>EeTRYlX%pA!Aj*&Qbqz)U!GHg;OUWr*v0hT$m!!R?=6dz_kRPo-HFR%@S;c*|A zFrZs#)C9OCOg~Yc$0x+{tc3?{Qm5GJGe^|tfe?(url3v^%s`aQj7H<nxR{i`ErTREzA{)qyP&%@ zC2^wD^KgK)kjG@JnbR?cR=LSfRu;M#m%7?um!cC`sZoz>9aL9VB4((Fnj7IwP>oGM z$oDW!w1BkHeO7D+Hh;W_aGZq9JNm%1O<#(B1dWN#hO;&Ul(X4t<3A^;?87nroH(x! z>eR{Ei1es8BZ%TOO2hNrq1A&saeb=|a^Val9M)DRCdlI|7e>^IGUwofc}BJcVQ0P# zXli=zeYBba+WkexEz1x6%n2?;!~CgA==GR#w85B3$9_^s%G}XbbOvn`Eo)fKfv$ut z+Q9^5#XXo6&e)UZX$Fm!YyG%>^3zecPAjz`oo9Pfd(5e$4qXW^Nci^Ee)BrndaT4I zj@x@TaRszxx=UMxGoU*NYbD>xc#jK`U%3s99JE!&n-t)j(Rs2mE)JpQ&U7{`=?HB6 zD%d$w%wzcO&43tcRpb88RJw3D0>ibzSch#MHQ*G4ImBXQtrP~ zHx!FHlEt8gEHF~*%A~w($n`-fIoCK) zNkc3^sI{$884_W9Fgf|Ts+19{EfTf~a4FGIQXm`$tNH{$yLIUNMD7?|)$pn_t;