From 02cb7fb7e5f6411a0ad7c1468f5cba67d8eb5a8f Mon Sep 17 00:00:00 2001 From: <> Date: Wed, 20 Nov 2024 07:00:04 +0000 Subject: [PATCH] Deployed 0b5c285 with MkDocs version: 1.6.1 --- .nojekyll | 0 404.html | 2188 ++++++ algorithms/adamerging/index.html | 5921 +++++++++++++++ algorithms/concrete_subspace/index.html | 2632 +++++++ algorithms/depth_upscaling/index.html | 2992 ++++++++ algorithms/dummy/index.html | 2470 ++++++ algorithms/fisher_merging/index.html | 3246 ++++++++ algorithms/images/Task Arithmetic.png | Bin 0 -> 51294 bytes algorithms/images/adamerging.png | Bin 0 -> 157151 bytes .../adamerging_layerwise_coefficients.png | Bin 0 -> 109467 bytes .../adamerging_model_merging_coefficients.png | Bin 0 -> 47188 bytes .../concrete_adamerging_vs_adamerging.png | Bin 0 -> 147388 bytes .../images/concrete_subspace_learning.png | Bin 0 -> 95180 bytes algorithms/images/ewemoe.png | Bin 0 -> 245806 bytes algorithms/images/ewemoe_1.png | Bin 0 -> 206776 bytes algorithms/images/ewemoe_2.png | Bin 0 -> 188446 bytes .../images/fedmr_model_recombination.jpg | Bin 0 -> 171350 bytes algorithms/images/max-model_predictor.png | Bin 0 -> 129678 bytes algorithms/images/pwe_moe.png | Bin 0 -> 84479 bytes algorithms/images/sigmoid.png | Bin 0 -> 18481 bytes algorithms/images/smile_upscaling.png | Bin 0 -> 65785 bytes algorithms/images/solar10.7B.png | Bin 0 -> 81599 bytes algorithms/images/sparse_upcycling.png | Bin 0 -> 105413 bytes algorithms/images/ties_merging.jpg | Bin 0 -> 331326 bytes .../ties_merging_hyperparameter_tuning.png | Bin 0 -> 240371 bytes algorithms/images/wemoe.png | Bin 0 -> 138798 bytes algorithms/images/wemoe_loss_landscape.png | Bin 0 -> 287396 bytes algorithms/images/wemoe_lr_tuning.png | Bin 0 -> 30971 bytes algorithms/index.html | 2817 +++++++ algorithms/layer_recombination/index.html | 2213 ++++++ algorithms/max-model_predictor/index.html | 2428 ++++++ algorithms/model_recombination/index.html | 3060 ++++++++ algorithms/model_stitching/index.html | 
2208 ++++++ algorithms/moe_based_merging/index.html | 2970 ++++++++ algorithms/moe_based_upscaling/index.html | 3759 +++++++++ .../llama_2_4_semistructued_first_layer.png | Bin 0 -> 35713 bytes .../pruning/magnitude_pruning/index.html | 2885 +++++++ algorithms/pwe_moe/index.html | 3470 +++++++++ algorithms/regmean/index.html | 2373 ++++++ algorithms/simple_averaging/index.html | 2787 +++++++ algorithms/simple_ensemble/index.html | 2633 +++++++ algorithms/smile_upscaling/index.html | 4277 +++++++++++ algorithms/specification_ensemble/index.html | 2208 ++++++ algorithms/task_arithmetic/index.html | 3081 ++++++++ algorithms/ties_merging/index.html | 3327 ++++++++ algorithms/weight_ensembling_moe/index.html | 5472 ++++++++++++++ algorithms/weighted_averaging/index.html | 3808 ++++++++++ algorithms/weighted_ensemble/index.html | 2757 +++++++ assets/_mkdocstrings.css | 143 + assets/images/favicon.png | Bin 0 -> 1870 bytes assets/javascripts/bundle.83f73b43.min.js | 16 + assets/javascripts/bundle.83f73b43.min.js.map | 7 + assets/javascripts/lunr/min/lunr.ar.min.js | 1 + assets/javascripts/lunr/min/lunr.da.min.js | 18 + assets/javascripts/lunr/min/lunr.de.min.js | 18 + assets/javascripts/lunr/min/lunr.du.min.js | 18 + assets/javascripts/lunr/min/lunr.el.min.js | 1 + assets/javascripts/lunr/min/lunr.es.min.js | 18 + assets/javascripts/lunr/min/lunr.fi.min.js | 18 + assets/javascripts/lunr/min/lunr.fr.min.js | 18 + assets/javascripts/lunr/min/lunr.he.min.js | 1 + assets/javascripts/lunr/min/lunr.hi.min.js | 1 + assets/javascripts/lunr/min/lunr.hu.min.js | 18 + assets/javascripts/lunr/min/lunr.hy.min.js | 1 + assets/javascripts/lunr/min/lunr.it.min.js | 18 + assets/javascripts/lunr/min/lunr.ja.min.js | 1 + assets/javascripts/lunr/min/lunr.jp.min.js | 1 + assets/javascripts/lunr/min/lunr.kn.min.js | 1 + assets/javascripts/lunr/min/lunr.ko.min.js | 1 + assets/javascripts/lunr/min/lunr.multi.min.js | 1 + assets/javascripts/lunr/min/lunr.nl.min.js | 18 + 
assets/javascripts/lunr/min/lunr.no.min.js | 18 + assets/javascripts/lunr/min/lunr.pt.min.js | 18 + assets/javascripts/lunr/min/lunr.ro.min.js | 18 + assets/javascripts/lunr/min/lunr.ru.min.js | 18 + assets/javascripts/lunr/min/lunr.sa.min.js | 1 + .../lunr/min/lunr.stemmer.support.min.js | 1 + assets/javascripts/lunr/min/lunr.sv.min.js | 18 + assets/javascripts/lunr/min/lunr.ta.min.js | 1 + assets/javascripts/lunr/min/lunr.te.min.js | 1 + assets/javascripts/lunr/min/lunr.th.min.js | 1 + assets/javascripts/lunr/min/lunr.tr.min.js | 18 + assets/javascripts/lunr/min/lunr.vi.min.js | 1 + assets/javascripts/lunr/min/lunr.zh.min.js | 1 + assets/javascripts/lunr/tinyseg.js | 206 + assets/javascripts/lunr/wordcut.js | 6708 +++++++++++++++++ .../workers/search.6ce7567c.min.js | 42 + .../workers/search.6ce7567c.min.js.map | 7 + assets/stylesheets/main.0253249f.min.css | 1 + assets/stylesheets/main.0253249f.min.css.map | 1 + assets/stylesheets/palette.06af60db.min.css | 1 + .../stylesheets/palette.06af60db.min.css.map | 1 + cli/fusion_bench/index.html | 3015 ++++++++ cli/fusion_bench_webui/index.html | 2381 ++++++ cli/images/fusion_bench_webui.png | Bin 0 -> 310328 bytes cli/images/pycharm_debug_1.png | Bin 0 -> 7648 bytes cli/images/pycharm_debug_2.png | Bin 0 -> 5876 bytes cli/images/pycharm_debug_3.png | Bin 0 -> 40600 bytes cli/images/tab_completion.png | Bin 0 -> 58416 bytes cli/images/vscode_debug.png | Bin 0 -> 4766 bytes ...vit-base-patch32_robustness_corrupted.yaml | 23 + .../image_classification/test/cifar10.yaml | 4 + .../image_classification/test/cifar100.yaml | 4 + .../image_classification/test/dtd.yaml | 4 + .../image_classification/test/eurosat.yaml | 4 + .../image_classification/test/gtsrb.yaml | 4 + .../image_classification/test/mnist.yaml | 4 + .../image_classification/test/resisc45.yaml | 4 + .../test/stanford-cars.yaml | 4 + .../image_classification/test/sun397.yaml | 4 + .../image_classification/test/svhn.yaml | 6 + .../test/the_eight_tasks.yaml | 9 + 
.../test/tiny-imagenet.yaml | 4 + .../image_classification/train/cifar10.yaml | 4 + .../image_classification/train/cifar100.yaml | 4 + .../image_classification/train/dtd.yaml | 4 + .../image_classification/train/eurosat.yaml | 4 + .../image_classification/train/gtsrb.yaml | 4 + .../image_classification/train/mnist.yaml | 4 + .../image_classification/train/resisc45.yaml | 4 + .../train/stanford-cars.yaml | 4 + .../image_classification/train/sun397.yaml | 4 + .../image_classification/train/svhn.yaml | 6 + .../train/the_eight_tasks.yaml | 9 + .../train/tiny-imagenet.yaml | 4 + .../dataset/image_classification/val/dtd.yaml | 10 + .../image_classification/val/eurosat.yaml | 10 + .../image_classification/val/gtsrb.yaml | 10 + .../image_classification/val/mnist.yaml | 10 + .../image_classification/val/resisc45.yaml | 10 + .../val/stanford-cars.yaml | 10 + .../image_classification/val/sun397.yaml | 10 + .../image_classification/val/svhn.yaml | 12 + .../val/the_eight_tasks.yaml | 9 + .../dataset/question_answering/search_qa.yaml | 6 + .../question_answering/test/search_qa.yaml | 7 + .../question_answering/train/MetaMathQA.yaml | 4 + .../question_answering/train/search_qa.yaml | 7 + .../question_answering/val/search_qa.yaml | 7 + config/dataset/summarization/test/xsum.yaml | 4 + config/dataset/summarization/train/xsum.yaml | 4 + config/dataset/summarization/val/xsum.yaml | 4 + config/dataset/summarization/xsum.yaml | 3 + .../text_generation/test/gsm-hard.yaml | 4 + .../dataset/text_generation/test/gsm8k.yaml | 5 + .../test/gsm8k_question_label.yaml | 3 + .../text_generation/train/CodeAlpaca-20k.yaml | 4 + .../dataset/text_generation/train/gsm8k.yaml | 5 + .../train/gsm8k_question_label.yaml | 3 + config/fabric/auto.yaml | 16 + config/fabric/loggers/csv_logger.yaml | 11 + config/fabric/loggers/tensorboard_logger.yaml | 11 + config/fabric_model_fusion.yaml | 20 + config/hydra/default.yaml | 8 + config/hydra/help/fusion_bench_help.yaml | 47 + 
config/hydra/job_logging/rich_logging.yaml | 20 + config/index.html | 2239 ++++++ config/llama_magnitude_pruning.yaml | 16 + config/llama_model_fusion.yaml | 17 + config/llama_weighted_average.yaml | 26 + config/method/ada_svd/clip_vision.yaml | 9 + config/method/adamerging.yaml | 23 + config/method/adamerging/clip.yaml | 23 + config/method/adamerging/llama_sft.yaml | 33 + .../analysis/task_vector_cos_similarity.yaml | 6 + .../analysis/task_vector_violin_plot.yaml | 6 + config/method/clip_finetune.yaml | 26 + .../clip_concrete_layer_wise_adamerging.yaml | 27 + .../clip_concrete_task_arithmetic.yaml | 25 + .../clip_concrete_task_wise_adamerging.yaml | 27 + config/method/dare/simple_average.yaml | 5 + config/method/dare/task_arithmetic.yaml | 6 + config/method/dawe/dawe_for_clip.yaml | 32 + config/method/depth_upscaling.yaml | 5 + config/method/dummy.yaml | 1 + .../method/ensemble/max_model_predictor.yaml | 1 + config/method/ensemble/simple_ensemble.yaml | 2 + config/method/ensemble/weighted_ensemble.yaml | 6 + .../fisher_merging/clip_fisher_merging.yaml | 13 + .../method/fisher_merging/fisher_merging.yaml | 9 + .../fisher_merging/gpt2_fisher_merging.yaml | 12 + config/method/linear/expo.yaml | 8 + .../method/linear/linear_interpolation.yaml | 3 + config/method/linear/llama_expo.yaml | 19 + .../method/linear/llama_expo_with_dare.yaml | 19 + .../linear/simple_average_for_llama.yaml | 5 + .../linear/task_arithmetic_for_llama.yaml | 4 + config/method/linear/weighted_average.yaml | 6 + .../linear/weighted_average_for_llama.yaml | 12 + config/method/mixtral_moe_merging.yaml | 4 + config/method/mixtral_moe_upscaling.yaml | 7 + config/method/model_recombination.yaml | 4 + .../pruning/llama_magnitude_pruning.yaml | 14 + .../method/pruning/llama_random_pruning.yaml | 9 + .../method/pruning/llama_wanda_pruning.yaml | 16 + .../pruning/magnitude_diff_pruning.yaml | 5 + config/method/pwe_moe_ls_for_clip.yaml | 22 + config/method/regmean/clip_regmean.yaml | 10 + 
config/method/regmean/gpt2_regmean.yaml | 12 + config/method/regmean/regmean.yaml | 4 + config/method/simple_average.yaml | 1 + config/method/slerp/slerp.yaml | 6 + .../singular_projection_merging.yaml | 8 + .../smile_mistral_upscaling.yaml | 10 + .../smile_upscaling/smile_upscaling.yaml | 14 + .../llama_iterative_sparselo.yaml | 20 + .../sparselo_pruning/llama_pcp_sparselo.yaml | 20 + .../sparselo_pruning/llama_sparselo.yaml | 19 + config/method/task_arithmetic.yaml | 2 + config/method/ties_merging.yaml | 8 + .../trust_region/clip_task_arithmetic.yaml | 7 + .../wemoe/sparse_weight_ensembling_moe.yaml | 39 + .../method/wemoe/weight_ensembling_moe.yaml | 20 + .../model/clip-vit/clip-vit-base-patch16.yaml | 3 + .../clip-vit/clip-vit-base-patch16_dtd.yaml | 3 + .../clip-vit-base-patch16_eight_tasks.yaml | 10 + .../clip-vit-base-patch16_eurosat.yaml | 3 + .../clip-vit/clip-vit-base-patch16_gtsrb.yaml | 3 + .../clip-vit/clip-vit-base-patch16_mnist.yaml | 3 + .../clip-vit-base-patch16_resisc45.yaml | 3 + .../clip-vit-base-patch16_stanford-cars.yaml | 3 + .../clip-vit-base-patch16_sun397.yaml | 3 + .../clip-vit/clip-vit-base-patch16_svhn.yaml | 3 + .../model/clip-vit/clip-vit-base-patch32.yaml | 3 + .../clip-vit/clip-vit-base-patch32_dtd.yaml | 3 + .../clip-vit-base-patch32_eight_tasks.yaml | 10 + .../clip-vit-base-patch32_eurosat.yaml | 3 + .../clip-vit/clip-vit-base-patch32_gtsrb.yaml | 3 + .../clip-vit/clip-vit-base-patch32_mnist.yaml | 3 + .../clip-vit-base-patch32_resisc45.yaml | 3 + .../clip-vit-base-patch32_stanford-cars.yaml | 3 + .../clip-vit-base-patch32_sun397.yaml | 3 + .../clip-vit/clip-vit-base-patch32_svhn.yaml | 3 + .../clip-vit/clip-vit-large-patch14.yaml | 3 + .../clip-vit/clip-vit-large-patch14_dtd.yaml | 3 + .../clip-vit-large-patch14_eight_tasks.yaml | 10 + .../clip-vit-large-patch14_eurosat.yaml | 3 + .../clip-vit-large-patch14_gtsrb.yaml | 3 + .../clip-vit-large-patch14_mnist.yaml | 3 + .../clip-vit-large-patch14_resisc45.yaml | 3 + 
.../clip-vit-large-patch14_stanford-cars.yaml | 3 + .../clip-vit-large-patch14_sun397.yaml | 3 + .../clip-vit/clip-vit-large-patch14_svhn.yaml | 3 + .../clip-vit/generate_vit_model_config.sh | 23 + config/model/flan-t5/flan-t5-base.yaml | 3 + .../model/flan-t5/flan-t5-base_glue-cola.yaml | 3 + .../flan-t5-base_glue-cola_lora-16.yaml | 4 + .../model/flan-t5/flan-t5-base_glue-mnli.yaml | 3 + .../flan-t5-base_glue-mnli_lora-16.yaml | 4 + .../model/flan-t5/flan-t5-base_glue-mrpc.yaml | 3 + .../flan-t5-base_glue-mrpc_lora-16.yaml | 4 + .../model/flan-t5/flan-t5-base_glue-qnli.yaml | 3 + .../flan-t5-base_glue-qnli_lora-16.yaml | 4 + .../model/flan-t5/flan-t5-base_glue-qqp.yaml | 3 + .../flan-t5-base_glue-qqp_lora-16.yaml | 4 + .../model/flan-t5/flan-t5-base_glue-rte.yaml | 3 + .../flan-t5-base_glue-rte_lora-16.yaml | 4 + .../model/flan-t5/flan-t5-base_glue-sst2.yaml | 3 + .../flan-t5-base_glue-sst2_lora-16.yaml | 4 + .../model/flan-t5/flan-t5-base_glue-stsb.yaml | 3 + .../flan-t5-base_glue-stsb_lora-16.yaml | 4 + config/model/flan-t5/flan-t5-large.yaml | 3 + .../flan-t5-large_glue-cola_lora-16.yaml | 4 + .../flan-t5-large_glue-mnli_lora-16.yaml | 4 + .../flan-t5-large_glue-mrpc_lora-16.yaml | 4 + .../flan-t5-large_glue-qnli_lora-16.yaml | 4 + .../flan-t5-large_glue-qqp_lora-16.yaml | 4 + .../flan-t5-large_glue-rte_lora-16.yaml | 4 + .../flan-t5-large_glue-sst2_lora-16.yaml | 4 + .../flan-t5-large_glue-stsb_lora-16.yaml | 4 + config/model/flan-t5/generate_flan-t5.sh | 38 + .../CLIPVisionModelPool/_template.yaml | 12 + .../clip-vit-base-patch16_TA8.yaml | 8 + .../clip-vit-base-patch16_TA8_lora.yaml | 53 + .../clip-vit-base-patch16_individual.yaml | 7 + ...clip-vit-base-patch16_individual_lora.yaml | 14 + .../clip-vit-base-patch32_TA8.yaml | 5 + ...lip-vit-base-patch32_TA8_control_task.yaml | 24 + .../clip-vit-base-patch32_TA8_model_only.yaml | 3 + ...-vit-base-patch32_generalization_exp1.yaml | 24 + ...-vit-base-patch32_generalization_exp2.yaml | 24 + 
.../clip-vit-base-patch32_individual.yaml | 7 + .../clip-vit-base-patch32_mtl.yaml | 5 + ...lip-vit-base-patch32_robustness_clean.yaml | 18 + ...vit-base-patch32_robustness_corrupted.yaml | 29 + ...lip-vit-base-patch32_single_finetuned.yaml | 5 + .../clip-vit-base-patch32_svhn_and_mnist.yaml | 6 + .../clip-vit-large-patch14_TA8.yaml | 8 + ...clip-vit-large-patch14_TA8_model_only.yaml | 6 + .../clip-vit-large-patch14_individual.yaml | 7 + .../CausalLMPool/llama_for_causallm.yaml | 20 + .../CausalLMPool/simle_mixtral_exp_v4.yaml | 21 + .../CausalLMPool/single_llama_model.yaml | 17 + config/modelpool/Seq2SeqLMPool/_template.yaml | 8 + .../Seq2SeqLMPool/flan-t5-base_glue.yaml | 13 + .../flan-t5-base_glue_lora16.yaml | 41 + .../flan-t5-base_individual.yaml | 7 + .../flan-t5-large_glue_lora16.yaml | 45 + config/modelpool/automodelpool.yaml | 12 + config/modelpool/gpt-2_glue.yaml | 64 + config/modelpool/mixtral_moe_merging.yaml | 14 + config/modelpool/mixtral_moe_upscaling.yaml | 6 + config/modelpool/nyuv2_modelpool.yaml | 26 + config/modelpool/smile_mistral_exp_v1.yaml | 9 + config/modelpool/smile_mistral_exp_v2.yaml | 9 + config/modelpool/smile_mistral_exp_v3.yaml | 9 + config/modelpool/smile_mistral_exp_v4.yaml | 13 + config/nyuv2_config.yaml | 13 + config/nyuv2_mtl_train.yaml | 32 + .../CLIPVisionModelTaskPool/_template.yaml | 31 + .../clip-vit-classification_TA8.yaml | 11 + .../clip-vit-classification_TA8_B16.yaml | 31 + .../clip-vit-classification_TA8_L14.yaml | 12 + .../clip-vit-classification_TA8_val.yaml | 12 + ...-classification_TA8_with_control_task.yaml | 12 + ...rse_wemoe_clip-vit-classification_TA8.yaml | 18 + ...lip-vit-base-patch32_robustness_clean.yaml | 24 + ...vit-base-patch32_robustness_corrupted.yaml | 27 + .../clip-vit-base-patch32_svhn_and_mnist.yaml | 22 + config/taskpool/dummy.yaml | 2 + .../flan-t5_glue_text_generation.yaml | 44 + config/taskpool/gpt-2_glue.yaml | 39 + config/taskpool/nyuv2_taskpool.yaml | 9 + css/material_extra.css | 3 + 
css/mkdocstrings.css | 27 + guides/clip_vit/HFCLIPClassifier/index.html | 4458 +++++++++++ .../classification_templates/index.html | 3384 +++++++++ guides/clip_vit/finetune/index.html | 3562 +++++++++ guides/docker/index.html | 2252 ++++++ guides/fusion_bench/index.html | 2290 ++++++ .../mixins/lightning_fabric/index.html | 3225 ++++++++ .../mixins/simple_profiler/index.html | 2667 +++++++ guides/nlp/question_answering/index.html | 2663 +++++++ images/accelerate model training.png | Bin 0 -> 529761 bytes images/framework_of_model_fusion.png | Bin 0 -> 148210 bytes images/fusion_bench_flow.png | Bin 0 -> 170192 bytes images/learning_paradiagm.png | Bin 0 -> 69434 bytes images/llm.png | Bin 0 -> 699444 bytes images/model_ensemble.png | Bin 0 -> 347169 bytes images/model_merging.png | Bin 0 -> 257663 bytes images/model_mixing.png | Bin 0 -> 410371 bytes images/model_upscaling.png | Bin 0 -> 278001 bytes images/multi-task_core_steps.png | Bin 0 -> 272104 bytes images/multi-task_model_fusion.png | Bin 0 -> 70799 bytes index.html | 2745 +++++++ introduction_to_model_fusion/index.html | 2558 +++++++ javascripts/mathjax.js | 16 + modelpool/clip-vit-cos.png | Bin 0 -> 90700 bytes modelpool/clip_vit/index.html | 5974 +++++++++++++++ modelpool/flan-t5_generation/index.html | 3735 +++++++++ modelpool/gpt2_classification/index.html | 3064 ++++++++ .../images/NYUv2-0000003446-63769b25.jpg | Bin 0 -> 76962 bytes ...clip-vit-base-patch16_full&lora&l-lora.png | Bin 0 -> 38718 bytes ...-base-patch16_full&lora&l-lora_average.png | Bin 0 -> 41192 bytes modelpool/images/clip_eight_corruption.png | Bin 0 -> 2987111 bytes modelpool/index.html | 4289 +++++++++++ modelpool/llama_models/index.html | 2213 ++++++ modelpool/nyuv2/index.html | 2505 ++++++ objects.inv | Bin 0 -> 4452 bytes readinglist/images/Chronopoulou2023.png | Bin 0 -> 404338 bytes readinglist/images/adapter_soup.png | Bin 0 -> 145199 bytes readinglist/images/branch_and_merging.png | Bin 0 -> 177873 bytes 
readinglist/images/branch_and_merging_alg.png | Bin 0 -> 81795 bytes readinglist/images/depth_upscaling.png | Bin 0 -> 140676 bytes readinglist/images/enneng2024survey.png | Bin 0 -> 415917 bytes readinglist/images/forkmerge.png | Bin 0 -> 203366 bytes readinglist/images/fs-merge.png | Bin 0 -> 323513 bytes readinglist/images/fusechat.png | Bin 0 -> 156862 bytes readinglist/images/fusellm.png | Bin 0 -> 206575 bytes readinglist/images/lora_lego.png | Bin 0 -> 1209586 bytes readinglist/images/lorahub.png | Bin 0 -> 218749 bytes readinglist/images/pituning.png | Bin 0 -> 362755 bytes readinglist/images/pwe_moe.png | Bin 0 -> 200222 bytes readinglist/images/scaling_smart.png | Bin 0 -> 67557 bytes readinglist/images/smile_upscaling.png | Bin 0 -> 160885 bytes readinglist/images/sparse-modelsoups.png | Bin 0 -> 144027 bytes readinglist/images/sparse_upcycling.png | Bin 0 -> 138202 bytes readinglist/images/twin_merging.png | Bin 0 -> 342466 bytes readinglist/images/watt.png | Bin 0 -> 434582 bytes readinglist/index.html | 2909 +++++++ readinglist/mode_connectivity/index.html | 2729 +++++++ search/search_index.json | 1 + sitemap.xml | 3 + sitemap.xml.gz | Bin 0 -> 127 bytes supported_algorithms/index.html | 2399 ++++++ .../LlamaTestGenerationTaskPool/index.html | 3178 ++++++++ taskpool/clip_vit_classification/index.html | 3852 ++++++++++ taskpool/dummy/index.html | 2578 +++++++ taskpool/flan-t5_generation/index.html | 2848 +++++++ taskpool/gpt2_classification/index.html | 2696 +++++++ taskpool/index.html | 2650 +++++++ 391 files changed, 172854 insertions(+) create mode 100644 .nojekyll create mode 100644 404.html create mode 100644 algorithms/adamerging/index.html create mode 100644 algorithms/concrete_subspace/index.html create mode 100644 algorithms/depth_upscaling/index.html create mode 100644 algorithms/dummy/index.html create mode 100644 algorithms/fisher_merging/index.html create mode 100644 algorithms/images/Task Arithmetic.png create mode 100644 
algorithms/images/adamerging.png create mode 100644 algorithms/images/adamerging_layerwise_coefficients.png create mode 100644 algorithms/images/adamerging_model_merging_coefficients.png create mode 100644 algorithms/images/concrete_adamerging_vs_adamerging.png create mode 100644 algorithms/images/concrete_subspace_learning.png create mode 100644 algorithms/images/ewemoe.png create mode 100644 algorithms/images/ewemoe_1.png create mode 100644 algorithms/images/ewemoe_2.png create mode 100644 algorithms/images/fedmr_model_recombination.jpg create mode 100644 algorithms/images/max-model_predictor.png create mode 100644 algorithms/images/pwe_moe.png create mode 100644 algorithms/images/sigmoid.png create mode 100644 algorithms/images/smile_upscaling.png create mode 100644 algorithms/images/solar10.7B.png create mode 100644 algorithms/images/sparse_upcycling.png create mode 100644 algorithms/images/ties_merging.jpg create mode 100644 algorithms/images/ties_merging_hyperparameter_tuning.png create mode 100644 algorithms/images/wemoe.png create mode 100644 algorithms/images/wemoe_loss_landscape.png create mode 100644 algorithms/images/wemoe_lr_tuning.png create mode 100644 algorithms/index.html create mode 100644 algorithms/layer_recombination/index.html create mode 100644 algorithms/max-model_predictor/index.html create mode 100644 algorithms/model_recombination/index.html create mode 100644 algorithms/model_stitching/index.html create mode 100644 algorithms/moe_based_merging/index.html create mode 100644 algorithms/moe_based_upscaling/index.html create mode 100644 algorithms/pruning/images/llama_2_4_semistructued_first_layer.png create mode 100644 algorithms/pruning/magnitude_pruning/index.html create mode 100644 algorithms/pwe_moe/index.html create mode 100644 algorithms/regmean/index.html create mode 100644 algorithms/simple_averaging/index.html create mode 100644 algorithms/simple_ensemble/index.html create mode 100644 algorithms/smile_upscaling/index.html create 
mode 100644 algorithms/specification_ensemble/index.html create mode 100644 algorithms/task_arithmetic/index.html create mode 100644 algorithms/ties_merging/index.html create mode 100644 algorithms/weight_ensembling_moe/index.html create mode 100644 algorithms/weighted_averaging/index.html create mode 100644 algorithms/weighted_ensemble/index.html create mode 100644 assets/_mkdocstrings.css create mode 100644 assets/images/favicon.png create mode 100644 assets/javascripts/bundle.83f73b43.min.js create mode 100644 assets/javascripts/bundle.83f73b43.min.js.map create mode 100644 assets/javascripts/lunr/min/lunr.ar.min.js create mode 100644 assets/javascripts/lunr/min/lunr.da.min.js create mode 100644 assets/javascripts/lunr/min/lunr.de.min.js create mode 100644 assets/javascripts/lunr/min/lunr.du.min.js create mode 100644 assets/javascripts/lunr/min/lunr.el.min.js create mode 100644 assets/javascripts/lunr/min/lunr.es.min.js create mode 100644 assets/javascripts/lunr/min/lunr.fi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.fr.min.js create mode 100644 assets/javascripts/lunr/min/lunr.he.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hu.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hy.min.js create mode 100644 assets/javascripts/lunr/min/lunr.it.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ja.min.js create mode 100644 assets/javascripts/lunr/min/lunr.jp.min.js create mode 100644 assets/javascripts/lunr/min/lunr.kn.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ko.min.js create mode 100644 assets/javascripts/lunr/min/lunr.multi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.nl.min.js create mode 100644 assets/javascripts/lunr/min/lunr.no.min.js create mode 100644 assets/javascripts/lunr/min/lunr.pt.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ro.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ru.min.js 
create mode 100644 assets/javascripts/lunr/min/lunr.sa.min.js create mode 100644 assets/javascripts/lunr/min/lunr.stemmer.support.min.js create mode 100644 assets/javascripts/lunr/min/lunr.sv.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ta.min.js create mode 100644 assets/javascripts/lunr/min/lunr.te.min.js create mode 100644 assets/javascripts/lunr/min/lunr.th.min.js create mode 100644 assets/javascripts/lunr/min/lunr.tr.min.js create mode 100644 assets/javascripts/lunr/min/lunr.vi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.zh.min.js create mode 100644 assets/javascripts/lunr/tinyseg.js create mode 100644 assets/javascripts/lunr/wordcut.js create mode 100644 assets/javascripts/workers/search.6ce7567c.min.js create mode 100644 assets/javascripts/workers/search.6ce7567c.min.js.map create mode 100644 assets/stylesheets/main.0253249f.min.css create mode 100644 assets/stylesheets/main.0253249f.min.css.map create mode 100644 assets/stylesheets/palette.06af60db.min.css create mode 100644 assets/stylesheets/palette.06af60db.min.css.map create mode 100644 cli/fusion_bench/index.html create mode 100644 cli/fusion_bench_webui/index.html create mode 100644 cli/images/fusion_bench_webui.png create mode 100644 cli/images/pycharm_debug_1.png create mode 100644 cli/images/pycharm_debug_2.png create mode 100644 cli/images/pycharm_debug_3.png create mode 100644 cli/images/tab_completion.png create mode 100644 cli/images/vscode_debug.png create mode 100644 config/clip-vit-base-patch32_robustness_corrupted.yaml create mode 100644 config/dataset/image_classification/test/cifar10.yaml create mode 100644 config/dataset/image_classification/test/cifar100.yaml create mode 100644 config/dataset/image_classification/test/dtd.yaml create mode 100644 config/dataset/image_classification/test/eurosat.yaml create mode 100644 config/dataset/image_classification/test/gtsrb.yaml create mode 100644 config/dataset/image_classification/test/mnist.yaml create mode 100644 
config/dataset/image_classification/test/resisc45.yaml create mode 100644 config/dataset/image_classification/test/stanford-cars.yaml create mode 100644 config/dataset/image_classification/test/sun397.yaml create mode 100644 config/dataset/image_classification/test/svhn.yaml create mode 100644 config/dataset/image_classification/test/the_eight_tasks.yaml create mode 100644 config/dataset/image_classification/test/tiny-imagenet.yaml create mode 100644 config/dataset/image_classification/train/cifar10.yaml create mode 100644 config/dataset/image_classification/train/cifar100.yaml create mode 100644 config/dataset/image_classification/train/dtd.yaml create mode 100644 config/dataset/image_classification/train/eurosat.yaml create mode 100644 config/dataset/image_classification/train/gtsrb.yaml create mode 100644 config/dataset/image_classification/train/mnist.yaml create mode 100644 config/dataset/image_classification/train/resisc45.yaml create mode 100644 config/dataset/image_classification/train/stanford-cars.yaml create mode 100644 config/dataset/image_classification/train/sun397.yaml create mode 100644 config/dataset/image_classification/train/svhn.yaml create mode 100644 config/dataset/image_classification/train/the_eight_tasks.yaml create mode 100644 config/dataset/image_classification/train/tiny-imagenet.yaml create mode 100644 config/dataset/image_classification/val/dtd.yaml create mode 100644 config/dataset/image_classification/val/eurosat.yaml create mode 100644 config/dataset/image_classification/val/gtsrb.yaml create mode 100644 config/dataset/image_classification/val/mnist.yaml create mode 100644 config/dataset/image_classification/val/resisc45.yaml create mode 100644 config/dataset/image_classification/val/stanford-cars.yaml create mode 100644 config/dataset/image_classification/val/sun397.yaml create mode 100644 config/dataset/image_classification/val/svhn.yaml create mode 100644 config/dataset/image_classification/val/the_eight_tasks.yaml create mode 
100644 config/dataset/question_answering/search_qa.yaml create mode 100644 config/dataset/question_answering/test/search_qa.yaml create mode 100644 config/dataset/question_answering/train/MetaMathQA.yaml create mode 100644 config/dataset/question_answering/train/search_qa.yaml create mode 100644 config/dataset/question_answering/val/search_qa.yaml create mode 100644 config/dataset/summarization/test/xsum.yaml create mode 100644 config/dataset/summarization/train/xsum.yaml create mode 100644 config/dataset/summarization/val/xsum.yaml create mode 100644 config/dataset/summarization/xsum.yaml create mode 100644 config/dataset/text_generation/test/gsm-hard.yaml create mode 100644 config/dataset/text_generation/test/gsm8k.yaml create mode 100644 config/dataset/text_generation/test/gsm8k_question_label.yaml create mode 100644 config/dataset/text_generation/train/CodeAlpaca-20k.yaml create mode 100644 config/dataset/text_generation/train/gsm8k.yaml create mode 100644 config/dataset/text_generation/train/gsm8k_question_label.yaml create mode 100644 config/fabric/auto.yaml create mode 100644 config/fabric/loggers/csv_logger.yaml create mode 100644 config/fabric/loggers/tensorboard_logger.yaml create mode 100644 config/fabric_model_fusion.yaml create mode 100644 config/hydra/default.yaml create mode 100644 config/hydra/help/fusion_bench_help.yaml create mode 100644 config/hydra/job_logging/rich_logging.yaml create mode 100644 config/index.html create mode 100644 config/llama_magnitude_pruning.yaml create mode 100644 config/llama_model_fusion.yaml create mode 100644 config/llama_weighted_average.yaml create mode 100644 config/method/ada_svd/clip_vision.yaml create mode 100644 config/method/adamerging.yaml create mode 100644 config/method/adamerging/clip.yaml create mode 100644 config/method/adamerging/llama_sft.yaml create mode 100644 config/method/analysis/task_vector_cos_similarity.yaml create mode 100644 config/method/analysis/task_vector_violin_plot.yaml create mode 
100644 config/method/clip_finetune.yaml create mode 100644 config/method/concrete_subspace/clip_concrete_layer_wise_adamerging.yaml create mode 100644 config/method/concrete_subspace/clip_concrete_task_arithmetic.yaml create mode 100644 config/method/concrete_subspace/clip_concrete_task_wise_adamerging.yaml create mode 100644 config/method/dare/simple_average.yaml create mode 100644 config/method/dare/task_arithmetic.yaml create mode 100644 config/method/dawe/dawe_for_clip.yaml create mode 100644 config/method/depth_upscaling.yaml create mode 100644 config/method/dummy.yaml create mode 100644 config/method/ensemble/max_model_predictor.yaml create mode 100644 config/method/ensemble/simple_ensemble.yaml create mode 100644 config/method/ensemble/weighted_ensemble.yaml create mode 100644 config/method/fisher_merging/clip_fisher_merging.yaml create mode 100644 config/method/fisher_merging/fisher_merging.yaml create mode 100644 config/method/fisher_merging/gpt2_fisher_merging.yaml create mode 100644 config/method/linear/expo.yaml create mode 100644 config/method/linear/linear_interpolation.yaml create mode 100644 config/method/linear/llama_expo.yaml create mode 100644 config/method/linear/llama_expo_with_dare.yaml create mode 100644 config/method/linear/simple_average_for_llama.yaml create mode 100644 config/method/linear/task_arithmetic_for_llama.yaml create mode 100644 config/method/linear/weighted_average.yaml create mode 100644 config/method/linear/weighted_average_for_llama.yaml create mode 100644 config/method/mixtral_moe_merging.yaml create mode 100644 config/method/mixtral_moe_upscaling.yaml create mode 100644 config/method/model_recombination.yaml create mode 100644 config/method/pruning/llama_magnitude_pruning.yaml create mode 100644 config/method/pruning/llama_random_pruning.yaml create mode 100644 config/method/pruning/llama_wanda_pruning.yaml create mode 100644 config/method/pruning/magnitude_diff_pruning.yaml create mode 100644 
config/method/pwe_moe_ls_for_clip.yaml create mode 100644 config/method/regmean/clip_regmean.yaml create mode 100644 config/method/regmean/gpt2_regmean.yaml create mode 100644 config/method/regmean/regmean.yaml create mode 100644 config/method/simple_average.yaml create mode 100644 config/method/slerp/slerp.yaml create mode 100644 config/method/smile_upscaling/singular_projection_merging.yaml create mode 100644 config/method/smile_upscaling/smile_mistral_upscaling.yaml create mode 100644 config/method/smile_upscaling/smile_upscaling.yaml create mode 100644 config/method/sparselo_pruning/llama_iterative_sparselo.yaml create mode 100644 config/method/sparselo_pruning/llama_pcp_sparselo.yaml create mode 100644 config/method/sparselo_pruning/llama_sparselo.yaml create mode 100644 config/method/task_arithmetic.yaml create mode 100644 config/method/ties_merging.yaml create mode 100644 config/method/trust_region/clip_task_arithmetic.yaml create mode 100644 config/method/wemoe/sparse_weight_ensembling_moe.yaml create mode 100644 config/method/wemoe/weight_ensembling_moe.yaml create mode 100644 config/model/clip-vit/clip-vit-base-patch16.yaml create mode 100644 config/model/clip-vit/clip-vit-base-patch16_dtd.yaml create mode 100644 config/model/clip-vit/clip-vit-base-patch16_eight_tasks.yaml create mode 100644 config/model/clip-vit/clip-vit-base-patch16_eurosat.yaml create mode 100644 config/model/clip-vit/clip-vit-base-patch16_gtsrb.yaml create mode 100644 config/model/clip-vit/clip-vit-base-patch16_mnist.yaml create mode 100644 config/model/clip-vit/clip-vit-base-patch16_resisc45.yaml create mode 100644 config/model/clip-vit/clip-vit-base-patch16_stanford-cars.yaml create mode 100644 config/model/clip-vit/clip-vit-base-patch16_sun397.yaml create mode 100644 config/model/clip-vit/clip-vit-base-patch16_svhn.yaml create mode 100644 config/model/clip-vit/clip-vit-base-patch32.yaml create mode 100644 config/model/clip-vit/clip-vit-base-patch32_dtd.yaml create mode 100644 
config/model/clip-vit/clip-vit-base-patch32_eight_tasks.yaml create mode 100644 config/model/clip-vit/clip-vit-base-patch32_eurosat.yaml create mode 100644 config/model/clip-vit/clip-vit-base-patch32_gtsrb.yaml create mode 100644 config/model/clip-vit/clip-vit-base-patch32_mnist.yaml create mode 100644 config/model/clip-vit/clip-vit-base-patch32_resisc45.yaml create mode 100644 config/model/clip-vit/clip-vit-base-patch32_stanford-cars.yaml create mode 100644 config/model/clip-vit/clip-vit-base-patch32_sun397.yaml create mode 100644 config/model/clip-vit/clip-vit-base-patch32_svhn.yaml create mode 100644 config/model/clip-vit/clip-vit-large-patch14.yaml create mode 100644 config/model/clip-vit/clip-vit-large-patch14_dtd.yaml create mode 100644 config/model/clip-vit/clip-vit-large-patch14_eight_tasks.yaml create mode 100644 config/model/clip-vit/clip-vit-large-patch14_eurosat.yaml create mode 100644 config/model/clip-vit/clip-vit-large-patch14_gtsrb.yaml create mode 100644 config/model/clip-vit/clip-vit-large-patch14_mnist.yaml create mode 100644 config/model/clip-vit/clip-vit-large-patch14_resisc45.yaml create mode 100644 config/model/clip-vit/clip-vit-large-patch14_stanford-cars.yaml create mode 100644 config/model/clip-vit/clip-vit-large-patch14_sun397.yaml create mode 100644 config/model/clip-vit/clip-vit-large-patch14_svhn.yaml create mode 100644 config/model/clip-vit/generate_vit_model_config.sh create mode 100644 config/model/flan-t5/flan-t5-base.yaml create mode 100644 config/model/flan-t5/flan-t5-base_glue-cola.yaml create mode 100644 config/model/flan-t5/flan-t5-base_glue-cola_lora-16.yaml create mode 100644 config/model/flan-t5/flan-t5-base_glue-mnli.yaml create mode 100644 config/model/flan-t5/flan-t5-base_glue-mnli_lora-16.yaml create mode 100644 config/model/flan-t5/flan-t5-base_glue-mrpc.yaml create mode 100644 config/model/flan-t5/flan-t5-base_glue-mrpc_lora-16.yaml create mode 100644 config/model/flan-t5/flan-t5-base_glue-qnli.yaml create mode 100644 
config/model/flan-t5/flan-t5-base_glue-qnli_lora-16.yaml create mode 100644 config/model/flan-t5/flan-t5-base_glue-qqp.yaml create mode 100644 config/model/flan-t5/flan-t5-base_glue-qqp_lora-16.yaml create mode 100644 config/model/flan-t5/flan-t5-base_glue-rte.yaml create mode 100644 config/model/flan-t5/flan-t5-base_glue-rte_lora-16.yaml create mode 100644 config/model/flan-t5/flan-t5-base_glue-sst2.yaml create mode 100644 config/model/flan-t5/flan-t5-base_glue-sst2_lora-16.yaml create mode 100644 config/model/flan-t5/flan-t5-base_glue-stsb.yaml create mode 100644 config/model/flan-t5/flan-t5-base_glue-stsb_lora-16.yaml create mode 100644 config/model/flan-t5/flan-t5-large.yaml create mode 100644 config/model/flan-t5/flan-t5-large_glue-cola_lora-16.yaml create mode 100644 config/model/flan-t5/flan-t5-large_glue-mnli_lora-16.yaml create mode 100644 config/model/flan-t5/flan-t5-large_glue-mrpc_lora-16.yaml create mode 100644 config/model/flan-t5/flan-t5-large_glue-qnli_lora-16.yaml create mode 100644 config/model/flan-t5/flan-t5-large_glue-qqp_lora-16.yaml create mode 100644 config/model/flan-t5/flan-t5-large_glue-rte_lora-16.yaml create mode 100644 config/model/flan-t5/flan-t5-large_glue-sst2_lora-16.yaml create mode 100644 config/model/flan-t5/flan-t5-large_glue-stsb_lora-16.yaml create mode 100644 config/model/flan-t5/generate_flan-t5.sh create mode 100644 config/modelpool/CLIPVisionModelPool/_template.yaml create mode 100644 config/modelpool/CLIPVisionModelPool/clip-vit-base-patch16_TA8.yaml create mode 100644 config/modelpool/CLIPVisionModelPool/clip-vit-base-patch16_TA8_lora.yaml create mode 100644 config/modelpool/CLIPVisionModelPool/clip-vit-base-patch16_individual.yaml create mode 100644 config/modelpool/CLIPVisionModelPool/clip-vit-base-patch16_individual_lora.yaml create mode 100644 config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_TA8.yaml create mode 100644 config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_TA8_control_task.yaml create 
mode 100644 config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_TA8_model_only.yaml create mode 100644 config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_generalization_exp1.yaml create mode 100644 config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_generalization_exp2.yaml create mode 100644 config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_individual.yaml create mode 100644 config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_mtl.yaml create mode 100644 config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_robustness_clean.yaml create mode 100644 config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_robustness_corrupted.yaml create mode 100644 config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_single_finetuned.yaml create mode 100644 config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_svhn_and_mnist.yaml create mode 100644 config/modelpool/CLIPVisionModelPool/clip-vit-large-patch14_TA8.yaml create mode 100644 config/modelpool/CLIPVisionModelPool/clip-vit-large-patch14_TA8_model_only.yaml create mode 100644 config/modelpool/CLIPVisionModelPool/clip-vit-large-patch14_individual.yaml create mode 100644 config/modelpool/CausalLMPool/llama_for_causallm.yaml create mode 100644 config/modelpool/CausalLMPool/simle_mixtral_exp_v4.yaml create mode 100644 config/modelpool/CausalLMPool/single_llama_model.yaml create mode 100644 config/modelpool/Seq2SeqLMPool/_template.yaml create mode 100644 config/modelpool/Seq2SeqLMPool/flan-t5-base_glue.yaml create mode 100644 config/modelpool/Seq2SeqLMPool/flan-t5-base_glue_lora16.yaml create mode 100644 config/modelpool/Seq2SeqLMPool/flan-t5-base_individual.yaml create mode 100644 config/modelpool/Seq2SeqLMPool/flan-t5-large_glue_lora16.yaml create mode 100644 config/modelpool/automodelpool.yaml create mode 100644 config/modelpool/gpt-2_glue.yaml create mode 100644 config/modelpool/mixtral_moe_merging.yaml create mode 100644 config/modelpool/mixtral_moe_upscaling.yaml create 
mode 100644 config/modelpool/nyuv2_modelpool.yaml create mode 100644 config/modelpool/smile_mistral_exp_v1.yaml create mode 100644 config/modelpool/smile_mistral_exp_v2.yaml create mode 100644 config/modelpool/smile_mistral_exp_v3.yaml create mode 100644 config/modelpool/smile_mistral_exp_v4.yaml create mode 100644 config/nyuv2_config.yaml create mode 100644 config/nyuv2_mtl_train.yaml create mode 100644 config/taskpool/CLIPVisionModelTaskPool/_template.yaml create mode 100644 config/taskpool/CLIPVisionModelTaskPool/clip-vit-classification_TA8.yaml create mode 100644 config/taskpool/CLIPVisionModelTaskPool/clip-vit-classification_TA8_B16.yaml create mode 100644 config/taskpool/CLIPVisionModelTaskPool/clip-vit-classification_TA8_L14.yaml create mode 100644 config/taskpool/CLIPVisionModelTaskPool/clip-vit-classification_TA8_val.yaml create mode 100644 config/taskpool/CLIPVisionModelTaskPool/clip-vit-classification_TA8_with_control_task.yaml create mode 100644 config/taskpool/CLIPVisionModelTaskPool/clip_sparse_wemoe_clip-vit-classification_TA8.yaml create mode 100644 config/taskpool/clip-vit-base-patch32_robustness_clean.yaml create mode 100644 config/taskpool/clip-vit-base-patch32_robustness_corrupted.yaml create mode 100644 config/taskpool/clip-vit-base-patch32_svhn_and_mnist.yaml create mode 100644 config/taskpool/dummy.yaml create mode 100644 config/taskpool/flan-t5_glue_text_generation.yaml create mode 100644 config/taskpool/gpt-2_glue.yaml create mode 100644 config/taskpool/nyuv2_taskpool.yaml create mode 100644 css/material_extra.css create mode 100644 css/mkdocstrings.css create mode 100644 guides/clip_vit/HFCLIPClassifier/index.html create mode 100644 guides/clip_vit/classification_templates/index.html create mode 100644 guides/clip_vit/finetune/index.html create mode 100644 guides/docker/index.html create mode 100644 guides/fusion_bench/index.html create mode 100644 guides/fusion_bench/mixins/lightning_fabric/index.html create mode 100644 
guides/fusion_bench/mixins/simple_profiler/index.html create mode 100644 guides/nlp/question_answering/index.html create mode 100644 images/accelerate model training.png create mode 100644 images/framework_of_model_fusion.png create mode 100644 images/fusion_bench_flow.png create mode 100644 images/learning_paradiagm.png create mode 100644 images/llm.png create mode 100644 images/model_ensemble.png create mode 100644 images/model_merging.png create mode 100644 images/model_mixing.png create mode 100644 images/model_upscaling.png create mode 100644 images/multi-task_core_steps.png create mode 100644 images/multi-task_model_fusion.png create mode 100644 index.html create mode 100644 introduction_to_model_fusion/index.html create mode 100644 javascripts/mathjax.js create mode 100644 modelpool/clip-vit-cos.png create mode 100644 modelpool/clip_vit/index.html create mode 100644 modelpool/flan-t5_generation/index.html create mode 100644 modelpool/gpt2_classification/index.html create mode 100644 modelpool/images/NYUv2-0000003446-63769b25.jpg create mode 100644 modelpool/images/clip-vit-base-patch16_full&lora&l-lora.png create mode 100644 modelpool/images/clip-vit-base-patch16_full&lora&l-lora_average.png create mode 100644 modelpool/images/clip_eight_corruption.png create mode 100644 modelpool/index.html create mode 100644 modelpool/llama_models/index.html create mode 100644 modelpool/nyuv2/index.html create mode 100644 objects.inv create mode 100644 readinglist/images/Chronopoulou2023.png create mode 100644 readinglist/images/adapter_soup.png create mode 100644 readinglist/images/branch_and_merging.png create mode 100644 readinglist/images/branch_and_merging_alg.png create mode 100644 readinglist/images/depth_upscaling.png create mode 100644 readinglist/images/enneng2024survey.png create mode 100644 readinglist/images/forkmerge.png create mode 100644 readinglist/images/fs-merge.png create mode 100644 readinglist/images/fusechat.png create mode 100644 
readinglist/images/fusellm.png create mode 100644 readinglist/images/lora_lego.png create mode 100644 readinglist/images/lorahub.png create mode 100644 readinglist/images/pituning.png create mode 100644 readinglist/images/pwe_moe.png create mode 100644 readinglist/images/scaling_smart.png create mode 100644 readinglist/images/smile_upscaling.png create mode 100644 readinglist/images/sparse-modelsoups.png create mode 100644 readinglist/images/sparse_upcycling.png create mode 100644 readinglist/images/twin_merging.png create mode 100644 readinglist/images/watt.png create mode 100644 readinglist/index.html create mode 100644 readinglist/mode_connectivity/index.html create mode 100644 search/search_index.json create mode 100644 sitemap.xml create mode 100644 sitemap.xml.gz create mode 100644 supported_algorithms/index.html create mode 100644 taskpool/LlamaTestGenerationTaskPool/index.html create mode 100644 taskpool/clip_vit_classification/index.html create mode 100644 taskpool/dummy/index.html create mode 100644 taskpool/flan-t5_generation/index.html create mode 100644 taskpool/gpt2_classification/index.html create mode 100644 taskpool/index.html diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 00000000..e69de29b diff --git a/404.html b/404.html new file mode 100644 index 00000000..b6ca89e3 --- /dev/null +++ b/404.html @@ -0,0 +1,2188 @@ + + + +
+ + + + + + + + + + + + + + +In the complex landscape of multi-task learning, AdaMerging has emerged as a potent method for adaptively merging model parameters to optimize performance across tasks. Unlike traditional fixed-coefficient methods, AdaMerging autonomously learns merging coefficients, offering a more refined and responsive approach1.
+The cornerstone of AdaMerging lies in its adaptive nature, where it learns the coefficients for merging either on a task-wise or layer-wise basis. This adaptability is driven by an entropy minimization strategy applied to unlabeled test samples as a surrogate objective function, which serves to refine the merging coefficients for optimal performance.
+Task-wise AdaMerging is formulated as:
+where \(\lambda_i\) represents the merging coefficient for the \(i\)-th task, and \(\tau_i\) denotes the task vector for the \(i\)-th task.
+On the other hand, Layer-wise AdaMerging is articulated as:
+where the merging coefficient \(\lambda^{l}_{i}\) and task vector \(\tau^{l}_{i}\) are specific to each layer \(l\) of the model.
+By leveraging this adaptive learning approach, AdaMerging significantly enhances the model's ability to generalize across tasks and layers, resulting in a more robust and finely-tuned performance profile. The method’s reliance on entropy minimization ensures that the merging process continually seeks the most informative and stable configuration, adapting to the specific needs of the dataset and tasks at hand.
+Task-wise Coefficients. +The below Figure shows the changes during the iteration process of merging coefficient optimization of each task vector in Task-wise AdaMerging and AdaMerging++, which is shown every ten steps. We consistently observe that the merging coefficients of each task vector are inconsistent. When the number of tasks is relatively large, it is obviously undesirable to grid search the coefficients of each task, but our AdaMerging avoids this manual search process.
+ +Layer-wise Coefficients. +The following Figure shows the merging coefficients learned by Layer-wise AdaMerging and AdaMerging++ on ViT-B/32 respectively. We observed that:
+Merge CLIP-ViT-B/32 models from eight downstream image classification tasks:
+fusion_bench \
+ method=adamerging \
+ method.name=clip_layer_wise_adamerging \
+ method.save_merging_weights=merging_weights.pt \
+ modelpool=clip-vit-base-patch32_TA8 \
+ taskpool=clip-vit-classification_TA8 \
+ fabric.loggers.root_dir=outputs/logs/ViT-B-32 \
+ fabric.loggers.name=clip_layer_wise_adamerging_adam
+
Part of the output:
+Profiler Report
+
+----------------------------------------------------------------------------------------------------------------------------------
+| Action | Mean duration (s) | Num calls | Total time (s) | Percentage % |
+----------------------------------------------------------------------------------------------------------------------------------
+| Total | - | 26001 | 724.65 | 100 % |
+----------------------------------------------------------------------------------------------------------------------------------
+| backward pass | 0.060172 | 8000 | 481.38 | 66.429 |
+| forward pass | 0.016124 | 8000 | 128.99 | 17.801 |
+| data loading | 0.0063443 | 8000 | 50.754 | 7.004 |
+| merging weights | 0.050735 | 1000 | 50.735 | 7.0013 |
+| construct the wrapped model | 7.2558 | 1 | 7.2558 | 1.0013 |
+| optimizer step | 0.00098186 | 1000 | 0.98186 | 0.13549 |
+----------------------------------------------------------------------------------------------------------------------------------
+
task_wise_adamerging
+
+
+¶
TaskWiseAdaMergingAlgorithm
+
+
+¶
+ Bases: ModelFusionAlgorithm
fusion_bench/method/adamerging/task_wise_adamerging.py
38 + 39 + 40 + 41 + 42 + 43 + 44 + 45 + 46 + 47 + 48 + 49 + 50 + 51 + 52 + 53 + 54 + 55 + 56 + 57 + 58 + 59 + 60 + 61 + 62 + 63 + 64 + 65 + 66 + 67 + 68 + 69 + 70 + 71 + 72 + 73 + 74 + 75 + 76 + 77 + 78 + 79 + 80 + 81 + 82 + 83 + 84 + 85 + 86 + 87 + 88 + 89 + 90 + 91 + 92 + 93 + 94 + 95 + 96 + 97 + 98 + 99 +100 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 +138 +139 +140 +141 +142 +143 +144 +145 +146 +147 +148 +149 +150 +151 +152 +153 +154 +155 +156 +157 +158 +159 +160 +161 +162 +163 +164 +165 +166 +167 +168 +169 +170 +171 +172 +173 +174 |
|
compute_logits(module, batch, task)
+
+
+ abstractmethod
+
+
+¶Compute the logits for the given batch and task.
+ + +Parameters:
+module
+¶Module
)
+ –
+ The model module.
+batch
+¶tuple
)
+ –
+ A batch of input data.
+task
+¶str
)
+ –
+ The name of the task.
+Returns:
+Tensor
( Tensor
+) –
+ The classification logits for the batch.
+fusion_bench/method/adamerging/task_wise_adamerging.py
entropy_loss(logits)
+
+¶Compute the entropy loss of a set of logits.
+ + +Parameters:
+logits
+¶Tensor
)
+ –
+ The logits to compute the entropy loss of.
+Returns:
+Tensor
( Tensor
+) –
+ The entropy loss of the logits.
+fusion_bench/method/adamerging/task_wise_adamerging.py
clip_task_wise_adamerging
+
+
+¶
CLIPTaskWiseAdaMergingAlgorithm
+
+
+¶
+ Bases: TaskWiseAdaMergingAlgorithm
A class for task-wise adaptive merging of CLIP models.
+This class extends the TaskWiseAdaMergingAlgorithm to provide specific +functionality for CLIP models, including loading datasets, constructing +zero-shot classification heads, and computing logits.
+ + +Attributes:
+modelpool
+ (CLIPVisionModelPool
)
+ –
+ The model pool containing CLIP models.
+_clip_processor
+ (CLIPProcessor
)
+ –
+ The CLIP processor for preparing inputs.
+zeroshot_weights
+ (dict
)
+ –
+ A dictionary to store zero-shot weights for each task.
+fusion_bench/method/adamerging/clip_task_wise_adamerging.py
51 + 52 + 53 + 54 + 55 + 56 + 57 + 58 + 59 + 60 + 61 + 62 + 63 + 64 + 65 + 66 + 67 + 68 + 69 + 70 + 71 + 72 + 73 + 74 + 75 + 76 + 77 + 78 + 79 + 80 + 81 + 82 + 83 + 84 + 85 + 86 + 87 + 88 + 89 + 90 + 91 + 92 + 93 + 94 + 95 + 96 + 97 + 98 + 99 +100 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 +138 +139 +140 +141 +142 +143 +144 +145 +146 +147 +148 +149 +150 +151 +152 +153 +154 +155 +156 +157 +158 +159 +160 +161 +162 +163 +164 +165 +166 +167 +168 +169 +170 +171 +172 +173 +174 +175 +176 +177 +178 +179 +180 +181 +182 +183 +184 +185 +186 +187 |
|
compute_logits(module, batch, task)
+
+¶Compute the logits for the given batch and task.
+This method computes the image embeddings, normalizes them, and calculates +the cosine similarity with the text embeddings to produce classification logits.
+ + +Parameters:
+module
+¶Module
)
+ –
+ The model module.
+batch
+¶tuple
)
+ –
+ A batch of input data.
+task
+¶str
)
+ –
+ The name of the task.
+Returns:
+Tensor
( Tensor
+) –
+ The classification logits for the batch.
+fusion_bench/method/adamerging/clip_task_wise_adamerging.py
get_shuffled_test_loader_iter(task)
+
+
+ cached
+
+
+¶Get an iterator over the shuffled test DataLoader for the task.
+ + +Parameters:
+task
+¶str
)
+ –
+ The name of the task.
+Returns:
+iterator
–
+ An iterator over the shuffled test DataLoader.
+fusion_bench/method/adamerging/clip_task_wise_adamerging.py
get_test_dataset(task)
+
+
+ cached
+
+
+¶Load the test dataset for the task. +This method is cached, so the dataset is loaded only once.
+ + +Parameters:
+task
+¶str
)
+ –
+ The name of the task.
+Returns:
+CLIPDataset
–
+ The test dataset for the task.
+fusion_bench/method/adamerging/clip_task_wise_adamerging.py
on_test_time_adaptation_start()
+
+¶Prepare for test-time adaptation.
+This method loads the CLIP processor and constructs the zero-shot +classification head for each task.
+ +fusion_bench/method/adamerging/clip_task_wise_adamerging.py
InfiniteDataLoader
+
+
+¶A wrapper class for DataLoader to create an infinite data loader. +This is useful in case we are only interested in the number of steps and not the number of epochs.
+This class wraps a DataLoader and provides an iterator that resets +when the end of the dataset is reached, creating an infinite loop.
+ + +Attributes:
+data_loader
+ (DataLoader
)
+ –
+ The DataLoader to wrap.
+data_iter
+ (iterator
)
+ –
+ An iterator over the DataLoader.
+fusion_bench/method/adamerging/clip_task_wise_adamerging.py
layer_wise_adamerging
+
+
+¶
LayerWiseAdaMergingAlgorithm
+
+
+¶
+ Bases: ModelFusionAlgorithm
, LightningFabricMixin
, SimpleProfilerMixin
Implements the Layer-Wise AdaMerging Algorithm.
+This class merges the layers of a pretrained model with those of several fine-tuned models. +The merging is controlled by layer-wise weights, which can be initialized based on a provided configuration or loaded from a file.
+ + + + + + +fusion_bench/method/adamerging/layer_wise_adamerging.py
28 + 29 + 30 + 31 + 32 + 33 + 34 + 35 + 36 + 37 + 38 + 39 + 40 + 41 + 42 + 43 + 44 + 45 + 46 + 47 + 48 + 49 + 50 + 51 + 52 + 53 + 54 + 55 + 56 + 57 + 58 + 59 + 60 + 61 + 62 + 63 + 64 + 65 + 66 + 67 + 68 + 69 + 70 + 71 + 72 + 73 + 74 + 75 + 76 + 77 + 78 + 79 + 80 + 81 + 82 + 83 + 84 + 85 + 86 + 87 + 88 + 89 + 90 + 91 + 92 + 93 + 94 + 95 + 96 + 97 + 98 + 99 +100 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 +138 +139 +140 +141 +142 +143 +144 +145 +146 +147 +148 +149 +150 +151 +152 +153 +154 +155 +156 +157 +158 +159 +160 +161 +162 +163 +164 +165 +166 +167 +168 +169 +170 +171 +172 +173 +174 +175 +176 +177 +178 +179 +180 +181 +182 +183 +184 +185 +186 +187 +188 +189 +190 +191 +192 +193 +194 +195 +196 +197 +198 +199 +200 +201 +202 +203 +204 +205 +206 +207 +208 +209 +210 +211 +212 +213 +214 +215 +216 +217 +218 +219 +220 +221 +222 +223 +224 +225 +226 +227 +228 +229 +230 +231 +232 +233 +234 +235 +236 +237 +238 +239 +240 +241 +242 +243 |
|
__init__(algorithm_config)
+
+¶Initialize the LayerWiseAdaMergingAlgorithm with the given configuration.
+ + +Parameters:
+algorithm_config
+¶DictConfig
)
+ –
+ The configuration for the algorithm.
+fusion_bench/method/adamerging/layer_wise_adamerging.py
compute_logits(module, images, task)
+
+
+ abstractmethod
+
+
+¶Compute the logits for the given images and task.
+ + +Parameters:
+module
+¶The model module.
+images
+¶Tensor
)
+ –
+ The input images.
+task
+¶str
)
+ –
+ The name of the task.
+Returns:
+Tensor
( Tensor
+) –
+ The computed logits.
+fusion_bench/method/adamerging/layer_wise_adamerging.py
construct_layer_wise_merged_model(modelpool)
+
+¶Constructs a wrapped layer-wise merged model from model pool.
+This method creates a new wrapped model by merging the layers of a pretrained model with those of several fine-tuned models.
+The merging is controlled by layer-wise weights, which is a torch.Tensor
of the shape (num_models, num_layers)
.
+The merging weights can be initialized based on a provided configuration or loaded from a file.
Parameters:
+modelpool
+¶ModelPool
)
+ –
+ An object containing the pretrained model and fine-tuned models to be merged.
+Returns:
+LayerWiseMergedModel
–
+ An instance of the merged model with layer-wise weights applied.
+fusion_bench/method/adamerging/layer_wise_adamerging.py
get_shuffled_test_loader_iter(task)
+
+
+ abstractmethod
+
+
+¶Loader of test dataset for test-time adaptation. labels are not needed.
+ + +Parameters:
+task
+¶str
)
+ –
+ The name of the task.
+Returns:
+DataLoader
( DataLoader
+) –
+ The data loader for the test dataset.
+fusion_bench/method/adamerging/layer_wise_adamerging.py
on_test_time_adaptation_start()
+
+¶Something to do before the test-time adaptation starts. Such as setting up the task-specific heads.
+ + +
run(modelpool)
+
+¶Run the Layer-Wise AdaMerging Algorithm.
+This method constructs the wrapped model and performs test-time adaptation if necessary.
+ + +Parameters:
+modelpool
+¶ModelPool
)
+ –
+ The model pool containing the pretrained and fine-tuned models.
+Returns:
+LayerWiseMergedModel
–
+ The merged model after test-time adaptation.
+fusion_bench/method/adamerging/layer_wise_adamerging.py
save_merging_weights(file_path, merging_weights)
+
+¶Save the merging weights to a file.
+ + +Parameters:
+file_path
+¶str
)
+ –
+ The path to save the merging weights.
+merging_weights
+¶Tensor
)
+ –
+ The merging weights to save.
+fusion_bench/method/adamerging/layer_wise_adamerging.py
test_time_adaptation(module)
+
+¶Perform test-time adaptation on the merged model.
+This method adapts the merging weights during test-time to improve performance.
+ + +Parameters:
+module
+¶LayerWiseMergedModel
)
+ –
+ The merged model.
+Returns:
+LayerWiseMergedModel
–
+ The adapted merged model.
+fusion_bench/method/adamerging/layer_wise_adamerging.py
clip_layer_wise_adamerging
+
+
+¶Example Usage:
+fusion_bench method=adamerging method.name=clip_layer_wise_adamerging method.save_merging_weights=merging_weights.pt modelpool=clip-vit-base-patch32_TA8 taskpool=clip-vit-classification_TA8 fabric.loggers.root_dir=outputs/logs/ViT-B-32 fabric.loggers.name=clip_layer_wise_adamerging_adam
+
CLIPLayerWiseAdaMergingAlgorithm
+
+
+¶
+ Bases: CLIPClassificationMixin
, LayerWiseAdaMergingAlgorithm
fusion_bench/method/adamerging/clip_layer_wise_adamerging.py
on_test_time_adaptation_start()
+
+¶Here we load the CLIP processor and construct the zero-shot classification head for each task.
+ + +(ICLR 2024) AdaMerging: Adaptive Model Merging for Multi-Task Learning. https://openreview.net/pdf?id=nZP6NgD3QY ↩
+Jason Yosinski, Jeff Clune, Yoshua Bengio, and Hod Lipson. How transferable are features in deep neural networks? Advances in neural information processing systems, 27, 2014. ↩
+A. Tang, L. Shen, Y. Luo, N. Yin, L. Zhang, and D. Tao, “Merging Multi-Task Models via Weight-Ensembling Mixture of Experts,” ICML 2024. doi: 10.48550/arXiv.2402.00433. ↩
+Consider a discrete categorical distribution parameterized by logits \(\mathbf{x} = (x_1, \dots, x_n) \in \mathbb{R}^{n}\), where \(x_i\) is the logit of the \(i\)-th category. The Gumbel-Max trick 123 states a reparameterization trick to sample from the categorical distribution by sampling from the standard Gumbel distribution \(\text{Gumbel}(\mu=0,\beta=1)\) and taking the argmax of the sum of the Gumbel random variables and the logits.
This trick proceeds as follows: +sample \(n\) Gumbel random variables \(g_1, \dots, g_n\) independently from the standard Gumbel distribution \(\text{Gumbel}(\mu=0,\beta=1)\) (We can draw a random sample \(u\) from a uniform distribution on the interval \((0,1)\) and then transform it into a Gumbel-distributed variable \(g\) using the formula \(g=-\log(-\log u)\).), find the index \(i\) that maximizes \(x_i + g_i\), then we have
+If we represent the categorical distribution as a one-hot vector \(\mathbf{y} = (y_1, \dots, y_n) \in \{0,1\}^n\), where \(y_i=1\) indicates that the \(i\)-th category is sampled and for all \(j\neq i\), \(y_j=0\), then we have
Since the derivative of the \({\arg\max}\) function is not defined, we cannot backpropagate the gradients through it. +To address this issue, (Maddison et al., 2017)4 proposed to use a continuous relaxation of the discrete categorical distribution. +A CONCRETE random variable (CONtinuous relaxation of disCRETE random variable) relaxes the condition that the one-hot vector \(\mathbf{y}\) must be located at the vertices of the \((n-1)\)-dimensional simplex \(\Delta^{n-1}\), and instead, it allows \(\mathbf{y}\) to be located anywhere inside the simplex \(\Delta^{n-1}\), i.e. \(\{ y\in \mathbb{R}^n | y_i \in [0,1], \sum_{i=1}^n y_i =1 \}\).
+To sample a Concrete random variable \(\mathbf{y}\) from a distribution that is parameterized by a temperature hyperparameter \(\lambda > 0\) and a vector of logits \(\mathbf{x} = (x_1, \dots, x_n) \in \mathbb{R}^{n}\), we have
+where \(\mathbf{g} = (g_1, \dots, g_n)\) is a vector of Gumbel random variables that are independently sampled from the standard Gumbel distribution \(\text{Gumbel}(\mu=0,\beta=1)\).
+A subspace mask \(\mathbf{m}\) is a binary vector that identifies a subspace of the parameter space. +For a neural network parametrized by \(\theta\), we can use a subspace mask \(\mathbf{m}\) to identify a subspace of the parameter space \(\mathbf{\theta}\) by setting the parameters that are not in the subspace to zero, i.e. \(\mathbf{\theta} \circ \mathbf{m}\), where \(\circ\) denotes the element-wise product. +We can draw a random sample \(\mathbf{m}\) from a Bernoulli distribution \(\text{Bernoulli}(\mathbf{p}=\sigma(\mathbf{x}))\), where \(\mathbf{p}\) is the probability (\(\mathbf{x}\) denotes the logits) of each parameter being activated. However, the discrete Bernoulli distribution is not differentiable, so we cannot backpropagate the gradients through it to optimize the parameters \(\mathbf{p}\) or \(\mathbf{x}\).
+To address this issue, we introduce the Concrete mask which can be drawn from a continuous relaxation of Bernoulli distribution. Before we introduce the Concrete mask, we first review the Gumbel-Max trick in the two-class case.
+Let \(p_0\) and \(p_1\) denote the unnormalized probabilities of a Bernoulli random variable being 0 and 1, respectively, with \(x\) representing the logits. Then, the probability of the event \(m=1\) is given by
+where \(\sigma\) denotes the sigmoid function. +In the context of the Gumbel-Max trick, the occurrence of the event \(m=1\) is determined by the condition \(g_1 + \log p_1 > g_0 + \log p_0\), where \(g_0\) and \(g_1\) are two independent standard Gumbel random variables. +Thus we have
Because the difference of two standard Gumbel random variables is a Logistic random variable, we can replace \(g_1 - g_0\) by \(\log u - \log(1-u)\) where \(u\) is a random variable sampled from a uniform distribution on the interval \((0,1)\). +Substituting this into the probability expression above and expressing the probability in terms of the logits \(x\) to simplify the expression, we have
+The binary Concrete distribution offers a continuous relaxation of the discrete Bernoulli random variables, which is beneficial for gradient-based optimization as it allows for the backpropagation of gradients even through the sampling process. +Instead of making a hard decision as the above equation, we use a temperature parameter \(\lambda\) to control the steepness of the sigmoid function, and hence control how close our 'soft' decisions are to being 'hard' decisions. The continuous version of the Bernoulli random variable is then given by
+As the temperature \(\lambda\) approaches zero, the sigmoid function becomes a step function, and the Concrete random variable \(\hat{m}\) becomes a Bernoulli random variable, as shown in the following Figure. In the limit when \(\lambda \to 0\), this results in sampling \(m=1\) if \(\log \frac{\sigma(x)}{1 - \sigma(x)} > -\log \frac{u}{1 - u}\), consistent with the original Gumbel-Max trick. +The binary Concrete distribution thus provides a differentiable approximation to Bernoulli random variables. +We can further binarize the Concrete mask by setting the entries with values greater than 0.5 to 1 and the rest to 0.
+ +Merging CLIP models on eight image classification tasks, using the concrete task arithmetic algorithm
+# tensorboard logs and learned checkpoints of the shared mask can be found at https://huggingface.co/tanganke/clip-vit-base-patch32_concrete-task-arithmetic_tblogs
+fusion_bench \
+ fabric.loggers.name=ViT-B-32/concrete_task_arithmetic \
+ method=concrete_subspace/clip_concrete_task_arithmetic \
+ modelpool=CLIPVisionModelPool/clip-vit-base-patch32_TA8 \
+ taskpool=CLIPVisionModelTaskPool/clip-vit-classification_TA8
+
results
+{
+ "svhn": {
+ "accuracy": 0.903003990650177,
+ "loss": 0.37700024247169495
+ },
+ "stanford_cars": {
+ "accuracy": 0.6326327323913574,
+ "loss": 1.2553859949111938
+ },
+ "resisc45": {
+ "accuracy": 0.7558730244636536,
+ "loss": 1.017554759979248
+ },
+ "eurosat": {
+ "accuracy": 0.9407407641410828,
+ "loss": 0.20871955156326294
+ },
+ "gtsrb": {
+ "accuracy": 0.8285035490989685,
+ "loss": 0.5861473679542542
+ },
+ "mnist": {
+ "accuracy": 0.9800000190734863,
+ "loss": 0.08148527890443802
+ },
+ "dtd": {
+ "accuracy": 0.5249999761581421,
+ "loss": 2.2731478214263916
+ },
+ "sun397": {
+ "accuracy": 0.6421158909797668,
+ "loss": 1.4108904600143433
+ }
+}
+
Concrete AdaMerging (Layer-wise)
+# tensorboard logs and learned checkpoints of the shared mask can be found at https://huggingface.co/tanganke/clip-vit-base-patch32_concrete-layer-wise_adamerging_tblogs
+fusion_bench \
+ fabric.loggers.name=ViT-B-32/clip_concrete_layer_wise_adamerging \
+ method=concrete_subspace/clip_concrete_layer_wise_adamerging \
+ modelpool=CLIPVisionModelPool/clip-vit-base-patch32_TA8 \
+ taskpool=CLIPVisionModelTaskPool/clip-vit-classification_TA8
+
+ X. Yi, S. Zheng, L. Wang, X. Wang, and L. He, “A safety realignment framework via subspace-oriented model fusion for large language models.” arXiv, May 14, 2024. doi: 10.48550/arXiv.2405.09055.
+++The paper introduces a safety realignment framework for large language models via subspace-oriented model fusion (SOMF, the authors learn a shared mask on the weight space of large language model), which combines safeguard capabilities of initially aligned models with fine-tuned models to ensure safety without compromising performance on downstream tasks.
+
E. J. Gumbel. Statistical Theory of Extreme Values and Some Practical Applications. A Series of Lectures. Technical +Report PB175818, National Bureau of Standards, Washington, D. C. Applied Mathematics Div., 1954. URL +https://ntrl.ntis.gov/NTRL/dashboard/searchResults/titleDetail/PB175818.xhtml. ↩
+R. Duncan Luce. Individual Choice Behavior. Individual Choice Behavior. John Wiley, Oxford, England, 1959 ↩
+Chris J Maddison, Daniel Tarlow, and Tom Minka. A* sampling. Advances in neural information processing systems, +27, 2014. ↩
+Chris J. Maddison, Andriy Mnih, and Yee Whye Teh. The Concrete Distribution: A Continuous Relaxation of Discrete +Random Variables, March 2017. URL http://arxiv.org/abs/1611.00712. ↩
+The DepthUpscalingAlgorithm
is used to upscale the depth of PyTorch models. Here's a basic guide on how to use it:
First, import the necessary modules:
+from omegaconf import DictConfig
+from torch import nn
+from fusion_bench.method.depth_upscaling import DepthUpscalingAlgorithm
+from fusion_bench.modelpool import to_modelpool
+
Create an instance of DepthUpscalingAlgorithm
by passing a configuration dictionary.
+This dictionary should contain the name of the method ("depth_upscaling") and a list of layer indices that determine the upscaling pattern.
method_config = {"name": "depth_upscaling", "layer_indices": [0, 1, 1, 0]}
+algorithm = DepthUpscalingAlgorithm(DictConfig(method_config))
+
Assume we have a list of PyTorch models (nn.ModuleList
instances) that we want to upscale. Here, we're creating a list of linear models as an example:
Then, we can pass the model to the run
method of our algorithm:
The run
method will return an upscaled model. The type of the returned model will be the same as the input models (in this case, nn.ModuleList
), and its length will be determined by the layer indices specified in the method configuration.
Here we provide an example of how to use the DepthUpscalingAlgorithm
to upscale the depth of a Mistral model 1.
from omegaconf import DictConfig
+from torch import nn
+from transformers import AutoModelForCausalLM, MistralConfig, MistralForCausalLM
+from fusion_bench.method.depth_upscaling import DepthUpscalingAlgorithm
+
+# create a Mistral model
+# here we randomly initialize the model for demonstration purposes
+# in practice, you would load a pretrained model
+model_config = MistralConfig(
+ # https://huggingface.co/mistralai/Mistral-7B-v0.1/resolve/main/config.json
+ **{
+ "architectures": ["MistralForCausalLM"],
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 14336,
+ "max_position_embeddings": 32768,
+ "model_type": "mistral",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-05,
+ "rope_theta": 10000.0,
+ "sliding_window": 4096,
+ "tie_word_embeddings": False,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.34.0.dev0",
+ "use_cache": True,
+ "vocab_size": 32000,
+ }
+)
+print('creating model')
+model: MistralForCausalLM = AutoModelForCausalLM.from_config(model_config)
+
+method_config = {
+ "name": "depth_upscaling",
+ "layer_indices": ["range(0,24)", "range(8,32)"],
+}
+algorithm = DepthUpscalingAlgorithm(DictConfig(method_config))
+print('upscaling model')
+upscaled_model = algorithm.run(model.model.layers)
+
+# substitute the model with the upscaled model
+model.model.layers = upscaled_model
+
The DepthUpscalingAlgorithm
is integrated into the fusion_bench
package. You can use it by specifying "depth_upscaling"
as the method name in the command line or configuration file.
name: depth_upscaling
+# this should be a list of integers or string, indicating the sequence of layers. If the entry is an integer, it will use the n-th layer of the model. If the entry is a string, it will use the layers specified by the string. The string should be a valid python expression that evaluates to a list of integers.
+# for example, ["range(0,12)", "range(6,12)"] will use the first 12 layers and the last 6 layers of the model to construct the new model
+# [0, 2, 4, "range(6,12)"] will use the 1st, 3rd, 5th, and the 7th to 12th layers of the model to construct the new model
+layer_indices: null
+
You can then run the fusion_bench
command with the specified configuration file:
DepthUpscalingAlgorithm
+
+
+¶
+ Bases: BaseAlgorithm
Implements the Depth Upscaling Algorithm.
+This class extends the BaseModelFusionAlgorithm
to handle depth upscaling of models.
+It supports upscaling the depth of a model by duplicating specified layers.
Parameters:
+layer_indices
+¶list
)
+ –
+ List of layer indices to duplicate.
+**kwargs
+¶Additional keyword arguments.
+fusion_bench/method/depth_upscaling/depth_upscaling.py
run(modelpool)
+
+¶Executes the depth upscaling algorithm on a given model pool.
+This method checks the type of the model pool, ensures that it contains only one model, and verifies that the model is an instance of nn.ModuleList
.
Parameters:
+modelpool
+¶ModuleList | ModelPool
)
+ –
+ The pool of models to upscale. Must contain only one model.
+Returns:
+ModuleList
+ –
+ nn.ModuleList: The upscaled model.
+Raises:
+AssertionError
+ –
+ If the model pool contains more than one model or if the model is not an instance of nn.ModuleList
.
ValueError
+ –
+ If an invalid layer specification is provided in the configuration.
+fusion_bench/method/depth_upscaling/depth_upscaling.py
The Dummy Algorithm is a simple algorithm that does not perform any fusion operation. Instead, it returns a pretrained model if one is available in the model pool. If no pretrained model is available, it returns the first model in the model pool. +This algorithm is useful for testing and debugging purposes, as it allows you to quickly check if the model pool is set up correctly and the fusion process is working as expected.
+To use the Dummy Algorithm, you need to specify "dummy"
as the algorithm name.
The implementation of the Dummy Algorithm is straightforward. Here is the main method of the DummyAlgorithm
class:
DummyAlgorithm
+
+
+¶
+ Bases: BaseAlgorithm
fusion_bench/method/dummy.py
The Fisher merging algorithm 1 is a per-parameter weighted averaging method that assigns weights to the models based on the Fisher information matrix of the models on some labeled data. +The Fisher information matrix \(F_\theta\) of a model with parameters \(\theta\) can be expressed as:
+where \(p(x)\) is the data distribution, \(p(y|x, \theta)\) is the model's output distribution, for example, the softmax output of a classification model, and \(\nabla_\theta\) is the gradient with respect to the model's parameters \(\theta\). +The Fisher information matrix can be used to estimate the importance of each parameter in the model and thus assign weights to the models based on their Fisher information. +In addition, the Fisher information matrix can be used to estimate the similarity between tasks, which can be useful in auxiliary-task learning and multi-task learning scenarios 2.
+As the full Fisher information matrix is often computationally expensive to compute and memory-intensive to store, we approximate using the diagonal Fisher information matrix, which is the diagonal of the full Fisher information matrix. +The diagonal Fisher information matrix can be computed as:
+Assuming we have \(n\) models with parameters \(\theta_i\) and diagonal Fisher information matrices \(\hat{F}_{\theta_i}\), the Fisher merging algorithm computes the merged model's parameters \(\theta\) as follows:
+where \(\theta_i\) are the parameters of the individual models, \(\hat{F}_{\theta_i}\) are the diagonal Fisher information matrices of the individual models, and \(j\) indexes the parameters of the models. +The Fisher merging algorithm can be considered a per-weight weighted averaging method, where the weights are determined by the Fisher information of each parameter in the models.
+Example of merging eight CLIP-ViT-B/32 models using Fisher merging:
+fusion_bench method=clip_fisher_merging \
+ modelpool=clip-vit-base-patch32_TA8 \
+ taskpool=clip-vit-classification_TA8
+
Merge eight CLIP-ViT-L/14 models using Fisher merging:
+fusion_bench \
+ method=clip_fisher_merging \
+ method.batch_size=8 method.num_workers=4 \
+ modelpool=clip-vit-large-patch14_TA8 \
+ taskpool=clip-vit-classification_TA8 \
+ taskpool.clip_model=openai/clip-vit-large-patch14
+
Merge GPT-2 models for text classification tasks:
+fusion_bench \
+ method=gpt2_fisher_merging \
+ method.num_fisher_examples=512 method.batch_size=8 \
+ modelpool=gpt-2_glue \
+ taskpool=gpt-2_glue
+
FisherMergingAlgorithm
+
+
+¶
+ Bases: BaseAlgorithm
Implements the Fisher Merging Algorithm.
+This class extends the BaseModelFusionAlgorithm to handle merging of models using Fisher weights. +It supports excluding certain parameters, normalizing Fisher weights, and setting a minimal value for Fisher weights.
+ + +Methods:
+run(modelpool: BaseModelPool) -> nn.Module
+ –
+ Executes the Fisher merging process on the model pool and returns the merged model.
+fusion_bench/method/fisher_merging/fisher_merging.py
355 +356 +357 +358 +359 +360 +361 +362 +363 +364 +365 +366 +367 +368 +369 +370 +371 +372 +373 +374 +375 +376 +377 +378 +379 +380 +381 +382 +383 +384 +385 +386 +387 +388 +389 +390 +391 +392 +393 +394 +395 +396 +397 +398 +399 +400 +401 +402 +403 +404 +405 +406 +407 +408 +409 +410 +411 +412 +413 +414 +415 +416 +417 +418 +419 +420 +421 +422 +423 +424 +425 +426 +427 +428 +429 +430 +431 +432 +433 +434 +435 +436 +437 +438 +439 +440 +441 +442 +443 +444 +445 +446 +447 +448 +449 +450 +451 +452 +453 +454 +455 +456 +457 +458 +459 +460 +461 +462 +463 +464 +465 +466 +467 +468 +469 +470 +471 +472 +473 +474 +475 +476 +477 +478 +479 +480 +481 +482 +483 +484 |
|
get_fisher_weights(model_name, model, train_dataset, param_names_to_merge)
+
+¶Compute the Fisher weights for the given model and training dataset.
+ + +Parameters:
+model_name
+¶str
)
+ –
+ The name of the model.
+model
+¶Module
)
+ –
+ The model module.
+train_dataset
+¶The training dataset.
+param_names_to_merge
+¶List[str]
)
+ –
+ List of parameter names to merge.
+Returns:
+Dict[str, Tensor]
+ –
+ Dict[str, Tensor]: The computed Fisher weights for each parameter.
+fusion_bench/method/fisher_merging/fisher_merging.py
on_fisher_merging_start()
+
+¶Setup the zero-shot classification head before starting the Fisher merging process.
+ +fusion_bench/method/fisher_merging/fisher_merging.py
run(modelpool)
+
+¶Run the Fisher Merging Algorithm.
+This method constructs the wrapped model and performs test-time adaptation if necessary.
+ + +Parameters:
+modelpool
+¶BaseModelPool
)
+ –
+ The model pool containing the pretrained and fine-tuned models.
+Returns:
+Module
+ –
+ nn.Module: The merged model after test-time adaptation.
+fusion_bench/method/fisher_merging/fisher_merging.py
388 +389 +390 +391 +392 +393 +394 +395 +396 +397 +398 +399 +400 +401 +402 +403 +404 +405 +406 +407 +408 +409 +410 +411 +412 +413 +414 +415 +416 +417 +418 +419 +420 +421 +422 +423 +424 +425 +426 +427 +428 +429 +430 +431 +432 +433 +434 +435 +436 +437 +438 +439 +440 +441 +442 +443 +444 +445 +446 +447 +448 +449 +450 +451 +452 +453 +454 |
|
LUT z0Z062RR01e^mcX@aePpx=6alsdZ<-=RExb!a`4sSj^BJ1Vil%RaTtMowr9ri=Ae|g z&)qYdbHsrUNipdZuh?Zq4-Gdap6#KxRy!d!R*L{7ual{4-rV?B#bPeldpNRzg zAvXQvsr-^^#?G32@PcRb{&`(-)ef-8(-eJ|U%OKgTt5pKVefXA2JI_AH@JzRzZf6v zL!Sl{EKkMndsbfZy@6O|sT1R0w!6J+UFmb5;KJt SsUkOJ3v?Wg%Ii |)4p$dU2pdc9+1lk$6Q5+E#rE?>0xAf<;;NU6YdnFbY3+-Qi#l?Q+-Q_=%L$ }^v*qij(1T%zrkl41Q=DBYi&DO8?oX#ZezW|+^_V2`y88;VyNrg#4Q217& z(Hj5qkJ@Fo`=Jis^Q*W;d6b5iwTGUNq}C$m(gs^UH{fwQFP)L=)AS8s3|3K)eRV!B zo+~pUwckL2>-4r>#L_jeI%%-F5QQ5p7Hs>}MYNZ2%MT0(!SnI=D0IW;ApE6o9LoWV za}e|e9~e~keWibryH5_$As8XLM02hkFC*pE^Z9QY*BzJpX|Xl2o4mbq))@WyWx{^5 zcM)j_PbuFG{WjGa`tv4kjK#HeB B_MFv>N4kB2{ZnJtsBUlGa%rL;J`iHgn<5fBXRja);g*NAtvN>fK3`ISb6<*Z0p zxLADJq>^1i{z-@3+;}^b8@?q;0+DQz&c Hc3t@Vi*F~FkJNRXEDw&=k>^XkzxDNxq0ptp_0APh7U+(s zSVUWrCrr@mS)Y@esNnL*qcwcdWzOcahbfQq0F~ZB*^r9*I#S}v_8sJh=LIbuMs(IM zSag~b?#MXPjmP`6D#?E+{PPx>Rb;O4S@xyF%zV{3D zpGl)q4bgl)I%}XH&7ZURRWVQ^9^$AbZSU{YNZ6K~Ow7`>`Nn6(xpuIrPnr`^=Y_xC zUo5dZtdA0jua~oBLm??A=MwxfQ99-9yYX>7IGG8O%&7+j#AW-TIEvJb$m%(RfM--G zyb^@L$$y;46vL9q*+XXx6+nq-7r1^EX3X;THn>@K{+WMYY`ziGm$)nTxL0r9PMl5l zjyI0`Yz0@?^k#$1oNKGp0@#L;ED_Dy?Jh=c_JH~!YI}_isP {MOY|5^V> zs3j~FL=WG^Cu021Sf!f5BC1viOM44*ZLy!>^w6+z7EHC=)FH|v6t?RLWz4}Ec>dr# zPNIV E`MI%q$P^Vv(2U7Mrn6R5U?mLF>GA!|)LkLD&^U6ZFA!8a( z!>ID+TYI|CQG%H_FqHMi;V8TE5xo7G%>v<3<1Sas-md+27+~Yf9GFI0G8jZBC#p;^ z&U}R>Ti-5)e4CxQ#B-NZ5)mA_lNr$J%y$jkvhrt%5LU6KHxx4#8j|K}A4&&fInl+N zw`Uv?` krM?r8}?}{_m zW7I-uf|Sd>94H${jL~k`u6xCOi1K0RU^6$s&7h?YS06Ki0S9qeFeABqbg1*2?wTQ% z>j_2qoz#3-g&$a1-{m)jjNE8Ep{ECz x%bUHfF)@=B{OfqUpeQH!7fAMxr5UKWd3HO|z^FNMr3snYBW=0|QU@CZCwk(>)1d z@8`GlB0l;mC)$*mU~)WKOLD`(MOw}=e7@XsI6ZwfStUxR{KA|RUua(EKpP8Gy8Nh$ zUzBH;SIair6Kgkjm;U-iXh-$__HZJ%@}Iof#X Q5h%?)oJ&EnKx2W!6B!_i{AXwVg+c^cC*J>d%KZNHfhnY3K9T{P z_{l_$5i);qiTUQ(y7uSRmP4N> {#1WO}b@A_*u=1e~P4B#zB6dW8Vku^&Tfb zlLG7VCse?3+j0;Zl*YC@&0lr6Tu>Gl#%?BxG6#(XgGTC1d n0 zHy0UDG8LE7T+6%*$5| Ds=>spen>_zGGq;A?yznw%d(TE&8}{D&hLEO zAef3|c@Erx1OF^D5~Dq_JM{;Iew+3lm -pFyHZtaV`#U$^V{MdG>N_(-qnyIejwSyyMiAtemSO(9NTO9sD1#^)g3 zfF`S+YB2tMHnSH!EnoU 
mKc3;yozS)WkVL>O=BGm)$KiBtkRUM~P14Q$u`MKDzIfw5xg_~? za#W4ZG(^N3Igt@!%oy*QNMW&~&R3^Ri%vTwP!M_Agl9Ooq$LMV|8w(p_EfSr2M<>_ zOT^OA&MnsHB=l`$dib7?+X~r29YT4&oA{6v*BnRL{Q*A$#X2lqfz45~fhnZyzUDHF zYcr~>dGeVT?MB;9xUPJCF?r|M5zJ48Da72m1n75A>J{HnF%d%@C_Z=rLch6GGSh4+ zhAm;}R?f)%<2fxqG0i@-zhVF2*9&e)z_-e|7KFw?2_dYHx5`L&Y1$3oz-ZfbssR!i zmm7Ahab^h{MYTR`LslDe_WMH1E^c_0iTR*k(@{~h){ws qHl#$s1~|^G6T#n0 zMg1_U39ydO@yII~Fw$ut%^V2_C~txCX+s^!$_GUmMz9F#?vx=(*+~p5wRvn@pLXlj zqs>c$pFEdC|I)cMq4j +BP|}N z2kuR#SpH@-7^g!#e;XL_<8IG58wxqm|2X?*TCk~2SmnV|JGE&|QctQxzX#k;hp?i| ziLiru^YW < z`z^aP1Vntx4rU-Z68~anG2f44c)PQOp3J_d42X;|xFHOymI|lKZ@RJ4JsvnyV!nvO zf6be8P59G2aW_Hr_k^QS&~T>aNKu#=qkCrODFnrqjo+mIY6vsR07-}GDxrOU85ju9 z*!c$=b?gfZY7?Pd)x5j1aG?WrYE6pUxk)b$&4(r4uPc*=ER94}dHJzwPU8@M1xfi? z;2rbWK;hJWu6@h1CJVjYpsA0EQ7L_gp#g_lYM7wG(#UGE=2m}FfcRLd6`U&0-fsYa zzPc}e!k3*NdMNb(SXiDmT&p5cbA2sn8r&;L0MfEn-^{Q08Q=oLS;2K_mDSbNz6W_K zGvn>I%P@fJdR|Aa>9>g6R_ugTF5gB=O)MMRgghQzBjB?8jzAmE;M=bsG9B~V;?O)# z15}MP(NG^{pH$Ru)bjAIEDFy#r#>(d?na6ETvK!JThMzTFrC0H2%E#)Eu}^`!XVyS zI$IUyH*y%T1rJqE ;zJ)J4|Vbph%a?#jt<`n7f^)(CQ=x=%AB<9t{x@kleie^LIAf@+b~b45DRvT zQ&mmwEF<$-v6;qp9(}#Ud8fSi2?cz*+1|-%CRLy2TU66}Q8AiRg!)@9Vcb8i97&OH z2bsR@_U!L;#D~%CjoJ%hn^=IQ=*Um_!=)g(=y%1WTin_qBZFrclJvmb&M&1=DLMK} zxFgguH|=fO0~Hp`RXG$KCxKtbO~q(;tD9|Gjk{!c&hxwvP+4?S;B )hzVv(v^mGC8dP?qM<7Tt~ zAPp3KI7O+auLM-WnK3F0ur1Ol*$O$T-zp7Ka#-o#hW?C#7dV0XbtnD!oW%mCHq|S@ z07#uG@nVus>~Ik1n!J2OSI9w6AG cjmzcki6I}70?u;~A4oD{j)XOVuV%;? 
zy6~t0hL5^38Usf$-&1x28 @+}kIL1f6tw&+zbbihzRDbLg z$B-Yd`Ib8KRl?Bn(Mg|IgOmU^wk2}rmvc>6M|#W;Ip>)+VhI47RDtW*dSr&tb^|p6 zI)-cF)?+L=h*9f*vU%Rc*)FMrREp|D)inClx(Z4w|CEWLxb-XM28Uis0AUKi260Sy zW~tM4blvi#I}4*)Vz+n1Cx> H&U}mkZ %t% 3zDACgjUFe 1 Tu%!3e|W>fC>=0jdCet>80%|S+vF>pv;#p#>};}` zvbi;Y-)r9-s7Ctlx)!g*$V)8!X}qF1&whzHZ9Pt6>VJxdNs42nE9;7f!V%Dm6Zr<4 zcY1n`j`-wrF)ceQRH`A$v+=U0&QutinZR8usE?wY(~Cei<^k(z24OP5hPjL$rhv$g zIb)oUn^xsBAkzh|AR_%hzjT2Y7`GGd26$PijwAw=DS>Xyn2+&ip0tdU*vU!dAmV`0 z5qmjFgv1q>B;bpYZA2!;YZ-(PTk=U|!GZe5y(bji=N>u9LWNFt7G-7sPVFuPre+J^ zGl_PcRQIV1AWWA7{{u`pS*T!c{r?$5o_+4|yRGW)n&$yeJo$hu%{>BcX3;>0f%+b< z5oFy5<+}e{OtcY_>`wS1`@qyH#lMjA?GC7wsRYc%4K>EGcL+aIvu_7GHI~Khv|D8U z!)op`@C~5b5tIPN`*Da$1NvM8pz$ZUhnoS8`<4~v%Ssmzl+v~MQE= bOXL7x{{b(mQbNkp>h;Lxi8hVu!L@=3f_SX==!j>{d7zumM(JgTn5GG6 zccY<|-BvA}UQf88A%+2WY98J?L~R&-2}k7~K76oYE6P*0&gh=-XufC6f$9KWEFZHt zea$7v9n+yqiv%jLAjzIVjA3@jDQyjxp8{y#d$#N7Z-v>$zEue~BF48jE=VEUt6ZED zx2E{xlhrbc_SNlpAy ?mZP6ZjZq=bHw+lnK*If!kf#0}Z5G5~p-P+Q8U2NC7(;F3aL}C^&Lyyl7a~<24)9 zXaYdd3X$C>uBTi|Lt%_X^?_iLG7IGGD*zHvD|w?i5+N&cSm_{XEMok19=7lI3b8kq z;1bMC>2tj&gKoC_*`?tW(XaO?{Fx6~A^G+1_CB1IE5RLtkLc^;fbC&o$tr3UNpZe! 
zvLDguO`?}VR?&>DG|{j{b~~-_G*9QKE92kWT@GNX5`c`wP}i5E)zyC(rR1b>5kaF8 zU6Xc6I_sd(xZP5ep6Wzb?`zudr8coj9~b~A!EF7(Y}-^h<`Csyl`eIlEmfhlI7(TY zW( sE!*+l&g0DcfrqGF<8_?q9Hs}Von?3O!sNXF7Wr0d z8FU5F&H`TNZe2-i70aej-Kxv|Z*vBLo?EvJ20J|~tuGRy@LsUHk0kIXeHP0Bci2Xh z5-m9o)X9GKk)JyQWEw{(G>U%BZy=NZRehLBzIhMy*JX|v8iQon)am)?^LO144@gNj z-R5rp6?7_9V~=vqxv(+3R03Ur0OcRvz(PQaZFe(qBuR1XCZUBct3!w!z7^3di I5$-NO`kn0JL@%0ogVgqdm zHjdW5nv$cc<+r??p?ZuT7+ukOHAGGgg+GG(P7sQ@H+%qVwD*}~DCqlQ-9l1uL9ufs z-yg~R`MJD~Ghc6}!2xUtQt{bJgxJBEsTZY86A>{}eV{1~JJoOXvKkwhSo{Jf{@%6) zz!C3C=fp+jawMY%7wh)N=$z-S)L6inlgBQ4w7SZQe7YV7;pMKlFtfdYZJ(QCPnl=Z zanT?ThMF&UUl+g>Q8<4txkfd4|EucF#$&3&e&fRR-Wcs6INgC+pYit80FIQW(|fk< z>D_ErKxyV&?kk-BR+1Y%{SX;e)u0z4y#9HPhiF<+Q6UQoLzY{AY$Bd~FSYbLW%46Y zQ80hstea5WM9S^{*d`CFuF#7R?RnD=$~u7f?>dX}>~DQt)0Ac(>&tnlZQgYDip8g2 zwJDlRz6;}FBFXr zgJIk4O%L3f=IPylQRmTbfMQR5kjB>df>*ToXD`6BqSYn_a8b9;9UT#w$*SuIYh*fu zw^{vdBuVyRGmb&p8Rw7z6hIQKokzzU{1-KJLqT}$Efx2UcULg2NXk#_V~hma0ZCyp zi@>-AYZhuysL`3HfDR26I;`$W;P%I$dso8k50?>zPzLczp{#0Qch=NgMw^ z!^jYMTFe-O!NaeQUVifhWV(c`c3SZ6f;G}*=o=;V2Q+sD24S*FVm^mcj^LEr3N0fm zd3T=>OM^XuFBV!zt{P%iG(jFMu9;2*107#;(b-*I#({wu1^t4QHlz1Q+R-1Z+Hk|m zDkAu6o2^y=UM$k)1qiniGF8uyYa|V1)I-bw$$tJ(6(9neIkiOc)zF0|<+y>%HNrV1 zZqM$qK+-6ZeHwEWTyV2Gpe0r4GX!ufO9V@(CyPLbkc(?cGJXPo{jU}v&NqIZCpQx@ zcy=K%N0fR!&Y#ZrESZ}aLxvy&O;v#Dhi92sSDWcE&o=pBBbDk)lj8E>^mB2PX@5E* z33YziPz}cpVV_UNy@Pgp?H}NjyMThY*!<51T8Z-biF~~t_a}V%PkE!vM~iL4iT*5* zV4u(}MZ^8tzUyYAkLgCic^05bW0DP$dw=?p*hke~_g`1iwqhynw84aAvv;kaznZJ* zkywDE523uuO)}$>gbF8b4>+pm<~ILly%gCJ04DYB$3}s<`TQ0Rkggy--{)a(h{Biq z@_CyA-m!ltiixCs8F^On6tq`pBdY0ewFCAa!U#KECKwD$r}62k0RnM+f4BcWJVHTP zTr`VJXbJ D08}J>IAOt~DHp zUTNbmCTJ*;=5gQc2-RfjE(*K~0(VS?)6Yj$D@+Vyv__PS44$tZ(0rhB^~hE&VOow4 za;@IeK7N?8@HF^Lng`;%U` <2qVhleA7+R+W;jZTZ z0AXGril4iURyrO_Sqt^@D5wU~>3L&{_&0`>2&RTnuE$4#dcNF?z;t$c?3x)paC13C zhD`ncBOs(Vtn gxPB{vvU25f@Z5u1+)) +bR~BtB7EZC_ ;SLE4l9fDQI;yP|yW?P%*o^FzRXEMxQI|xc8Ia_~-|gQZ`A1&Cj=%r+md# zzJ|M`Us}lRiMT>xUkQ!h7_D 
z5+nnBFX$q{Sqz~lh5jaizATbBSk8$zXX=1uyQ_~Op^IStG2BqZfKGov*=J5m$8Zyj z1-f!6UKb|)sIt^9*-@(VSL)u{ w{={WqOFhJaU zGU;rKQIxD6Y@?T01mg%RK(&I;7{;+|KHCVb(-vIr%VWgm61pQSDm#ziPsrOQisW}n zhwkJd!*2-)6 ye~biui!&{#!U+J%MASgsR{2P#V%5P-!LQB?b*ATy6y%c`5iG2Dv$PNk&1}#Pm1M zzdjcDz~ubI5FRPo6O0EL|79d9=ywyj3G=DIEWr=H1X>QsAz$(C7T4tnEk8&njqLBI zS35U?wqKy$6Ii@rrk^D`_))^7MW`cV{`9?}fR~(5@HxY{qP{pvS`R|yW-d8)KR(#8 z(kF 3o*>U$gpSUalEp3F68eXu77tC~uQm7iN+2~$q;s67A4F(4=r4fLi()1O6 z-@j8C1OTPugEuWCw`uxEj9c#vfJ>ABSo|X}V|ktd*rNK%q&QGwc^`i3UPxhIi8=m@ zyV;7q!bi(7X&AHw`u3Tqd1v(vxM?X*aNb5{UHVx#uogNY_n3ZC-(I|g6aRs!1}J?| zgctxQGB#{Hk@q&Pl{;iLsExRjPf(+o{BsMJ5sNFhCRd9h`O0M^X hJ|G&dyF3NM>P-pv5g fvu#` zH);5`s1qezBNc}!=Y5&E?$}LHnD2STp(E7kjAAFPnZ)_VVfX?5L)}b4fQmRaOS^*+ z4Cq>J$n*#OdT`PgJ%)PbQ*(9mpsvIDKpxl0NpV(s5e}u!MNYdqAvb^2k}UAQY`rwE zF&55&D)|5AU_k$p&`Qu8bpLi$X^3!qq{Vhcf4{+4JIGClDG=rNy}iB71CHSv6)yOk zvcv)0-lVKX!;Vk}ab3|o#X0zp>C!*u1(qYe>w8)2%IAvw^TL;&Td+iTI?d+_Lm8mV z)kN*86hozzF#FDZ);&vN*T=YDO?T59f~o*&_oz1VTZXFTLRNIg{L7>`uSfLA7KRtu zd42T&VA@UpOhE5FFIWQqot|5?A5?_+83fyZuA>K<#-b(_o#XcwXV=98j~xMjX-S@n zSrtFSf(gm6y{BEFi~fVlFzw^IVugg3iwlDM$B;+Y65 k8d z_}ME8oWuuBw4>4G5G43%ur3LJ;_LfdlWW03^Cy=TZsX-A>guX{MK=}deg+@w1KeFj zNZyPtA4F*%SfwSp+c> f{ictXowu(2&xt_T?G26hgK{7aF!Q @ewgpBqNdY%K?Q`r7+_UJl78k&i5GgRi6CuK0C8?{D@s*YjoBG^ zsYv=tZs36$V}6!>MCUxB`PysyZ#>>8*NYhEG6XlNYn)w-cWIN=H9VCrZ*tx*TD|Jz zruvliwYiL6>Z+Ojjc-WSP=S#SV*awladb{}4j kIrh$~L0!vIuA9FH08)x0z$#Wfne$zS1?=%4<(p~b6Cv0%`eoUCvkco(r0 zk7V*=dXNIndq46h__T7DX3VFu0!`bFzIK;gw_Y5v#a~PYq!<|Ch5ZfVuzAHEC6U6K z&IdC+Z7qJ8ji99nH`I8arK!IEd!)^4ZJ_@xyjmYZ@tYMo7mK}HB40sTvNNydc^a%! 
z=6ObG$S<}s3j+rSKdk=)OiM{Thq?Wbcj$uJ{k4iHOwO*OwyF5ftDnf7Ki2x0xlL<| zurOBPqNcb&q1`De_|wguA71)&U%?!%ZUTPqMN+1av4h$#f=&x9+$d-ipofkY@3xXe zFKx~T?~QGB$ZA@2{*(e;`Z59+?3%8>&XGx+?O#YJ+HRt6?B(2e3aYgl-6fm+{%jZ` zpDJ__=i6*+tMEOC@O2yhJveI9M{Kh5TFdtD?;>p<&yiqNh2e{3*Wcun?=OGLqJ0hm z*NP!*>}I5O9NGPWojb7pZlFp-S9VN8cg;@ttaYPh-&ll>Za{{WBtw&pUNuJ|y{bJ0 zRH~|%lDbopGKzM^?JwEwb;zMRU3Tj-%Pwa-y#@swH>Mwzn*+d3LJQ<0-M6e=9($?r zX2 CfWtz$??@#v&GC9H0P9d8XHs5Rml7^ zhw6QImdnp7UV8 2Ucb{KQ zJUW5~eFg4Ld@euF65)gcm((EQalr7FZH5JVrONm``VF+AY8nT%r>dxe5;KF)0bQm$ zAIlyvdWgHWsT0gilvUO*dZ50qcnlizemD*)5{p4W>>#c2VJY)$yuNJBO1BcFE`s(> zO;&vQY5>R>J6*Y218OqxBA}_B!@|P&u2{g8lvS}~P-z^qs5BK0{=(($F0y8%BzQTB z$F6z0vvJD7AIo(aboV^>rqhRBI#|C&adv>PAqka$KH}L@$AUYU(A}!y z=-xNJl?kAd*mXLgazhnlDQA%FpFwA35fwZqQVrZ#a54e}_T)aZyfZ-|Xc$Z-b$+Jl zc1?xk9A)(@gki`ve&oe+GJmMP3!uVyv&)I(Lr|C=G>(zJ4>y<&`3HpsSjkTV9iV zr}Wi#K~A8{=_ xnH{rySnefQn(IX-I 3u zK!uld0eJQH`Tr~K&BLMo-v99_AuUuSJ1s&Bm1G}eEg?%8dy l=)H+}zR9R+x5aDT}C zGwL(4AbdFQ0ANqgze5Yoo9Vv3a;G7|zWc^HtBB^Jf|?sYZ FSe& zA0u5)!|S{ASoXb;b3OuyV*A8GLM9|wL-goZJVz$-aPQUckl-pncB$GhGZ$59zY&qB zx+aM7N{r`?K`@(}!RQ{D>RM;Xz>smbydPuEo#sq>;km0;EDmH^C*u1jIVNzj0~kle zHbWMy!XRCJ-$6v*n=kq#x85)=+)4lpBo|04FIb)|$URW4AqGhkJ6indY$Ne@b-(5- z-2S&`NiLoO_#4ZCXb|M_X5iUdtLi389(oq+zfz3tK#<#?;+~V(Zwo2tIsr&8&x4Rn zYSZ(C784~I!P6;bMxX|c$r>le+tJW`hYos{&6eMOpROtL=49VJnz?+8NG&qVS~X_r zz6zElkV_La1%t9$n3o`^k)O(~)gNh~hB!jKQ@jc}T(M}`8$>p+)4vsF3IH;NazIk0 zenIafL5~CfariGqK}fHuJL>@b*aPBTZDporO#Z%uBT#^FUhOIG7QJ5)OfPPtZuIUa z#e&Z#Qh28hf_mP2!P6!ycCI%qYqC%pEYW-y!9E%ihgLMrdXvrZRgXP~6uNO_s!Fj4 zlncU3BL^#Q_GG`Jy!89VfynN7Gq-maTf~z@w@%Vum&vX=+6IV#Kb2zvCfg~ZETv#* z=mGyH<(Vro_s#&56>w)&0YQ)@gAJnz9S|KU+ S-y%{oXHb{9eS|Gy{Y;clA=`C)X6+?C;9HY=+?_KWI$Kxy^! z%GW~yfW3xFGAF*`@x*WX?H?du>QQG-N8w?qFETO`-)vNi%zz37$rRx6t=K}(?u-;a zw5hv3eEmzM m*n%5yt8 z$_? 
z*Rfy7{I%bEk1Z_Lep9MJoM>LKWdwe$PG}ekoSf}Spkz>XM+ImKv78Wlu>S|6;hn|3 zAb2eF!1kG@wUgq$G=moiWM%DteTiSOzP4L`Yk03!0d01)$6cLEA{>}hg!jLdR=j$Q z37czqtFda*+^x{RrVFAx02C^y5hq?Oel+w82-f<8@OKF8-<6uPm>>zd`%}QxnijH= zC~F;C>RM9j6 uFG#&OS$Uf$w( zLMLkkaScYPxdN~u6IX}5RbBt=jj^X}+!DrV6A#7mLF_^J>|UY&3|ZkUbfpy5a2mMD zoPqpI3%X*W2A3R80UuodqM#O(qYaiWAo_rPaAJ$;ThH!jN=X1euOBdDL7v|goS|5^ zPcNP}Od`#xa@tn7jr`LTDR2AyqBG;jWPV4fVO|@4_IL6FdXZ&e+J_;%8`S%J9zhbr zPT4Z6DD58qTu?#jOQ3^u!C067v0RRX>0ObbM#GFb`Dj;SPeFuUr5>K!SS#0ao#g#= zrO5!^W6KN~RSstz?7eCOFZ}~uUg)Nj-U!lPi%b3@&B_Ex8=zW=3Rb!Ga^T?2(Hn); z%lm`%GGXNMa62REk>*36<-p=Xai3+l#Y0G1C+p!kiv_!Tz68vh^%Hxt;);-7fDKO5 z0QhzD#yyM`!WDt$RG|8bUxoDA7O~8!khHTENQ LwmZc4xKh;f_=Zq zuoucmva~w RjZRW?%wqJ^MvrL6`VJYzeN~LwOhrd_^BUO?l|HO8mjnL!AHMJ~3v? z1gTyKwe|^z7Re3nm-V)$xjO$(J%0I`32FOTJA7Q#{24%H1zs5@HBB>YZn`$X%>#j` z Gvhyd#JcA1H;`l(nP^ZZ`=Mo!J%=ijJG`P58ds z5Kx${#l32;ISZKh_ne?Mp{AbO5zT_$i91VjXY*`75G-CriGJOO5JT@#&*B2B0C9n{ zlRt`V-44fG{wlND88(id=m4W%J-evNsS%{u;_JfOAq|i;`D0a^pjgApj+W%fdH6^s z)Y5HI+rkZWev-XIbrS`cM~TO6(i`vs$VPb!!+aX|c1oSDm44@G+wQA``@sOQaDUw1 zBnszyQN)6dzr~?gG+T>#1407QBhGKoZu@TZs{lqnsS4mA`?(*sH!YNk_3U)2a+(); zw*#U6>ZL5YdRxh+!@a?)O>x5A+z&KB{IUsGoy|M<6mz$pO>;{1EWHK(I)H#ya&uLB zf^Gx`3y@FSS?-DTW+b4;!dcv|4P8!a=niiIRhNE7t_a|F0cHQ?9!j)-HO*@g0J5v_ z(Jg&?_k4QT@Ai iD4fq zHVK~qBe?<{QVw*;(e=h_3Tmmw{7e0GdQk2|YXZ!rS?anS-}R85$MQfHzyaTPh!Q8> z-}>(Xa#`_~>khpc`l6HFdwJ|hdCy5(Y;+}O42 p0n~!vz+PLH7-!QzVQT ;~Q32LjdBG5bO6? 
c-d!sr -&H;@>Gx>xk*615?agr|rayo42lC*g$Z+;i0r$uGdltGttg3wUMA`ZTm0Emh z_HDk{MZhJS)qO=`uq$xIVRluQ(D<7)LSQi>lDHRQgXO?$7*hGfl7e*e8E#nB=;T{P z5P>4*##>s~nglE+EoOv;J*nS{hDk&c^NO8K*DE*qz^Q5sEa((I9LC04ck&kCq|O)M zrnTxIE=M}k5=cFlWmn6rt7Y-v;K^-m{$TF&DGyK4)Ee(K4s_z}MJx1GCbo&)$%w}m z`fEuJ=Lc${WN?OVI0d=QkiQwf9-;Q2RVXEi;W^<3Y{X;5_Azi&0t+d*OEA<;oK9-s z>!|6qTK&KPdH$)o3@!L0xWhY_W*JvY3BbaFPUELT+y$$5>a5YU6&Ra@%<6k#+`XwC z-n%`--7fUz`|ctO;5ByiF +yAKmz5Z+)jS9h5^>@p zN5W9AEMrwi%`d`lu7$vWU-Uc-d2A-iNbG|~R?;cIFP?!4UH+0Z+PiL?xalBVc$iSN zK{d-Cf?0L)`#=4Cni?cRs>^nUZoTTZN@d6Y9QO^f-jI7D=>5F{92|TXl0H7Cqk)cF zOF{~`^(c7(>HT)cdWYa1h&ZqMxAm*u;TpLK!4rN{Uerpgm@K%qbs8x%d2YDCrtaME z;unm53TKAiq{6&ru>eqt# Qv{Iw=Vg#q{^8w3IHo |JV z%^vzZPY4Zs-Z+Df^3((iwY16B<9${M^3((SA2mq2k9*rp 1~ -LJg^jYXX1}R<__U6;Ky1oPN^#05yxwE} zllaCjl~cg^>HYfIEy*yx8t;7V8z^YqY-K<=b?Yo{9Efw(j6?)6LKr2;;b&td&gZUj z7*Yzh*RbeN8w*~hUw@;zi>xfx>-ZCq$wo)Af)qEae{amlSaIOrh2ZY-#!osm%cg#i z$Mq$WOf|zA5fr!^GRU%V&%8l=v=(~^VqXi~mzSe$N-~UffkCX@wW2L~ )z`ntUaO^PBK7mJI_ zkiH~hdAD}42;y4EhJcA%4@XMFmsxW(diUO*o|x7DqDPbm6vd3jxNw$LjKk-{fU|tn zMizls_l7U^X;P}b>w$zNrQ5#2Y!DA{&H?*gYP}%&n4jG*L%iV(KCD f*g`c*5vK`Z(lKuGt`DdHGc-2=J>g!#DSzZ`FEyiH3UC#6*H{ARxu z1PAKd9k=(o8y^iHZv;vWgvfrqkUK^}tGZAv67*7_>E0vrr2gFophqwaj}@gkn5R=G z%wK$h$E*ny*K=qvFIXKn!%+`F%;{Waa- TyBNv*L`QV{lgTX-8269 z`N#PS#j8ht$&VfgW_|JKD7e>72y#bF#R$y({A+{PxYDpSe=18!gSJ&R5~8v=YSoOv znH>gggw9!VmG$QDo*GWD1VaII_00!RH6;)CM&ia7>AYR3FBpdx7)I(n2ZjOEtI6>V z;}&ujtb$|!!V7_w0IQE1fx5DG7FB=o7}E&lG+%`tA+GE!F2H=gvc#tDN2aC4+IX1n z4sc8XheIIjBM6vQ88kNiuvi?;Pa6OV@ANm9L*%6UaEZKtx}TtW59^a4HVg1s{<$|6 zJmZcM)PK>Xqe}dbh8i89H*v_C?rU8d3=o0A|E*T?Xql>F94M;10HJ$Ey=3JAH(GkW zrT|x~P4*k8 l}PdleHKo}7RLJpwgX$=1xMWf?U1IwP}yk~_dE7Atx%fQ{S3&B7P zn@^$ylV!Iwi=7Q#bOGCBryGi@`Z`tiHO}g2ow6V#?K&&MQ&$u#wKoc*oyJ>>gIH4> z^!nD%l8X>B)bsH+=u5IslTIkocE#d?dc#wvMiNobCVDU1yKVL!F06Dx^9g?dy>&hS z>r&Mv<%n6;Wjj_`uT@y=69n9^-4*Dl71f?9tNc!Plocobj5b0^bhEYIoXb&~J_mrb z5yRGp ?I*f;H+{_xi)R*qtCT_bHlTElGBejmg |DNB=FezhP^67uq+nu;oE2fLklAW VRQbYlEkJ)e&(tLJKh_t6@p>t<1H_@?Q9gKh9~X{_# 
XJKZG)q~P0OVIUF*omJ?mcOwZv?1Rd+_YXNBXUD~d#pn?u=3tcS zoBKIDh8l}|Sj>KHJO_KbhR2FsS^HVj9GwNg@d*F*^!>pCRs63L<9_kJ`zIC )PEklzr9J{73})~>{s-E@Spob0M}d&!;hF+SXj8)?w7n!%aQ(1d;l>m?|&aD z{7a@7`T;IT^yA?D%>O?g{5-L9Lh)1HKlG%FEj?xZXB>cmb^n3z|LegpNE^WN@xMz2 zt^+Xd2v4)vCdd6cdOiJ{ I}$MD=gp1z;8aZ+GE z2!WK7^S*DN*ozmZjAxFFX3rIpG#^i#_a09vMxgn #Gj$8Ra`3LFjCxx~%>{w=mdPe5jdpd0S@I z{ug=42c9y0!%qkD=uUr-ATpeYwf0eM?SUb^s@9+x&D}Qq{N)72!6H)kPSq-(pT8h6 zmC`7TS`qMjNlkw@l|c1dPHZ ;DCUHWu(PWQO+j(MdPog}pqx z_h!w0HjO*+b%QIr-Q#7@Z%s4n(riWEVs#fG-w% _)4Fg6l@SmdI>Wwb#b(B7B?I z@?r3ZuNy&Py 7$|Neft}r*rMqfn(Q&e*xx>hB9{B-_pL5&5A5H^h83JY)6Z4 4P=GhK)>@2=Kvhi{)1 z#_7^zHmAmm@sk3{{)z;4{B)OlU~lKKYgO{jR9ipm{JKkgw6`I&`^!1o=BtwmRQp)? zJgpe*^7$jRW#_9+(#&!QhWJ;!0COmZ?X28YVoV6_>BF7X36vXh$2<1as$^54o=K4S z=GJ@%CA15_v&F55yZf8mKA>q7FrNBUfb^@^u~Lk;>TiCCf9TD3_Aoow>IO8SWLEHH z^^V`}GXgFkH Aq~aUQHE@ z#W)@u3_Fp`d3WYUeJaIeV(b!hi&o(+_{g1u$bbsUynpaQ%>8q(K0mTQa8LBj)7zak z$4>m@de7S5c<_{((1~Nyf(%+8Mc+0muiuqW(Gq1k@KQ-gU5w?y$;{9%8sUGaDHHya zRlarv=hdz9akvs%61hG}Sf1>f^&)s1idXauNEOCrYN^OOI_BM}R9j3`F>QD6dZPhP ziTx?%T{hY#d%b6`!~ *^Id6(5mJ7v}_O{ds9#@idMl&@65*+tcT`<#Z z_{FlobjlASJo9(w=f%+oyL554!7yFSyOLp(AiX+7hEFa#WJl!$|5c6b)`q8wxYDA^ zfGC{RijOX9I*i^1yF0|a>d!Zp)(4r2vhgMjk7r3L(d1#DLe^TbtMZc79mRfVLAlXs z(zD8*3vZ?UYc0zB;KyRtm@m33g_q@eLBcgLuPGW)C|y?tQD~&VxiT*9OeeAR<^?(b zRJo-mkGlfiM94qtY`rtc0`6FI`!pl#eLr$T&@HJ!w&lpEiP?ayk;mdY=hFz2JJXDc zm#g}p@!GY>c*l4YR#|k5r>rtr6;6#~)ln%r8}<}(oKXIIHb}1T$ltU$o#H1?6EdJT zgKJDf*#-=zV;z=&$Hh}FoKNf3!v-WS-`bTn8{T)Er6opuXEGbq7GH%-P)0Q@2v}vP zJZ$B*=1w8k%&vRJ##Z6f?+yNywH=-!cNdS{Yoo?vm@opGnBs|*|Le*GX&+M?SE;iR zO}Mw $*9}D=kPG)TU)Im~^zd_@N6x+GHcLykZRdLl4{RjB z)oy#r>hX+uZ0WMZ2IbneqZkohKCV$1UxOy8`kKD_9OO?UeEsUo%rVFZ@APc;DgpJ6 zZ!aqlub+Dgg*WqWTPUBT*Ko5D&4-pbE_#LkJo0Rc9pNH}guPv^sN0FXYYqW$e@uUb zD|77Py&==jOgw)a!ZI9ddw5hvW46@cYF=*Rl)`|_1Ij< ~rk+1_p zm2ooWo>{rfu3m)9N~cJz(o~@R5Fy#RmO7K^*Bm+*b=N%>^m5M^X)F4|V;_G7M{Qca zpP)8$f?g2>B~xV)0hU=lD)QPo`k@PwkrYdqoxmspS~1#Us8fqG1H<4oYW&pn3zXnp z8& GFsufHAEXr-jHui~sy|tJU`?vJhjDuCY{U@>v=C8oT^ ;YXkPwgr}g 
8 LfOaPgSkxC>ezHqH85LXXm+mN$eNXliYTM2j95 z4V>S8Q5Tcm-dvEv`e*q1hKZO~M04GtI}J&-nDl%ztS+5QiUEzk7f(5F89VYUw%Zk7 zC|Nz$v8dH@RzLM(!C(aFl`mVmWALuKD>I8WEhtZJ9p3RCC#mDd<}Fi5)TDE60hHiY zn`*aX&k#(G`m~jN`?GX4cRw?tx>v`ox}^&*qgOW>mq9XnCFEB##1b6o>EUs#f;}tb z;C2>{yF#@qAfPj4;@-(nug}#A%zd7;JW@-YQA=NIP?5goB+YG>a-V%Nv~Z3ph$_>~ zgxjW{#@m$7>|L47I1X8owiq}3{5b;*QGK>*kOjZ5qBvoqGHiAkm}s=Tza~8d(l|Sm zZ&FhKonE6))8EJeh%qv5XV;Qz{J^M* PZ&q z*L%4hJy&w<_}wvb{oYbj@a?A$k9~#P_Sc){LhlA>zy0BR12U+R{i6Qjiu~& ;SEfG(z*^dYXOSb+W9M|pbjt|Tot3bxjYws+# zSK>r0zEXTgfA$T#W(jlPN4#_V2Qe;NZsMVetF|tq#ur!ehL$pBZ=)$ZZRZ&w(>2(? zFUfZ!YAen?H+7Kz^&Yz$=WC8{Qw@4x^>SV}8PcW{vLKo!$&apL6P`3}uGeox8PtNg z=b|33lJm(-z07hIF&B*@3j1XaMrlFJ~y6}gT6VHbu4p#U5%}?Z;+2vP+_xEu) z4V-r1w;44dub-G5e1xq-_zmc&Qe+WUoig*^<%)c3X8h11+bJF%C63=Q?cASSi*7kv z`Q{@pOk5&xD}W`I^Ona2 I_=GHs z46{H^sA-;@nD?3FKRL7Oi`WR7lhL)4wfv){^!(2xa$~kC+qFwV5jFV1!>|Gs;$iq{ zran=BB8y)QGvdSGKH1YN#-`FEhVV1v$ShDqxjf^maCseo`Sjxc2onPHRHkqzFl5ys z35P`)-w?%KKgF8!46`+~Opb%imC#CaSedjc?Ns~Cm6R1bG %o 2GA=2OG04@hORF6`A1_PG@}det$v@Z3fuVhR&7^9-Cqng;TW;_*7mYGsz1| z6G5%4TC{2e1&ZyMQPjki75%LSpaDn|`4r-@1xAkMGqWnMMzb(QnMf<~+3*KNWeVQR zNFJK(iFS%M44nci+1>O@xcCw+5kCaCKwaU1Cm4f^V}A9QT|WJ!^v^j^SK9m^cFg5< zrcm$5F_TNpD0ge1UEHUGZc+=i+ZU9Z``vDkpUaCjlIBgn&fC+cQ!hCKoP{3hkw&O5 z=|spvl-!WM1Soyte$#t~`Qo5YK8cCCr$QEd^VlIykwS7u79OG8-LiRlN~9GA{xlX! zS#CJs%_qL!==t#KvKHq3OOh<&GGm8`@A~!79g07BLM~ljQ?Ah3VpTaomoR2h(|M_F zM2YNegCR{Az}YU9B#*E9z|XUU;>hIq6oqR4dI_ZVpSqR#9f3f#4o~^U%B=#Y;rbsr zNXN)py%)7q!x )^dDJ-pDPe=*)#@IJBqLRQ*WbVSRs1LNm Nq-tZ{H&U8wC!_kRNL)J$ z3;m{nmBV5LImde#16qlZKvZty#^uYi542BMgq}Tf7egHK&KQ}kPcEyu5|UI{wkq|< z0>3WLJh%<_HsIeeANu3Z%MT}1672S3I=s~vtBab^V>xhJeL*wco%#jYR@U^O%P;ZO zfmiB-ky4PgF%ur_;l