From 25caa8a90a96c43abcacce9f97d5a8976e8697a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=86=8A=E5=85=AE?= Date: Tue, 27 May 2025 18:55:46 +0800 Subject: [PATCH] init commit --- LICENSE | 201 ++++++++++++ NOTICE | 13 + README.md | 188 ++++++++++- configs/accelerate_config/muti_gpu.yaml | 19 ++ configs/chat_template/chat_template_kd.jinja | 8 + configs/cot_generation_api.json | 14 + configs/cot_generation_batch.json | 22 ++ configs/cot_long2short_api.json | 14 + configs/cot_long2short_batch.json | 22 ++ configs/cot_short2long_api.json | 14 + configs/cot_short2long_batch.json | 22 ++ configs/instruction_expansion_api.json | 16 + configs/instruction_expansion_batch.json | 24 ++ configs/instruction_refinement_api.json | 14 + configs/instruction_refinement_batch.json | 22 ++ .../instruction_response_extraction_api.json | 14 + ...instruction_response_extraction_batch.json | 22 ++ configs/kd_black_box_api.json | 32 ++ configs/kd_black_box_local.json | 36 +++ configs/kd_white_box.json | 42 +++ configs/rank_dpo_api.json | 32 ++ configs/rank_dpo_local.json | 37 +++ configs/rl_grpo.json | 25 ++ configs/rl_ppo.json | 28 ++ configs/rl_reward_api.json | 32 ++ configs/rl_reward_local.json | 37 +++ easydistill/__init__.py | 14 + easydistill/cli.py | 187 +++++++++++ easydistill/kd/infer.py | 247 +++++++++++++++ easydistill/kd/train.py | 218 +++++++++++++ easydistill/rank/infer.py | 262 ++++++++++++++++ easydistill/rank/train.py | 105 +++++++ easydistill/rl/grpo_train.py | 111 +++++++ easydistill/rl/ppo_train.py | 122 ++++++++ easydistill/rl/reward_infer.py | 258 +++++++++++++++ easydistill/rl/reward_train.py | 107 +++++++ easydistill/synthesis/cot_synthesis.py | 274 ++++++++++++++++ easydistill/synthesis/instruct_synthesis.py | 293 ++++++++++++++++++ easydistill/synthesis/synthesis_main.py | 107 +++++++ easydistill/synthesis/utils.py | 85 +++++ .../distillqwen2.5-0324/README.md | 76 +++++ .../distilqwen2.5-0324_stage1.json | 14 + .../distilqwen2.5-0324_stage2.json | 23 ++ 
.../distillqwen2.5-r1/README.md | 142 +++++++++ .../distillqwen2.5-r1/cogpo.py | 194 ++++++++++++ .../distilqwen2.5-r1_stage1.json | 23 ++ .../distilqwen2.5-r1_stage2.json.json | 15 + .../distillqwen2.5-thoughtX/README.md | 101 ++++++ .../distilqwen2.5-thoughtx-train.json | 24 ++ .../distillqwen2.5/README.md | 135 ++++++++ .../distillqwen2.5/distilqwen2.5_stage1.json | 23 ++ .../distillqwen2.5/distilqwen2.5_stage2.json | 40 +++ .../distilqwen_series/distillqwen2/README.md | 165 ++++++++++ .../distillqwen2/distilqwen2_stage1.json | 23 ++ .../distillqwen2/distilqwen2_stage2.json | 25 ++ .../distillqwen2/dpo_student_infer_only.py | 105 +++++++ .../distillqwen2/task_resampling.py | 156 ++++++++++ .../domain_specific/code_generation/README.md | 67 ++++ .../code_generation_stage1.json | 23 ++ .../code_generation_stage2.json | 40 +++ recipes/open_datasets/distilqwen_datasets.md | 50 +++ recipes/open_datasets/omni_thought.md | 58 ++++ requirements.txt | 7 + resources/framework.png | Bin 0 -> 159640 bytes setup.py | 25 ++ 65 files changed, 4893 insertions(+), 1 deletion(-) create mode 100644 LICENSE create mode 100644 NOTICE create mode 100644 configs/accelerate_config/muti_gpu.yaml create mode 100644 configs/chat_template/chat_template_kd.jinja create mode 100644 configs/cot_generation_api.json create mode 100644 configs/cot_generation_batch.json create mode 100644 configs/cot_long2short_api.json create mode 100644 configs/cot_long2short_batch.json create mode 100644 configs/cot_short2long_api.json create mode 100644 configs/cot_short2long_batch.json create mode 100644 configs/instruction_expansion_api.json create mode 100644 configs/instruction_expansion_batch.json create mode 100644 configs/instruction_refinement_api.json create mode 100644 configs/instruction_refinement_batch.json create mode 100644 configs/instruction_response_extraction_api.json create mode 100644 configs/instruction_response_extraction_batch.json create mode 100644 configs/kd_black_box_api.json 
create mode 100644 configs/kd_black_box_local.json create mode 100644 configs/kd_white_box.json create mode 100644 configs/rank_dpo_api.json create mode 100644 configs/rank_dpo_local.json create mode 100644 configs/rl_grpo.json create mode 100644 configs/rl_ppo.json create mode 100644 configs/rl_reward_api.json create mode 100644 configs/rl_reward_local.json create mode 100644 easydistill/__init__.py create mode 100644 easydistill/cli.py create mode 100644 easydistill/kd/infer.py create mode 100644 easydistill/kd/train.py create mode 100644 easydistill/rank/infer.py create mode 100644 easydistill/rank/train.py create mode 100644 easydistill/rl/grpo_train.py create mode 100644 easydistill/rl/ppo_train.py create mode 100644 easydistill/rl/reward_infer.py create mode 100644 easydistill/rl/reward_train.py create mode 100644 easydistill/synthesis/cot_synthesis.py create mode 100644 easydistill/synthesis/instruct_synthesis.py create mode 100644 easydistill/synthesis/synthesis_main.py create mode 100644 easydistill/synthesis/utils.py create mode 100644 recipes/distilqwen_series/distillqwen2.5-0324/README.md create mode 100644 recipes/distilqwen_series/distillqwen2.5-0324/distilqwen2.5-0324_stage1.json create mode 100644 recipes/distilqwen_series/distillqwen2.5-0324/distilqwen2.5-0324_stage2.json create mode 100644 recipes/distilqwen_series/distillqwen2.5-r1/README.md create mode 100644 recipes/distilqwen_series/distillqwen2.5-r1/cogpo.py create mode 100644 recipes/distilqwen_series/distillqwen2.5-r1/distilqwen2.5-r1_stage1.json create mode 100644 recipes/distilqwen_series/distillqwen2.5-r1/distilqwen2.5-r1_stage2.json.json create mode 100644 recipes/distilqwen_series/distillqwen2.5-thoughtX/README.md create mode 100644 recipes/distilqwen_series/distillqwen2.5-thoughtX/distilqwen2.5-thoughtx-train.json create mode 100644 recipes/distilqwen_series/distillqwen2.5/README.md create mode 100644 recipes/distilqwen_series/distillqwen2.5/distilqwen2.5_stage1.json create mode 
100644 recipes/distilqwen_series/distillqwen2.5/distilqwen2.5_stage2.json create mode 100644 recipes/distilqwen_series/distillqwen2/README.md create mode 100644 recipes/distilqwen_series/distillqwen2/distilqwen2_stage1.json create mode 100644 recipes/distilqwen_series/distillqwen2/distilqwen2_stage2.json create mode 100644 recipes/distilqwen_series/distillqwen2/dpo_student_infer_only.py create mode 100644 recipes/distilqwen_series/distillqwen2/task_resampling.py create mode 100644 recipes/domain_specific/code_generation/README.md create mode 100644 recipes/domain_specific/code_generation/code_generation_stage1.json create mode 100644 recipes/domain_specific/code_generation/code_generation_stage2.json create mode 100644 recipes/open_datasets/distilqwen_datasets.md create mode 100644 recipes/open_datasets/omni_thought.md create mode 100644 requirements.txt create mode 100644 resources/framework.png create mode 100644 setup.py diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..f49a4e1 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/NOTICE b/NOTICE new file mode 100644 index 0000000..6665ced --- /dev/null +++ b/NOTICE @@ -0,0 +1,13 @@ +============================================================= +EasyDistill is a open-source tool developed by Alibaba PAI Team +Licensed under the Apache License, Version 2.0 + +============================================================= +This toolkit implements some modules referring to some repositories under +the same/different open source licenses. + +----------------------------- +Apache License, Version 2.0 +The HuggingFace Inc. team +The OpenAI Team +The vLLM Team diff --git a/README.md b/README.md index 3ac4c5e..a23455e 100644 --- a/README.md +++ b/README.md @@ -1 +1,187 @@ -# easydistill \ No newline at end of file +# EasyDistill: Easy Knowledge Distillation for Large Language Models + +Introducing **EasyDistill**, a pioneering toolkit on knowledge distillation (KD) for large language models (LLMs). With the growing complexity and size of LLMs, **EasyDistill** offers a versatile and user-friendly platform to streamline the KD process, supporting both black-box and white-box methodologies. It facilitates efficient model training, enabling smaller models to emulate the performance of larger ones without compromising accuracy. 
**EasyDistill** boasts an extensive range of features, including data synthesis, supervised fine-tuning, ranking optimization, and reinforcement learning, all tailored for various KD scenarios. Designed to accommodate both System 1 (fast, intuitive) and System 2 (slow, analytical) cognitive models, the toolkit is modular and easy to use, with a simple command-line interface guiding users. Beyond academic exploration, **EasyDistill** anchors practical industrial solutions, offering robust distilled models and open-source datasets, while also showcasing seamless integration with Alibaba Cloud’s AI platform, PAI. Committed to bridging theoretical advancements with practical needs, **EasyDistill** empowers the NLP community, making state-of-the-art KD strategies accessible to researchers and industry practitioners alike. + +# Technical Articles + +We have a series of technical articles on the functionalities of EasyDistill. + +- [人工智能平台 PAI DistilQwen2.5-DS3-0324发布:知识蒸馏+快思考=更高效解决推理难题](https://developer.aliyun.com/article/1661734) +- [DistilQwen2.5-R1发布:知识蒸馏助推小模型深度思考](https://developer.aliyun.com/article/1659288) +- [DistilQwen2.5发布:通义千问蒸馏小模型再升级](https://developer.aliyun.com/article/1653842) +- [DistilQwen2:通义千问大模型的知识蒸馏实践](https://developer.aliyun.com/article/1633882) +- [基于多轮课程学习的大语言模型蒸馏算法TAPIR](https://developer.aliyun.com/article/1635146) + + + +## Overview + +![EasyDistill Framework](resources/framework.png) + +- **Toolkit Features**: EasyDistill provides versatile functionalities, including data synthesis, supervised fine-tuning, logits distillation, ranking optimization, and reinforcement learning techniques tailored for KD scenarios. +- **Compatibility**: It supports both System 1 (fast, intuitive) and System 2 (slow, analytical) models. +- **User-Friendly**: With its modular design and simple command-line interface, EasyDistill makes experimentation and implementation of KD strategies straightforward. 
+- **Industrial Integration**: Incorporates KD-based solutions and supports integration with platforms such as Alibaba Cloud’s Platform for AI (PAI). + + +## Getting Started + +1. Clone the repository: + ```bash + git clone + cd EasyDistill + ``` + +2. Install the required dependencies: + ```bash + python setup.py install + ``` + +3. Explore the usage of EasyDistill through the command-line interface: + ```bash + easydistill --config + ``` + + The config file expresses the detailed settings of any knowledge distillation jobs that **EasyDistill** supports. A sample of black-box distillation config can be shown below: + ```json + { + "job_type": "kd_black_box_local", + "dataset": { + "instruction_path": "train.json", + "labeled_path": "train_labeled.json", + "template" : "chat_template/chat_template_kd.jinja", + "seed": 42 + }, + "inference":{ + "enable_chunked_prefill": true, + "seed": 777, + "gpu_memory_utilization": 0.9, + "temperature": 0.8, + "trust_remote_code": true, + "enforce_eager": false, + "max_model_len": 4096, + "max_new_tokens": 512 + }, + "models": { + "teacher": "teacher/Qwen/Qwen2.5-7B-Instruct/", + "student": "student/Qwen/Qwen2.5-0.5B-Instruct/" + }, + "training": { + "output_dir": "./result/", + "num_train_epochs": 3, + "per_device_train_batch_size": 1, + "gradient_accumulation_steps": 8, + "save_steps": 1000, + "logging_steps": 1, + "learning_rate": 2e-5, + "weight_decay": 0.05, + "warmup_ratio": 0.1, + "lr_scheduler_type": "cosine" + } + } + ``` + +## DistilQWen Series + +The **DistilQwen** models represent a robust suite of distilled language models derived from the **EasyDistill** toolkit. Designed to capitalize on the principles of knowledge distillation, DistilQwen models offer a significant reduction in model size while maintaining high performance, making them ideal for resource-constrained environments. 
Whether you're aiming for efficient deployment in industrial scenarios or seeking to explore advanced KD methodologies, **DistilQwen** models are poised to meet diverse application needs with agility and precision. + + +### What's New: Adaptive Thinking Models + +The most recent **DistilQwen** series is **DistilQwen-ThoughtX**, which exhibits improved reasoning abilities and generates CoTs with more optimal lengths compared to its predecessors. This model series is developed from the innovative **OmniThought** dataset by utilizing the novel Reasoning Verbosity (RV) and Cognitive Difficulty (CD) scores, which ensure that models receive rich, high-quality training data reflecting optimal CoT output length and difficulty. **DistilQwen-ThoughtX** outperforms other KD models in the open-source community. The performance of **DistilQwen-ThoughtX** is shown below. + + +| **Model** | **AIME2024** | **MATH500** | **GPQA-D** | **LCB V2** | **Avg.** | +|-----------------------------------------------|--------------|-------------|------------|------------|-----------| +| OpenThinker-7B | 31.3 | 83.0 | 42.4 | 39.9 | 49.1 | +| DeepSeek-R1-Distill-Qwen-7B | **57.3** | _89.6_ | 47.3 | 48.4 | 60.6 | +| OpenThinker2-7B | 50.0 | 88.4 | _49.3_ | _55.6_ | _60.8_ | +| **DistilQwen-ThoughtX-7B** | _56.7_ | **90.2** | **50.0** | **56.8** | **63.4** | +| LIMO-32B | 56.7 | 86.6 | 58.1 | 60.0 | 65.3 | +| OpenThinker-32B | 66.0 | 90.6 | 61.6 | 68.9 | 71.7 | +| DeepSeek-R1-Distill-Qwen-32B | 74.7 | 90.0 | 62.4 | 72.3 | 74.8 | +| OpenThinker2-32B | _76.7_ | _90.8_ | **64.1** | _72.5_ | _76.0_ | +| Light-R1-32B | 74.7 | 90.4 | 62.0 | 56.0 | 70.7 | +| s1.1-32B | 59.3 | 87.4 | 62.0 | 58.7 | 66.8 | +| **DistilQwen-ThoughtX-32B** | **80.0** | **92.6** | _64.0_ | **73.4** | **77.5** | + +The **OmniThought** datasets are also publicly available. Refer to the Datasets section. 
+ +### System 1 Models + +**DistilQwen2** is an enhanced version of the Qwen2 models, equipped with improved instruction-following capabilities for various NLP tasks. We employ GPT-4 and Qwen-max as teacher models to generate high-quality responses, with the balance on the task distributions of input instructions. Following SFT, a rank optimization process is performed using the DPO algorithm to enhance alignment between the student models and the teacher models. **DistilQwen2.5** models are trained using a combination of black-box and white-box KD algorithms. We adhere to the same instruction data processing and black-box SFT procedure as employed in the production of **DistilQwen2**. Subsequently, white-box training is applied to refine the students' acquisition of intricate knowledge from the teacher models, specifically utilizing Qwen2.5-72B-Instruct as open-source teacher models. The performance of **DistilQwen2** and **DistilQwen2.5** is shown below. + +| **Model** | **AlpacaEval 2.0 (length control)** | **MT-Bench** | **MT-Bench (single)** | **IFEval (instruct-loose)** | **IFEval (strict-prompt)** | +|------------------------------------|-------------------------------------|--------------|-----------------------|-----------------------------|----------------------------| +| Qwen2.5-0.5B-Instruct | 2.46 | 5.49 | 6.26 | 42.81 | 30.31 | +| **DistilQwen2.5-0.5B-Instruct** | **4.89** | **5.78** | **6.83** | **52.61** | **37.82** | +| Qwen2-1.5B-Instruct | 5.22 | 5.85 | 6.45 | 41.37 | 28.10 | +| **DistilQwen2-1.5B-Instruct** | **8.28** | **6.42** | **7.12** | **49.76** | **36.04** | +| Qwen2.5-1.5B-Instruct | 6.69 | 7.09 | 7.66 | 55.40 | 40.11 | +| **DistilQwen2.5-1.5B-Instruct** | **13.69** | **7.35** | **7.99** | **61.10** | **74.49** | +| Qwen2.5-3B-Instruct | 17.98 | 7.92 | 8.40 | 61.18 | 74.58 | +| **DistilQwen2.5-3B-Instruct** | **20.91** | **8.37** | **8.97** | **67.03** | **77.36** | +| Qwen2-7B-Instruct | 24.33 | 8.27 | 8.68 | 66.67 | 52.31 | +| 
**DistilQwen2-7B-Instruct** | **25.35** | **8.40** | **9.03** | **71.46** | **60.26** | +| Qwen2.5-7B-Instruct | 31.43 | 8.52 | 8.83 | 81.53 | 72.10 | +| **DistilQwen2.5-7B-Instruct** | **34.86** | **8.76** | **9.22** | **83.48** | **73.27** | + + +We have released two instruction following datasets to public. Refer to the Datasets section. + + +### System 2 Models + +The **DistilQwen2.5-R1** model series utilizes DeepSeek-R1 as the teacher model. To align the reasoning abilities of smaller distilled models with their intrinsic cognitive capacities, the models are further refined using our CogPO algorithm, which outperforms other training methods. Additionally, we transfer the fast-thinking reasoning capabilities from DeepSeek-V3-0324 to the **DistilQwen2.5-DS3-0324** models. To shorten the reasoning process, the CoT simplification operator are employed to reduce the number of tokens in the training data for **DistilQwen2.5-R1**. Combined with a rewritten dataset comprising DeepSeek-V3-0324's CoT distillation data, we develop the **DistilQwen2.5-DS3-0324** models. The performance of **DistilQwen2.5-R1** and **DistilQwen2.5-DS3-0324** is shown below. 
+ +| **Model** | **AIME2024** | **MATH-500** | **GPQA Diamond** | **LiveCodeBench V2** | +|---------------------------------------|--------------|--------------|------------------|----------------------| +| Qwen2.5-3B-Instruct | 6.67 | 62.6 | 32.83 | 11.35 | +| **DistilQwen2.5-DS3-0324-3B** | **16.67** | **70.0** | **34.34** | **18.00** | +| Qwen2.5-7B-Instruct | 10.0 | 73.6 | 33.30 | 30.72 | +| **DistilQwen2.5-7B-R1** | **23.33** | **77.8** | **37.88** | **36.40** | +| **DistilQwen2.5-DS3-0324-7B** | **43.33** | **88.4** | **42.93** | **46.38** | +| Qwen2.5-14B-Instruct | 16.7 | 78.2 | 43.43 | 37.38 | +| **DistilQwen2.5-14B-R1** | **26.67** | **82.6** | **45.45** | **41.49** | +| **DistilQwen2.5-DS3-0324-14B** | **46.67** | **90.8** | **51.52** | **54.40** | +| Qwen2.5-32B-Instruct | 16.67 | 81.4 | 45.50 | 47.36 | +| **DistilQwen2.5-32B-R1** | **46.67** | **87.0** | **48.99** | **55.97** | +| **DistilQwen2.5-DS3-0324-32B** | **70.00** | **93.8** | **62.12** | **65.95** | + +All the **DistilQwen** models are publicly available in HuggingFace and ModelScope. + +## Released Datasets + +We have also released several datasets based on the **EasyDistill** framework. + +### Instruction Following Datasets + +To assist community developers in avoiding catastrophic forgetting when fine-tuning the **DistilQwen** model, we have open-sourced two datasets: **DistilQwen_100K** and **DistilQwen_1M**. These datasets are intended to provide a solid foundation for model fine-tuning, enhancing adaptability to new tasks while retaining performance on previous tasks. Additionally, it can be utilized to improve instruction-following capabilities when fine-tuning other similar large language models. These datasets cover a range of contents, including mathematics, code, knowledge-based Q&A, instruction following, and creative generation, with a total dataset size of 100K and 1M entries. 
Users can integrate **DistilQwen_100K** and **DistilQwen_1M**, or its subsets, with their own data during model fine-tuning to ensure excellent downstream task performance while maintaining the model's general capabilities, thus preserving its ability to generalize. + + +### Chain-of-Thought Reasoning Datasets + +**OmniThought** is a large-scale dataset featuring **2 million** Chain-of-Thought (CoT) processes generated and validated by DeepSeek-R1 and QwQ-32B. Each CoT process in **OmniThought** is annotated with novel Reasoning Verbosity (RV) and Cognitive Difficulty (CD) scores, which describe the appropriateness of CoT verbosity and cognitive difficulty level for models to comprehend these reasoning processes. Based on our **OmniThought** dataset, we further train and release a series of high-performing models (**DistilQwen-ThoughtX-7B** and **DistilQwen-ThoughtX-32B**), specifically equipped with stronger reasoning abilities and optimal CoT output length and difficulty level. Refer to `recipes/open_datasets` for details. + +All the datasets are publicly available in HuggingFace and ModelScope. + + +## Reference + +We have [an arxiv paper](TBD) for you to cite for the EasyDistill library. Below are other papers related to our project. + +- Wenrui Cai, Chengyu Wang, Junbing Yan, Jun Huang, Xiangzhong Fang. Reasoning with OmniThought: A Large CoT Dataset with Verbosity and Cognitive Difficulty Annotations. arXiv preprint +- Wenrui Cai, Chengyu Wang, Junbing Yan, Jun Huang, Xiangzhong Fang. Training Small Reasoning LLMs with Cognitive Preference Alignment. arXiv preprint +- Chengyu Wang, Junbing Yan, Yuanhao Yue, Jun Huang. DistilQwen2.5: Industrial Practices of Training Distilled Open Lightweight Language Models. **ACL 2025** +- Yuanhao Yue, Chengyu Wang, Jun Huang, Peng Wang. Building a Family of Data Augmentation Models for Low-cost LLM Fine-tuning on the Cloud. **COLING 2025** +- Yuanhao Yue, Chengyu Wang, Jun Huang, Peng Wang. 
Distilling Instruction-following Abilities of Large Language Models with Task-aware Curriculum Planning. **EMNLP 2024** + + +## License + +This project is licensed under the [Apache License (Version 2.0)](LICENSE). This toolkit also contains some code modified from other repos under other open-source licenses. See the [NOTICE](NOTICE) file for more information. + + +## Join in the Discussion + +We welcome community partners to collaborate and contribute to the development, and welcome to join the DingTalk group: 117440002081 to participate in the discussion. \ No newline at end of file diff --git a/configs/accelerate_config/muti_gpu.yaml b/configs/accelerate_config/muti_gpu.yaml new file mode 100644 index 0000000..924f190 --- /dev/null +++ b/configs/accelerate_config/muti_gpu.yaml @@ -0,0 +1,19 @@ +compute_environment: LOCAL_MACHINE +debug: false +deepspeed_config: + gradient_clipping: 1.0 + offload_optimizer_device: cpu + offload_param_device: cpu + zero_stage: 2 +distributed_type: DEEPSPEED +gpu_ids: all +machine_rank: 0 +main_training_function: main +num_machines: 1 +num_processes: 8 +rdzv_backend: static +same_network: true +tpu_env: [] +tpu_use_cluster: false +tpu_use_sudo: false +use_cpu: false \ No newline at end of file diff --git a/configs/chat_template/chat_template_kd.jinja b/configs/chat_template/chat_template_kd.jinja new file mode 100644 index 0000000..7ce8b8a --- /dev/null +++ b/configs/chat_template/chat_template_kd.jinja @@ -0,0 +1,8 @@ +{{'<|im_start|>system\nYou are a helpful assistant.<|im_end|>'}} +{{'<|im_start|>user\n' + message['content'] + '<|im_end|>'-}} +{% if add_generation_prompt %} + {{'<|im_start|>assistant'-}} +{% endif %} +{% if add_output %} + {{'<|im_start|>assistant\n' + message['output'] + '<|im_end|>-'}} +{% endif %} \ No newline at end of file diff --git a/configs/cot_generation_api.json b/configs/cot_generation_api.json new file mode 100644 index 0000000..7c8fbc5 --- /dev/null +++ b/configs/cot_generation_api.json @@ -0,0 
+1,14 @@ +{ + "job_type": "cot_generation_api", + "dataset": { + "input_path": "./cot_question.json", + "output_path": "./cot_question_with_answer.json" + }, + "inference":{ + "base_url": "ENDPOINT", + "api_key": "TOKEN", + "stream": true, + "prompt" : "Your role as an assistant involves thoroughly exploring questions through a systematic long thinking process before providing the final precise and accurate solutions. This requires engaging in a comprehensive cycle of analysis, summarizing, exploration, reassessment, reflection, backtracing, and iteration to develop well-considered thinking process. Please structure your response into two main sections: Thought and Solution. In the Thought section, detail your reasoning process using the specified format: <|begin_of_thought|> {thought with steps separated with '\n\n'} <|end_of_thought|> Each step should include detailed considerations such as analisying questions, summarizing relevant findings, brainstorming new ideas, verifying the accuracy of the current steps, refining any errors, and revisiting previous steps. In the Solution section, based on various attempts, explorations, and reflections from the Thought section, systematically present the final solution that you deem correct. 
The solution should remain a logical, accurate, concise expression style and detail necessary step needed to reach the conclusion, formatted as follows: <|begin_of_solution|> {final formatted, precise, and clear solution} <|end_of_solution|> Now, try to solve the following question through the above guidelines:", + "max_new_tokens": 1024 + } +} \ No newline at end of file diff --git a/configs/cot_generation_batch.json b/configs/cot_generation_batch.json new file mode 100644 index 0000000..eea1656 --- /dev/null +++ b/configs/cot_generation_batch.json @@ -0,0 +1,22 @@ +{ + "job_type": "cot_generation_batch", + "dataset": { + "input_path": "./cot_question.json", + "output_path": "./cot_question_with_answer.json", + "template" : "./chat_template/chat_template_kd.jinja" + }, + "models": { + "teacher": "teacher/Qwen/Qwen2.5-7B-Instruct/" + }, + "inference":{ + "prompt" : "Your role as an assistant involves thoroughly exploring questions through a systematic long thinking process before providing the final precise and accurate solutions. This requires engaging in a comprehensive cycle of analysis, summarizing, exploration, reassessment, reflection, backtracing, and iteration to develop well-considered thinking process. Please structure your response into two main sections: Thought and Solution. In the Thought section, detail your reasoning process using the specified format: <|begin_of_thought|> {thought with steps separated with '\n\n'} <|end_of_thought|> Each step should include detailed considerations such as analisying questions, summarizing relevant findings, brainstorming new ideas, verifying the accuracy of the current steps, refining any errors, and revisiting previous steps. In the Solution section, based on various attempts, explorations, and reflections from the Thought section, systematically present the final solution that you deem correct. 
The solution should remain a logical, accurate, concise expression style and detail necessary step needed to reach the conclusion, formatted as follows: <|begin_of_solution|> {final formatted, precise, and clear solution} <|end_of_solution|> Now, try to solve the following question through the above guidelines:", + "enable_chunked_prefill": true, + "seed": 777, + "gpu_memory_utilization": 0.9, + "temperature": 0.8, + "trust_remote_code": true, + "enforce_eager": false, + "max_model_len": 4096, + "max_new_tokens": 512 + } +} \ No newline at end of file diff --git a/configs/cot_long2short_api.json b/configs/cot_long2short_api.json new file mode 100644 index 0000000..1daadca --- /dev/null +++ b/configs/cot_long2short_api.json @@ -0,0 +1,14 @@ +{ + "job_type": "cot_long2short_api", + "dataset": { + "input_path": "./raw.json", + "output_path": "./raw_simplified.json" + }, + "inference":{ + "base_url": "ENDPOINT", + "api_key": "TOKEN", + "stream": true, + "prompt" : "You are a helpful assistant who is highly skilled at simplifying reasoning processes. Given a problem, its answer and its reasoning process, your task is to simplify the reasoning process so that a small language model (e.g., a 7B model) can reliably follow the steps to solve the problem. If the original reasoning process is divided into multiple steps separated by two newline characters (\n\n), your output must preserve this formatting. 
You must output ONLY the simplified reasoning process with no additional explanation or commentary.", + "max_new_tokens": 1024 + } +} \ No newline at end of file diff --git a/configs/cot_long2short_batch.json b/configs/cot_long2short_batch.json new file mode 100644 index 0000000..490a155 --- /dev/null +++ b/configs/cot_long2short_batch.json @@ -0,0 +1,22 @@ +{ + "job_type": "cot_long2short_batch", + "dataset": { + "input_path": "./train.json", + "output_path": "./train_simplified.json", + "template" : "./chat_template/chat_template_kd.jinja" + }, + "models": { + "teacher": "teacher/Qwen/Qwen2.5-7B-Instruct/" + }, + "inference":{ + "prompt" : "You are a helpful assistant who is highly skilled at simplifying reasoning processes. Given a problem, its answer and its reasoning process, your task is to simplify the reasoning process so that a small language model (e.g., a 7B model) can reliably follow the steps to solve the problem. If the original reasoning process is divided into multiple steps separated by two newline characters (\n\n), your output must preserve this formatting. You must output ONLY the simplified reasoning process with no additional explanation or commentary.", + "enable_chunked_prefill": true, + "seed": 777, + "gpu_memory_utilization": 0.9, + "temperature": 0.8, + "trust_remote_code": true, + "enforce_eager": false, + "max_model_len": 4096, + "max_new_tokens": 512 + } +} \ No newline at end of file diff --git a/configs/cot_short2long_api.json b/configs/cot_short2long_api.json new file mode 100644 index 0000000..1d9f245 --- /dev/null +++ b/configs/cot_short2long_api.json @@ -0,0 +1,14 @@ +{ + "job_type": "cot_short2long_api", + "dataset": { + "input_path": "./raw.json", + "output_path": "./raw_extended.json" + }, + "inference":{ + "base_url": "ENDPOINT", + "api_key": "TOKEN", + "stream": true, + "prompt" : "You are a helpful assistant who is highly skilled at extending reasoning processes. 
Given a problem ,its answer and its reasoning process, your task is to extend the reasoning process by adding necessary details and intermediate steps, so that a small language model (e.g., a 7B model) can follow the extended reasoning process to solve the problem. If the original reasoning process is divided into multiple steps separated by two newline characters (\\n\\n), your output must preserve this formatting. You must output ONLY the extended reasoning process with no additional explanation or commentary.", + "max_new_tokens": 1024 + } +} \ No newline at end of file diff --git a/configs/cot_short2long_batch.json b/configs/cot_short2long_batch.json new file mode 100644 index 0000000..f967fa5 --- /dev/null +++ b/configs/cot_short2long_batch.json @@ -0,0 +1,22 @@ +{ + "job_type": "cot_short2long_batch", + "dataset": { + "input_path": "./train.json", + "output_path": "./train_extended.json", + "template" : "./chat_template/chat_template_kd.jinja" + }, + "models": { + "teacher": "teacher/Qwen/Qwen2.5-7B-Instruct/" + }, + "inference":{ + "prompt" : "You are a helpful assistant who is highly skilled at extending reasoning processes. Given a problem ,its answer and its reasoning process, your task is to extend the reasoning process by adding necessary details and intermediate steps, so that a small language model (e.g., a 7B model) can follow the extended reasoning process to solve the problem. If the original reasoning process is divided into multiple steps separated by two newline characters (\\n\\n), your output must preserve this formatting. 
You must output ONLY the extended reasoning process with no additional explanation or commentary.", + "enable_chunked_prefill": true, + "seed": 777, + "gpu_memory_utilization": 0.9, + "temperature": 0.8, + "trust_remote_code": true, + "enforce_eager": false, + "max_model_len": 4096, + "max_new_tokens": 512 + } +} \ No newline at end of file diff --git a/configs/instruction_expansion_api.json b/configs/instruction_expansion_api.json new file mode 100644 index 0000000..c0fc218 --- /dev/null +++ b/configs/instruction_expansion_api.json @@ -0,0 +1,16 @@ +{ + "job_type": "instruction_expansion_api", + "dataset": { + "input_path": "./train.json", + "output_path": "./train_extended.json", + "num_in_context_samples": 3, + "num_output_samples": 10 + }, + "inference":{ + "base_url": "ENDPOINT", + "api_key": "TOKEN", + "stream": true, + "prompt" : "Assume you are a data synthesis expert. Given a few instructions as in-context examples, you should generate a new instruction similar to the examples to support the training of large language models. You should place your answer enclosed within tags. The examples are as follows:", + "max_new_tokens": 512 + } +} \ No newline at end of file diff --git a/configs/instruction_expansion_batch.json b/configs/instruction_expansion_batch.json new file mode 100644 index 0000000..4295517 --- /dev/null +++ b/configs/instruction_expansion_batch.json @@ -0,0 +1,24 @@ +{ + "job_type": "instruction_expansion_batch", + "dataset": { + "input_path": "./train.json", + "output_path": "./train_extended.json", + "template" : "./chat_template/chat_template_kd.jinja", + "num_in_context_samples": 3, + "num_output_samples": 10 + }, + "models": { + "teacher": "teacher/Qwen/Qwen2.5-7B-Instruct/" + }, + "inference":{ + "prompt" : "Assume you are a data synthesis expert. Given a few instructions as in-context examples, you should generate a new instruction similar to the examples to support the training of large language models. 
You should place your answer enclosed within tags. The examples are as follows:", + "enable_chunked_prefill": true, + "seed": 777, + "gpu_memory_utilization": 0.9, + "temperature": 0.8, + "trust_remote_code": true, + "enforce_eager": false, + "max_model_len": 4096, + "max_new_tokens": 512 + } +} \ No newline at end of file diff --git a/configs/instruction_refinement_api.json b/configs/instruction_refinement_api.json new file mode 100644 index 0000000..275e8ae --- /dev/null +++ b/configs/instruction_refinement_api.json @@ -0,0 +1,14 @@ +{ + "job_type": "instruction_refinement_api", + "dataset": { + "input_path": "./train.json", + "output_path": "./train_refined.json" + }, + "inference":{ + "base_url": "ENDPOINT", + "api_key": "TOKEN", + "stream": true, + "prompt" : "Assume you are a prompt re-writing expert. Given an instruction as input, you should generate a new instruction semantically similar to the input to support the training of large language models. Transform the input raw prompt into a detailed prompt that comprehensively captures the user’s request. Make sure to maintain the original intent while significantly enhancing clarity and depth. You should place your answer enclosed within tags. The input prompt is as follows:", + "max_new_tokens": 512 + } +} \ No newline at end of file diff --git a/configs/instruction_refinement_batch.json b/configs/instruction_refinement_batch.json new file mode 100644 index 0000000..2adf877 --- /dev/null +++ b/configs/instruction_refinement_batch.json @@ -0,0 +1,22 @@ +{ + "job_type": "instruction_refinement_batch", + "dataset": { + "input_path": "./train.json", + "output_path": "./train_refined.json", + "template" : "./chat_template/chat_template_kd.jinja" + }, + "models": { + "teacher": "teacher/Qwen/Qwen2.5-7B-Instruct/" +}, + "inference": { + "prompt" : "Assume you are a prompt re-writing expert. 
Given an instruction as input, you should generate a new instruction semantically similar to the input to support the training of large language models. Transform the input raw prompt into a detailed prompt that comprehensively captures the user’s request. Make sure to maintain the original intent while significantly enhancing clarity and depth. You should place your answer enclosed within tags. The input prompt is as follows:", + "enable_chunked_prefill": true, + "seed": 777, + "gpu_memory_utilization": 0.9, + "temperature": 0.8, + "trust_remote_code": true, + "enforce_eager": false, + "max_model_len": 4096, + "max_new_tokens": 512 + } +} \ No newline at end of file diff --git a/configs/instruction_response_extraction_api.json b/configs/instruction_response_extraction_api.json new file mode 100644 index 0000000..67dd5ce --- /dev/null +++ b/configs/instruction_response_extraction_api.json @@ -0,0 +1,14 @@ +{ + "job_type": "instruction_response_extraction_api", + "dataset": { + "input_path": "./raw.json", + "output_path": "./raw_extracted.json" + }, + "inference":{ + "base_url": "ENDPOINT", + "api_key": "TOKEN", + "stream": true, + "prompt" : "Assume you are a data synthesis expert. Given plain text as input, you should generate an instruction-response pair where the instruction and the response are derived from the knowledge of the plain text to support the training of large language models. The response should properly answer the instruction. You should place your instruction enclosed within tags, and place your response enclosed within tags. 
The input plain text is as follows:", + "max_new_tokens": 1024 + } +} \ No newline at end of file diff --git a/configs/instruction_response_extraction_batch.json b/configs/instruction_response_extraction_batch.json new file mode 100644 index 0000000..d9f0c98 --- /dev/null +++ b/configs/instruction_response_extraction_batch.json @@ -0,0 +1,22 @@ +{ + "job_type": "instruction_response_extraction_batch", + "dataset": { + "input_path": "./train.json", + "output_path": "./train_extended.json", + "template" : "./chat_template/chat_template_kd.jinja" + }, + "models": { + "teacher": "teacher/Qwen/Qwen2.5-7B-Instruct/" + }, + "inference":{ + "prompt" : "Assume you are a data synthesis expert. Given plain text as input, you should generate an instruction-response pair where the instruction and the response are derived from the knowledge of the plain text to support the training of large language models. The response should properly answer the instruction. You should place your instruction enclosed within tags, and place your response enclosed within tags. 
The input plain text is as follows:", + "enable_chunked_prefill": true, + "seed": 777, + "gpu_memory_utilization": 0.9, + "temperature": 0.8, + "trust_remote_code": true, + "enforce_eager": false, + "max_model_len": 4096, + "max_new_tokens": 512 + } + } \ No newline at end of file diff --git a/configs/kd_black_box_api.json b/configs/kd_black_box_api.json new file mode 100644 index 0000000..e050e37 --- /dev/null +++ b/configs/kd_black_box_api.json @@ -0,0 +1,32 @@ +{ + "job_type": "kd_black_box_api", + "dataset": { + "instruction_path": "train.json", + "labeled_path": "train_labeled.json", + "template" : "./chat_template/chat_template_kd.jinja", + "seed": 42 + }, + "inference":{ + "base_url": "ENDPOINT", + "api_key": "TOKEN", + "stream": true, + "system_prompt" : "You are a helpful assistant.", + "max_new_tokens": 512 + }, + "models": { + "student": "student/Qwen/Qwen2.5-0.5B-Instruct/" + }, + "training": { + "output_dir": "./result/", + "num_train_epochs": 3, + "per_device_train_batch_size": 1, + "gradient_accumulation_steps": 8, + "max_length":512, + "save_steps": 1000, + "logging_steps": 1, + "learning_rate": 2e-5, + "weight_decay": 0.05, + "warmup_ratio": 0.1, + "lr_scheduler_type": "cosine" + } +} \ No newline at end of file diff --git a/configs/kd_black_box_local.json b/configs/kd_black_box_local.json new file mode 100644 index 0000000..83a60a1 --- /dev/null +++ b/configs/kd_black_box_local.json @@ -0,0 +1,36 @@ +{ + "job_type": "kd_black_box_local", + "dataset": { + "instruction_path": "train.json", + "labeled_path": "train_labeled.json", + "template" : "./chat_template/chat_template_kd.jinja", + "seed": 42 + }, + "inference":{ + "enable_chunked_prefill": true, + "seed": 777, + "gpu_memory_utilization": 0.9, + "temperature": 0.8, + "trust_remote_code": true, + "enforce_eager": false, + "max_model_len": 4096, + "max_new_tokens": 512 + }, + "models": { + "teacher": "teacher/Qwen/Qwen2.5-7B-Instruct/", + "student": "student/Qwen/Qwen2.5-0.5B-Instruct/" + }, + 
"training": { + "output_dir": "./result/", + "num_train_epochs": 3, + "per_device_train_batch_size": 1, + "gradient_accumulation_steps": 8, + "max_length":512, + "save_steps": 1000, + "logging_steps": 1, + "learning_rate": 2e-5, + "weight_decay": 0.05, + "warmup_ratio": 0.1, + "lr_scheduler_type": "cosine" + } +} \ No newline at end of file diff --git a/configs/kd_white_box.json b/configs/kd_white_box.json new file mode 100644 index 0000000..9cb13c8 --- /dev/null +++ b/configs/kd_white_box.json @@ -0,0 +1,42 @@ +{ + "job_type": "kd_white_box", + "dataset": { + "instruction_path": "train.json", + "labeled_path": "train_labeled.json", + "logits_path": "./logits.json", + "template" : "./chat_template/chat_template_kd.jinja", + "seed": 42 + }, + "inference":{ + "enable_chunked_prefill": true, + "seed": 777, + "gpu_memory_utilization": 0.9, + "temperature": 0.8, + "trust_remote_code": true, + "enforce_eager": false, + "max_model_len": 4096, + "max_new_tokens": 512, + "top_logits_num": 10 + }, + "distillation": { + "kd_ratio": 0.5, + "max_seq_length": 512, + "distillation_type": "forward_kld" + }, + "models": { + "teacher": "teacher/Qwen/Qwen2.5-7B-Instruct/", + "student": "student/Qwen/Qwen2.5-0.5B-Instruct/" + }, + "training": { + "output_dir": "./result/", + "num_train_epochs": 3, + "per_device_train_batch_size": 1, + "gradient_accumulation_steps": 8, + "save_steps": 1000, + "logging_steps": 1, + "learning_rate": 2e-5, + "weight_decay": 0.05, + "warmup_ratio": 0.1, + "lr_scheduler_type": "cosine" + } +} \ No newline at end of file diff --git a/configs/rank_dpo_api.json b/configs/rank_dpo_api.json new file mode 100644 index 0000000..4e6c551 --- /dev/null +++ b/configs/rank_dpo_api.json @@ -0,0 +1,32 @@ +{ + "job_type": "rank_dpo_api", + "dataset": { + "instruction_path": "train.json", + "labeled_path": "train_labeled.json", + "template" : "chat_template/chat_template_kd.jinja", + "seed": 42 + }, + "inference":{ + "base_url": "ENDPOINT", + "api_key": "TOKEN", + 
"stream": true, + "system_prompt" : "You are a helpful assistant.", + "max_new_tokens": 512 + }, + "models": { + "student": "student/Qwen/Qwen2.5-0.5B-Instruct/" + }, + "training": { + "output_dir": "./result/", + "num_train_epochs": 3, + "per_device_train_batch_size": 1, + "gradient_accumulation_steps": 8, + "save_steps": 1000, + "logging_steps": 1, + "beta": 0.1, + "learning_rate": 2e-5, + "weight_decay": 0.05, + "warmup_ratio": 0.1, + "lr_scheduler_type": "cosine" + } +} \ No newline at end of file diff --git a/configs/rank_dpo_local.json b/configs/rank_dpo_local.json new file mode 100644 index 0000000..5441371 --- /dev/null +++ b/configs/rank_dpo_local.json @@ -0,0 +1,37 @@ +{ + "job_type": "rank_dpo_api", + "dataset": { + "instruction_path": "train.json", + "labeled_path": "train_labeled.json", + "template" : "chat_template/chat_template_kd.jinja", + "seed": 42 + }, + "inference":{ + "system_prompt" : "You are a helpful assistant.", + "enable_chunked_prefill": true, + "seed": 777, + "gpu_memory_utilization": 0.9, + "temperature": 0.8, + "trust_remote_code": true, + "enforce_eager": false, + "max_model_len": 4096, + "max_new_tokens": 512 + }, + "models": { + "teacher": "teacher/Qwen/Qwen2.5-7B-Instruct/", + "student": "student/Qwen/Qwen2.5-0.5B-Instruct/" + }, + "training": { + "output_dir": "./result/", + "num_train_epochs": 3, + "per_device_train_batch_size": 1, + "gradient_accumulation_steps": 8, + "save_steps": 1000, + "logging_steps": 1, + "beta": 0.1, + "learning_rate": 2e-5, + "weight_decay": 0.05, + "warmup_ratio": 0.1, + "lr_scheduler_type": "cosine" + } +} \ No newline at end of file diff --git a/configs/rl_grpo.json b/configs/rl_grpo.json new file mode 100644 index 0000000..33732fd --- /dev/null +++ b/configs/rl_grpo.json @@ -0,0 +1,25 @@ +{ + "job_type": "rl_grpo", + "dataset": { + "instruction_path": "sample.json", + "template" : "chat_template_kd.jinja", + "train_ratio": 0.7, + "seed": 42 + }, + "models": { + "reward": "reward/", + "student": 
"Qwen/Qwen2.5-0.5B-Instruct" + }, + "training": { + "output_dir": "./result/", + "per_device_train_batch_size": 1, + "gradient_accumulation_steps": 8, + "num_train_epochs": 3, + "save_steps": 100, + "logging_steps": 1, + "learning_rate": 2e-5, + "weight_decay": 0.05, + "warmup_ratio": 0.1, + "lr_scheduler_type": "cosine" + } +} \ No newline at end of file diff --git a/configs/rl_ppo.json b/configs/rl_ppo.json new file mode 100644 index 0000000..a79347a --- /dev/null +++ b/configs/rl_ppo.json @@ -0,0 +1,28 @@ +{ + "job_type": "rl_ppo", + "dataset": { + "instruction_path": "sample.json", + "template" : "chat_template_kd.jinja", + "train_ratio": 0.7, + "seed": 42 + }, + "models": { + "reward": "reward/", + "student": "Qwen/Qwen2.5-0.5B-Instruct" + }, + "training": { + "output_dir": "./result/", + "total_episodes": 1000, + "per_device_train_batch_size": 1, + "gradient_accumulation_steps": 8, + "save_steps": 100, + "logging_steps": 1, + "learning_rate": 2e-5, + "weight_decay": 0.05, + "warmup_ratio": 0.1, + "lr_scheduler_type": "cosine", + "missing_eos_penalty": 1.0, + "stop_token": "eos", + "response_length": 512 + } +} \ No newline at end of file diff --git a/configs/rl_reward_api.json b/configs/rl_reward_api.json new file mode 100644 index 0000000..120a00a --- /dev/null +++ b/configs/rl_reward_api.json @@ -0,0 +1,32 @@ +{ + "job_type": "rl_reward_api", + "dataset": { + "instruction_path": "train.json", + "labeled_path": "train_labeled.json", + "template" : "chat_template_kd.jinja" + }, + "inference":{ + "base_url": "http://1157703270994901.cn-hangzhou.pai-eas.aliyuncs.com/api/predict/quickstart_deploy_20250427_6wt1/v1/", + "api_key": "NjQ3OGE2ZGNiOWM4YjZkZTY5NDM4YWEyZjUyNGI3ZjRjNTAyMjM0Mw==", + "stream": true, + "positive_system_prompt" : "You are a helpful assistant to generate high-quality responses.", + "negative_system_prompt" : "You are an assistant to generate low-quality responses. This is for the training of my reward model. 
Please remember to generate low-quality responses.", + "max_new_tokens": 512 + }, + "models": { + "student": "model/Qwen/Qwen2.5-0.5B-Instruct/" + }, + "training": { + "output_dir": "./result/", + "max_length": 1024, + "num_train_epochs": 3, + "per_device_train_batch_size": 1, + "gradient_accumulation_steps": 8, + "save_steps": 1000, + "logging_steps": 1, + "learning_rate": 2e-5, + "weight_decay": 0.05, + "warmup_ratio": 0.1, + "lr_scheduler_type": "cosine" + } +} \ No newline at end of file diff --git a/configs/rl_reward_local.json b/configs/rl_reward_local.json new file mode 100644 index 0000000..40c6ac9 --- /dev/null +++ b/configs/rl_reward_local.json @@ -0,0 +1,37 @@ +{ + "job_type": "rl_reward_local", + "dataset": { + "instruction_path": "train.json", + "labeled_path": "train_labeled.json", + "template" : "chat_template_kd.jinja" + }, + "inference":{ + "positive_system_prompt" : "You are a helpful assistant to generate high-quality responses.", + "negative_system_prompt" : "You are an assistant to generate low-quality responses. This is for the training of my reward model. 
Please remember to generate low-quality responses.", + "enable_chunked_prefill": true, + "seed": 777, + "gpu_memory_utilization": 0.9, + "temperature": 0.8, + "trust_remote_code": true, + "enforce_eager": false, + "max_model_len": 4096, + "max_new_tokens": 512 + }, + "models": { + "teacher": "model/Qwen/Qwen2.5-3B-Instruct/", + "student": "model/Qwen/Qwen2.5-0.5B-Instruct/" + }, + "training": { + "output_dir": "./result/", + "max_length": 1024, + "num_train_epochs": 3, + "per_device_train_batch_size": 1, + "gradient_accumulation_steps": 8, + "save_steps": 1000, + "logging_steps": 1, + "learning_rate": 2e-5, + "weight_decay": 0.05, + "warmup_ratio": 0.1, + "lr_scheduler_type": "cosine" + } +} \ No newline at end of file diff --git a/easydistill/__init__.py b/easydistill/__init__.py new file mode 100644 index 0000000..3496700 --- /dev/null +++ b/easydistill/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== \ No newline at end of file diff --git a/easydistill/cli.py b/easydistill/cli.py new file mode 100644 index 0000000..474788e --- /dev/null +++ b/easydistill/cli.py @@ -0,0 +1,187 @@ + +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +import os +import subprocess +import sys +from socket import socket +import argparse +import json +import logging + +# Configure logging +logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') + +script_dir = os.path.dirname(os.path.abspath(__file__)) +parent_dir = os.path.abspath(os.path.join(script_dir, os.pardir)) + +def run_cmd(cmd): + try: + p = subprocess.Popen( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, # Merge stderr into stdout + shell=True, + universal_newlines=True # Ensure output is in text mode + ) + + error_detected = False + error_keywords = [ + "ERROR", + "Error", + "error", + "Unrecognized model", + "failed", + "exception", + "Traceback" + ] + + # Read output in real-time and detect errors + while True: + line = p.stdout.readline() + if not line: + break + logging.info(line.rstrip()) # Log normally + + # Check if any error keywords are present + if any(keyword.lower() in line.lower() for keyword in error_keywords): + error_detected = True + logging.error(f"Detected error in output: {line.strip()}") + + # Wait for process to finish + returncode = p.wait() + + # If errors were detected or return code is non-zero, return False + if error_detected or returncode != 0: + logging.error(f"Command failed (returncode={returncode}, errors detected)") + return False + + return True # Return True indicates success + + except Exception as e: + logging.error(f"Unexpected error running command: {e}") + return False + 
+def process(job_type, config): + if not os.path.isabs(config): + config = os.path.join(script_dir, config) + + # Knowledge Distillation tasks + if job_type in ['kd_black_box_train_only', 'kd_white_box_train_only']: + cmd_train = [ + 'accelerate', 'launch', + '--config_file', os.path.join(parent_dir, 'configs/accelerate_config/muti_gpu.yaml'), + os.path.join(script_dir, 'kd/train.py'), + '--config', config + ] + cmd_train = ' '.join(cmd_train) + logging.info(f"Running command: {cmd_train}") + run_cmd(cmd_train) + + elif job_type in ['kd_black_box_api', 'kd_black_box_local', 'kd_white_box']: + cmd_infer = [ + 'python', os.path.join(script_dir, 'kd/infer.py'), + '--config', config + ] + cmd_infer = ' '.join(cmd_infer) + logging.info(f"Running command: {cmd_infer}") + infer_success = run_cmd(cmd_infer) + if infer_success: + cmd_train = [ + 'accelerate', 'launch', + '--config_file', os.path.join(parent_dir, 'configs/accelerate_config/muti_gpu.yaml'), + os.path.join(script_dir, 'kd/train.py'), + '--config', config + ] + cmd_train = ' '.join(cmd_train) + logging.info(f"Running command: {cmd_train}") + run_cmd(cmd_train) + else: + logging.error("Infer failed, skipping training") + + # Reinforcement Learning tasks + elif job_type in ['rl_ppo', 'rl_grpo']: + cmd = [ + 'accelerate', 'launch', + '--config_file', os.path.join(parent_dir, 'configs/accelerate_config/muti_gpu.yaml'), + os.path.join(script_dir, f'rl/{job_type.split("_")[1]}_train.py'), + '--config', config + ] + cmd = ' '.join(cmd) + logging.info(f"Running command: {cmd}") + run_cmd(cmd) + + elif job_type in ['rl_reward_api', 'rl_reward_local']: + cmd = [ + 'python', + os.path.join(script_dir, 'rl/reward_infer.py'), + '--config', config + ] + cmd = ' '.join(cmd) + logging.info(f"Running command: {cmd}") + run_cmd(cmd) + + # Instruction Processing tasks + elif job_type.startswith('instruction_'): + task_type = job_type.replace('instruction_', '') + cmd = [ + 'python', + os.path.join(script_dir, f'synthesis/synthesis_main.py'), 
+ '--config', config + ] + cmd = ' '.join(cmd) + logging.info(f"Running command: {cmd}") + run_cmd(cmd) + + # Chain of Thought tasks + elif job_type.startswith('cot_'): + task_type = job_type.replace('cot_', '') + cmd = [ + 'python', + os.path.join(script_dir, f'synthesis/synthesis_main.py'), + '--config', config + ] + cmd = ' '.join(cmd) + logging.info(f"Running command: {cmd}") + run_cmd(cmd) + + # Ranking and DPO tasks + elif job_type.startswith('rank_'): + task_type = job_type.replace('rank_', '') + cmd = [ + 'python', + os.path.join(script_dir, f'rank/{task_type}.py'), + '--config', config + ] + cmd = ' '.join(cmd) + logging.info(f"Running command: {cmd}") + run_cmd(cmd) + + else: + logging.error(f"Unknown job type: {job_type}") + sys.exit(1) + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('--config', type=str, required=True, help='path to the json config file') + args = parser.parse_args() + config_path = args.config + config = json.load(open(config_path)) + job_type = config["job_type"] + process(job_type, config_path) + +if __name__ == '__main__': + main() diff --git a/easydistill/kd/infer.py b/easydistill/kd/infer.py new file mode 100644 index 0000000..c631cdf --- /dev/null +++ b/easydistill/kd/infer.py @@ -0,0 +1,247 @@ + +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# ==============================================================================
"""Teacher-model inference for knowledge distillation (KD).

Supported job types (``config["job_type"]``):
  * ``kd_black_box_api``   - query a remote OpenAI-compatible teacher.
  * ``kd_black_box_local`` - run the teacher locally with vLLM, keep text only.
  * ``kd_white_box``       - run the teacher locally with vLLM and also dump
                             top-k token probabilities for logit distillation.
"""

import argparse
import json
import logging
import math
import os

import jsonlines
import torch
from jinja2 import Environment, FileSystemLoader
from openai import OpenAI
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer
from vllm import LLM, SamplingParams


logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


def read_json_field(filename, field_name='instruction'):
    """Return every ``field_name`` value from a JSON file holding a list of objects.

    Items missing the field are skipped. Returns an empty list (not ``None``)
    on any error so callers can iterate over the result unconditionally.
    """
    try:
        with open(filename, 'r') as file:
            data = json.load(file)
        return [item[field_name] for item in data if field_name in item]
    except FileNotFoundError:
        logging.error("The file was not found.")
    except json.JSONDecodeError:
        logging.error("There was an error decoding the JSON file.")
    except Exception as e:
        logging.error(f"An error occurred: {e}")
    return []


def write_data_to_json_file(data, file_path):
    """Serialize ``data`` to ``file_path`` as pretty-printed UTF-8 JSON."""
    try:
        with open(file_path, 'w') as file:
            json.dump(data, file, ensure_ascii=False, indent=4)
        logging.info(f"Data successfully written to {file_path}")
    except Exception as e:
        logging.error(f"An error occurred: {e}")


def load_tokenizer_and_vllm(config, eos_token=None):
    """Load the teacher tokenizer and a vLLM engine across all visible GPUs.

    Args:
        config: parsed job config; reads ``models.teacher`` and ``inference.*``.
        eos_token: optional explicit EOS token overriding the tokenizer's own.

    Returns:
        ``(tokenizer, llm)`` tuple.

    Raises:
        ValueError: if no EOS token can be determined.
    """
    teacher_model_path = config["models"]["teacher"]
    logging.info(f"Loading ckpt and tokenizer: {teacher_model_path}")
    tokenizer = AutoTokenizer.from_pretrained(teacher_model_path, trust_remote_code=True)
    tokenizer.padding_side = "left"
    if eos_token:
        eos_token_id = tokenizer.convert_tokens_to_ids(eos_token)
        logging.info(f"eos_token {eos_token} from user input")
    elif hasattr(tokenizer, "eos_token_id") and tokenizer.eos_token_id:
        logging.info(f"Initial eos_token_id {tokenizer.eos_token_id} from tokenizer")
        eos_token_id = tokenizer.eos_token_id
        eos_token = tokenizer.convert_ids_to_tokens(eos_token_id)
    else:
        raise ValueError("No available eos_token or eos_token_id.")
    try:
        tokenizer.eos_token = eos_token
        tokenizer.eos_token_id = eos_token_id
        # Pad with EOS so left-padded batches terminate cleanly.
        tokenizer.pad_token = eos_token
        tokenizer.pad_token_id = eos_token_id
    except Exception as e:  # some tokenizers expose these as read-only properties
        logging.warning(f"Cannot set tokenizer.eos_token: {e}")
    logging.info(f"tokenizer's eos_token: {tokenizer.eos_token}, pad_token: {tokenizer.pad_token}")
    logging.info(f"tokenizer's eos_token_id: {tokenizer.eos_token_id}, pad_token_id: {tokenizer.pad_token_id}")
    num_gpus = torch.cuda.device_count()
    llm = LLM(
        model=teacher_model_path,
        tensor_parallel_size=num_gpus,
        enable_chunked_prefill=config["inference"]["enable_chunked_prefill"],
        gpu_memory_utilization=config["inference"]["gpu_memory_utilization"],
        trust_remote_code=config["inference"]["trust_remote_code"],
        dtype=torch.bfloat16,
        enforce_eager=config["inference"]["enforce_eager"],
        max_model_len=config["inference"]["max_model_len"],
    )
    logging.info("vLLM model loaded successfully")
    return tokenizer, llm


def generate_teacher_response_batch(tokenizer, llm, data_list, config, batch_size=32):
    """Generate black-box teacher responses for every instruction in ``data_list``.

    Prompts are rendered through the Jinja chat template named by
    ``config["dataset"]["template"]``; results are written to
    ``config["dataset"]["labeled_path"]`` as ``{'instruction', 'output'}`` pairs.
    """
    full_path = config["dataset"]["template"]
    template_dir = os.path.dirname(full_path)
    template_file = os.path.basename(full_path)
    env = Environment(loader=FileSystemLoader(template_dir))
    template = env.get_template(template_file)
    outcomes = []
    batches = [data_list[i:i + batch_size] for i in range(0, len(data_list), batch_size)]
    for batch in tqdm(batches, desc="Generating responses"):
        new_batch = []
        for sample in batch:
            message = {"role": "user", "content": sample}
            full_text = template.render(
                message=message,
                add_generation_prompt=True,
                add_output=False
            )
            new_batch.append(full_text)
        outputs = llm.generate(
            new_batch,
            SamplingParams(
                n=1,
                top_k=1,  # greedy decoding: deterministic labels for distillation
                temperature=config["inference"]["temperature"],
                seed=config["inference"]["seed"],
                skip_special_tokens=False,
                ignore_eos=False,
                max_tokens=config["inference"]["max_new_tokens"]
            )
        )
        responses = [output.outputs[0].text for output in outputs]
        # extend() instead of repeated list concatenation (avoids O(n^2) copying)
        outcomes.extend(
            {'instruction': batch[i], 'output': responses[i]} for i in range(len(batch))
        )
    write_data_to_json_file(outcomes, config["dataset"]["labeled_path"])


def generate_teacher_logits_batch(tokenizer, llm, data_list, config, batch_size=32):
    """White-box KD: dump the teacher's top-k token probabilities per position.

    Each generated token position is stored as ``{token_id: probability}``
    (log-probs are exponentiated). One JSONL row per sample is appended to
    ``config["dataset"]["logits_path"]``.
    """
    full_path = config["dataset"]["template"]
    template_dir = os.path.dirname(full_path)
    template_file = os.path.basename(full_path)
    env = Environment(loader=FileSystemLoader(template_dir))
    template = env.get_template(template_file)

    batches = [data_list[i:i + batch_size] for i in range(0, len(data_list), batch_size)]
    for batch in tqdm(batches, desc="Generating responses"):
        new_batch = []
        for sample in batch:
            message = {"role": "user", "content": sample}
            full_text = template.render(
                message=message,
                add_generation_prompt=True,
                add_output=False
            )
            new_batch.append(full_text)

        outputs = llm.generate(
            new_batch,  # pass the raw rendered text directly
            SamplingParams(
                n=1,
                top_k=1,
                temperature=config["inference"]["temperature"],
                seed=config["inference"]["seed"],
                skip_special_tokens=False,
                ignore_eos=True,
                max_tokens=config["inference"]["max_new_tokens"],
                logprobs=config["inference"]["top_logits_num"],
            )
        )
        logits = [output.outputs[0].logprobs for output in outputs]
        # Convert vLLM Logprob objects to plain probabilities in place.
        for logit in logits:
            for pos in logit:
                for k, v in pos.items():
                    pos[k] = math.exp(v.logprob)

        # NOTE(review): mode='a' appends across runs — re-running without
        # deleting the old logits file duplicates rows; confirm intended
        # (it does allow resuming an interrupted dump).
        with jsonlines.open(config["dataset"]["logits_path"], mode='a') as writer:
            for row in logits:
                writer.write(row)


def generate_teacher_response_api(data_list, config):
    """Generate teacher responses through an OpenAI-compatible endpoint.

    Uses the first model advertised by the endpooint's model list. Writes
    ``{'instruction', 'output'}`` pairs to ``config["dataset"]["labeled_path"]``.
    """
    client = OpenAI(
        api_key=config["inference"]["api_key"],
        base_url=config["inference"]["base_url"]
    )
    models = client.models.list()
    model = models.data[0].id
    logging.info(model)
    system_prompt = config["inference"]["system_prompt"]
    stream = config["inference"]["stream"]
    outcomes = []
    for sample in tqdm(data_list, desc="Call remote model and generating responses"):
        if system_prompt == "":
            message = [
                {'role': 'user', 'content': sample}
            ]
        else:
            message = [
                {'role': 'system', 'content': system_prompt},
                {'role': 'user', 'content': sample}
            ]
        completion = client.chat.completions.create(
            messages=message,
            model=model,
            max_completion_tokens=config["inference"]["max_new_tokens"],
            stream=stream
        )
        if stream:
            result = ""
            for chunk in completion:
                # delta.content is None on role/terminal chunks — guard it.
                result += chunk.choices[0].delta.content or ""
        else:
            result = completion.choices[0].message.content
        outcomes.append({'instruction': sample, 'output': result})
    write_data_to_json_file(outcomes, config["dataset"]["labeled_path"])


def infer_with_teacher_model(config):
    """Dispatch to the right generation routine based on ``config["job_type"]``."""
    logging.info('Generating distillation data from the teacher model!')
    data_list = read_json_field(config["dataset"]["instruction_path"])
    try:
        job_type = config["job_type"]
        if job_type == "kd_black_box_api":
            generate_teacher_response_api(data_list, config)
        elif job_type == "kd_black_box_local":
            tokenizer, llm = load_tokenizer_and_vllm(config)
            generate_teacher_response_batch(tokenizer, llm, data_list, config)
        elif job_type == "kd_white_box":
            tokenizer, llm = load_tokenizer_and_vllm(config)
            generate_teacher_logits_batch(tokenizer, llm, data_list, config)
        else:
            logging.error(f"Invalid job type: {job_type}")
            raise ValueError(f"Invalid job type: {job_type}")
    except ValueError as e:
        logging.error(f"Training job terminated: {e}")
        return


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, required=True, help='path to the json config file')
    args = parser.parse_args()
    with open(args.config) as f:  # close the handle instead of leaking it
        config = json.load(f)
    infer_with_teacher_model(config)


if __name__ == "__main__":
    main()
# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SFT training for KD: black-box (text-only) or white-box (teacher logits)."""

import json
import argparse
import logging
import os
from jinja2 import Environment, BaseLoader, FileSystemLoader
from datasets import load_dataset, Dataset
from typing import Optional, Dict, Union, List
from transformers import PreTrainedModel, PreTrainedTokenizerBase, AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from trl import SFTTrainer, SFTConfig
import torch
import jsonlines
import numpy as np
import torch.nn.functional as F


class DistillSFTTrainer(SFTTrainer):
    """SFTTrainer variant that mixes LM loss with a white-box KD loss.

    Teacher top-k probabilities are read from a JSONL file (one row per
    training sample, in dataset order) and aligned to the student's batch by
    global step and data-parallel rank — the dataset therefore must NOT be
    shuffled after the logits were dumped.
    """

    def __init__(
        self,
        logits_dir: str = None,
        teacher_vocab_size=None,
        kd_ratio: float = 0.5,
        max_seq_length: int = 1024,
        distillation_type: str = "forward_kld",
        **kwargs
    ):
        super().__init__(**kwargs)
        self.logits_dir = logits_dir
        self.teacher_vocab_size = teacher_vocab_size
        self.kd_ratio = kd_ratio
        self.max_seq_length = max_seq_length
        self.distillation_type = distillation_type
        # Load every teacher-logit row up front; rows are sparse dicts
        # {token_id: probability} per generated position.
        self.teacher_logits = []
        with jsonlines.open(self.logits_dir) as reader:
            for obj in reader:
                self.teacher_logits.append(obj)

    def _load_teacher_logits(self, batch_size: int, it: int, dp_rank: int, device: torch.device, no_model_batch: Dict):
        """Densify the teacher rows for global step ``it`` into a (B, L, V) tensor.

        The sparse {token_id: prob} dicts are scattered into a zero array and
        then right-shifted so positions line up with the student's labels.
        """
        start_idx = dp_rank * batch_size + batch_size * it
        end_idx = dp_rank * batch_size + batch_size * (it + 1)
        loaded_data = self.teacher_logits[start_idx:end_idx]
        arr = np.zeros((batch_size, self.max_seq_length, self.teacher_vocab_size))
        for i in range(len(loaded_data)):
            for j in range(len(loaded_data[i])):
                keys = np.array(list(loaded_data[i][j].keys()), dtype=int)
                values = np.array(list(loaded_data[i][j].values()))
                arr[i, j, keys] = values
        logits_tensor = torch.tensor(arr, dtype=torch.bfloat16, device=device)
        return self._shift_tensor_right(logits_tensor, no_model_batch['label'], pad_value=0)

    def _compute_white_box_distillation_loss(self, student_logits: torch.Tensor, teacher_logits: torch.Tensor, labels: Optional[torch.Tensor]):
        """KL loss between student logits and (already-softmaxed) teacher probs.

        ``labels == -100`` positions are masked out of the loss.
        """
        student_logits = student_logits[:, :self.max_seq_length, :]
        # Truncate teacher to the student's sequence length and vocab size.
        teacher_probs = teacher_logits[:, :student_logits.size(1), :student_logits.size(-1)]
        mask = (labels != -100).float() if labels is not None else torch.ones_like(student_logits[:, :, 0])

        if self.distillation_type == "forward_kld":
            # Forward KLD: KL(teacher || student) — student covers teacher's modes.
            loss = F.kl_div(
                F.log_softmax(student_logits, dim=-1),
                teacher_probs,
                reduction='none',
                log_target=False
            ).sum(dim=-1) / torch.sum(mask.view(-1), dim=0)
        elif self.distillation_type == "reverse_kld":
            # Reverse KLD: KL(student || teacher) — mode-seeking behaviour.
            loss = F.kl_div(
                torch.log(teacher_probs.clamp(min=1e-10)),  # avoid log(0)
                F.softmax(student_logits, dim=-1),
                reduction='none',
                log_target=False
            ).sum(dim=-1) / torch.sum(mask.view(-1), dim=0)
        else:
            raise ValueError(f"Unsupported distillation type: {self.distillation_type}. Use 'forward_kld' or 'reverse_kld'")

        # NOTE(review): the per-position loss was already divided by the total
        # mask count above, and is averaged over mask.sum() again here — the
        # loss is scaled down by an extra factor of mask.sum(). Behaviour kept
        # as-is (only affects the effective kd_ratio); confirm intended.
        return (loss * mask).sum() / mask.sum()

    @staticmethod
    def _shift_tensor_right(inputs: torch.Tensor, labels: torch.Tensor, pad_value: float = 0.0):
        """Shift each row of ``inputs`` right by its count of leading -100 labels.

        Aligns teacher probabilities (which start at the first generated token)
        with the label tensor (which pads the prompt with -100).
        """
        batch_size, seqlen, vocab_size = inputs.shape
        device = inputs.device
        labels_ne = labels != -100
        # Index of the first supervised token per row = shift distance.
        shift_distances = torch.argmax(labels_ne.int(), dim=1)
        idx = torch.arange(seqlen, device=device).unsqueeze(0).expand(batch_size, seqlen)
        shifted_idx = idx - shift_distances.unsqueeze(1)
        mask = shifted_idx >= 0
        shifted_idx = shifted_idx.clamp(min=0)
        inputs_flat = inputs.view(batch_size, seqlen, vocab_size)
        shifted_idx = shifted_idx.unsqueeze(2).expand(-1, -1, vocab_size)
        gathered = torch.gather(inputs_flat, 1, shifted_idx)
        mask = mask.unsqueeze(2).expand(-1, -1, vocab_size)
        return torch.where(mask, gathered, torch.full_like(gathered, pad_value))

    def compute_loss(self, model: PreTrainedModel, inputs: Dict[str, torch.Tensor], return_outputs=False, num_items_in_batch=None):
        """Blend the standard LM loss with the white-box KD loss via kd_ratio."""
        outputs = model(**inputs)
        lm_loss = outputs.loss
        if self.logits_dir:
            teacher_logits = self._load_teacher_logits(
                batch_size=inputs['input_ids'].size(0),
                it=self.state.global_step,
                dp_rank=torch.distributed.get_rank() if torch.distributed.is_initialized() else 0,
                device=model.device,
                no_model_batch={'label': inputs.get('labels', None)}
            )
            distil_loss = self._compute_white_box_distillation_loss(
                student_logits=outputs.logits,
                teacher_logits=teacher_logits,
                labels=inputs.get('labels', None)
            )
            total_loss = (1 - self.kd_ratio) * lm_loss + self.kd_ratio * distil_loss
        else:
            total_loss = lm_loss
        return (total_loss, outputs) if return_outputs else total_loss


def formatting_func(examples):
    """Render one sample through the module-level chat ``template`` (set in train()).

    Returns the full prompt+output text, or "" when rendering fails so the
    trainer can skip the sample.
    """
    env = Environment(loader=BaseLoader())
    try:
        message = {"content": examples["instruction"], "output": examples["output"]}
        full_text = template.render(
            message=message,
            add_generation_prompt=False,
            add_output=True
        )
        return full_text
    except Exception as e:
        logging.warning(f"Error processing sample: {str(e)}")
        return ""


def train(config):
    """Run black-box SFT or white-box distillation per ``config["job_type"]``."""
    dataset = load_dataset("json", data_files=config["dataset"]["labeled_path"])

    student_tokenizer = AutoTokenizer.from_pretrained(
        config["models"]["student"],
        trust_remote_code=True
    )
    student_model = AutoModelForCausalLM.from_pretrained(
        config["models"]["student"],
        trust_remote_code=True
    )

    # formatting_func is called by the trainer without extra context, so the
    # rendered template is shared through a module-level global.
    global template
    full_path = config["dataset"]["template"]
    template_dir = os.path.dirname(full_path)
    template_file = os.path.basename(full_path)
    env = Environment(loader=FileSystemLoader(template_dir))
    template = env.get_template(template_file)
    training_arguments = SFTConfig(**config["training"])

    try:
        job_type = config["job_type"]
        if "kd_black_box" in job_type:
            # Safe to shuffle: black-box KD has no positional teacher data.
            dataset = dataset.shuffle(seed=config["dataset"]["seed"])
            trainer = SFTTrainer(
                model=student_model,
                processing_class=student_tokenizer,
                args=training_arguments,
                train_dataset=dataset["train"],
                formatting_func=formatting_func
            )
        elif "kd_white_box" in job_type:
            # Deliberately NOT shuffled: teacher logits are matched to samples
            # by index order in DistillSFTTrainer._load_teacher_logits.
            with open(os.path.join(config["models"]["teacher"], 'config.json')) as f:
                teacher_vocab_size = json.load(f)['vocab_size']
            trainer = DistillSFTTrainer(
                logits_dir=config["dataset"]["logits_path"],
                teacher_vocab_size=teacher_vocab_size,
                kd_ratio=config["distillation"]["kd_ratio"],
                max_seq_length=config["distillation"]["max_seq_length"],
                distillation_type=config["distillation"].get("distillation_type", "forward_kld"),
                model=student_model,
                processing_class=student_tokenizer,
                args=training_arguments,
                train_dataset=dataset["train"],
                formatting_func=formatting_func
            )
        else:
            logging.error(f"Invalid job type: {job_type}")
            raise ValueError(f"Invalid job type: {job_type}")
    except ValueError as e:
        logging.error(f"Training job terminated: {e}")
        return

    trainer.train()
    trainer.save_model(config["training"]["output_dir"])
    student_tokenizer.save_pretrained(config["training"]["output_dir"])


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, required=True, help='path to the json config file')
    args = parser.parse_args()
    with open(args.config) as f:  # close the handle instead of leaking it
        config = json.load(f)
    train(config)


if __name__ == "__main__":
    main()
# ==============================================================================
"""Preference-pair generation for DPO: teacher = chosen, student = rejected."""

import json
import argparse
import logging
import os
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from vllm import LLM, SamplingParams
from jinja2 import Environment, FileSystemLoader
from tqdm import tqdm
from openai import OpenAI


logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


def read_json_field(filename, field_name='prompt'):
    """Return every ``field_name`` value from a JSON file holding a list of objects.

    Returns an empty list (not ``None``) on any error so callers can iterate
    over the result unconditionally.
    """
    try:
        with open(filename, 'r') as file:
            data = json.load(file)
        return [item[field_name] for item in data if field_name in item]
    except FileNotFoundError:
        logging.error("The file was not found.")
    except json.JSONDecodeError:
        logging.error("There was an error decoding the JSON file.")
    except Exception as e:
        logging.error(f"An error occurred: {e}")
    return []


def write_data_to_json_file(data, file_path):
    """Serialize ``data`` to ``file_path`` as pretty-printed UTF-8 JSON."""
    try:
        with open(file_path, 'w') as file:
            json.dump(data, file, ensure_ascii=False, indent=4)
        logging.info(f"Data successfully written to {file_path}")
    except Exception as e:
        logging.error(f"An error occurred: {e}")


def load_tokenizer_and_vllm(config, eos_token=None, is_teacher_model=True):
    """Load tokenizer + vLLM engine for either the teacher or the student model.

    Raises ``ValueError`` if no EOS token can be determined.
    """
    if is_teacher_model:
        model_path = config["models"]["teacher"]
    else:
        model_path = config["models"]["student"]
    logging.info(f"Loading ckpt and tokenizer: {model_path}")
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
    tokenizer.padding_side = "left"
    if eos_token:
        eos_token_id = tokenizer.convert_tokens_to_ids(eos_token)
        logging.info(f"eos_token {eos_token} from user input")
    elif hasattr(tokenizer, "eos_token_id") and tokenizer.eos_token_id:
        logging.info(f"Initial eos_token_id {tokenizer.eos_token_id} from tokenizer")
        eos_token_id = tokenizer.eos_token_id
        eos_token = tokenizer.convert_ids_to_tokens(eos_token_id)
    else:
        raise ValueError("No available eos_token or eos_token_id.")
    try:
        tokenizer.eos_token = eos_token
        tokenizer.eos_token_id = eos_token_id
        tokenizer.pad_token = eos_token
        tokenizer.pad_token_id = eos_token_id
    except Exception as e:  # some tokenizers expose these as read-only properties
        logging.warning(f"Cannot set tokenizer.eos_token: {e}")
    logging.info(f"tokenizer's eos_token: {tokenizer.eos_token}, pad_token: {tokenizer.pad_token}")
    logging.info(f"tokenizer's eos_token_id: {tokenizer.eos_token_id}, pad_token_id: {tokenizer.pad_token_id}")
    num_gpus = torch.cuda.device_count()
    llm = LLM(
        model=model_path,
        tensor_parallel_size=num_gpus,
        enable_chunked_prefill=config["inference"]["enable_chunked_prefill"],
        gpu_memory_utilization=config["inference"]["gpu_memory_utilization"],
        trust_remote_code=config["inference"]["trust_remote_code"],
        dtype=torch.bfloat16,
        enforce_eager=config["inference"]["enforce_eager"],
        max_model_len=config["inference"]["max_model_len"],
    )
    logging.info("vLLM model loaded successfully")
    return tokenizer, llm


def generate_teacher_student_response_api(data_list, config):
    """Build DPO pairs: remote teacher → 'chosen', local HF student → 'rejected'.

    Writes ``{'prompt', 'chosen', 'rejected'}`` records to
    ``config["dataset"]["labeled_path"]``.
    """
    client = OpenAI(
        api_key=config["inference"]["api_key"],
        base_url=config["inference"]["base_url"]
    )
    models = client.models.list()
    model = models.data[0].id
    logging.info(model)
    system_prompt = config["inference"]["system_prompt"]
    stream = config["inference"]["stream"]

    # Student generates locally through plain transformers (no vLLM here).
    student_tokenizer = AutoTokenizer.from_pretrained(
        config["models"]["student"],
        trust_remote_code=True
    )
    student_model = AutoModelForCausalLM.from_pretrained(
        config["models"]["student"],
        device_map="auto",
        trust_remote_code=True
    )
    outcomes = []
    for sample in tqdm(data_list, desc="Call remote model and generating responses"):
        # Teacher ("chosen") response.
        if system_prompt == "":
            message = [
                {'role': 'user', 'content': sample}
            ]
        else:
            message = [
                {'role': 'system', 'content': system_prompt},
                {'role': 'user', 'content': sample}
            ]
        completion = client.chat.completions.create(
            messages=message,
            model=model,
            max_completion_tokens=config["inference"]["max_new_tokens"],
            stream=stream,
        )
        if stream:
            result = ""
            for chunk in completion:
                # delta.content is None on role/terminal chunks — guard it.
                result += chunk.choices[0].delta.content or ""
        else:
            result = completion.choices[0].message.content

        # Student ("rejected") response.
        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": sample}
        ]
        text = student_tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )
        model_inputs = student_tokenizer([text], return_tensors="pt").to(student_model.device)

        generated_ids = student_model.generate(
            **model_inputs,
            max_new_tokens=config["inference"]["max_new_tokens"]
        )
        # Strip the prompt tokens; keep only the newly generated suffix.
        generated_ids = [
            output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
        ]

        rejected = student_tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
        outcomes.append({'prompt': sample, 'chosen': result, 'rejected': rejected})
    write_data_to_json_file(outcomes, config["dataset"]["labeled_path"])


def generate_model_response_batch(tokenizer, llm, data_list, config, batch_size=32, is_teacher_model=True):
    """Generate responses with vLLM; tag them 'chosen' (teacher) or 'rejected' (student)."""
    full_path = config["dataset"]["template"]
    template_dir = os.path.dirname(full_path)
    template_file = os.path.basename(full_path)
    env = Environment(loader=FileSystemLoader(template_dir))
    template = env.get_template(template_file)
    outcomes = []
    batches = [data_list[i:i + batch_size] for i in range(0, len(data_list), batch_size)]
    for batch in tqdm(batches, desc="Generating responses"):
        new_batch = []
        for sample in batch:
            message = {"role": "user", "content": sample}
            full_text = template.render(
                message=message,
                add_generation_prompt=True,
                add_output=False
            )
            new_batch.append(full_text)
        model_outputs = llm.generate(
            new_batch,
            SamplingParams(
                n=1,
                top_k=1,
                temperature=config["inference"]["temperature"],
                seed=config["inference"]["seed"],
                skip_special_tokens=False,
                ignore_eos=False,
                max_tokens=config["inference"]["max_new_tokens"]
            )
        )
        model_responses = [output.outputs[0].text for output in model_outputs]
        response_key = 'chosen' if is_teacher_model else 'rejected'
        # extend() instead of repeated list concatenation (avoids O(n^2) copying)
        outcomes.extend(
            {'prompt': batch[i], response_key: model_responses[i]} for i in range(len(batch))
        )
    return outcomes


def merge_outcomes(teacher_outcomes, student_outcomes, config):
    """Join teacher and student outcomes on 'prompt' and write DPO triples."""
    try:
        student_dict = {item['prompt']: item['rejected'] for item in student_outcomes}
        merged_outcomes = []
        for teacher_item in teacher_outcomes:
            prompt = teacher_item['prompt']
            if prompt in student_dict:
                merged_outcomes.append({
                    'prompt': prompt,
                    'chosen': teacher_item['chosen'],
                    'rejected': student_dict[prompt]
                })
        with open(config["dataset"]["labeled_path"], 'w') as file:
            json.dump(merged_outcomes, file, ensure_ascii=False, indent=4)
    except Exception as e:
        # use logging (consistent with the rest of the module) instead of print
        logging.error(f"An error occurred: {e}")


def infer_with_teacher_model(config):
    """Dispatch pair generation based on ``config["job_type"]``."""
    logging.info('Generating distillation data from the teacher model!')
    data_list = read_json_field(config["dataset"]["instruction_path"])
    try:
        job_type = config["job_type"]
        if job_type == "rank_dpo_api":
            generate_teacher_student_response_api(data_list, config)
        elif job_type == "rank_dpo_local":
            teacher_tokenizer, teacher_llm = load_tokenizer_and_vllm(config, is_teacher_model=True)
            teacher_outcomes = generate_model_response_batch(teacher_tokenizer, teacher_llm, data_list, config, is_teacher_model=True)
            del teacher_llm  # free GPU memory before loading the student engine
            student_tokenizer, student_llm = load_tokenizer_and_vllm(config, is_teacher_model=False)
            student_outcomes = generate_model_response_batch(student_tokenizer, student_llm, data_list, config, is_teacher_model=False)
            del student_llm
            merge_outcomes(teacher_outcomes, student_outcomes, config)
        else:
            logging.error(f"Invalid job type: {job_type}")
            raise ValueError(f"Invalid job type: {job_type}")
    except ValueError as e:
        logging.error(f"Training job terminated: {e}")
        return


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, required=True, help='path to the json config file')
    args = parser.parse_args()
    with open(args.config) as f:  # close the handle instead of leaking it
        config = json.load(f)
    infer_with_teacher_model(config)


if __name__ == "__main__":
    main()
# ==============================================================================
"""DPO training of the student model on teacher/student preference pairs."""

import json
import argparse
import logging
import os
from jinja2 import Environment, BaseLoader, FileSystemLoader
from datasets import load_dataset, Dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import DPOTrainer, DPOConfig
import copy


def process_dataset(dataset_path, dataset_seed, env, template):
    """Render prompt/chosen/rejected triples through the chat template.

    The rendered prompt prefix is stripped from the chosen/rejected texts so
    DPOTrainer receives completion-only strings. Malformed samples are logged
    and skipped.
    """
    with open(dataset_path, 'r') as file:
        examples = json.load(file)
    output_text = {
        "prompt": [],
        "chosen": [],
        "rejected": []
    }
    for i in range(len(examples)):
        try:
            prompt_message = {"content": examples[i]["prompt"]}
            prompt = template.render(message=prompt_message, add_generation_prompt=False, add_output=False)

            chosen_message = {"content": examples[i]["prompt"], "output": examples[i]["chosen"]}
            chosen = template.render(message=chosen_message, add_generation_prompt=False, add_output=True)
            chosen = chosen[len(prompt):]  # keep only the completion part

            rejected_message = {"content": examples[i]["prompt"], "output": examples[i]["rejected"]}
            rejected = template.render(message=rejected_message, add_generation_prompt=False, add_output=True)
            rejected = rejected[len(prompt):]

            output_text["prompt"].append(prompt)
            output_text["chosen"].append(chosen)
            output_text["rejected"].append(rejected)
        except Exception as e:  # was a bare except that hid the actual error
            logging.warning(f"Error processing sample: {e}")

    dataset = Dataset.from_dict(output_text)
    dataset = dataset.shuffle(seed=dataset_seed)
    return dataset


def train(config):
    """Run DPO with the frozen student copy as the reference model."""
    dataset_path = config["dataset"]["labeled_path"]
    dataset_seed = config["dataset"]["seed"]

    full_path = config["dataset"]["template"]
    template_dir = os.path.dirname(full_path)
    template_file = os.path.basename(full_path)
    env = Environment(loader=FileSystemLoader(template_dir))
    template = env.get_template(template_file)
    dataset = process_dataset(dataset_path, dataset_seed, env, template)

    student_tokenizer = AutoTokenizer.from_pretrained(
        config["models"]["student"],
        trust_remote_code=True
    )
    student_model = AutoModelForCausalLM.from_pretrained(
        config["models"]["student"],
        trust_remote_code=True
    )

    training_arguments = DPOConfig(**config["training"])
    trainer = DPOTrainer(
        student_model,
        # frozen copy of the initial policy as the DPO reference model
        ref_model=copy.deepcopy(student_model),
        args=training_arguments,
        train_dataset=dataset,
        processing_class=student_tokenizer
    )

    trainer.train()
    trainer.save_model(config["training"]["output_dir"])
    student_tokenizer.save_pretrained(config["training"]["output_dir"])


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, required=True, help='path to the json config file')
    args = parser.parse_args()
    with open(args.config) as f:  # close the handle instead of leaking it
        config = json.load(f)
    train(config)


if __name__ == "__main__":
    main()
# ==============================================================================
"""GRPO training of the student model against a sequence-classification reward."""

import json
import argparse
import logging
import os
import random
from jinja2 import Environment, BaseLoader, FileSystemLoader
from datasets import load_dataset, Dataset
from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer
from trl import GRPOConfig, GRPOTrainer


def process_dataset(dataset_path, dataset_seed, env, template, train_ratio):
    """Render prompts through the chat template and split train/eval.

    The split is seeded with ``dataset_seed`` for reproducibility; malformed
    samples are logged and skipped.
    """
    examples = []
    try:
        with open(dataset_path, 'r') as file:
            examples = json.load(file)
    except FileNotFoundError:
        logging.error(f"Error: The file '{dataset_path}' was not found.")
    except json.JSONDecodeError:
        logging.error(f"Error: The file '{dataset_path}' is not a valid JSON file.")
    except Exception as e:
        logging.error(f"An unexpected error occurred: {e}")

    output_dataset = []
    for i in range(len(examples)):
        try:
            message = {"content": examples[i]["prompt"]}
            rendered = template.render(message=message, add_generation_prompt=True, add_output=False)
            output_dataset.append({"prompt": rendered})
        except Exception as e:  # was a bare except that hid the actual error
            logging.warning(f"Error processing sample: {e}")

    # BUGFIX: seed must be set BEFORE shuffling, otherwise the train/eval
    # split is non-reproducible despite the configured seed.
    random.seed(dataset_seed)
    random.shuffle(output_dataset)
    split_index = int(len(output_dataset) * train_ratio)
    train_list = output_dataset[:split_index]
    eval_list = output_dataset[split_index:]

    return Dataset.from_list(train_list), Dataset.from_list(eval_list)


def train(config):
    """Load the student policy and reward model, then run GRPO."""
    dataset_path = config["dataset"]["instruction_path"]
    dataset_seed = config["dataset"]["seed"]
    train_ratio = config["dataset"]["train_ratio"]

    full_path = config["dataset"]["template"]
    template_dir = os.path.dirname(full_path)
    template_file = os.path.basename(full_path)
    env = Environment(loader=FileSystemLoader(template_dir))
    template = env.get_template(template_file)

    tokenizer = AutoTokenizer.from_pretrained(
        config["models"]["student"],
        trust_remote_code=True
    )
    train_dataset, eval_dataset = process_dataset(dataset_path, dataset_seed, env, template, train_ratio)
    print(train_dataset)
    print(eval_dataset)

    reward_model_path = config["models"]["reward"]
    sft_model_path = config["models"]["student"]
    # Scalar reward head (num_labels=1) scoring whole sequences.
    reward_model = AutoModelForSequenceClassification.from_pretrained(
        reward_model_path, trust_remote_code=True, num_labels=1
    )
    sft_model = AutoModelForCausalLM.from_pretrained(
        sft_model_path, trust_remote_code=True
    )

    training_arguments = GRPOConfig(**config["training"])
    trainer = GRPOTrainer(
        args=training_arguments,
        processing_class=tokenizer,
        model=sft_model,
        reward_funcs=reward_model,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset
    )
    trainer.train()
    trainer.save_model(config["training"]["output_dir"])
    tokenizer.save_pretrained(config["training"]["output_dir"])


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, required=True, help='path to the json config file')
    args = parser.parse_args()
    with open(args.config) as f:  # close the handle instead of leaking it
        config = json.load(f)
    train(config)


if __name__ == "__main__":
    main()
# ==============================================================================
"""PPO training of the student model with reward + value heads from the reward ckpt."""

import json
import argparse
import logging
import os
import random
from jinja2 import Environment, BaseLoader, FileSystemLoader
from datasets import load_dataset, Dataset
from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer
from trl import PPOConfig, PPOTrainer


def process_dataset(dataset_path, dataset_seed, env, template, tokenizer, train_ratio):
    """Render + tokenize instructions and split train/eval.

    PPOTrainer consumes pre-tokenized ``input_ids``. The split is seeded with
    ``dataset_seed`` for reproducibility; malformed samples are logged and
    skipped.
    """
    examples = []
    try:
        with open(dataset_path, 'r') as file:
            examples = json.load(file)
    except FileNotFoundError:
        logging.error(f"Error: The file '{dataset_path}' was not found.")
    except json.JSONDecodeError:
        logging.error(f"Error: The file '{dataset_path}' is not a valid JSON file.")
    except Exception as e:
        logging.error(f"An unexpected error occurred: {e}")

    output_dataset = []
    for i in range(len(examples)):
        try:
            message = {"content": examples[i]["instruction"]}
            rendered = template.render(message=message, add_generation_prompt=True, add_output=False)
            tokens = tokenizer.encode(rendered)
            output_dataset.append({"input_ids": tokens})
        except Exception as e:  # was a bare except that hid the actual error
            logging.warning(f"Error processing sample: {e}")

    # BUGFIX: seed must be set BEFORE shuffling, otherwise the train/eval
    # split is non-reproducible despite the configured seed.
    random.seed(dataset_seed)
    random.shuffle(output_dataset)
    split_index = int(len(output_dataset) * train_ratio)
    train_list = output_dataset[:split_index]
    eval_list = output_dataset[split_index:]

    return Dataset.from_list(train_list), Dataset.from_list(eval_list)


def train(config):
    """Build policy/ref-policy/reward/value models and run PPO."""
    dataset_path = config["dataset"]["instruction_path"]
    dataset_seed = config["dataset"]["seed"]
    train_ratio = config["dataset"]["train_ratio"]

    full_path = config["dataset"]["template"]
    template_dir = os.path.dirname(full_path)
    template_file = os.path.basename(full_path)
    env = Environment(loader=FileSystemLoader(template_dir))
    template = env.get_template(template_file)

    tokenizer = AutoTokenizer.from_pretrained(
        config["models"]["student"],
        trust_remote_code=True
    )
    train_dataset, eval_dataset = process_dataset(dataset_path, dataset_seed, env, template, tokenizer, train_ratio)
    # Prompts must not end with EOS or generation would stop immediately.
    assert train_dataset[0]["input_ids"][-1] != tokenizer.eos_token_id, "The last token should not be an EOS token"

    print(train_dataset)
    print(eval_dataset)

    reward_model_path = config["models"]["reward"]
    sft_model_path = config["models"]["student"]
    # Value and reward heads are both initialized from the reward checkpoint
    # (scalar output, num_labels=1); policy and its frozen reference both
    # start from the SFT student.
    value_model = AutoModelForSequenceClassification.from_pretrained(
        reward_model_path, trust_remote_code=True, num_labels=1
    )
    reward_model = AutoModelForSequenceClassification.from_pretrained(
        reward_model_path, trust_remote_code=True, num_labels=1
    )
    ref_policy = AutoModelForCausalLM.from_pretrained(
        sft_model_path, trust_remote_code=True
    )
    policy = AutoModelForCausalLM.from_pretrained(
        sft_model_path, trust_remote_code=True
    )

    training_arguments = PPOConfig(**config["training"])
    trainer = PPOTrainer(
        config=training_arguments,
        processing_class=tokenizer,
        policy=policy,
        ref_policy=ref_policy,
        reward_model=reward_model,
        value_model=value_model,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset
    )
    trainer.train()
    trainer.save_model(config["training"]["output_dir"])
    tokenizer.save_pretrained(config["training"]["output_dir"])


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, required=True, help='path to the json config file')
    args = parser.parse_args()
    with open(args.config) as f:  # close the handle instead of leaking it
        config = json.load(f)
    train(config)


if __name__ == "__main__":
    main()
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +import json +import argparse +import torch +import logging +import os +from jinja2 import Environment, FileSystemLoader +from transformers import AutoTokenizer +from vllm import LLM, SamplingParams +from tqdm import tqdm +from openai import OpenAI + + +logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') + + +def read_json_field(filename, field_name='prompt'): + try: + with open(filename, 'r') as file: + data = json.load(file) + output_fields = [] + for item in data: + if field_name in item: + output_fields.append(item[field_name]) + return output_fields + except FileNotFoundError: + logging.error("The file was not found.") + except json.JSONDecodeError: + logging.error("There was an error decoding the JSON file.") + except Exception as e: + logging.error(f"An error occurred: {e}") + + +def write_data_to_json_file(data, file_path): + try: + with open(file_path, 'w') as file: + json.dump(data, file, ensure_ascii=False, indent=4) + logging.info(f"Data successfully written to {file_path}") + except Exception as e: + logging.error(f"An error occurred: {e}") + + +def load_tokenizer_and_vllm(config, eos_token=None): + teacher_model_path = config["models"]["teacher"] + logging.info(f"Loading ckpt and tokenizer: {teacher_model_path}") + tokenizer = AutoTokenizer.from_pretrained(teacher_model_path, trust_remote_code=True) + tokenizer.padding_side = "left" + if eos_token: + eos_token_id = tokenizer.convert_tokens_to_ids(eos_token) + 
logging.info(f"eos_token {eos_token} from user input") + elif hasattr(tokenizer, "eos_token_id") and tokenizer.eos_token_id: + logging.info(f"Initial eos_token_id {tokenizer.eos_token_id} from tokenizer") + eos_token_id = tokenizer.eos_token_id + eos_token = tokenizer.convert_ids_to_tokens(eos_token_id) + else: + raise ValueError("No available eos_token or eos_token_id.") + try: + tokenizer.eos_token = eos_token + tokenizer.eos_token_id = eos_token_id + tokenizer.pad_token = eos_token + tokenizer.pad_token_id = eos_token_id + except: + logging.info(f"[WARNING] Cannot set tokenizer.eos_token") + logging.info(f"tokenizer's eos_token: {tokenizer.eos_token}, pad_token: {tokenizer.pad_token}") + logging.info(f"tokenizer's eos_token_id: {tokenizer.eos_token_id}, pad_token_id: {tokenizer.pad_token_id}") + num_gpus = torch.cuda.device_count() + llm = LLM( + model=teacher_model_path, + tensor_parallel_size=num_gpus, + enable_chunked_prefill=config["inference"]["enable_chunked_prefill"], + gpu_memory_utilization=config["inference"]["gpu_memory_utilization"], + trust_remote_code=config["inference"]["trust_remote_code"], + dtype=torch.bfloat16, + enforce_eager=config["inference"]["enforce_eager"], + max_model_len=config["inference"]["max_model_len"], + ) + logging.info("vLLM model loaded successfully") + return tokenizer, llm + + +def generate_teacher_response_for_reward_model_local(tokenizer, llm, data_list, config, batch_size=32): + full_path = config["dataset"]["template"] + template_dir = os.path.dirname(full_path) + template_file = os.path.basename(full_path) + env = Environment(loader=FileSystemLoader(template_dir)) + template = env.get_template(template_file) + positive_system_prompt = config["inference"]["positive_system_prompt"] + negative_system_prompt = config["inference"]["negative_system_prompt"] + outcomes = [] + batches = [data_list[i:i + batch_size] for i in range(0, len(data_list), batch_size)] + for batch in tqdm(batches, desc="Generating responses"): + 
positive_new_batch = [] + negative_new_batch = [] + for sample in batch: + positive_message = [ + {'role': 'system', 'content': positive_system_prompt}, + {'role': 'user', 'content': sample} + ] + positive_full_text = template.render( + message = positive_message, + add_generation_prompt = True, + add_output = False + ) + positive_new_batch.append(positive_full_text) + negative_message = [ + {'role': 'system', 'content': negative_system_prompt}, + {'role': 'user', 'content': sample} + ] + negative_full_text = template.render( + message = negative_message, + add_generation_prompt = True, + add_output = False + ) + negative_new_batch.append(negative_full_text) + + positive_outputs = llm.generate( + positive_new_batch, + SamplingParams( + n = 1, + top_k = 1, + temperature = config["inference"]["temperature"], + seed = config["inference"]["seed"], + skip_special_tokens = False, + ignore_eos = False, + max_tokens = config["inference"]["max_new_tokens"] + ) + ) + positve_responses = [output.outputs[0].text for output in positive_outputs] + positive_gen_data = [{'prompt': batch[i], 'chosen': positve_responses[i]} for i in range(len(batch))] + + negative_outputs = llm.generate( + negative_new_batch, + SamplingParams( + n = 1, + top_k = 1, + temperature = config["inference"]["temperature"], + seed = config["inference"]["seed"], + skip_special_tokens = False, + ignore_eos = False, + max_tokens = config["inference"]["max_new_tokens"] + ) + ) + negative_responses = [output.outputs[0].text for output in negative_outputs] + negative_gen_data = [{'prompt': batch[i], 'rejected': negative_responses[i]} for i in range(len(batch))] + + merged_data = merge_outcomes(positive_gen_data, negative_gen_data) + outcomes = outcomes + merged_data + write_data_to_json_file(outcomes, config["dataset"]["labeled_path"]) + + +def merge_outcomes(positive_gen_data, negative_gen_data): + negative_dict = {item['prompt']: item['rejected'] for item in negative_gen_data} + merged_outcomes = [] + for 
positive_item in positive_gen_data: + prompt = positive_item['prompt'] + if prompt in negative_dict: + merged_outcome = { + 'prompt': prompt, + 'chosen': positive_item['chosen'], + 'rejected': negative_dict[prompt] + } + merged_outcomes.append(merged_outcome) + return merged_outcomes + + +def generate_teacher_response_for_reward_model_api(data_list, config): + client = OpenAI( + api_key = config["inference"]["api_key"], + base_url = config["inference"]["base_url"] + ) + models = client.models.list() + model = models.data[0].id + logging.info(model) + positive_system_prompt = config["inference"]["positive_system_prompt"] + negative_system_prompt = config["inference"]["negative_system_prompt"] + stream = config["inference"]["stream"] + outcomes = [] + for sample in tqdm(data_list, desc="Call remote model and generating responses"): + positive_message = [ + {'role': 'system', 'content': positive_system_prompt}, + {'role': 'user', 'content': sample} + ] + positive_completion = client.chat.completions.create( + messages = positive_message, + model = model, + max_completion_tokens = config["inference"]["max_new_tokens"], + stream = stream + ) + if stream: + positive_result = "" + for chunk in positive_completion: + positive_result += chunk.choices[0].delta.content + else: + positive_result = positive_completion.choices[0].message.content + + negative_message = [ + {'role': 'system', 'content': negative_system_prompt}, + {'role': 'user', 'content': sample} + ] + negative_completion = client.chat.completions.create( + messages = negative_message, + model = model, + max_completion_tokens = config["inference"]["max_new_tokens"], + stream = stream + ) + if stream: + negative_result = "" + for chunk in negative_completion: + negative_result += chunk.choices[0].delta.content + else: + negative_result = negative_completion.choices[0].message.content + outcomes.append({'prompt': sample, 'chosen': positive_result, 'rejected': negative_result}) + write_data_to_json_file(outcomes, 
config["dataset"]["labeled_path"]) + + +def infer_with_teacher_model(config): + logging.info('Generating distillation data from the teacher model!') + data_list = read_json_field(config["dataset"]["instruction_path"]) + try: + job_type = config["job_type"] + if job_type == "rl_reward_api": + generate_teacher_response_for_reward_model_api(data_list, config) + elif job_type == "rl_reward_local": + tokenizer, llm = load_tokenizer_and_vllm(config) + generate_teacher_response_for_reward_model_local(tokenizer, llm, data_list, config) + else: + logging.error(f"Invalid job type: {job_type}") + raise ValueError(f"Invalid job type: {job_type}") + except ValueError as e: + logging.error(f"Training job terminated: {e}") + return + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('--config', type=str, required=True, help='path to the json config file') + args = parser.parse_args() + config = json.load(open(args.config)) + infer_with_teacher_model(config) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/easydistill/rl/reward_train.py b/easydistill/rl/reward_train.py new file mode 100644 index 0000000..62929ed --- /dev/null +++ b/easydistill/rl/reward_train.py @@ -0,0 +1,107 @@ + +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +import json +import argparse +import logging +import os +from jinja2 import Environment, FileSystemLoader +from transformers import AutoModelForSequenceClassification, AutoTokenizer +from trl import RewardTrainer, RewardConfig +from datasets import Dataset + + +def process_dataset(dataset_path, tokenizer, config, template): + kwargs = {"padding": "max_length", "truncation": True, "max_length": config["training"]["max_length"], "return_tensors": "pt"} + examples = [] + try: + with open(dataset_path, 'r') as file: + examples = json.load(file) + except FileNotFoundError: + print(f"Error: The file '{dataset_path}' was not found.") + except json.JSONDecodeError: + print(f"Error: The file '{dataset_path}' is not a valid JSON file.") + except Exception as e: + print(f"An unexpected error occurred: {e}") + + print(examples) + output_dataset = [] + # use chat template + for i in range(len(examples)): + try: + chosen_message = {"content": examples[i]["prompt"], "output": examples[i]["chosen"]} + prompt_plus_chosen_response = template.render(message=chosen_message, add_generation_prompt=False, add_output=True) + + rejected_message = {"content": examples[i]["prompt"], "output": examples[i]["rejected"]} + prompt_plus_rejected_response = template.render(message=rejected_message, add_generation_prompt=False, add_output=True) + + tokens_chosen = tokenizer.encode_plus(prompt_plus_chosen_response, **kwargs) + tokens_rejected = tokenizer.encode_plus(prompt_plus_rejected_response, **kwargs) + sample = { + "input_ids_chosen": tokens_chosen["input_ids"][0], "attention_mask_chosen": tokens_chosen["attention_mask"][0], + "input_ids_rejected": tokens_rejected["input_ids"][0], "attention_mask_rejected": tokens_rejected["attention_mask"][0] + } + output_dataset.append(sample) + except: + logging.warning(f"Error processing sample.") + dataset = Dataset.from_list(output_dataset) + return dataset + + +def 
train(config): + dataset_path = config["dataset"]["labeled_path"] + student_tokenizer = AutoTokenizer.from_pretrained( + config["models"]["student"], + trust_remote_code=True + ) + + full_path = config["dataset"]["template"] + template_dir = os.path.dirname(full_path) + template_file = os.path.basename(full_path) + env = Environment(loader=FileSystemLoader(template_dir)) + template = env.get_template(template_file) + dataset = process_dataset(dataset_path, student_tokenizer, config, template) + + student_model = AutoModelForSequenceClassification.from_pretrained( + config["models"]["student"], + num_labels=1, + trust_remote_code=True + ) + student_model.config.pad_token_id = student_tokenizer.pad_token_id + + training_arguments = RewardConfig(**config["training"]) + trainer = RewardTrainer( + model=student_model, + processing_class=student_tokenizer, + args=training_arguments, + train_dataset=dataset + ) + + trainer.train() + trainer.save_model(config["training"]["output_dir"]) + student_tokenizer.save_pretrained(config["training"]["output_dir"]) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('--config', type=str, required=True, help='path to the json config file') + args = parser.parse_args() + config = json.load(open(args.config)) + train(config) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/easydistill/synthesis/cot_synthesis.py b/easydistill/synthesis/cot_synthesis.py new file mode 100644 index 0000000..a8c8426 --- /dev/null +++ b/easydistill/synthesis/cot_synthesis.py @@ -0,0 +1,274 @@ + +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +import jsonlines +import logging +import os +from jinja2 import Environment, FileSystemLoader +from vllm import LLM, SamplingParams +from tqdm import tqdm +from openai import OpenAI + +from utils import write_data_to_json_file + + +# I have checked this function. +def cot_generate_api(data_list, config): + client = OpenAI( + api_key = config["inference"]["api_key"], + base_url = config["inference"]["base_url"] + ) + models = client.models.list() + model = models.data[0].id + prompt = config["inference"]["prompt"] + stream = config["inference"]["stream"] + logging.info(model) + outcomes = [] + for sample in tqdm(data_list, desc="Calling remote model and generating responses"): + sample = prompt + "\n" + sample + message = [ + {"role": "user", "content": sample} + ] + completion = client.chat.completions.create( + messages = message, + model = model, + max_completion_tokens = config["inference"]["max_new_tokens"], + stream = stream + ) + if stream: + result = "" + for chunk in completion: + result += chunk.choices[0].delta.content + else: + result = completion.choices[0].message.content + if result is not None: + outcomes.append({"instruction": sample, "output": result}) + write_data_to_json_file(outcomes, config["dataset"]["output_path"]) + + +def cot_generate_batch(tokenizer, llm, data_list, config, batch_size=32): + full_path = config["dataset"]["template"] + template_dir = os.path.dirname(full_path) + template_file = os.path.basename(full_path) + env = 
Environment(loader=FileSystemLoader(template_dir)) + template = env.get_template(template_file) + prompt = config["inference"]["prompt"] + + batches = [data_list[i:i + batch_size] for i in range(0, len(data_list), batch_size)] + for batch in tqdm(batches, desc="Generating responses"): + new_batch = [] + for sample in batch: + sample = prompt + "\n" + sample + logging.info(sample) + message={"role": "user", "content": sample} + full_text = template.render( + message=message, + add_generation_prompt=True, + add_output=False + ) + new_batch.append(full_text) + outputs = llm.generate( + new_batch, + SamplingParams( + n=1, + top_k=1, + temperature=config["inference"]["temperature"], + seed=config["inference"]["seed"], + skip_special_tokens=False, + ignore_eos=False, + max_tokens=config["inference"]["max_new_tokens"], + ) + ) + responses = [output.outputs[0].text for output in outputs] + outcomes = [] + for i in range(len(batch)): + if responses[i] is not None: + outcomes.append((sample,responses[i])) + + with jsonlines.open(config["dataset"]["output_path"], mode='a') as writer: + for ins,result in outcomes: + gen_data = {"instruction": ins, "output": result} + writer.write(gen_data) + + +def cot_long2short_api(data_list_ins, data_list_out, config): + client = OpenAI( + api_key = config["inference"]["api_key"], + base_url = config["inference"]["base_url"], + ) + models = client.models.list() + model = models.data[0].id + prompt = config["inference"]["prompt"] + stream = config["inference"]["stream"] + logging.info(model) + outcomes = [] + data_list=[(ins,out) for ins,out in zip(data_list_ins,data_list_out)] + for ins,out in tqdm(data_list, desc="Calling remote model and generating responses"): + sample = f"{prompt} Simplify the reasoning process for the problem below.\n\nProblem:\n{ins}\n\nAnswer:\n{out}\n\nSimplified Reasoning Process:" + logging.info(sample) + message = [ + {"role": "user", "content": sample} + ] + completion = client.chat.completions.create( + 
messages = message, + model = model, + max_completion_tokens = config["inference"]["max_new_tokens"], + stream = stream, + ) + if stream: + result = "" + for chunk in completion: + result += chunk.choices[0].delta.content + else: + result = completion.choices[0].message.content + + if result is not None: + outcomes.append((sample,result)) + + with jsonlines.open(config["dataset"]["output_path"], mode='a') as writer: + for ins,result in outcomes: + gen_data = {"instruction": ins, "output": result} + writer.write(gen_data) + + +def cot_long2short_batch(tokenizer, llm, data_list_ins, data_list_out, config, batch_size=32): + full_path = config["dataset"]["template"] + template_dir = os.path.dirname(full_path) + template_file = os.path.basename(full_path) + env = Environment(loader=FileSystemLoader(template_dir)) + template = env.get_template(template_file) + prompt = config["inference"]["prompt"] + data_list=[(ins,out) for ins,out in zip(data_list_ins,data_list_out)] + batches = [data_list[i:i + batch_size] for i in range(0, len(data_list), batch_size)] + for batch in tqdm(batches, desc="Generating responses"): + new_batch = [] + for ins,out in batch: + sample = f"{prompt} Simplify the reasoning process for the problem below.\n\nProblem:\n{ins}\n\nAnswer:\n{out}\n\nSimplified Reasoning Process:" + logging.info(sample) + message={"role": "user", "content": sample} + full_text = template.render( + message=message, + add_generation_prompt=True, + add_output=False + ) + new_batch.append(full_text) + outputs = llm.generate( + new_batch, + SamplingParams( + n=1, + top_k=1, + temperature=config["inference"]["temperature"], + seed=config["inference"]["seed"], + skip_special_tokens=False, + ignore_eos=False, + max_tokens=config["inference"]["max_new_tokens"], + ) + ) + responses = [output.outputs[0].text for output in outputs] + outcomes = [] + for i in range(len(batch)): + if responses[i] is not None: + outcomes.append((sample,responses[i])) + + with 
jsonlines.open(config["dataset"]["output_path"], mode='a') as writer: + for ins,result in outcomes: + gen_data = {"instruction": ins, "output": result} + writer.write(gen_data) + + +def cot_short2long_api(data_list_ins, data_list_out, config): + client = OpenAI( + api_key = config["inference"]["api_key"], + base_url = config["inference"]["base_url"], + ) + models = client.models.list() + model = models.data[0].id + prompt = config["inference"]["prompt"] + stream = config["inference"]["stream"] + logging.info(model) + outcomes = [] + data_list=[(ins,out) for ins,out in zip(data_list_ins,data_list_out)] + for ins,out in tqdm(data_list, desc="Calling remote model and generating responses"): + sample = f"{prompt} Extend the reasoning process for the problem below.\n\nProblem:\n{ins}\n\nAnswer:\n{out}\n\nExtended Reasoning Process:" + logging.info(sample) + message = [ + {"role": "user", "content": sample} + ] + completion = client.chat.completions.create( + messages = message, + model = model, + max_completion_tokens = config["inference"]["max_new_tokens"], + stream = stream, + ) + if stream: + result = "" + for chunk in completion: + result += chunk.choices[0].delta.content + else: + result = completion.choices[0].message.content + + if result is not None: + outcomes.append((sample,result)) + + with jsonlines.open(config["dataset"]["output_path"], mode='a') as writer: + for ins,result in outcomes: + gen_data = {"instruction": ins, "output": result} + writer.write(gen_data) + + +def cot_short2long_batch(tokenizer, llm, data_list_ins, data_list_out, config, batch_size=32): + full_path = config["dataset"]["template"] + template_dir = os.path.dirname(full_path) + template_file = os.path.basename(full_path) + env = Environment(loader=FileSystemLoader(template_dir)) + template = env.get_template(template_file) + prompt = config["inference"]["prompt"] + data_list=[(ins,out) for ins,out in zip(data_list_ins,data_list_out)] + batches = [data_list[i:i + batch_size] for i in 
range(0, len(data_list), batch_size)] + for batch in tqdm(batches, desc="Generating responses"): + new_batch = [] + for ins,out in batch: + sample = f"{prompt} Extend the reasoning process for the problem below.\n\nProblem:\n{ins}\n\nAnswer:\n{out}\n\nExtended Reasoning Process:" + logging.info(sample) + message={"role": "user", "content": sample} + full_text = template.render( + message=message, + add_generation_prompt=True, + add_output=False + ) + new_batch.append(full_text) + outputs = llm.generate( + new_batch, + SamplingParams( + n=1, + top_k=1, + temperature=config["inference"]["temperature"], + seed=config["inference"]["seed"], + skip_special_tokens=False, + ignore_eos=False, + max_tokens=config["inference"]["max_new_tokens"], + ) + ) + responses = [output.outputs[0].text for output in outputs] + outcomes = [] + for i in range(len(batch)): + if responses[i] is not None: + outcomes.append((sample,responses[i])) + + with jsonlines.open(config["dataset"]["output_path"], mode='a') as writer: + for ins,result in outcomes: + gen_data = {"instruction": ins, "output": result} + writer.write(gen_data) \ No newline at end of file diff --git a/easydistill/synthesis/instruct_synthesis.py b/easydistill/synthesis/instruct_synthesis.py new file mode 100644 index 0000000..d9dd5fe --- /dev/null +++ b/easydistill/synthesis/instruct_synthesis.py @@ -0,0 +1,293 @@ + +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +import logging +import os +from jinja2 import Environment, FileSystemLoader +from vllm import LLM, SamplingParams +from tqdm import tqdm +from openai import OpenAI +import random +import re + +from utils import read_json_field, write_data_to_json_file, load_tokenizer_and_vllm + + +def extract_answer(content): + pattern = r'(.*?)' + match = re.search(pattern, content, re.DOTALL) + if match: + return match.group(1) + else: + return None + + +def extract_instruction_response(content): + instruction_pattern = r'(.*?)' + instruction_match = re.search(instruction_pattern, content, re.DOTALL) + response_pattern = r'(.*?)' + response_match = re.search(response_pattern, content, re.DOTALL) + if instruction_match and response_match: + return instruction_match.group(1), response_match.group(1) + else: + return None, None + + +def generate_prompt_list(data_list, prompt, num_in_context_samples, num_output_samples): + if num_in_context_samples > len(data_list): + raise ValueError("num_in_context_samples cannot be larger than the length of data_list") + output_list = [] + for _ in range(num_output_samples): + selected_samples = random.sample(data_list, num_in_context_samples) + combined_prompts = prompt + "\n" + "".join([sample + "\n" for sample in selected_samples]) + output_list.append(combined_prompts) + return output_list + + +def expand_instruction_api(data_list, config): + client = OpenAI( + api_key = config["inference"]["api_key"], + base_url = config["inference"]["base_url"], + ) + models = client.models.list() + model = models.data[0].id + num_output_samples = config["dataset"]["num_output_samples"] + num_in_context_samples = config["dataset"]["num_in_context_samples"] + prompt = config["inference"]["prompt"] + stream = config["inference"]["stream"] + logging.info(model) + prompt_list 
= generate_prompt_list(data_list, prompt, num_in_context_samples, num_output_samples) + outcomes = [] + for sample in tqdm(prompt_list, desc="Calling remote model and generating responses"): + logging.info(sample) + message = [ + {"role": "user", "content": sample} + ] + completion = client.chat.completions.create( + messages = message, + model = model, + max_completion_tokens = config["inference"]["max_new_tokens"], + stream = stream, + ) + if stream: + result = "" + for chunk in completion: + result += chunk.choices[0].delta.content + else: + result = completion.choices[0].message.content + result = extract_answer(result) + if result is not None: + outcomes.append({"instruction": result}) + write_data_to_json_file(outcomes, config["dataset"]["output_path"]) + + +def expand_instruction_batch(tokenizer, llm, data_list, config, batch_size=32): + full_path = config["dataset"]["template"] + template_dir = os.path.dirname(full_path) + template_file = os.path.basename(full_path) + env = Environment(loader=FileSystemLoader(template_dir)) + template = env.get_template(template_file) + + num_output_samples = config["dataset"]["num_output_samples"] + num_in_context_samples = config["dataset"]["num_in_context_samples"] + prompt = config["inference"]["prompt"] + prompt_list = generate_prompt_list(data_list, prompt, num_in_context_samples, num_output_samples) + + outcomes = [] + batches = [prompt_list[i:i + batch_size] for i in range(0, len(prompt_list), batch_size)] + for batch in tqdm(batches, desc="Generating responses"): + new_batch = [] + for sample in batch: + logging.info(sample) + message={"role": "user", "content": sample} + full_text = template.render( + message=message, + add_generation_prompt=True, + add_output=False + ) + new_batch.append(full_text) + outputs = llm.generate( + new_batch, + SamplingParams( + n=1, + top_k=1, + temperature=config["inference"]["temperature"], + seed=config["inference"]["seed"], + skip_special_tokens=False, + ignore_eos=False, + 
max_tokens=config["inference"]["max_new_tokens"] + ) + ) + responses = [output.outputs[0].text for output in outputs] + for i in range(len(batch)): + result = extract_answer(responses[i]) + if result is not None: + outcomes.append({"instruction": result}) + write_data_to_json_file(outcomes, config["dataset"]["output_path"]) + + +def refine_instruction_api(data_list, config): + client = OpenAI( + api_key = config["inference"]["api_key"], + base_url = config["inference"]["base_url"], + ) + models = client.models.list() + model = models.data[0].id + prompt = config["inference"]["prompt"] + stream = config["inference"]["stream"] + logging.info(model) + outcomes = [] + for sample in tqdm(data_list, desc="Calling remote model and generating responses"): + sample = prompt + "\n" + sample + logging.info(sample) + message = [ + {"role": "user", "content": sample} + ] + completion = client.chat.completions.create( + messages = message, + model = model, + max_completion_tokens = config["inference"]["max_new_tokens"], + stream = stream + ) + if stream: + result = "" + for chunk in completion: + result += chunk.choices[0].delta.content + else: + result = completion.choices[0].message.content + result = extract_answer(result) + if result is not None: + outcomes.append({"instruction": result}) + write_data_to_json_file(outcomes, config["dataset"]["output_path"]) + + +def refine_instruction_batch(tokenizer, llm, data_list, config, batch_size=32): + full_path = config["dataset"]["template"] + template_dir = os.path.dirname(full_path) + template_file = os.path.basename(full_path) + env = Environment(loader=FileSystemLoader(template_dir)) + template = env.get_template(template_file) + prompt = config["inference"]["prompt"] + + outcomes = [] + batches = [data_list[i:i + batch_size] for i in range(0, len(data_list), batch_size)] + for batch in tqdm(batches, desc="Generating responses"): + new_batch = [] + for sample in batch: + sample = prompt + "\n" + sample + logging.info(sample) + 
message={"role": "user", "content": sample} + full_text = template.render( + message=message, + add_generation_prompt=True, + add_output=False + ) + new_batch.append(full_text) + outputs = llm.generate( + new_batch, + SamplingParams( + n=1, + top_k=1, + temperature=config["inference"]["temperature"], + seed=config["inference"]["seed"], + skip_special_tokens=False, + ignore_eos=False, + max_tokens=config["inference"]["max_new_tokens"], + ) + ) + responses = [output.outputs[0].text for output in outputs] + for i in range(len(batch)): + result = extract_answer(responses[i]) + if result is not None: + outcomes.append({"instruction": result}) + write_data_to_json_file(outcomes, config["dataset"]["output_path"]) + + +def instruction_response_extraction_api(data_list, config): + client = OpenAI( + api_key = config["inference"]["api_key"], + base_url = config["inference"]["base_url"], + ) + models = client.models.list() + model = models.data[0].id + prompt = config["inference"]["prompt"] + stream = config["inference"]["stream"] + logging.info(model) + outcomes = [] + for sample in tqdm(data_list, desc="Calling remote model and generating responses"): + sample = prompt + "\n" + sample + logging.info(sample) + message = [ + {"role": "user", "content": sample} + ] + completion = client.chat.completions.create( + messages = message, + model = model, + max_completion_tokens = config["inference"]["max_new_tokens"], + stream= stream, + ) + if stream: + result = "" + for chunk in completion: + result += chunk.choices[0].delta.content + else: + result = completion.choices[0].message.content + new_instruction, new_response = extract_instruction_response(result) + if new_instruction is not None and new_response is not None: + outcomes.append({"instruction": new_instruction, "output": new_response}) + write_data_to_json_file(outcomes, config["dataset"]["output_path"]) + + +def instruction_response_extraction_batch(tokenizer, llm, data_list, config, batch_size=32): + full_path = 
config["dataset"]["template"] + template_dir = os.path.dirname(full_path) + template_file = os.path.basename(full_path) + env = Environment(loader=FileSystemLoader(template_dir)) + template = env.get_template(template_file) + prompt = config["inference"]["prompt"] + + outcomes = [] + batches = [data_list[i:i + batch_size] for i in range(0, len(data_list), batch_size)] + for batch in tqdm(batches, desc="Generating responses"): + new_batch = [] + for sample in batch: + logging.info(sample) + sample = prompt + "\n" + sample + message={"role": "user", "content": sample} + full_text = template.render( + message=message, + add_generation_prompt=True, + add_output=False + ) + new_batch.append(full_text) + outputs = llm.generate( + new_batch, + SamplingParams( + n=1, + top_k=1, + temperature=config["inference"]["temperature"], + seed=config["inference"]["seed"], + skip_special_tokens=False, + ignore_eos=False, + max_tokens=config["inference"]["max_new_tokens"], + ) + ) + responses = [output.outputs[0].text for output in outputs] + for i in range(len(batch)): + new_instruction, new_response = extract_instruction_response(responses[i]) + if new_instruction is not None and new_response is not None: + outcomes.append({"instruction": new_instruction, "output": new_response}) + write_data_to_json_file(outcomes, config["dataset"]["output_path"]) \ No newline at end of file diff --git a/easydistill/synthesis/synthesis_main.py b/easydistill/synthesis/synthesis_main.py new file mode 100644 index 0000000..dd394f4 --- /dev/null +++ b/easydistill/synthesis/synthesis_main.py @@ -0,0 +1,107 @@ + +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +import argparse +import logging +import json + +from instruct_synthesis import ( + expand_instruction_api, + expand_instruction_batch, + refine_instruction_api, + refine_instruction_batch, + instruction_response_extraction_api, + instruction_response_extraction_batch +) +from cot_synthesis import ( + cot_generate_api, + cot_generate_batch, + cot_long2short_api, + cot_long2short_batch, + cot_short2long_api, + cot_short2long_batch +) +from utils import read_json_field, load_tokenizer_and_vllm + + +logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') + + +def data_synthesis_with_teacher_model(config): + logging.info('Generating distillation data from the teacher model!') + job_type = config["job_type"] + if job_type == "instruction_response_extraction_api": + data_list = read_json_field(config["dataset"]["input_path"], field_name="data") + elif job_type in ["cot_long2short_api","cot_long2short_batch","cot_short2long_api","cot_short2long_batch"]: + data_list_ins = read_json_field(config["dataset"]["input_path"]) + data_list_out = read_json_field(config["dataset"]["input_path"], field_name="output") + else: + data_list = read_json_field(config["dataset"]["input_path"]) + + try: + if job_type == "instruction_expansion_api": + expand_instruction_api(data_list, config) + elif job_type == "instruction_expansion_batch": + tokenizer, llm = load_tokenizer_and_vllm(config) + expand_instruction_batch(tokenizer, llm, data_list, config) + + elif 
job_type == "instruction_refinement_api": + refine_instruction_api(data_list, config) + elif job_type == "instruction_refinement_batch": + tokenizer, llm = load_tokenizer_and_vllm(config) + refine_instruction_batch(tokenizer, llm, data_list, config) + + elif job_type == "instruction_response_extraction_api": + instruction_response_extraction_api(data_list, config) + elif job_type == "instruction_response_extraction_batch": + tokenizer, llm = load_tokenizer_and_vllm(config) + instruction_response_extraction_batch(tokenizer, llm, data_list, config) + + elif job_type == "cot_generation_api": + cot_generate_api(data_list, config) + elif job_type == "cot_generation_batch": + tokenizer, llm = load_tokenizer_and_vllm(config) + cot_generate_batch(tokenizer, llm, data_list, config) + + elif job_type == "cot_long2short_api": + cot_long2short_api(data_list_ins, data_list_out, config) + elif job_type == "cot_long2short_batch": + tokenizer, llm = load_tokenizer_and_vllm(config) + cot_long2short_batch(tokenizer, llm, data_list_ins, data_list_out, config) + + elif job_type == "cot_short2long_api": + cot_short2long_api(data_list_ins, data_list_out, config) + elif job_type == "cot_short2long_batch": + tokenizer, llm = load_tokenizer_and_vllm(config) + cot_short2long_batch(tokenizer, llm, data_list_ins, data_list_out, config) + else: + logging.error(f"Invalid job type: {job_type}") + raise ValueError(f"Invalid job type: {job_type}") + except ValueError as e: + logging.error(f"Training job terminated: {e}") + return + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('--config', type=str, required=True, help='path to the json config file') + args = parser.parse_args() + config = json.load(open(args.config)) + data_synthesis_with_teacher_model(config) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/easydistill/synthesis/utils.py b/easydistill/synthesis/utils.py new file mode 100644 index 0000000..162f5eb --- /dev/null +++ 
b/easydistill/synthesis/utils.py @@ -0,0 +1,85 @@ + +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +import json +import torch +import logging +from vllm import LLM +from transformers import AutoTokenizer + + +def read_json_field(filename, field_name='instruction'): + try: + with open(filename, 'r') as file: + data = json.load(file) + output_fields = [] + for item in data: + if field_name in item: + output_fields.append(item[field_name]) + return output_fields + except FileNotFoundError: + logging.error("The file was not found.") + except json.JSONDecodeError: + logging.error("There was an error decoding the JSON file.") + except Exception as e: + logging.error(f"An error occurred: {e}") + + +def write_data_to_json_file(data, file_path): + try: + with open(file_path, 'w') as file: + json.dump(data, file, ensure_ascii=False, indent=4) + logging.info(f"Data successfully written to {file_path}") + except Exception as e: + logging.error(f"An error occurred: {e}") + + +def load_tokenizer_and_vllm(config, eos_token=None): + teacher_model_path = config["models"]["teacher"] + logging.info(f"Loading ckpt and tokenizer: {teacher_model_path}") + tokenizer = AutoTokenizer.from_pretrained(teacher_model_path, trust_remote_code=True) + tokenizer.padding_side = "left" + if eos_token: + eos_token_id = 
tokenizer.convert_tokens_to_ids(eos_token) + logging.info(f"eos_token {eos_token} from user input") + elif hasattr(tokenizer, "eos_token_id") and tokenizer.eos_token_id: + logging.info(f"Initial eos_token_id {tokenizer.eos_token_id} from tokenizer") + eos_token_id = tokenizer.eos_token_id + eos_token = tokenizer.convert_ids_to_tokens(eos_token_id) + else: + raise ValueError("No available eos_token or eos_token_id.") + try: + tokenizer.eos_token = eos_token + tokenizer.eos_token_id = eos_token_id + tokenizer.pad_token = eos_token + tokenizer.pad_token_id = eos_token_id + except: + logging.info(f"[WARNING] Cannot set tokenizer.eos_token") + logging.info(f"tokenizer's eos_token: {tokenizer.eos_token}, pad_token: {tokenizer.pad_token}") + logging.info(f"tokenizer's eos_token_id: {tokenizer.eos_token_id}, pad_token_id: {tokenizer.pad_token_id}") + num_gpus = torch.cuda.device_count() + llm = LLM( + model=teacher_model_path, + tensor_parallel_size=num_gpus, + enable_chunked_prefill=config["inference"]["enable_chunked_prefill"], + gpu_memory_utilization=config["inference"]["gpu_memory_utilization"], + trust_remote_code=config["inference"]["trust_remote_code"], + dtype=torch.bfloat16, + enforce_eager=config["inference"]["enforce_eager"], + max_model_len=config["inference"]["max_model_len"], + ) + logging.info("vLLM model loaded successfully") + return tokenizer, llm \ No newline at end of file diff --git a/recipes/distilqwen_series/distillqwen2.5-0324/README.md b/recipes/distilqwen_series/distillqwen2.5-0324/README.md new file mode 100644 index 0000000..99ba739 --- /dev/null +++ b/recipes/distilqwen_series/distillqwen2.5-0324/README.md @@ -0,0 +1,76 @@ +# DistilQwen2.5-0324: training fast-thinking models + +## Brief Introduction + +In the rapid advancement of large language models, effectively balancing the trade-off between efficient inference and model thinking capabilities has been a key focus in both academia and industry. 
DeepSeekV3-0324, by default, does not employ deep thinking mode, which accelerates model inference while maintaining a balance between swift reasoning and handling complex tasks. The DistilQwen2.5-0324 series not only inherits the essence of the original model's chain-of-thought distillation but also introduces fast-thinking strategies, significantly boosting inference speed. This enables these models to efficiently execute complex tasks on resource-constrained devices and in edge computing scenarios. + +## Detailed Steps + +### Processing of Instructional Dataset + +DistilQwen2.5-0324 was trained using data distilled from Deepseek-V3-0324 as well as data rewritten with long2short after distillation from Deepseek-R1. For Deepseek-V3-0324, the official recommendation is not to use a system prompt; for the long2short scenario, the following prompt was used. You can employ this method to reduce the output of Deepseek-R1 and distill your own model. + +```json +{ + "system": "You are a helpful assistant who is highly skilled at simplifying reasoning processes. Given a problem, its answer and its reasoning process, your task is to simplify the reasoning process so that a small language model (e.g., a 7B model) can reliably follow the steps to solve the problem. If the original reasoning process is divided into multiple steps separated by two newline characters (\n\n), your output must preserve this formatting. You must output ONLY the simplified reasoning process with no additional explanation or commentary." +} +``` + +```bash +python easydistill/kd/infer.py --config=distilqwen2.5-0324_stage1.json +``` + +The training dataset is in JSON format, exemplified by entries such as: + +```json +[ + { + "instruction": "The ratio of the number of molar teeth in the human upper jaw at the age of 6 is 2:1 compared to number of incisors teeth. 
There are total 8 incisors in the human mouth...", + "output": "Step 1: Determine the total number of incisors in the upper jaw...The final answer is: \\boxed{8}" + } +] +``` + +### Black-Box KD + +The black-box KD process follows a supervised learning paradigm, utilizing enhanced instruction-response pairs as training samples. Through this approach, the student model can effectively absorb and understand the knowledge imparted by the larger model, even with a limited number of parameters. This method not only boosts the student model's ability to tackle tasks but also enables it to perform better in multi-task scenarios. Because we have already obtained the teacher's responses in the dataset, we can run the training job: + +```bash +python easydistill/kd/train.py --config=distilqwen2.5-0324_stage2.json +``` + +Please refer to the config file `distilqwen2.5-0324_stage2.json` in the current folder. If you need to run the job in a distributed mode, use `accelerate` to run the job. + +## Model Download + +We have open-sourced our distilled models on both HuggingFace and ModelScope. The available models are named `alibaba-pai/DistilQwen2.5-DS3-0324-7B`, `alibaba-pai/DistilQwen2.5-DS3-0324-14B`, and `alibaba-pai/DistilQwen2.5-DS3-0324-32B`. 
+ +For example, users can download these models from HuggingFace using the following code: + + +```python +from huggingface_hub import snapshot_download + +# Download the 7B model +model_name = "alibaba-pai/DistilQwen2.5-DS3-0324-7B" +snapshot_download(repo_id=model_name, cache_dir="./DistilQwen2.5-DS3-0324-7B/") + +# Download the 14B model +model_name = "alibaba-pai/DistilQwen2.5-DS3-0324-14B" +snapshot_download(repo_id=model_name, cache_dir="./DistilQwen2.5-DS3-0324-14B/") + +# Download the 32B model +model_name = "alibaba-pai/DistilQwen2.5-DS3-0324-32B" +snapshot_download(repo_id=model_name, cache_dir="./DistilQwen2.5-DS3-0324-32B/") +``` + + +## Performance + +- **32B Model** approaches the performance of closed-source models with 10x the parameters on the GPQA Diamond benchmark +- **Significant Improvement in Reasoning Efficiency** (see comparison table below) + +| Model | MMLU_PRO Tokens | AIME2024 Tokens | Speed Gain | +|--------------------------------|-----------------|-----------------|------------| +| DistilQwen2.5-R1-32B (Slow-Thinking) | 4198 | 12178 | 1x | +| DistilQwen2.5-DS3-0324-32B | 690 | 4177 | 5-8x | \ No newline at end of file diff --git a/recipes/distilqwen_series/distillqwen2.5-0324/distilqwen2.5-0324_stage1.json b/recipes/distilqwen_series/distillqwen2.5-0324/distilqwen2.5-0324_stage1.json new file mode 100644 index 0000000..b27f3cb --- /dev/null +++ b/recipes/distilqwen_series/distillqwen2.5-0324/distilqwen2.5-0324_stage1.json @@ -0,0 +1,14 @@ +{ + "job_type": "cot_long2short_api", + "dataset": { + "input_path": "./raw.json", + "output_path": "./raw_simplified.json" + }, + "inference":{ + "base_url": "ENDPOINT", + "api_key": "TOKEN", + "stream": true, + "prompt" : "You are a helpful assistant who is highly skilled at simplifying reasoning processes. 
Given a problem, its answer and its reasoning process, your task is to simplify the reasoning process so that a small language model (e.g., a 7B model) can reliably follow the steps to solve the problem. If the original reasoning process is divided into multiple steps separated by two newline characters (\n\n), your output must preserve this formatting. You must output ONLY the simplified reasoning process with no additional explanation or commentary.", + "max_new_tokens": 1024 + } +} \ No newline at end of file diff --git a/recipes/distilqwen_series/distillqwen2.5-0324/distilqwen2.5-0324_stage2.json b/recipes/distilqwen_series/distillqwen2.5-0324/distilqwen2.5-0324_stage2.json new file mode 100644 index 0000000..e1c92b5 --- /dev/null +++ b/recipes/distilqwen_series/distillqwen2.5-0324/distilqwen2.5-0324_stage2.json @@ -0,0 +1,23 @@ +{ + "job_type": "kd_black_box_api", + "dataset": { + "labeled_path": "distil_qwen_0324.json", + "template" : "chat_template_kd.jinja", + "seed": 42 + }, + "models": { + "student": "model/Qwen/Qwen2.5-1.5B-Instruct/" + }, + "training": { + "output_dir": "result_stage2/", + "num_train_epochs": 3, + "per_device_train_batch_size": 1, + "gradient_accumulation_steps": 8, + "save_steps": 1000, + "logging_steps": 1, + "learning_rate": 2e-5, + "weight_decay": 0.05, + "warmup_ratio": 0.1, + "lr_scheduler_type": "cosine" + } +} \ No newline at end of file diff --git a/recipes/distilqwen_series/distillqwen2.5-r1/README.md b/recipes/distilqwen_series/distillqwen2.5-r1/README.md new file mode 100644 index 0000000..fa40051 --- /dev/null +++ b/recipes/distilqwen_series/distillqwen2.5-r1/README.md @@ -0,0 +1,142 @@ +# DistilQwen2.5-R1: training distilled reasoning models based on CoTs generated by Deepseek-R1 + +## Brief Introduction + +As large language models (LLMs) evolve toward deep reasoning capabilities, deploying them in resource-constrained environments (e.g., mobile devices, edge computing) remains challenging. 
The DistilQwen2.5-R1 series addresses this by transferring reasoning capabilities from ultra-large models (e.g., DeepSeek-R1) to compact models through innovative distillation techniques, achieving high performance while reducing computational costs. + +## Data Generation Detailed Steps + +### 1. Generate Thinking Dataset + +Distillqwen-r1 is trained using chain-of-thought data distilled from deepseek-r1. We provide the system prompts used for distilling the R1 data and the system prompts used for training qwen2.5. You can use the current system prompts to call Deepseek-R1 to generate your own data and train the model. + +```json +{ + "system": "Your role as an assistant involves thoroughly exploring questions through a systematic long thinking process before providing the final precise and accurate solutions. This requires engaging in a comprehensive cycle of analysis, summarizing, exploration, reassessment, reflection, backtracing, and iteration to develop well-considered thinking process. Please structure your response into two main sections: Thought and Solution. In the Thought section, detail your reasoning process using the specified format: <|begin_of_thought|> {thought with steps separated with '\n\n'} <|end_of_thought|> Each step should include detailed considerations such as analisying questions, summarizing relevant findings, brainstorming new ideas, verifying the accuracy of the current steps, refining any errors, and revisiting previous steps. In the Solution section, based on various attempts, explorations, and reflections from the Thought section, systematically present the final solution that you deem correct. The solution should remain a logical, accurate, concise expression style and detail necessary step needed to reach the conclusion, formatted as follows: <|begin_of_solution|> {final formatted, precise, and clear solution} <|end_of_solution|> Now, try to solve the following question through the above guidelines:" +} +``` + +### 2. 
Determine the Difficulty Level + +Critiquing the CoT qualities according to the cognitive capabilities of smaller models. You can use the current system prompts using QwQ-32B to determine the difficulty level of the CoTs. + +```json +{ + "system": "You are a highly capable evaluator. Your task is to assess the given reasoning process from the perspective of a small language model (e.g., 7B). Specifically, determine whether the reasoning process provides sufficient detail for a small model to solve the problem, or whether it is too simplistic (i.e., lacking critical details) or too complex (i.e., containing unnecessary or confusing steps). Difficulty Definitions (from the perspective of a small model): - Easy: The reasoning process is overly simplistic relative to the problem's difficulty; it omits essential details that a small model needs to solve the problem. - Medium: The reasoning process is appropriately balanced, offering enough detailed guidance. - Hard: The reasoning process is overly complex, with extraneous or convoluted steps that could hinder a small model's ability to follow it. Output Format: You must output exactly one word: easy, medium, or hard. Do NOT provide any additional text, explanation." +} +``` + +### 3. Rethinking and Refining these CoTs + +Rethinking and refining these CoTs based on the critiques using following prompts: + +#### easy +```json +{ + "system": "You are a helpful assistant who is highly skilled at extending reasoning processes. Given a problem, its answer, and its reasoning process, your task is to extend the reasoning process by adding necessary details and intermediate steps so that a small language model (e.g., a 7B model) can follow the extended reasoning process to solve the problem. If the original reasoning process is divided into multiple steps separated by two newline characters, your output must preserve this formatting. 
You must output ONLY the extended reasoning process with no additional explanation or commentary." +} +``` + +#### hard +```json +{ + "system": "You are a helpful assistant who is highly skilled at simplifying reasoning processes. Given a problem, its answer, and its reasoning process, your task is to simplify the reasoning process so that a small language model (e.g., a 7B model) can reliably follow the steps to solve the problem. If the original reasoning process is divided into multiple steps separated by two newline characters, your output must preserve this formatting. You must output ONLY the simplified reasoning process with no additional explanation or commentary." +} +``` + +The training dataset is in JSON format, exemplified by entries such as: + +```json +[ + { + "instruction": "The ratio of the number of molar teeth in the human upper jaw at the age of 6 is 2:1 compared to number of incisors teeth. There are total 8 incisors in the human mouth...", + "output": "<|begin_of_thought|>## Step 1: Determine the total number of incisors in the upper jaw...\n<|end_of_thought|>\n<|begin_of_solution|>The final answer is: \\boxed{8}<|end_of_solution|>" + } +] +``` + +## Model Training Guidelines + +### 1. Black-Box KD + +The black-box KD process follows a supervised learning paradigm, utilizing enhanced instruction-response pairs as training samples. Through this approach, the student model can effectively absorb and understand the knowledge imparted by the larger model, even with a limited number of parameters. This method not only boosts the student model's ability to tackle tasks but also enables it to perform better in multi-task scenarios. Because we have already obtained the teacher's responses in the dataset, we need to run the training job only: + +```bash +python easydistill/kd/train.py --config=distilqwen2.5-r1_stage1.json +``` + +Please refer to the config file `distilqwen2.5-r1_stage1.json` in the current folder. 
If you need to run the job in a distributed mode, use `accelerate` to run the job. + +### 2. CogPO + +CogPO (Cognitive Preference Optimization) is a novel algorithm designed to enhance the reasoning abilities of small language models (LLMs) by aligning their reasoning processes with their inherent cognitive capacities. + +Key aspects of CogPO: +- Extends Direct Preference Optimization (DPO) with cognitive alignment +- Introduces three specialized "mini-tasks" with different preference gaps +- Dynamically adjusts optimization strength (β values) based on reasoning complexity +- Works synergistically with the CRV (Critique-Rethink-Verify) system + +You can run the CogPO by: + +```bash +accelerate launch --num_processes n --config_file multi_gpu.yaml cogpo.py --config distilqwen2.5-r1_stage2.json +``` + +The dataset is in JSON format, exemplified by entries such as: +```json +{ + "prompt": "Ellie has 8 pairs of shoes. Riley has 3 fewer. How many pairs of shoes do they have in all?", + "chosen": "Identify the number of pairs of shoes Ellie has. According to the problem statement, Ellie has 8 pairs of shoes.\n Next, determine the number of pairs of shoes Riley has. The problem states that Riley has 3 fewer pairs than Ellie. To find out how many pairs Riley has, subtract 3 from the number of pairs Ellie has: 8 - 3 = 5. So, Riley has 5 pairs of shoes.\n Now, calculate the total number of pairs of shoes both Ellie and Riley have together. To do this, add the number of pairs Ellie has to the number of pairs Riley has: 8 (Ellie's pairs) + 5 (Riley's pairs) = 13 pairs. This step is crucial because it combines the information about both individuals to give the overall total.\n The total number of pairs of shoes they have in all is 13. Thus, the final answer is 13. 
Each step in the reasoning process is designed to help understand and solve the problem effectively, showing how the information about each individual's shoe count leads to finding the combined total.\boxed{13}", + "rejected": "Identify the number of pairs of shoes Ellie has. Ellie has 8 pairs of shoes as stated in the problem. Determine how many pairs of shoes Riley has. Since Riley has 3 fewer pairs than Ellie, we mistakenly add 3 to Ellie's pairs instead of subtracting, giving us 8 + 3 = 11 pairs of shoes for Riley. Calculate the total number of pairs of shoes they both have. Add Ellie's and Riley's pairs together: 8 + 11. The total pairs of shoes is 19. The final answer is thus \boxed{19}.\boxed{13}", + "beta": 0.5 +} +``` + +## Model Download + +We have open-sourced our distilled models on both HuggingFace and ModelScope. The available models are named `alibaba-pai/DistilQwen2.5-R1-3B`, `alibaba-pai/DistilQwen2.5-R1-7B`, `alibaba-pai/DistilQwen2.5-R1-14B`, and `alibaba-pai/DistilQwen2.5-R1-32B`. 
+ +For example, users can download these models from HuggingFace using the following code: + + +```python +from huggingface_hub import snapshot_download + +# Download the 3B model +model_name = "alibaba-pai/DistilQwen2.5-R1-3B" +snapshot_download(repo_id=model_name, cache_dir="./DistilQwen2.5-R1-3B/") + +# Download the 7B model +model_name = "alibaba-pai/DistilQwen2.5-R1-7B" +snapshot_download(repo_id=model_name, cache_dir="./DistilQwen2.5-R1-7B/") + +# Download the 14B model +model_name = "alibaba-pai/DistilQwen2.5-R1-14B" +snapshot_download(repo_id=model_name, cache_dir="./DistilQwen2.5-R1-14B/") + +# Download the 32B model +model_name = "alibaba-pai/DistilQwen2.5-R1-32B" +snapshot_download(repo_id=model_name, cache_dir="./DistilQwen2.5-R1-32B/") +``` + + +## Performance + +We compared DistilQwen2.5-R1 series with leading reasoning models across four benchmarks: + +### 7B Model Comparison +| Model | Training Data Size | AIME2024 | MATH-500 | GPQA Diamond | LiveCodeBench V2 | +|--------------------------------|--------------------|----------|----------|--------------|------------------| +| DeepSeek-R1-Distill-Qwen-7B | 800k | 55.5 | 92.8 | 49.1 | - | +| Bespoke-Stratos-7B | 17k | 20.0 | 82.0 | 37.8 | 36.1 | +| OpenThinker-7B | 114k | 31.3 | 83.0 | 42.4 | 39.9 | +| **DistilQwen2.5-R1-7B** | 105k | 43.33 | 88.4 | 42.93 | 46.38 | + +### 32B Model Comparison +| Model | Training Data Size | AIME2024 | MATH-500 | GPQA Diamond | LiveCodeBench V2 | +|--------------------------------|--------------------|----------|----------|--------------|------------------| +| DeepSeek-R1-Distill-Qwen-32B | 800k | 72.6 | 94.3 | 62.1 | - | +| Sky-T1-32B-Preview | 17k | 43.3 | 86.4 | 56.8 | - | +| OpenThinker-32B | 114k | 66.0 | 90.6 | 61.6 | 68.9 | +| **DistilQwen2.5-R1-32B** | 105k | 70.0 | 93.8 | 62.12 | 65.95 | \ No newline at end of file diff --git a/recipes/distilqwen_series/distillqwen2.5-r1/cogpo.py b/recipes/distilqwen_series/distillqwen2.5-r1/cogpo.py new file mode 100644 index 
0000000..10eda52 --- /dev/null +++ b/recipes/distilqwen_series/distillqwen2.5-r1/cogpo.py @@ -0,0 +1,194 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +import trl +from trl.trainer.dpo_trainer import DataCollatorForPreference +from dataclasses import dataclass +from typing import Any, Callable, Literal, Optional, Union +import torch, torch.nn.functional as F +from datasets import load_dataset +from transformers import AutoModelForCausalLM, AutoTokenizer +from trl import DPOConfig, DPOTrainer, FDivergenceConstants, FDivergenceType +from trl.trainer.utils import cap_exp +import json +import argparse + + +@dataclass +class DataCollatorForPreferenceWithBeta(DataCollatorForPreference): + def torch_call(self, examples: list[Union[list[int], Any, dict[str, Any]]]) -> dict[str, Any]: + betas = torch.tensor([float(ex["beta"]) for ex in examples], dtype=torch.float32) + for ex in examples: + ex.pop("beta") + batch = super().torch_call(examples) + batch["betas"] = betas + return batch + + +class CogPOTrainer(DPOTrainer): + def get_batch_loss_metrics( + self, + model, + batch, + train_eval: str = "train", + ): + metrics = {} + + betas = batch.pop("betas").to(self.accelerator.device) + model_output = self.concatenated_forward(model, batch) + + if "ref_chosen_logps" in batch and "ref_rejected_logps" in batch: + 
ref_chosen_logps = batch["ref_chosen_logps"] + ref_rejected_logps = batch["ref_rejected_logps"] + else: + ref_chosen_logps, ref_rejected_logps = self.compute_ref_log_probs(batch) + + losses, chosen_rewards, rejected_rewards = self._dpo_sigmoid_loss( + model_output["chosen_logps"], + model_output["rejected_logps"], + ref_chosen_logps, + ref_rejected_logps, + betas, + ) + + reward_accuracies = (chosen_rewards > rejected_rewards).float() + + if self.args.rpo_alpha is not None: + losses = losses + self.args.rpo_alpha * model_output["nll_loss"] + + if self.use_weighting: + losses = losses * model_output["policy_weights"] + + if self.aux_loss_enabled: + losses = losses + self.aux_loss_coef * model_output["aux_loss"] + + prefix = "eval_" if train_eval == "eval" else "" + metrics[f"{prefix}rewards/chosen"] = self.accelerator.gather_for_metrics(chosen_rewards).mean().item() + metrics[f"{prefix}rewards/rejected"] = self.accelerator.gather_for_metrics(rejected_rewards).mean().item() + metrics[f"{prefix}rewards/accuracies"] = self.accelerator.gather_for_metrics(reward_accuracies).mean().item() + metrics[f"{prefix}rewards/margins"] = ( + self.accelerator.gather_for_metrics(chosen_rewards - rejected_rewards).mean().item() + ) + metrics[f"{prefix}logps/chosen"] = ( + self.accelerator.gather_for_metrics(model_output["chosen_logps"]).detach().mean().item() + ) + metrics[f"{prefix}logps/rejected"] = ( + self.accelerator.gather_for_metrics(model_output["rejected_logps"]).detach().mean().item() + ) + metrics[f"{prefix}logits/chosen"] = ( + self.accelerator.gather_for_metrics(model_output["mean_chosen_logits"]).detach().mean().item() + ) + metrics[f"{prefix}logits/rejected"] = ( + self.accelerator.gather_for_metrics(model_output["mean_rejected_logits"]).detach().mean().item() + ) + if self.args.rpo_alpha is not None: + metrics[f"{prefix}nll_loss"] = ( + self.accelerator.gather_for_metrics(model_output["nll_loss"]).detach().mean().item() + ) + if self.aux_loss_enabled: + 
metrics[f"{prefix}aux_loss"] = ( + self.accelerator.gather_for_metrics(model_output["aux_loss"]).detach().mean().item() + ) + + return losses.mean(), metrics + + def _dpo_sigmoid_loss( + self, + chosen_logps: torch.FloatTensor, + rejected_logps: torch.FloatTensor, + ref_chosen_logps: torch.FloatTensor, + ref_rejected_logps: torch.FloatTensor, + betas: torch.FloatTensor, + ): + + device = self.accelerator.device + chosen_logratios = chosen_logps.to(device) - (not self.reference_free) * ref_chosen_logps.to(device) + rejected_logratios = rejected_logps.to(device) - (not self.reference_free) * ref_rejected_logps.to(device) + + # 2) Δ = (log p_c - log p_r) - (log p̂_c - log p̂_r) + if self.f_divergence_type == FDivergenceType.ALPHA_DIVERGENCE.value: + alpha_coef = FDivergenceConstants.ALPHA_DIVERGENCE_COEF_DEFAULT + if self.f_divergence_params and FDivergenceConstants.ALPHA_DIVERGENCE_COEF_KEY in self.f_divergence_params: + alpha_coef = float(self.f_divergence_params[FDivergenceConstants.ALPHA_DIVERGENCE_COEF_KEY]) + logits = (cap_exp(rejected_logratios * -alpha_coef) - cap_exp(chosen_logratios * -alpha_coef)) / alpha_coef + else: + logratios = chosen_logps - rejected_logps + if self.reference_free: + ref_logratios = torch.tensor([0], dtype=logratios.dtype, device=logratios.device) + else: + ref_logratios = ref_chosen_logps - ref_rejected_logps + logratios = logratios.to(self.accelerator.device) + ref_logratios = ref_logratios.to(self.accelerator.device) + logits = logratios - ref_logratios + + if self.f_divergence_type == FDivergenceType.JS_DIVERGENCE.value: + logits -= F.softplus(chosen_logratios) - F.softplus(rejected_logratios) + + + losses = ( + -F.logsigmoid(betas * logits) * (1 - self.label_smoothing) + - F.logsigmoid(-betas * logits) * self.label_smoothing + ) + + chosen_rewards = betas * (chosen_logps.to(device) - ref_chosen_logps.to(device)).detach() + rejected_rewards = betas * (rejected_logps.to(device) - ref_rejected_logps.to(device)).detach() + + + return 
losses, chosen_rewards, rejected_rewards + + +def train(config): + model_name = config["models"]["student"] + model = AutoModelForCausalLM.from_pretrained(model_name) + tokenizer = AutoTokenizer.from_pretrained(model_name) + + dataset = load_dataset("json", data_files=config["dataset"]["labeled_path"], split='train') + + dpo_args = DPOConfig( + output_dir=config["training"]["output_dir"], + num_train_epochs=config["training"]["num_train_epochs"], + loss_type=config["training"]["loss_type"], + beta=config["training"]["beta"], + per_device_train_batch_size=config["training"]["per_device_train_batch_size"], + remove_unused_columns=False, + ) + + collator = DataCollatorForPreferenceWithBeta( + pad_token_id=tokenizer.pad_token_id + ) + + trainer = CogPOTrainer( + model=model, + args=dpo_args, + train_dataset=dataset, + tokenizer=tokenizer, + data_collator=collator, + ) + + trainer.train() + trainer.save_model(config["training"]["output_dir"]) + tokenizer.save_pretrained(config["training"]["output_dir"]) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('--config', type=str, required=True, help='path to the json config file') + args = parser.parse_args() + config = json.load(open(args.config)) + train(config) + + +if __name__ == "__main__": + main() diff --git a/recipes/distilqwen_series/distillqwen2.5-r1/distilqwen2.5-r1_stage1.json b/recipes/distilqwen_series/distillqwen2.5-r1/distilqwen2.5-r1_stage1.json new file mode 100644 index 0000000..ceabce7 --- /dev/null +++ b/recipes/distilqwen_series/distillqwen2.5-r1/distilqwen2.5-r1_stage1.json @@ -0,0 +1,23 @@ +{ + "job_type": "kd_black_box_api", + "dataset": { + "labeled_path": "distil_qwen_r1.json", + "template" : "chat_template_kd.jinja", + "seed": 42 + }, + "models": { + "student": "model/Qwen/Qwen2.5-1.5B-Instruct/" + }, + "training": { + "output_dir": "result_stage1/", + "num_train_epochs": 3, + "per_device_train_batch_size": 1, + "gradient_accumulation_steps": 8, + "save_steps": 1000, + 
"logging_steps": 1, + "learning_rate": 2e-5, + "weight_decay": 0.05, + "warmup_ratio": 0.1, + "lr_scheduler_type": "cosine" + } +} \ No newline at end of file diff --git a/recipes/distilqwen_series/distillqwen2.5-r1/distilqwen2.5-r1_stage2.json.json b/recipes/distilqwen_series/distillqwen2.5-r1/distilqwen2.5-r1_stage2.json.json new file mode 100644 index 0000000..49b5a57 --- /dev/null +++ b/recipes/distilqwen_series/distillqwen2.5-r1/distilqwen2.5-r1_stage2.json.json @@ -0,0 +1,15 @@ +{ + "models": { + "student": "models/Qwen2.5-0.5B-Instruct" + }, + "dataset": { + "labeled_path": "cogpo/test500.jsonl" + }, + "training": { + "output_dir": "save/Qwen2.5-0.5B-CogPO", + "num_train_epochs": 1.0, + "loss_type": "sigmoid", + "beta": 1.0, + "per_device_train_batch_size": 2 + } +} \ No newline at end of file diff --git a/recipes/distilqwen_series/distillqwen2.5-thoughtX/README.md b/recipes/distilqwen_series/distillqwen2.5-thoughtX/README.md new file mode 100644 index 0000000..ac2b115 --- /dev/null +++ b/recipes/distilqwen_series/distillqwen2.5-thoughtX/README.md @@ -0,0 +1,101 @@ +# DistilQwen-ThoughtX: Optimized Reasoning Models with OmniThought + +## Brief Introduction + +DistilQwen-ThoughtX is a series of high-performance reasoning models trained on the [OmniThought](https://huggingface.co/datasets/alibaba-pai/OmniThought) dataset. These models are optimized for chain-of-thought (CoT) reasoning with balanced verbosity and cognitive difficulty, achieving state-of-the-art results on mathematical, coding, and logical reasoning benchmarks. + +## Detailed Steps + +### Direct Training + +DistilQwen-ThoughtX was trained using data from the OmniThought dataset, which includes 2 million CoT processes with RV (Reasoning Verbosity) and CD (Cognitive Difficulty) annotations. The dataset covers mathematics, coding, and logical reasoning tasks, validated by multiple teacher models (DeepSeek-R1, QwQ-32B). 
+ +The training system prompt is: + +```json +{ + "system": "You are Qwen, created by Alibaba Cloud. You are a helpful assistant." +} +``` + +Using the OmniThought dataset, we can run the training job: + +```bash +python easydistill/kd/train.py --config=distilqwen2.5-thoughtx-train.json +``` + +Remember to filter the RV and CD annotations to ensure they are within the desired range to train your own model. + +| Model Name | Parameters | Base Model | +|--------------------------------------|------------|---------------------| +| `DistilQwen-ThoughtX-7B` | 7B | Qwen2.5-7B-Instruct | +| `DistilQwen-ThoughtX-32B` | 32B | Qwen2.5-32B-Instruct| + +### Process Your Own Data + +To obtain the RV and CD values of your own data, you can use the following prompt to call QwQ-32B/Deepseek-R1, score your own data, and filter it. + +Prompt Template to Calculate the RV Score: +```json +{ + "prompt": "You are an expert judge tasked with evaluating the Reasoning Verbosity of a Chain-of-Thought (CoT) for a given problem and its answer. Reasoning Verbosity Evaluation Focus: Assess how well the CoT’s length and step complexity match the problem’s inherent difficulty. An optimal chain is neither missing essential steps nor padded with needless digressions. A simple question should be solved with a brief, direct chain; a challenging one may justifiably require a longer path with reflection and error-checking. Scoring Guidelines (0-9): 0-1 Minimal verbosity, straightforward expression with little to no elaboration. 2-3 Clear and concise reasoning with necessary explanations. 4-5 Moderate verbosity with detailed explanations and thorough reasoning. 6-7 Extensive verbosity with comprehensive justification and exploration of complex connections. 8-9 High verbosity with deep, exhaustive exploration of reasoning; involves extensive elaboration, nested justifications, and consideration of counterarguments or alternative perspectives. Given Problem, Chain-of-Thought and Answer, you will: 1. 
Analyze the Reasoning Verbosity 2. Determine score using the above criteria 3. Output ONLY the integer score (0-9) Problem: {problem} Chain-of-Thought: {thought} Answer: {solution}" +} +``` + +Prompt Template to Calculate the CD Score: +```json +{ + "prompt": "You are an expert judge assessing the Cognitive Difficulty of a Chain-of-Thought (CoT) for a given problem and its answer. Cognitive Difficulty Evaluation Focus: The level of reasoning competence required for a model to follow and reproduce the chain faithfully. Judge the reasoning approach, techniques, and overall difficulty. Higher scores correspond to more advanced concepts, abstractions, or multi-layer reasoning patterns. Scoring Guidelines (0-9): 0-1 Elementary facts or a single trivial operation. 2-3 Multi-step arithmetic, explicit enumeration, basic rule chaining. 4-5 Early-undergraduate logic/algebra; one non-obvious insight. 6-7 Advanced undergraduate techniques (determinants, dynamic programming, layered code reasoning, etc). 8-9 Graduate-level abstraction, nested proofs, intricate algorithmic analysis. Given Problem, Chain-of-Thought and Answer, you will: 1. Analyze the Cognitive Difficulty 2. Determine score using the above criteria 3. Output ONLY the integer score (0-9) Problem: {problem} Chain-of-Thought: {thought} Answer: {solution}" +} +``` + +## Model Download + +We have open-sourced our distilled models on HuggingFace. The available models are named `alibaba-pai/DistilQwen-ThoughtX-7B` and `alibaba-pai/DistilQwen-ThoughtX-32B`. 
+ +Users can download these models from HuggingFace using the following code: + +```python +from huggingface_hub import snapshot_download + +# Download the 7B model +model_name = "alibaba-pai/DistilQwen-ThoughtX-7B" +snapshot_download(repo_id=model_name, cache_dir="./DistilQwen-ThoughtX-7B/") + +# Download the 32B model +model_name = "alibaba-pai/DistilQwen-ThoughtX-32B" +snapshot_download(repo_id=model_name, cache_dir="./DistilQwen-ThoughtX-32B/") +``` + +## Performance + +The models achieve state-of-the-art performance on various reasoning benchmarks: + +| Model | AIME2024 | MATH500 | GPQA-D | LiveCodeBench V2 | +|----------------------|----------|---------|--------|------------------| +| DeepSeek-R1-Distill-7B | 57.3 | 89.6 | 47.3 | 48.4 | +| **DistilQwen-ThoughtX-7B** | **56.7** | **90.2** | **50.0** | **56.8** | +| DeepSeek-R1-Distill-32B | 74.7 | 90.0 | 62.4 | 72.3 | +| **DistilQwen-ThoughtX-32B** | **80.0** | **92.6** | **64.0** | **73.4** | + +## Reference + +For more detailed information about the model, we encourage you to refer to our paper: + +- **Reasoning with OmniThought: A Large CoT Dataset with Verbosity and Cognitive Difficulty Annotations** + Wenrui Cai, Chengyu Wang, Junbing Yan, Jun Huang, Xiangzhong Fang + [arXiv:2505.10937](https://arxiv.org/abs/2505.10937) + +You can cite the paper using the following citation format: + +```bibtex +@misc{cai2025reasoningomnithoughtlargecot, + title={Reasoning with OmniThought: A Large CoT Dataset with Verbosity and Cognitive Difficulty Annotations}, + author={Wenrui Cai and Chengyu Wang and Junbing Yan and Jun Huang and Xiangzhong Fang}, + year={2025}, + eprint={2505.10937}, + archivePrefix={arXiv}, + primaryClass={cs.CL}, + url={https://arxiv.org/abs/2505.10937} +} +``` \ No newline at end of file diff --git a/recipes/distilqwen_series/distillqwen2.5-thoughtX/distilqwen2.5-thoughtx-train.json b/recipes/distilqwen_series/distillqwen2.5-thoughtX/distilqwen2.5-thoughtx-train.json new file mode 100644 index 
0000000..464b63b --- /dev/null +++ b/recipes/distilqwen_series/distillqwen2.5-thoughtX/distilqwen2.5-thoughtx-train.json @@ -0,0 +1,24 @@ +{ + "job_type": "kd_black_box_api", + "dataset": { + "labeled_path": "distil_qwen_thoughtX.json", + "template" : "chat_template_kd.jinja", + "seed": 42 + }, + "models": { + "student": "model/Qwen/Qwen2.5-1.5B-Instruct/" + }, + "training": { + "output_dir": "result/", + "num_train_epochs": 3, + "per_device_train_batch_size": 1, + "gradient_accumulation_steps": 8, + "max_length":4096, + "save_steps": 1000, + "logging_steps": 1, + "learning_rate": 2e-5, + "weight_decay": 0.05, + "warmup_ratio": 0.1, + "lr_scheduler_type": "cosine" + } + } \ No newline at end of file diff --git a/recipes/distilqwen_series/distillqwen2.5/README.md b/recipes/distilqwen_series/distillqwen2.5/README.md new file mode 100644 index 0000000..a4c0b4d --- /dev/null +++ b/recipes/distilqwen_series/distillqwen2.5/README.md @@ -0,0 +1,135 @@ +# DistilQwen2.5: Combining Black-Box and White Box KD + +## Brief Introduction + +The DistilQwen2.5 distilled language model series is built upon the Qwen2.5 model. This series leverages innovative distillation techniques to enhance instruction-following capabilities. As a result, these distilled models retain the excellent performance of the original models while requiring fewer computational resources. + +The distillation process involves carefully selecting, rewriting, and optimizing instruction-response pairs conducive to student model learning, thus improving model comprehension and execution abilities. Following standard fine-tuning, we employ white-box distillation techniques to enable the student models to better acquire fine-grained knowledge from teacher models. Experimental evaluations demonstrate the significant improvement in capabilities of the DistilQwen2.5 models. 
+ +## Detailed Steps + +### Processing of Instructional Dataset + +DistilQwen2.5 begins with collecting diverse, high-quality instructional data from sources like Magpie, Openhermes, and Mammoth 2, along with proprietary datasets. This data includes Chinese and English instructions, scoring them for difficulty and task relevance. This process is very similar to the recipe of DistilQwen2. + +In addition, we have open-sourced part of the dataset used for model training, totaling 100K entries. This dataset includes mathematical problems, code tasks, Q&A, instruction following, and creative generation. Users can incorporate the DistilQwen_100K dataset, or its subsets, during model fine-tuning to enhance downstream task performance while maintaining generalization ability. The dataset is in JSON format, exemplified by entries such as: + +```json +[ + { + "instruction": "The ratio of the number of molar teeth in the human upper jaw at the age of 6 is 2:1 compared to number of incisors teeth. There are total 8 incisors in the human mouth...", + "output": "## Step 1: Determine the total number of incisors in the upper jaw...\n\nThe final answer is: \\boxed{8}" + }, + { + "instruction": "This is the text of a lecture I am giving tomorrow. Can you go over it and make recommendations to improve clarity and flow?", + "output": "I'd be happy to help you review your lecture text..." + } +] +``` + +The dataset is available on ModelScope and Hugging Face. Users can download it using ModelScope's scripts and command-line tools. + +```python +# Validate SDK token +from modelscope.hub.api import HubApi +api = HubApi() +api.login('your_token_id') + +# Dataset download +from modelscope.msdatasets import MsDataset +ds = MsDataset.load('PAI/DistilQwen_100k') +``` + +### Black-Box KD + +The black-box KD process follows a supervised learning paradigm, utilizing enhanced instruction-response pairs as training samples. 
Through this approach, the student model can effectively absorb and understand the knowledge imparted by the larger model, even with a limited number of parameters. This method not only boosts the student model's ability to tackle tasks but also enables it to perform better in multi-task scenarios. Because we have already obtained the teacher's responses in the dataset, we need to run the training job only:
+
+```bash
+python easydistill/kd/train.py --config=distilqwen2.5_stage1.json
+```
+
+Please refer to the config file `distilqwen2.5_stage1.json` in the current folder. If you need to run the job in a distributed mode, use `accelerate` to run the job.
+
+### White-Box KD
+
+Unlike black-box KD, which relies solely on the highest probability token output by the teacher model, white-box KD focuses on the distribution of logits produced by the teacher model. This approach provides the student model with richer information. By mimicking the teacher model's logits distribution, white-box KD can transfer knowledge more effectively, thereby enhancing the performance of the student model. As an example, we take `Qwen2.5-72B-Instruct` as the white-box teacher model, and generate the logits by:
+
+```bash
+python easydistill/kd/infer.py --config=distilqwen2.5_stage2.json
+```
+
+Next, we run the training job by:
+
+```bash
+python easydistill/kd/train.py --config=distilqwen2.5_stage2.json
+```
+
+Again, please refer to the config file `distilqwen2.5_stage2.json` in the current folder. Remember to change the configurations when needed.
+
+## Model Download
+
+We have open-sourced our distilled models on both HuggingFace and ModelScope. The available models are named `alibaba-pai/DistilQwen2.5-0.5B-Instruct`, `alibaba-pai/DistilQwen2.5-1.5B-Instruct`, `alibaba-pai/DistilQwen2.5-3B-Instruct`, and `alibaba-pai/DistilQwen2.5-7B-Instruct`.
+ +For example, users can download these models from HuggingFace using the following code: + + +```python +from huggingface_hub import snapshot_download + +# Download the 0.5B model +model_name = "alibaba-pai/DistilQwen2.5-0.5B-Instruct" +snapshot_download(repo_id=model_name, cache_dir="./DistilQwen2.5-0.5B/") + +# Download the 1.5B model +model_name = "alibaba-pai/DistilQwen2.5-1.5B-Instruct" +snapshot_download(repo_id=model_name, cache_dir="./DistilQwen2.5-1.5B/") + +# Download the 3B model +model_name = "alibaba-pai/DistilQwen2.5-3B-Instruct" +snapshot_download(repo_id=model_name, cache_dir="./DistilQwen2.5-3B/") + +# Download the 7B model +model_name = "alibaba-pai/DistilQwen2.5-7B-Instruct" +snapshot_download(repo_id=model_name, cache_dir="./DistilQwen2.5-7B/") +``` + + +## Performance + +The table below compares the performance of the original Qwen2.5 models with the distilled DistilQwen2.5 models across different parameter sizes: 0.5B, 1.5B, 3B, and 7B. The evaluation metrics include AlpacaEval 2.0, MT-Bench, and IFEval scores. The distilled models demonstrate improved performance in instruction-following abilities over their respective original versions. 
+ +| Model | AlpacaEval 2.0 (length control) | MT-Bench | MT-Bench (single) | IFEval (instruct-loose) | IFEval (strict-prompt) | +|-------------------------------|---------------------------------|------------------|-------------------|-------------------------|------------------------| +| Qwen2.5-0.5B-Instruct | 2.46 | 5.49 | 6.26 | 42.81 | 30.31 | +| **DistilQwen2.5-0.5B-Instruct** | **4.89** | **5.78** | **6.83** | **52.61** | **37.82** | +| Qwen2.5-1.5B-Instruct | 6.69 | 7.09 | 7.66 | 55.40 | 40.11 | +| **DistilQwen2.5-1.5B-Instruct** | **13.69** | **7.35** | **7.99** | **61.10** | **74.49** | +| Qwen2.5-3B-Instruct | 17.98 | 7.92 | 8.40 | 61.18 | 74.58 | +| **DistilQwen2.5-3B-Instruct** | **20.91** | **8.37** | **8.97** | **67.03** | **77.36** | +| Qwen2.5-7B-Instruct | 31.43 | 8.52 | 8.83 | 81.53 | 72.10 | +| **DistilQwen2.5-7B-Instruct** | **34.86** | **8.76** | **9.22** | **83.48** | **73.27** | + + +For evaluation details, please refer to our paper. + +## Reference + +For more detailed information about the DistilQwen2.5 model series and the methodologies employed, we encourage you to refer to our paper: + +- **DistilQwen2.5: Industrial Practices of Training Distilled Open Lightweight Language Models** + Chengyu Wang, Junbing Yan, Yuanhao Yue, Jun Huang + [arXiv:2504.15027](https://arxiv.org/abs/2504.15027) + +You can cite the paper using the following citation format: + +```bibtex +@misc{wang2025distilqwen25industrialpracticestraining, + title={DistilQwen2.5: Industrial Practices of Training Distilled Open Lightweight Language Models}, + author={Chengyu Wang and Junbing Yan and Yuanhao Yue and Jun Huang}, + year={2025}, + eprint={2504.15027}, + archivePrefix={arXiv}, + primaryClass={cs.CL}, + url={https://arxiv.org/abs/2504.15027}, +} +``` \ No newline at end of file diff --git a/recipes/distilqwen_series/distillqwen2.5/distilqwen2.5_stage1.json b/recipes/distilqwen_series/distillqwen2.5/distilqwen2.5_stage1.json new file mode 100644 index 
0000000..998ad16 --- /dev/null +++ b/recipes/distilqwen_series/distillqwen2.5/distilqwen2.5_stage1.json @@ -0,0 +1,23 @@ +{ + "job_type": "kd_black_box_api", + "dataset": { + "labeled_path": "distil_qwen_100k.json", + "template" : "chat_template_kd.jinja", + "seed": 42 + }, + "models": { + "student": "model/Qwen/Qwen2.5-0.5B-Instruct/" + }, + "training": { + "output_dir": "result_stage1/", + "num_train_epochs": 3, + "per_device_train_batch_size": 1, + "gradient_accumulation_steps": 8, + "save_steps": 1000, + "logging_steps": 1, + "learning_rate": 2e-5, + "weight_decay": 0.05, + "warmup_ratio": 0.1, + "lr_scheduler_type": "cosine" + } +} \ No newline at end of file diff --git a/recipes/distilqwen_series/distillqwen2.5/distilqwen2.5_stage2.json b/recipes/distilqwen_series/distillqwen2.5/distilqwen2.5_stage2.json new file mode 100644 index 0000000..14891c7 --- /dev/null +++ b/recipes/distilqwen_series/distillqwen2.5/distilqwen2.5_stage2.json @@ -0,0 +1,40 @@ +{ + "job_type": "kd_white_box", + "dataset": { + "labeled_path": "distil_qwen_100k.json", + "logits_path": "logits.json", + "template" : "chat_template_kd.jinja", + "seed": 42 + }, + "inference":{ + "enable_chunked_prefill": true, + "seed": 777, + "gpu_memory_utilization": 0.9, + "temperature": 0.8, + "trust_remote_code": true, + "enforce_eager": false, + "max_model_len": 4096, + "max_new_tokens": 512 + }, + "distillation": { + "kd_ratio": 0.5, + "max_seq_length": 512, + "distillation_type": "forward_kld" + }, + "models": { + "teacher": "teacher/Qwen/Qwen2.5-72B-Instruct/", + "student": "result_stage1/" + }, + "training": { + "output_dir": "result_stage2/", + "num_train_epochs": 3, + "per_device_train_batch_size": 1, + "gradient_accumulation_steps": 8, + "save_steps": 1000, + "logging_steps": 1, + "learning_rate": 2e-5, + "weight_decay": 0.05, + "warmup_ratio": 0.1, + "lr_scheduler_type": "cosine" + } +} \ No newline at end of file diff --git a/recipes/distilqwen_series/distillqwen2/README.md 
b/recipes/distilqwen_series/distillqwen2/README.md new file mode 100644 index 0000000..fe1bcb2 --- /dev/null +++ b/recipes/distilqwen_series/distillqwen2/README.md @@ -0,0 +1,165 @@ +# DistilQwen2: Refining Instructional Data for Black-Box KD + +## Brief Introduction + +Knowledge distillation offers an effective solution by transferring knowledge from larger models to smaller ones, ensuring performance while significantly reducing computational resources and inference time. We introduce DistilQwen2, a lightweight LLM based on the Qwen2 series, optimized through enhanced instruction following and diverse distillation techniques. This enables more agile and efficient deployment in resource-constrained environments like mobile devices and edge computing. For ease of use by developers and enterprises, DistilQwen2's checkpoints are open-sourced on HuggingFace and ModelScope, empowering more stakeholders to innovate and realize value through advanced NLP applications. + +## Instructional Data Processing Guidelines + +For the training of DistilQwen2, we collected data from well-known open-source datasets like Magpie, Openhermes, and Mammoth 2, along with proprietary synthetic datasets to initiate the distillation process. The focus is on providing diverse instructional data, predominantly in Chinese and English. We also leverage prompt templates to conduct instructional data augmentation. Here, we provide several commonly used operations to re-sample and augement the dataset. + +### Instruction Set Expansion + +The instruction expansion operator is employed generate a diverse set of instruction variations, ensuring that student models are exposed to a comprehensive range of instructions. After instruction expansion, we can also call the teacher model to generate responses for new instructions. 
An example of calling this operator is as follows:
+
+```bash
+python easydistill/synthesis/synthesis_main.py --config=configs/instruction_expansion_api.json
+```
+
+If you need to run the job using batch inference, please refer to the config example `configs/instruction_expansion_batch.json`.
+
+### Instruction Refinement
+
+The instruction refinement operator further enhances the quality and diversity of the training data, which also preserves the semantic integrity of the tasks expressed in instructions, ensuring that the rewritten content remains faithful to the original intent and task category. After instruction refinement, we can also call the teacher model to generate responses for new instructions. An example of calling this operator is as follows:
+
+```bash
+python easydistill/synthesis/synthesis_main.py --config=configs/instruction_refinement_api.json
+```
+
+If you need to run the job using batch inference, please refer to the config example `configs/instruction_refinement_batch.json`.
+
+### Instruction Resampling
+
+We also consider task balance when selecting useful instructional data pairs. The task distributions are defined based on our paper in the reference. You can run the job by:
+
+```bash
+python task_resampling.py --input-file input.json --output-file output.json --api-key <API_KEY> --base-url <BASE_URL>
+```
+
+The dataset is in JSON format, exemplified by entries such as:
+
+```json
+[
+    {
+        "instruction": "The ratio of the number of molar teeth in the human upper jaw at the age of 6 is 2:1 compared to number of incisors teeth. There are total 8 incisors in the human mouth..."
+    },
+    {
+        "instruction": "This is the text of a lecture I am giving tomorrow. Can you go over it and make recommendations to improve clarity and flow?"
+    }
+]
+```
+
+After the processing of instructions, you can generate the responses of the teacher model.
+ + +### Open-Source Dataset + +In addition, we have open-sourced part of the dataset used for model training, totaling 100K entries. This dataset includes mathematical problems, code tasks, Q&A, instruction following, and creative generation. Users can incorporate the DistilQwen_100K dataset, or its subsets, during model fine-tuning to enhance downstream task performance while maintaining generalization ability. The dataset is in JSON format, exemplified by entries such as: + +```json +[ + { + "instruction": "The ratio of the number of molar teeth in the human upper jaw at the age of 6 is 2:1 compared to number of incisors teeth. There are total 8 incisors in the human mouth...", + "output": "## Step 1: Determine the total number of incisors in the upper jaw...\n\nThe final answer is: \\boxed{8}" + }, + { + "instruction": "This is the text of a lecture I am giving tomorrow. Can you go over it and make recommendations to improve clarity and flow?", + "output": "I'd be happy to help you review your lecture text..." + } +] +``` + +The dataset is available on ModelScope and Hugging Face. Users can download it using ModelScope's scripts and command-line tools. + +```python +# Validate SDK token +from modelscope.hub.api import HubApi +api = HubApi() +api.login('your_token_id') + +# Dataset download +from modelscope.msdatasets import MsDataset +ds = MsDataset.load('PAI/DistilQwen_100k') +``` + +## Model Training Guidelines + +### Black-Box KD + +The black-box KD process follows a supervised learning paradigm, utilizing enhanced instruction-response pairs as training samples. Through this approach, the student model can effectively absorb and understand the knowledge imparted by the larger model, even with a limited number of parameters. This method not only boosts the student model's ability to tackle tasks but also enables it to perform better in multi-task scenarios. 
For simplicity, we use the `DistilQwen_100k` dataset as a tutorial; we need to run the training job only:
+
+```bash
+python easydistill/kd/train.py --config=distilqwen2_stage1.json
+```
+
+Please refer to the config file `distilqwen2_stage1.json` in the current folder. If you need to run the job in a distributed mode, use `accelerate` to run the job.
+
+### Preference Rank Optimization
+
+For more challenging instruction tasks, SFT alone may not yield optimal results. To address this, we further refine the model using Direct Preference Optimization (DPO), enabling more granular fine-tuning and improved performance. Firstly, we generate the student outputs as rejected responses. The contents in the SFT datasets are regarded as prompts and chosen responses. Please refer to the following script:
+
+```bash
+python dpo_student_infer_only.py --config=distilqwen2_stage2.json
+```
+
+Next, we run the training job by:
+
+```bash
+python easydistill/kd/train.py --config=distilqwen2_stage2.json
+```
+
+Again, please refer to the config file `distilqwen2_stage2.json` in the current folder. Remember to change the configurations when needed. If you need to run the job in a distributed mode, use `accelerate` to run the job.
+
+## Model Download
+
+We have open-sourced our distilled models on both HuggingFace and ModelScope. The available models are named `alibaba-pai/DistilQwen2-1.5B-Instruct` and `alibaba-pai/DistilQwen2-7B-Instruct`.
+ +For example, users can download these models from HuggingFace using the following code: + + +```python +from huggingface_hub import snapshot_download + +model_name = "alibaba-pai/DistilQwen2-1.5B-Instruct" +snapshot_download(repo_id=model_name, cache_dir="./DistilQwen2-1.5B/") + +model_name = "alibaba-pai/DistilQwen2-7B-Instruct" +snapshot_download(repo_id=model_name, cache_dir="./DistilQwen2-7B/") +``` + + +## Performance + +The table below compares the performance of the original Qwen2 models with the distilled DistilQwen2 models across different parameter sizes: 1.5B and 7B. The evaluation metrics include AlpacaEval 2.0, MT-Bench, and IFEval scores. The distilled models demonstrate improved performance in instruction-following abilities over their respective original versions. + +| Model | AlpacaEval 2.0 (length control) | MT-Bench | MT-Bench (single) | IFEval (instruct-loose) | IFEval (strict-prompt) | +|-------------------------------|---------------------------------|------------------|-------------------|-------------------------|------------------------| +| Qwen2-1.5B-Instruct | 5.22 | 5.85 | 6.45 | 41.37 | 28.10 | +| **DistilQwen2-1.5B-Instruct** | **8.28** | **6.42** | **7.12** | **49.76** | **36.04** | +| Qwen2-7B-Instruct | 24.33 | 8.27 | 8.68 | 66.67 | 52.31 | +| **DistilQwen2-7B-Instruct** | **25.35** | **8.40** | **9.03** | **71.46** | **60.26** | + + + +## Reference + +For more detailed information about the DistilQwen2 model series and the methodologies employed, we encourage you to refer to our paper: + +- **Distilling Instruction-following Abilities of Large Language Models with Task-aware Curriculum Planning** + Yuanhao Yue, Chengyu Wang, Jun Huang, Peng Wang + +You can cite the paper using the following citation format: + +```bibtex +@inproceedings{emnlp2024, + author = {Yuanhao Yue and + Chengyu Wang and + Jun Huang and + Peng Wang}, + title = {Distilling Instruction-following Abilities of Large Language Models with Task-aware Curriculum 
Planning}, + booktitle = {Findings of the Association for Computational Linguistics: {EMNLP} 2024}, + pages = {6030--6054}, + publisher = {Association for Computational Linguistics}, + year = {2024}, + url = {https://aclanthology.org/2024.findings-emnlp.350} +} diff --git a/recipes/distilqwen_series/distillqwen2/distilqwen2_stage1.json b/recipes/distilqwen_series/distillqwen2/distilqwen2_stage1.json new file mode 100644 index 0000000..483196e --- /dev/null +++ b/recipes/distilqwen_series/distillqwen2/distilqwen2_stage1.json @@ -0,0 +1,23 @@ +{ + "job_type": "kd_black_box_api", + "dataset": { + "labeled_path": "distil_qwen_100k.json", + "template" : "chat_template_kd.jinja", + "seed": 42 + }, + "models": { + "student": "model/Qwen/Qwen2-0.5B-Instruct/" + }, + "training": { + "output_dir": "result_stage1/", + "num_train_epochs": 3, + "per_device_train_batch_size": 1, + "gradient_accumulation_steps": 8, + "save_steps": 1000, + "logging_steps": 1, + "learning_rate": 2e-5, + "weight_decay": 0.05, + "warmup_ratio": 0.1, + "lr_scheduler_type": "cosine" + } +} \ No newline at end of file diff --git a/recipes/distilqwen_series/distillqwen2/distilqwen2_stage2.json b/recipes/distilqwen_series/distillqwen2/distilqwen2_stage2.json new file mode 100644 index 0000000..181e268 --- /dev/null +++ b/recipes/distilqwen_series/distillqwen2/distilqwen2_stage2.json @@ -0,0 +1,25 @@ +{ + "job_type": "rank_dpo_api", + "dataset": { + "instruction_path": "distil_qwen_100k.json", + "labeled_path": "distil_qwen_100k_dpo.json", + "template" : "chat_template_kd.jinja", + "seed": 42 + }, + "models": { + "student": "result_stage1/" + }, + "training": { + "output_dir": "result_stage2/", + "num_train_epochs": 3, + "per_device_train_batch_size": 1, + "gradient_accumulation_steps": 8, + "save_steps": 1000, + "logging_steps": 1, + "beta": 0.1, + "learning_rate": 2e-5, + "weight_decay": 0.05, + "warmup_ratio": 0.1, + "lr_scheduler_type": "cosine" + } +} \ No newline at end of file diff --git 
# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Build a DPO dataset: the teacher answer is "chosen", the student model's
own generation is recorded as "rejected"."""

import json
import argparse
import logging
import os
import torch


logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


def read_json_field(filename):
    """Load instruction/output pairs from *filename*.

    Returns a list of {"prompt": ..., "chosen": ...} dicts, or None when the
    file cannot be read or parsed (errors are logged, not raised).
    """
    try:
        with open(filename, 'r') as file:
            data = json.load(file)
        # BUG FIX: the original reused the name `output` for both the result
        # list and each item's "output" field, clobbering the accumulator on
        # the first iteration (the resulting AttributeError was silently
        # swallowed by the broad except below). Distinct names fix that.
        samples = []
        for item in data:
            samples.append({
                "prompt": item["instruction"],
                "chosen": item["output"],
            })
        return samples
    except FileNotFoundError:
        logging.error("The file was not found.")
    except json.JSONDecodeError:
        logging.error("There was an error decoding the JSON file.")
    except Exception as e:
        logging.error(f"An error occurred: {e}")


def write_data_to_json_file(data, file_path):
    """Serialize *data* to *file_path* as pretty-printed, non-ASCII-safe JSON."""
    try:
        with open(file_path, 'w') as file:
            json.dump(data, file, ensure_ascii=False, indent=4)
        logging.info(f"Data successfully written to {file_path}")
    except Exception as e:
        logging.error(f"An error occurred: {e}")


def generate_student_response(data_list, config):
    """Generate the student's answer for every prompt and write DPO triples.

    Each element of *data_list* is {"prompt": ..., "chosen": ...}; the local
    student model's own completion becomes the "rejected" field, and all
    triples are written to config["dataset"]["labeled_path"].
    """
    # Imported lazily so the JSON helpers above remain usable without the
    # transformers/tqdm packages installed.
    from transformers import AutoModelForCausalLM, AutoTokenizer
    from tqdm import tqdm

    student_tokenizer = AutoTokenizer.from_pretrained(
        config["models"]["student"],
        trust_remote_code=True
    )
    student_model = AutoModelForCausalLM.from_pretrained(
        config["models"]["student"],
        device_map="auto",
        trust_remote_code=True
    )
    outcomes = []
    # NOTE: the original desc said "Call remote model" — this loop runs the
    # *local* student model; the message was corrected.
    for sample in tqdm(data_list, desc="Generating student responses"):
        prompt = sample["prompt"]
        chosen = sample["chosen"]
        messages = [
            {"role": "user", "content": prompt}
        ]
        text = student_tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )
        model_inputs = student_tokenizer([text], return_tensors="pt").to(student_model.device)
        generated_ids = student_model.generate(
            **model_inputs,
            max_new_tokens=config["inference"]["max_new_tokens"]
        )
        # Strip the prompt tokens so only the newly generated answer remains.
        generated_ids = [
            output_ids[len(input_ids):]
            for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
        ]
        rejected = student_tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
        outcomes.append({'prompt': prompt, 'chosen': chosen, 'rejected': rejected})
    write_data_to_json_file(outcomes, config["dataset"]["labeled_path"])


def main():
    """CLI entry point: load the config, read prompts, generate rejections."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, required=True, help='path to the json config file')
    args = parser.parse_args()
    # Use a context manager instead of json.load(open(...)) to avoid leaking
    # the file handle.
    with open(args.config) as f:
        config = json.load(f)
    data_list = read_json_field(config["dataset"]["instruction_path"])
    if data_list is None:
        # read_json_field already logged the failure; bail out instead of
        # crashing inside generate_student_response with a confusing error.
        return
    generate_student_response(data_list, config)


if __name__ == "__main__":
    main()
# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classify instructions by task/domain via an OpenAI-compatible endpoint and
resample them toward a predefined category distribution."""

import json
import re
import logging
from collections import Counter
import random
import argparse


# Target share of the final dataset for each task/domain category.
predefined_distribution = {
    'Math': 0.167,
    'Code Generation': 0.083,
    'Writing': 0.017,
    'Computer Science': 0.017,
    'Reasoning': 0.167,
    'Complex Format': 0.017,
    'Code Debug': 0.083,
    'Common-Sense': 0.017,
    'Counterfactual': 0.017,
    'Multilingual': 0.017,
    'Roleplay': 0.017,
    'Biology': 0.017,
    'Technology': 0.017,
    'Ethics': 0.017,
    'Sport': 0.017,
    'Law': 0.017,
    'Medicine': 0.017,
    'Literature': 0.017,
    'Entertainment': 0.017,
    'Art': 0.017,
    'Music': 0.017,
    'Toxicity': 0.017,
    'Economy': 0.017,
    'Physics': 0.017,
    'History': 0.017,
    'Chemistry': 0.017,
    'Philosophy': 0.017,
    'Health': 0.017,
    'Ecology': 0.017,
    'Grammar': 0.017,
    'Paraphrase': 0.017,
    'Others': 0.041
}

# NOTE(review): the committed prompt read "enclosed within tags, such as Math"
# — the angle-bracket tags appear to have been stripped when the file was
# exported. Restored as <answer>...</answer> to match extract_answer below;
# confirm against the upstream source.
predefined_prompt = """
You are a data annotation expert. Please classify the task type or domain of #Given Instruction.
The task type or domain should be in the list: [’Math’, ’Code Generation’, ’Writing’, ’Computer Science’, ’Reasoning’, ’Complex Format’, ’Code Debug’, ’Common-Sense’, ’Counterfactual’, ’Multilingual’, ’Roleplay’,’Biology’, ’Technology’, ’Ethics’, ’Sport’, ’Law’, ’Medicine’, ’Literature’, ’Entertainment’, ’Art’, ’Music’, ’Toxicity’, ’Economy’, ’Physics’, ’History’, ’Chemistry’, ’Philosophy’,’Health’,’Ecology’,’Grammar’,’Paraphrase’, ’Others’]. You should place your answer enclosed within <answer></answer> tags, such as <answer>Math</answer>. Do not return anything else.
#Given Instruction#:
"""


def extract_answer(content):
    """Return the text inside the first <answer>...</answer> pair, else None.

    BUG FIX: the committed pattern was r'(.*?)', which always matches the
    empty string at position 0, so every classification collapsed to
    'Others'. The angle-bracket tags were presumably stripped on export.
    """
    match = re.search(r'<answer>(.*?)</answer>', content, re.DOTALL)
    if match:
        return match.group(1)
    return None


def classify_instruction(instruction, client, model):
    """Ask the served model to classify one instruction.

    Any unparseable or out-of-list answer falls back to 'Others'.
    """
    message = [
        {"role": "user", "content": predefined_prompt + "\n" + instruction}
    ]
    completion = client.chat.completions.create(
        messages=message,
        model=model,
        max_completion_tokens=1024
    )
    result = completion.choices[0].message.content.strip()
    # Debug prints replaced with lazy-formatted logging.
    logging.debug("raw model output: %s", result)
    result = extract_answer(result)
    if result is None or result not in predefined_distribution:
        result = 'Others'
    logging.debug("category: %s", result)
    return result


def main(args):
    """Classify every instruction, then resample to the target distribution."""
    # Imported lazily: only the pipeline needs the OpenAI client, so the
    # helpers above stay importable without the openai package installed.
    from openai import OpenAI

    # Load dataset
    with open(args.input_file, 'r') as file:
        data = json.load(file)

    # Initialize OpenAI-compatible client; assume the endpoint serves exactly
    # one model and use the first one listed.
    client = OpenAI(
        api_key=args.api_key,
        base_url=args.base_url
    )
    model = client.models.list().data[0].id
    logging.info(model)

    # Classify each instruction
    classified_data = []
    for count, item in enumerate(data, start=1):
        category = classify_instruction(item['instruction'], client, model)
        classified_data.append({'instruction': item['instruction'], 'category': category})
        logging.info("classified %d instructions", count)

    # Count occurrences per category (previously computed but never used —
    # now surfaced in the log for sanity checking).
    category_counts = Counter(item['category'] for item in classified_data)
    logging.info("category counts: %s", category_counts)
    total_samples = len(classified_data)

    # Resample each category toward its predefined share of the dataset.
    resampled_data = []
    for category, target_ratio in predefined_distribution.items():
        target_count = int(total_samples * target_ratio)
        category_samples = [item for item in classified_data if item['category'] == category]
        if not category_samples:
            logging.warning("No instructions are provided for the category: " + category)
            continue
        if len(category_samples) > target_count:
            logging.debug("downsampling %s: %d -> %d",
                          category, len(category_samples), target_count)
            # Randomly sample the required number of instructions
            resampled_category_samples = random.sample(category_samples, target_count)
        else:
            # Not enough samples: repeat whole copies, then top up with a
            # random remainder so the category hits target_count exactly.
            resampled_category_samples = (
                category_samples * (target_count // len(category_samples))
                + random.sample(category_samples, target_count % len(category_samples))
            )
        resampled_data.extend(resampled_category_samples)

    # Save final dataset
    with open(args.output_file, 'w') as file:
        json.dump(resampled_data, file, indent=4)

    print("Resampling complete. Final output saved to '{}'.".format(args.output_file))


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Task and Domain Classification')
    parser.add_argument('--input-file', type=str, required=True, help='Input JSON file containing instructions.')
    parser.add_argument('--output-file', type=str, required=True, help='Output JSON file to store resampled instructions.')
    parser.add_argument('--api-key', type=str, required=True, help='API key.')
    parser.add_argument('--base-url', type=str, required=True, help='Base URL.')

    args = parser.parse_args()
    main(args)
+ +The dataset is in JSON format, exemplified by entries such as: + +```json +[ + { + "instruction": "Write a Python function that calculates the Fibonacci sequence up to n elements using dynamic programming.", + "output": "def fibonacci(n):\n if n <= 0:\n return []\n elif n == 1:\n return [0]\n elif n == 2:\n return [0, 1]\n \n fib = [0, 1]\n for i in range(2, n):\n fib.append(fib[i-1] + fib[i-2])\n \n return fib\n\n# Example usage\nprint(fibonacci(10)) # Output: [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]" + }, + { + "instruction": "Create a JavaScript function that takes an array of numbers and returns the sum of all even numbers in the array.", + "output": "function sumEvenNumbers(numbers) {\n return numbers\n .filter(num => num % 2 === 0)\n .reduce((sum, num) => sum + num, 0);\n}\n\n// Example usage\nconst numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];\nconsole.log(sumEvenNumbers(numbers)); // Output: 30" + } +] +``` + +### Black-Box KD + +The black-box KD process follows a supervised learning paradigm, utilizing enhanced code instruction-response pairs as training samples. Through this approach, the student model can effectively absorb and understand the code generation capabilities of the larger model, even with a limited number of parameters. This method not only boosts the student model's ability to tackle programming tasks but also enables it to perform better across multiple programming languages and paradigms. + +To run the black-box KD training: + +```bash +python easydistill/kd/train.py --config=code_generation_stage1.json +``` + +Please refer to the config file `code_generation_stage1.json` in the current folder. If you need to run the job in a distributed mode, use `accelerate` to run the job. + +### White-Box KD + +Unlike black-box KD, which relies solely on the highest probability token output by the teacher model, white-box KD focuses on the distribution of logits produced by the teacher model. 
This approach provides the student model with richer information about code structure and syntax. By mimicking the teacher model's logits distribution, white-box KD can transfer programming knowledge more effectively, thereby enhancing the performance of the student model. + +To generate the logits with the teacher model: + +```bash +python easydistill/kd/infer.py --config=code_generation_stage2.json +``` + +Next, run the training job: + +```bash +python easydistill/kd/train.py --config=code_generation_stage2.json +``` + +Please refer to the config file `code_generation_stage2.json` in the current folder. Remember to change the configurations when needed. + +## Performance + +We trained the model using data from nvidia/OpenCodeReasoning, and the final model performance is as follows: + +| Model | LiveCodeBench V2 | speed | +|---------------------------|------------------|--------| +| Qwen2.5-3B-Instruct | 11.35 | 2.3x | +| Qwen2.5-3B-Code-Optimize | 16.62 | 2.3x | +| Qwen2.5-7B-Instruct | 30.72 | 1x | +| Qwen2.5-7B-Code-Optimize | 35.32 | 1x | \ No newline at end of file diff --git a/recipes/domain_specific/code_generation/code_generation_stage1.json b/recipes/domain_specific/code_generation/code_generation_stage1.json new file mode 100644 index 0000000..3b370e8 --- /dev/null +++ b/recipes/domain_specific/code_generation/code_generation_stage1.json @@ -0,0 +1,23 @@ +{ + "job_type": "kd_black_box_api", + "dataset": { + "labeled_path": "code_generation_dataset.json", + "template" : "chat_template_kd.jinja", + "seed": 42 + }, + "models": { + "student": "model/Qwen/Qwen2.5-0.5B-Instruct/" + }, + "training": { + "output_dir": "result_stage1/", + "num_train_epochs": 3, + "per_device_train_batch_size": 1, + "gradient_accumulation_steps": 8, + "save_steps": 1000, + "logging_steps": 1, + "learning_rate": 2e-5, + "weight_decay": 0.05, + "warmup_ratio": 0.1, + "lr_scheduler_type": "cosine" + } +} \ No newline at end of file diff --git 
a/recipes/domain_specific/code_generation/code_generation_stage2.json b/recipes/domain_specific/code_generation/code_generation_stage2.json new file mode 100644 index 0000000..d2a1857 --- /dev/null +++ b/recipes/domain_specific/code_generation/code_generation_stage2.json @@ -0,0 +1,40 @@ +{ + "job_type": "kd_white_box", + "dataset": { + "labeled_path": "code_generation_dataset.json", + "logits_path": "logits.json", + "template" : "chat_template_kd.jinja", + "seed": 42 + }, + "inference":{ + "enable_chunked_prefill": true, + "seed": 777, + "gpu_memory_utilization": 0.9, + "temperature": 0.8, + "trust_remote_code": true, + "enforce_eager": false, + "max_model_len": 4096, + "max_new_tokens": 1024 + }, + "distillation": { + "kd_ratio": 0.1, + "max_seq_length": 1024, + "distillation_type": "forward_kld" + }, + "models": { + "teacher": "teacher/Qwen/Qwen2.5-7B-Instruct/", + "student": "result_stage1/" + }, + "training": { + "output_dir": "result_stage2/", + "num_train_epochs": 3, + "per_device_train_batch_size": 1, + "gradient_accumulation_steps": 8, + "save_steps": 1000, + "logging_steps": 1, + "learning_rate": 2e-5, + "weight_decay": 0.05, + "warmup_ratio": 0.1, + "lr_scheduler_type": "cosine" + } +} \ No newline at end of file diff --git a/recipes/open_datasets/distilqwen_datasets.md b/recipes/open_datasets/distilqwen_datasets.md new file mode 100644 index 0000000..0a81b7c --- /dev/null +++ b/recipes/open_datasets/distilqwen_datasets.md @@ -0,0 +1,50 @@ +# DistilQwen-100k/DistilQwen-1M: High-Quality Instruction-Tuning Datasets + +## Overview +To empower community developers in enhancing the **instruction-following capabilities** of large language models (LLMs), we open-source **`DistilQwen-100k`** and **`DistilQwen-1M`**, subsets of the training data used for the **DistilQwen model series**. The datasets provide diverse, high-quality samples to improve model performance in key areas. 
+ +## Dataset Features +- **Scale**: **100 thousand**/**1 million** meticulously distilled entries. +- **Coverage**: Balanced mix of: + - **Mathematics** + - **Code generation & understanding** + - **Knowledge-based QA** + - **Instruction following** + - **Creative generation** +- **Purpose**: Optimized for **instruction tuning**, helping models retain generalization while adapting to downstream tasks. + +## Use Cases +- **Fine-tuning LLMs**: Mitigate *catastrophic forgetting* by combining with custom datasets. +- **Multi-task learning**: Improve coherence in mathematical reasoning, coding, and creative tasks. +- **Research**: Study distillation techniques or instruction-tuning efficacy. + +## Use the Datasets +```python +from datasets import load_dataset + +# Login using e.g. `huggingface-cli login` to access this dataset +ds = load_dataset("alibaba-pai/DistilQwen_100k") +ds = load_dataset("alibaba-pai/DistilQwen_1M") +``` + +## Reference + +For more detailed information about the dataset construction process, we encourage you to refer to our paper: + +- **DistilQwen2.5: Industrial Practices of Training Distilled Open Lightweight Language Models** + Chengyu Wang, Junbing Yan, Yuanhao Yue, Jun Huang + [arXiv:2504.15027](https://arxiv.org/abs/2504.15027) + +You can cite the paper using the following citation format: + +```bibtex +@misc{wang2025distilqwen25industrialpracticestraining, + title={DistilQwen2.5: Industrial Practices of Training Distilled Open Lightweight Language Models}, + author={Chengyu Wang and Junbing Yan and Yuanhao Yue and Jun Huang}, + year={2025}, + eprint={2504.15027}, + archivePrefix={arXiv}, + primaryClass={cs.CL}, + url={https://arxiv.org/abs/2504.15027} +} +``` \ No newline at end of file diff --git a/recipes/open_datasets/omni_thought.md b/recipes/open_datasets/omni_thought.md new file mode 100644 index 0000000..e7fa4e1 --- /dev/null +++ b/recipes/open_datasets/omni_thought.md @@ -0,0 +1,58 @@ +# OmniThought: A Large-Scale Chain-of-Thought 
Dataset for Advancing Large Reasoning Models + +## Overview +The rise of **Large Reasoning Models (LRMs)** has revolutionized **Natural Language Processing (NLP)**, enabling breakthroughs in complex tasks like **mathematical problem-solving** and **code generation**. These models rely on **Chain-of-Thought (CoT)** processes to mimic human-like reasoning. However, progress in LRMs is limited by the scarcity of **high-quality, large-scale CoT datasets**—existing resources often lack: +- **Diverse reasoning problems** with well-structured CoT processes. +- **Multi-teacher distillation** to ensure reasoning quality. +- **Fine-grained annotations** describing CoT properties. + +To bridge this gap, we introduce **`OmniThought`**, a **2-million-scale CoT dataset** generated and validated by **two powerful LRMs**. Each CoT process is annotated with: +- **Reasoning Verbosity (RV)**: Measures the optimal verbosity of reasoning steps. +- **Cognitive Difficulty (CD)**: Assesses the complexity of reasoning for model comprehension. + +We also propose a **self-reliant pipeline** for dataset curation, ensuring high-quality reasoning traces. + +## Key Features +✅ **2 million high-quality CoT processes** covering diverse reasoning tasks. +✅ **RV-CD scores** to guide model training for better reasoning performance. +✅ **Multi-teacher distillation** for robust and coherent reasoning paths. +✅ **Optimized for LRM training**—improves reasoning ability and output quality. + +## Experiments & Results +Extensive experiments with **Qwen2.5 models** (various sizes) confirm that: +- Training with **RV-CD scores** enhances **LRM reasoning effectiveness**. +- Models trained on `OmniThought` achieve **stronger reasoning abilities** with **optimal CoT length and difficulty**. + +Based on this dataset, we release **a series of high-performance LRMs** with superior reasoning capabilities. + +## Use the Datasets +```python +from datasets import load_dataset + +# Login using e.g. 
`huggingface-cli login` to access this dataset +ds = load_dataset("alibaba-pai/OmniThought") +``` + + + +## Reference + +For more detailed information about the dataset construction process, we encourage you to refer to our paper: + +- **Reasoning with OmniThought: A Large CoT Dataset with Verbosity and Cognitive Difficulty Annotations** + Wenrui Cai, Chengyu Wang, Junbing Yan, Jun Huang, Xiangzhong Fang + [arXiv:2505.10937](https://arxiv.org/abs/2505.10937) + +You can cite the paper using the following citation format: + +```bibtex +@misc{cai2025reasoningomnithoughtlargecot, + title={Reasoning with OmniThought: A Large CoT Dataset with Verbosity and Cognitive Difficulty Annotations}, + author={Wenrui Cai and Chengyu Wang and Junbing Yan and Jun Huang and Xiangzhong Fang}, + year={2025}, + eprint={2505.10937}, + archivePrefix={arXiv}, + primaryClass={cs.CL}, + url={https://arxiv.org/abs/2505.10937} +} +``` \ No newline at end of file diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..3331459 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,7 @@ +transformers==4.51.0 +transformers-stream-generator==0.0.5 +trl==0.17.0 +tokenizers==0.21.1 +vllm==0.8.5 +openai +jinja2 \ No newline at end of file diff --git a/resources/framework.png b/resources/framework.png new file mode 100644 index 0000000000000000000000000000000000000000..163fadf24ff4b8b8928786e17454245e287dad51 GIT binary patch literal 159640 zcmce;V|-oR-ZdPvv6IGj5H=FXih`5D-vGOJQLJYhiI= zdmDR4Wd}oJQ$Z7ZBU2@DAto*^P7n~Pcw++tB?&sJAwyIHgQ0O+YB)zXrSNcol7ZiF z_Xtrx(NOn5cU+o|&L%eUW-kcD7hDYQ_JMAg0>+cTv7V$s_!cFlsKkIvS7!!NYx>@2=Gy^u|d;HprI3jbR7x1|8R0uPc zfYWX1uv$m~o3aifqQkU=+@uPS>yFU;;^GjB2wx%?DJ5Ddfkr1^K~qIDPqBzV(l13y z7(6!#ch2a5PXHMlY=JFA#WybG&p7}ib7UhJf8QyTm@CjVR4I1c=7R~~$Z?u#NSevYg3ti3VL(8` zEI}ZESD?TTF7N{!;5@MZxdTp@2mU|TVE%u89OoZ32LTZPkrWYBaR)ush15ZpMgRDX zzqGcthEG)n{0aeHRWH$N7!aenMn!697~~r@``(1wPqyzQC~Wik4xUd;{7iq^SU>&g z!8Gj)4~9nC4f6#I0P^o&TgU;2TeY&QWMcj(1`ybw|Ncb=Mpt_o^#Ah=CMc#x&zX8J 
zg7JSmferlO5$8Y4!47bU0)eO7y06>{W%`f$0Rm&20++}0w47hR_5gb}nZw)raIq28 z8#)>+3}G|?h+wsx$+KCmFVk*jP%4(f*j9UfdfMs^0O$0&|CGb;7fQ(g=mE=AHuk6e zJ@{a-XcR)nbCw3Sy3lOa3nHgWb+UFlv4s4)UU%IAZ9X`)k`Y zZ`eEP&lm?d1ay~9>Rk7aSe(`z+;S^0s-VV7t7dm)@Y1v!(gCZH$8 zk<|7W{qgQ4b-F$);P2Dn!2-7y(_0@j0)Y6=r9Fqwi!~d+3Phz1jE#+J@yuZ9nyeR7 z633Hi)k%ayKSAN#jEBc|M`1B4{B-{&5`~HIaK0v7rBNqyJ~x@muUH~Oy*m+)cw_b{ zFhtZSanw=l?+PIK3U*$~kgs)Clg8(R8;XEe)6Cs$zabV+B3kSG4%29zLX;YhghTl;+uvB6D<8iAFv1U7y%~33Eg*>Rsi61zQ4ZN0r9RiPKb(-;`^v)4FiYQ0|{6|K~c=POGSK`F%$ASzTVp_uiFx-f-rFSmq$ zdOcv8N@1`jMPK~gRmi5u7IS&HSy_lyw+~bK!nHN3({vjG2vFYt%oCY#Uc4*@3caFJ zl}ek1iow<09fl#UGO68aixdu@m>`5idkUA68IR}fkio^UGo9B0z~-w3GI{4PaF7&X z1G1J`U>jD#aCLpJcVU@LCCcLmu>Kko5mN}@pWyl%gI|fi(A56)efz3%zdfbNO({kN zA>mNW>;4I4O+KADJ)h1y_U{^|3K59hl|(_lR4be;iuh40QSfhNB}9hTUu<+O!UiY? zgh(mQQBsI?Cmr~|>dFicTKuzXRgsZjvIAaU8Y2d!Y(*Y3fQ1$?gR#oZxR*!CplUc{ z?6Us5A^VqE>%skFw{Q*jC;!U~{u1+tOfe81gv9w@77+L#Q5A|o0C^RP+57)CoPUgm z4TOdPEW5K#+@DVUuLu91RuzzzuluV%3hQdSA3obA3WGLuu3Y)#C)TP6At7O-^&+Bv zmv29C?I^T3Y;D$WJN{|<2+SkTrD&tYbR4-;o5H>g0rv_oho`e%E)x>l#5hb^SU1pS zIvlJU7fBfg><%YpsB@;ZUPL}`_eF#NaAqJ9g%8npm?)eb0CYM%qfR)Qmqot8ZQab@ zwna{VH_KCsxw^VqY;<-^c>R)HZFghj;o&j4Lcn7;n8@P#Wr|Tg;^F1BGhHZNrrBUt zbd2RT;0*Eko^Y^uR4?sINh&(73?q&wS7sw_a2k{GcGYi+Y#I9vk8oLSB#72jdTTLf z3u`noUQXEcX2Lw1c~@-aT3)eF=bt}g>VyL4;$3`mdZ(1reF#5i@uK_TNP2fD1*KRD zUEqeIY4fHVDMX6>*IdNg7ZZWQ&XYSI&?XyCBJq0}&Y$+_6LdTShrxT_DbH7(AmqLE z4GcEe71m{?NP+e&0JZ;m^gu$ga0lq2m^_|o`s9(3dTqEXHWCg-+U{XO~Ys~CWN&-k<>Clr@$rf!fkHMo7#us)tXQ`4~bc(?*8(SXNg0oOyf&Y;~lscESXAuOJ{EvN+(@o5{tp)MQ>>Ps2XCb8W{Q_%2mv zVV`^T7|R^`itYoLdI#wQwDLud78{DiNcH#}f8p`FadOec_Y5uI%Pn=D)Y440MZd&J zScrr#*w5Iu7LM5Ln#?EExxbu}F=VfY66j_~8JbUBhMUG4Bktd>pKMfOl#J;_A290S z2vQoYA}R-j*bEH&>ON7$0izETNO;VoeM9YLyJTO=I^#jH`_tvE%}t_)Xgm(N{e189 z^K&W1EY5F2$$W0-IxVfO72Emp!HeJZM_>BBd)^&Oq%#-S`b3kS zoJ|)(>vcTe6z6ce$@ROvJ>MPvZf2Y*mJS0t&_ZA&Um<(^w%p;xF{Dlj^b%C6 z73gxge9>F*DsN^tk} z)==yyI8}&&f#KS7D2|x#X^mNO0n$+%_lFsr!88Y4TEk;6`W8JORj-Q9KFSS5N3^?$ez?d8k1b&x^J;eT9w&32-S^BMI|9j7PbwHX-v& 
zQZ1vjjNo_>!9-^gRJ|dQ2$jLb8r|M@w~O5(zu6L5C2(O63!{xT7i%8(OAyg0OvtBR zWE8@TTC}s}258`FOg#Xug0S4&+~eo*%+SLBh%}5ehz`dNZ<(o5TU~9f&?@@!iFZ z>1iE)A9{KRqp9-uskP*wG2swwBYxMrlB!;N2=7>?$a%vKsd{La$1eV?g9TV9=^j&ccHY#ByUsZnV- z`L28rPeOgBiVS5xJ8_Zvh6SJ{xnFJf^e!WY!0n<3T}fqiIfKhz5Gq#>#BZvOSJ2|l ze-DK%^pYfq)a~eLu&r!U0(rha<5r``f!2!0Wp=DVUjH%jWA-9OPt7E>)_U}_iA1i} zk^!DQ5=Ke#q1y$2kI<$hqW}rBms6uY094q_Qf8veu3tYaIAeewdh2>#+p(&MDaxh+ z=|VqLg*p+ZI#-PemNDcot26NcDTY?k+XP}v%pbz$`HUKk*9kj~d18t6EY>WVUQi1; z#3E1vp89)E3(2?b;A`IbrU#K$PN56QXWQM0bV&ORAB9MW6N?f1j6rMx9H()7W1ZEB z(Yo)xYcx<@gc%xW{Qw+c0uDP~s;->{`eULlY;Z56w8#!Cn#$`IfbJxFGTsCuMw z`F#2!r?tm2Ipo*5z3%A{KTkPJum|H9o*p05jLz`)gt021QOLvzrIS=4{|JVJ4Lwu-_K2l-oLa4?SBT?)me86$w&vEo%U30LDZprpAaVEq z*g>m*pUmx&h(_MC+!zq6`8ho*L#&lu0E^J!e~V%n(&l0XfTwIOFuP-}McaplPq7g^ z!CY`m5ZPxa40VkM8|=<+U|qtticN@bWEX`$=w~4`x6xq{?R2+VI3s2)D|h(V7yRs` zL(N+E?TdA1wh{#Ii+sRAbo+IPu>0@oEHd@mp(~Z_+xc7}5wmNwK)h;=ggl`G4u*3z z#*qVn|1u1f*8B|lhsC9=q0up?G2Hhiyn87_ptlq20Sw7zc?Pdj}_anQ?6`KJX^8o4zNEU%U2S_wRn|z>ak?LHcw!r9!7MEu?9Y)|!B6nm?tN zxbR&}b)1U~mZ8yq*ViUH?WA8Y);ICnc@F}Yub;bzlm*segqx^l`S4O5w*w;9%z+X< z8l2b{yBQ@W-_(J`cJa%VMFeh+NwBx`+vtio zWfrA#zn>W=dQ}Yahk4T#s0q&y6DWel+J^cfW%AHT5Wv&L;)QwRo0~b#*V>^O85y~* zN?3*8zc}mXU+oM^+xDL>H+` zpwsHeoK7Jexi2;>zCwl?2n;2PQ3DSN(lh5P)sc(`W0HH|i+O#XlMo#o9TUW##VNkE zyPnO!ba~!c<$0sEf~EVT#BU1O7F^Eae|1)9 zl3d^QQv?f;{f)TY8zz4^TL^g_0UAAbZ5w>tq1+DTNItKa#`N^u5r-Sz1L>6Scd>9C zB1l2ZYN%i_UVk-|NLxvF1N2rV6G>`f$V%s#oQv%q-zX5Q!5wAm+MmkCY6b zjmy}+@cG+H5A`vLKexh|k5AGNgsv2tkRXf~ed{oBS7%Nqwh5<*IieS?RjX0> z+}|G+qDRq4B?N{D8&<8Yk)JQi)=INnuW7rBj$%8MB>-Hd@-8Ll#HKp~(b3pKm9hua z3EE{?dro5AHV_1FA6`Y982fS5JAm?6wdytcjoI7Gf_dL(-LWj@sk!Y$>)01&DcSgMy5;3o~mR zr*#fK-N~Im#n}88$5Wwlrd+mpwL*~%0{Y?~?K0&;L}=TjgK~vJ%Oo?q@t8T5tJ!R9 zm+8Oh#zG^6o^>iX49 zy4<^FU-s_T!laafc|*&)g-SFV6TW*})K|+r-5jLuDTArrX0*w9bDKTY578^<=Q2-KoP@3dd4HB`bT@e*g| zA|=e+AJ1$+io#S=+`69l2vX-g8NppEo+mBvdLZmQ@zYPHh_3E* zN=mQDvq}Duw{-K1Xg}-7fvW0Rw!jWUd=rdc zmE4o42(#8$8|Au-2(b^tyvN368{>xUtxl$tuq}7E+mO7eVT%XdpyZW`(ki 
zL}K%OEtTGIAs+)#U_il4)Px!bx3Hx!C+(Rjl?QN)#JZy0jIJx>SlP>Gfj%@i{eba~ zto-g43r%w+jJ(L?lw;f&u)kzVvyRip`Orxc+dI#~88=WS0WSl82tJ0l}Cg%DK|BIaj$x zIh8LymFrMPv*H(J6O+u+UX$hzY3>Zh zsy`LRk$!BDId-at%ZzC?S`Y_;CDCiE^-ap{_D4bpm6U}D2yRu$iNw3k9mRc-Bq3obe|@SSz0tiN+*N$ZO?2oh z-k3cu-ce{Zg5`>EMNL zUws&#jtNQn#f5xPgZKITU?g31o#Y0OIZEM!N)Y)rd0wtGVw-a7K)3`%1|T4TLewCw zwWL9mwK+Vjo_Xk25G8|SUZzu#gvm+$x`58txFq({UZ(6?rzG=>%0f_xc%@W$SzTBX zzLGSSDPt((>p)zsm$C(-Ofj!lthD~nQQ|Xf7oZn6SMKasdRDVJF+&SN*&{PC_TH6v z##cNCb>E7t51k=Rd!6I1m30rN6omE`aRw_whH(G(H`&cTd{1nWuW)25E}KF`XDWwV z9!9*4GsGKK37GU~3jN+OZ}sME7%R*^>QgjwJj+*04ItDL`FwYhIV!eaihwbEloL*I zWWta_qe_#U$Zg4;G-Sd&tN$pzuaB;mZUKbrly-!)h?yA+6$XD1U#Ak2I59K36^5IU zeL_i@+dw6klF0o$A}o}T8W3HRPC^LAa!@@8XgyIOgmd~W$XTI0N2`^bFTo=olZ`sv zJur0Ho*X<&%4IuCBoW_e0aC5gkf^gCf!#CpK#}5i%4^w-_|Q5Ga(JiQ~w51YVlv zf9*+-*d!qiQNze=#-|a+oqd=oALML!Afi&O_y5xW2JnbtFAqcuHvOIMwkt-|2{TY< zvJUfvm)DJ=>lvN8pU3JuweMWctUeFxPrdUAgO0XmA)%zmY4U2RVG#!*11V|oSf<_8 zij0w7^vxtvDqrhmWkO-n=rU^%dq_09i+5m3%jtEDHN$_g1$RF+YXCmQN(Gl%*TQAI!HnX<_rDkUNWHI$5r)5m|arj}(t#=}t zZ#zR_K8!`%z@1f7yFpLV9`h@cCN{FE+V4+^fdo@Vc%s*ULv$Ye+yK2q%@983+a7hs z(wta|b0FCey6r883$wB#fnsFh2^q1iL*C(d3cKWVPUaM;7VW!j`!v#G4zs3E(>qgJG`Vf z(pbzVh6DpI3qW8ZJ@75{sE`ATi;F+$<(-$w@1>JKgN?jq6vULx#(|y*B5t6|7G=Vq zRoLW+c7bb2l55+CV2SSxt>9 z=#C0a7SAAtls+qZ*ij#2_@Ed3_=I_;BJrlFK9f{G!qzc%7$OwJ0;{uvu3JgG!c!R) z&ZK=>Vz@cAY%yb1ODDIbV!0>fH7j{mpQPEr8}PP(?f`*`>j5@8WTkRMiy58bK3zc| zyE?So7&%3Nfyu{xY*O`3K~h9+nPJh(4i_e42L8taAlIVGO&+Xr6y}CYHM$HEA6bjO zQZ?4fX4BwSC2!z9r2@UO3@)uVT+K$jVmFniz9`dxLYaN!YUyhYRl!Ebi+rtn>(Zlq zT0z1x4L#XZU{r#5AU09@qFSUecXO~f!7yuocM!R9!mkYrcxh`2`uanE}Ro%xadyJms5c`u#Z-?H!bCAH3#2{!MCc~?FV@452nE{b%qOxmzaEA) z3IaKUlpai5k@C3h?}awNkUG3YxL&&F- z@(WWdsccrzJPVeUjdW^LF8M=Z0oMyyr6kpK0<;4l)kiFvK!_XYaX63tBWuVcDi}Hv zS4^mb+GK=Nc#<~cnSxlGP*LJ$P(Fv$>&GWSOtQOV*w`aQOF*5~l(vTVV-)hWHp#A= zSeJg}zC?VR1&SnA`m;{PH2}?TMBJB0rIg(=_T;wn((9OU1MAqMbP^>H023h(3T2U7 z7cPL#YA{yBc6rH(U(}jkV+gAx%qa&%KEV&s^r#Gbf^oU$!y0vQSfHX=1>;BHFzK+B z7IgSfi3MnrJy1>AmPj2X# 
zkckk=S|>|^c>?t$t?c$#wYH_3`&Y4ICbRXiUsPHpA#dUR&$u#H@LVr=Ui&gW z46+^r<%*%#mK%i+A+9EMrIUI_ZBswvL`6kV)Brv>&u7hm8U^)ip~Tu3 zjv~ww@%``Z@E(}H@ESiER+-dFeK9+`pMEHz5*30pC?82q~5`qH&9M-lhdTn<14WFui z<7DmDe8W$dosNUM%eoVQwWCujL4+aV&kXyc6+(g8+1U|ovFWxsLvd%3iX-*`Fz~P# zy>P|`|5Y{(%+dufB5mbY+_h@OLMWZJ$(^&`54cDFBBoVD4PvorPv*)i8(zd9F=^OL zhLArI@_hzsFlL5>(Mb8h-d-M3Cj}L_ZN@tjKO(Eo3;>_IG;W~u|KYtMQ7-Si0#se^ z4;R1Zid`gMh;AVjj- z>7#vus4wOUq19{`4ZOJnbcqyP5xy_?2&xq-LZ@l`-%d9phY*n-{xaC0*)R|gKgCC6 zRKGawf2!MLVU@@M@*PQ3il213mkbm>T>nj`0E)HfR9m%(1_Yg{6!QEh7&7T@dmy@ygK^_8 zJ59t)Ebuk)59RlolW{ ztVt^SQy6=3z)bGN1u_4~UA(-!4Bc!)oDL?^-|#8rvao@mJ0|1F{g7%4G48U8a1#xA z9ED4y!-BCL;$N9Yeo*(pr2gi~1F9EN+po7>asWOzqR-1c+fbHFuf*(3sl2_(ur{=d zmAcSYAXgl_ESzDeZ31Yp;JbI?A6pP`87u(6{v&t+;X`_@{rwB4^=6lirb^*OcA>V+ z?S88_%#r!z{5-$$;E->(PJ!)DgLXA6)?lV1o8n8T+?AmcI!W3p`oFPAG1&< zM_GW3B1;inE6?8+ZXpH)!=jQTWAj&6pNw23(xHwK1DSJ9E-u)clT61d^!Da7<*b{3 zqzA;v5?1l(KpZPRG8VX_Gv@XUVCg8n?aTa^N$Z9QMg~ok?Ge&I!ZL>dkO=$$?9$$C z^+FeVaDo4=7$DFmLgf4U*z=u<03ezTNbA^91Ik4NG>DMRz~H%b6Nj;Ves*;9n*|~X z%B}7$8W>Ca^9100lN!Qo-O;`^73vaG?ANol);| zBbOR5RVZc@350-+`(h}xKDo!pkI@(@+Os_vOZe?%fu33*L76wCoPs}@fY&q6`Dn(k zSoIe0na3TO#e6(z>%>ZPQ6OZbIG$3UVRtMWzs{)tnf7c`>R-BwcYH81s@3+LAnx^t z%`H#dLBwo<5tZ^Uc^@E+Hp{3$Rupl5K=DgDFyQT3|3x6*XQcV#R5PCHTLD$8zmyQ^ zIJ6$m!=M-OPgRr`J?G;kSax>y{K(d?ELy=le(yiC@budEBe>np!)28>wr3CIc_>w& zX~1$5H#3QR!Ei*=0-Z4EwIEdcwmFBvfQTLfTR;d!cjGTIF58FWd1*w>#)?UFUljMd zV+?kiB_imnv2+$=Aep%h$};{$&DrtQ=b6>>?wmZ3)w{pjutTv!ZBN0E8nRUPuU-Mz z$IT5AfhbMNwF!giEDZPCDXb_gda(}AJE7GwM*U8M+rw!iViz{8M%zHB=#xAHl(&3k zVI4yVp=fCLptL4FTMw{rkW)`m3x~61_f-#|@Uy zgidSx7eym7G#3V z;}sKet!S+)GYBG+{A014B`Mo%naT_rQ?;fw+cmb7v%Wo*SY8fso&h=cpTA z@NE#_VX0I@)^>p6uim-y=~IGl0m!6Z%*L=~a(Fa5$t=K!Hc>6@=;-JaO1vejez&oz z*kA|!#$*`$Cq_cPLOxvRBt$&t1pw(PDbT(of;b$FkVp(D0if&lXTqn_=kcl#tpX1PF6h+3!;pBA+%@EvMyt+BMf3I$5<^E4F7Y9R^-36!3 zasF$W#ay;NfRM=ug3kxOyS5G-=a&J=rb{ngHLmNxc&4624o|D3j@e{RP=8PGg=!J(SC zm~g@p#3RQG)lvDvVHql*=8;P<&ZkQ#yE{T9ekG_Ojps|{;92r#!hylCGV}va|FUS< 
zz!)M<+1>br-Qsr}xaY%pH475CTm~!b1EH7kNL8KZ%_YJtGSO`9N-{27u9|*dG@iPB zexZ0QoMAoM=iu6euWZolQ}E~;FoE5|xi1zfr8GeQY{M#5t%;igTztds%XC!`zj$US zfP|t5kA(Q<@v)T>p(RW0x{s&v%-A$tRa!8%xzRt{@=GWPSIA@1fw zNG_k}ty}BxA>$rXMJh*c4UPV&`5Idt&f3`SGN^stUvZj${#2|oi7)%4Tz21~vTOuJ z`ZXFTx@Ds~$#Wm>YIt%kbBzJoeu%1Lc7c>Ct}lM?-a%(f%VajdfVRc;EW0R3xZ2@FXgO0N_8#b= z*kS=%_f5lY7U!)u3;uv^5FmNu@I1m~GLoQZE}eBx*xy9EsQOU9H$=lBDw{GUl#Dr( z_}ruwNJ5D3+Jr<+%P+p^4Y);Z)$9=P8a- zKfvJr%Qa@Z-vuJ>Vl@SOv=0?K+WeUm@@&%i?8y3df0STdQ+fdf7*PJu0~F#Sz|o=e zo<}c9H0p@zSw>{Y59jJ_w0Yoxdm@A#B;$pnQL;0ej|bnFGeoQsaHmuLDSn-(5Y=qw+qVqG~LS853(V zJ6;ti2|yzsO$swTYctBPtgSf?DtcP&3VY|}Hrl!O*-|n%S}X*AM#9ZkHXwE?yQe>{ zq|-ztmkZ2lbu5%=l_GfzFpcXJ`vI|{(x{Y-tI=SF0K_^=8@wpegX2GhfTAM>0UL-u zbT*VS_3s2VI;|weTPMQZ>+1w}nHo2~w9g=sBu9)nT_U6>3oUMg&<$8}1GXEZ4qQ&i z>UOz()cNIyOVzSPBHPbz4K1Y4o|lhu=+tTo=lI_YF7g$QPBLVrYrh>-0O@*F{B1x7cW_SfbM#P{^c7aPA!-%`=or=Yq2A)AviY+j1UwAVmak z)|&<$DM|@9F@5-_t9C%8wOe2;*gJKXORuLjn;=Z&@`m`XaUY_8c450(h(780^!xYj zYnyvo;{Mh#$@&xMKKgWev(h|ni_-(5-cR@ZoA=!G;ECS>5v*V0j$^5xV~xo~<)b^;;Jxd}Lpp^{v7(Fz*P77utJ5{lvn$>w zIC5NCHM-WX3uWmDnAKS!Ti}Bvp~$a@*-NBP(Y5%L!HE|`NnGGD_M)T)|a2traTE-)%-ErT^%A9THmDed}c~L9Wmgoi6pqk1#(MXXQ zP==^-UgUdTqY6UO+sX}FP9-4h(8nQ1)GIJcc!>h2v}1HGeXb=`Cx17Crg52OVKc6Y zlwc?T__nP01s+>=DzoVI!h6qLSZ+o zR3va<=QF2exf?<$}D1yv6Evk7Fj7Ke~hl!->Y zRpBq@BrZ;$x5q1RPdD4BcDIhLg9j0-z@gypx~1n(Pe&)bcg;e@6^=Q}z)X>7xd2*n zI^N;!*@Yc?Hb7cMnk2Cs$Xgzs{s4kK1yOHW38fMxH++AD{y**4lJS927>iWwU$<7@&NO<)zh^MZx%sP>E_4hM3hn}fv@XfnRFp5dsAjaIT zaKWcoLO{81abZ1!1B~Y#huju*>l%kj}1b?TE!h=$Q4$q*KS^ zI_g1Tvmhs*`PUgOEVf5*Xf(tqQ2tH)9fsmh>J_wvbK*C#`Xul^)izWX+P}DnHJm_> zX4NXvHsYj(E=1(_{(5oGuI zs9LZ@W4xaxu1Pje*|4|d?u*~PQeAbhOw56`;`lF1{;+vj<_x{Op&tFIQ@`m1A8Wzi=PH9t{=t#(?`)pe=uf#t=|l z?E;u^Pu?n-CjnxPZtUG)9igx9ee;DehLyGx$4Y+K%DIo z+?>efF59T`DO30TIJcdv(b7um?Tph3I8IqGLYoSy&#}#1fR!Dlw^{n3ZgFKj!g~F* zRBJ~L6^o5kL_bRzEx1i4S+_W>N~h2-JXq7AK;{Nzqt%5wYsQ!$Brlx*D{s6m0w_8U zuf|&(eiJy4aE18k5uHyuSm^T7{Sd4aQ0Hp1VG|~fxSJPt8f6O(BumucU)Lb%& 
zD&KKH6r9wPj-vax(eVu|zv(f0jr%^Z2Lz4!9sf7S1*p#)wVwC5LveDYy24tC zYBpHN%~t2s5^{R4`0v5UR{=Hpx}cqRQe3e47q_;;?K0f_9h2A{Gxv?QAWdb=Zo@@T z>QBX!X%j{F)EVWT2-VIG3)v#m3v<1~+8SgS`_5b;lA3u=`)j`+OnT+DHgU5K$bU z3#Imq{DPa%LbzdK7R78otbb?l@bolLDa%Lfvm3Ee?42Z z*$CEeHH(xAMOutJrHC5jCV!az)I$ie*R;eroUBB3kUikzn>B*LabGmB)B^noho z;rel7TClLFWDvaCz@6AFgf6H0r>@2s1+$dU8c(ZzIOw=c!#!1^A>g+PZBJb6Jm+ zXz7Z$R4c`@`=p8R_vy1kXH2ETlk=SpH;mJ0V!0?ypu`j3vRdm4J>C$Rt8OOV@PbqN z-TD0@L_tWHU09N-oa?{F-@Jn74i2SJcgVy%@!y*WrK+6<6w1w+lfZSHemkN{Vbltu zKXHR`ieg`?HHNI1ud2$)p2P`o2m?v~H0>33v_W(!DN`WX0%J>(Sa79mCVBIHAAydk zMc)mbPDch{oo)(2kNYxPJXu!3Zrk>$N53Ak;0PP3D-v=+1P3+Xnrg0ImeOSGGiQ18 z*(@aV!&!h!N2n0(y7F^t6?*(|_>j({|BWPF4u>*b*|Oo_0zw*-Tlp7yYwJe*?;-XW zmabF={zCkIkTF*yOw_=3h@grs09G*N?zAK;?hTbQd8KgIuBx4dpXZ(m0 z0_3-qd21P#+@Aeb2W0ZFNT|(d#nsM~2y+0kS%udLsUWajq_VXD(X$8n7u|*y%R_Kc z2a`!)QFX3y4tw5xieGcE=+Lp<_riC4T^1;Ez1A-Kf~SS!l+&cP`~gkchH^fX zhGcXvQZDiH;Pb_D&$Y^vg(t5M_HXbgKsH796Ib+K6vhYU6T!54D6-xU1Gb6WPAbY# zYGrb)tzdf#7zmtTiaggfTu$dr?6#&0%7X8#AeKr)Srs-RGc(Pi`uN93zxEOj{JLW3 zN&3DiMUnMP_UW3|luYn*9t4Ts_UfTXxnHNHJG5__H0IL(X2;NqY0_vjNWP`ar_#lm z)D^QqX^nbyzEzx*1E2126W?Mn2^cmuAGYa$hRE<|vtLsX8PufV13JxKX$>va)O{m# z9NUM0Xd@TafK|rQtnN0Yc}Gs#PTGP`J%H$lzpQ{?G>Gi0*Bu^R_A#{(=GjxdS-%nR z$R^U7=Upn|St+lMV2Z$6$Z!ffuz0}~{M9b*4A{~a_nuhOREJ{&mfERl12>t`HwYZpFcpcvhc60j6DOVUsF+c7)7`O^Q(wg>*e5L7TKo z`P;z+$bg-O3NT(ubYz3YAF?xa<0mDh=oM6RHwnpjCT}JQ_GN2*>~5Xi-PA=mG+I9! 
zVVd-N4ASFnlaEJ}IdZ>LIwfIKU{DyLZ)*^`Ch@lvSt=vzFzCem?cE1TUPNsoOxJH~ z0d>QITM3yWb14?u6+-ehjR@kw>!fn!k=2I2fdrEjk^DIBAEy!P+0YTGlayB^Bb5Q> zw-#SRX?{r)8+$t^PiD}wc;27TJ(yaqwM5+--l@KNug%WK%IwMd(i%Kbc&&z4A30^%Vkksfqkn~#Ek zCxQ*n_Q`3YrR#Txcg+%q6Er0Z!`W}LxP6!+F&F_FRury4$V6PYa>0=?71K7ed%@qo zNRL1FpEI!wFt>-sv^~w{zxtSjO*OV5M;C@IbbZ>J!%T}&9i95Cmg`5&$~sVY*^pHb=6H_ojnQ0)<9Yefth7fy*=SnhJxtVs-CP4dG@@|5XwRB3$G z559X4?Ghw)7<)^gA;C-U!?AX2g;N!oN}EYzb>;MQwZkv*DNafJBEdHie6=9!TF>%a zo5&IL#F(LQg2M{N3smL<0SD->%uHaIn3z3GXMBfca*YuxnLnz(^^qQ)_Z~kz@*ERi zj(jvZV0)87y|Oy0>SKX5dKBg^IfVqn*lv8%eYz`#3_}uy@!Eg*oZeTG!k#il_CN0e zFqAE6jAdUV6Aa5bzEiB0LXfEFF;w?zhSX;!-i$)v= z>qQ|(R6sOUnMkk2Y72h%hC7kVA8WT3q8~Uq5k3>`urVP_;{CHXMtG@NeYS|&dckI? z)`y=tYG;EY;|HJDF)jY_4yA-a(s?gHG64?36~{J zh%kiIe-Rh|e#DpP7R3NT`p|gf^I!5DMj0Y9m99dmJciE{2vUb{eu|)527tbR6(@a_(4^p+gfvM zw{mM4$D-~E#K(}Ywg;7q!@?3(rO&=t9}6vU+X@1))gIx*76d@IzA#Y5(FMh(UKG;} zM6|xm-e6ATb_8>L2cNUQ@^BX;!i}R+6xyB0L^GvL5(2)_Vcd$o8u5A6FFeuCeDmWP z!}sIaR?TDLk3J+G9yH2CQc(j2S~Lnv==RWW<8bF+*yXDGw=3zNshvJA44`s}>=p*x%zhtCag>$XNWg)*y_EwFQm8hJb<>(3-o8y>>zrvK z-S?dw7Xh`C#F{b<^aU$5@hnR@7VjM$)e2=KNVt8qMp}|B#>t%4zz~6$SOHO$b$iKqPRf1C*zQWpJDdGIXt?5zFW)U*Dte_lKc{%SjT&{gw)(5MHhS z<(ytIUp*bX@kgePVP#x3k{p#4`$(3vON_RNFjY$S%{q zO5=2b#hTQGswH8xSnhE+vRWe)6~9keLTX1g94zqQ^4`Z`GZ_i}AL+14=WVUUHY1!G zRwLvuf$Gw)+daVlV!l}iS~Q_M&^NjXA>cnRCNg$w=7YnNOWnjv@YU-2<`O?r97VA; zEia~L5wX31p9P*iFXp^^M=~K?X&2gPW*zfqhR_F{UblC^`IW%GS=N7EFF>0Tsou9| zaesrj4HhXPb~EOx)9tE4l*8}MG%>)y5Z|*uF1-HG83{RNM$%=12tN6>Xik-@ufAd#5b2jlq%*pm@7~3P(?| zi_;Uc@xs%$Id506L-Rx zmx)CM#L0Jms?oT( zbp@`m4<+IS!Dic!mD?f3Ztsgt(a&tcZSULQ@g^%kkbE>Kk+r{6!l=f>49}MsO71l= zaBbD=o`^wsJzO$$b{#?`Oi2S%;>Arj z?03npP839YB?|I@#323+>4TIKEgX#+cB2cMv~o@H`EV+G=gb1xz!sb5z+#Rnx^FCn znZ<2}PIMvoDZBKZ@BTLFz1+lx9AZWAX$Z?ldOJ!om59q3qt18?btAoKh6qcOeQWD~ z2si&J6GAOv7*HoK_hdx!!kM$W!#&-+jav-EcM2767o%#dw=Eg;8nC3|Da507R%*1N3hZXG2{~9NhH|(aTnnTH*pq(p_vC}?4*<)+Y z-Zr92BkbaYn1FXxatuVi9H27Te#hT=_&@fPP%23-1b7;^hrogCcMA1C%%MpKvTU-o 
zopV^VqHb_%ov;J)w`&ipq`!JQa5xR=v^g)pLViFOK$+eb{VDSz>yw-&A?KL`iwONb zoX%Lko_n*JbQ;fW;gHC3C5@jiv4o(5#;-k1t8@1`v6#sA3lFnvs>x0(rs_N+AoGE@ z(KZqfGs0-)cdPsBSD}Tg(`*eEpFMq5W<89k6V*ReHW^nJf4?`_*aiSI1(TJp6DMKA zmh|?I(VR!?&n4(k4;Khyqm(8EO%G&}{<}V&mTIZ1;Q0;tGP@GtmDwEPXp-0OQo@^y+0`n*UssJINIZ>oB)eQ89rU* z)}*?_VYm>{@5XnGj@DDD-1(7MMYZZ*y{<(y_{Ms^meV;cIPM$oQ!OhyEaFTe*mS( z8qkZeOdai5o#!Ds+&^SxC&d~X5z?mHyv^2826gJ*SitnCa(LyJ9;7MH4 z8|nz2<6@@((uI_mYO~i9?VGs*fuO%5X1Tfk&W;z(GaN>|MS+RcN=Z{_i7Qs&n7U9u z6=P7-DT{#D8_17)a~8JioeY@U&7QJwgwyZ&>V%0jYFO>fc4(gIYC)a(5zDd-flBgU ziX#8aLe|H9anoftUk3}xdN8`H0lh*O5bhV3gLwX4iS56(g}PgKIX}}6E$mJqXX%HR zycqx4G~qhTXiv&>> z89q;KZIX+Wg#hRUJ07LQeKkpV@|mJ$9UU*)m$RDHdC#4+#(CxlEs1;of=;gQC^Hm-_;*Dg zv(YILb?e9>Hu{?hpaZy~sSnDh`%V4@=j0*cqEJ*(DzUF6xgVBBcq`X#syShTm0!Ae zGB_PqfA?sN_N$W|QsEJbznM!n6-JzhOFfylBSmD4xwr~;gKAmcx02ejVIq8)0 z&;@+g;oFA3^=NjH7^0`p!!PCYLUDqVlim`Axy$v{mGez4tpsyp%mtkOC+MQ1aby4g z7R{7IXoiB(3L1(a7pNPPK-2tCG3k}?N|g(Hmr_at?%A8RBqmyDyb?!t#FQHg6 zjO>qe->GRXlqhZWsrzkkF+#J=4-F^1_LF4w*|VterBSsKe_$poB?p_91Jt-^`Gazp zuaKR+Niv7R2ReanNtPV{K61tU)0~^|p2v9|pKiA+q{*myVqdr{%BM9RyPACe-Y{M= zi(gXOe>|@LZyTB#E|l5g5JzGlqW17DETLp_H+10*e3H7>eM9!&6B+{o&{15%~m1IqZQ1-a1>vq0aZsO3T}QN>s0#8av^ zL=`&s9`bmJ%B<@}A$e6?ow(FThMUdz_2N@_KPYQX6)_xc!Ake>##R8IVoQRQ1;}L9=1i7mtjxNtRdtI{3N|u z;N&?Ie}tAYAlkZhT!xwL$k}T;q|x~LzTBc^URVm&#|+3X1AYfsBzmU)e>I{|fiQ{7 zNkb~THxVC`yOc^baMa6Q#D$@NCbg_hUH;n>_&x>&%1q|TNm4{vLqcDzPF9`C6e;2R zh=eG%#gb?`@$5I6oJ?)72qq#eF~SBZ@*o%W(7?#K;&pxLXKQZ9QNtmmPlw!Mh-P%Z z15IwngXG5%hO_h|dK)3FN>iKCg;M3t+J@5G+2KPP({46wvBMv|zDHN~cNR&uL2O8&w%;N_0N&L&oE0v;`w zG7jNO(7)4mipbl0j}U6o-{E=JdWACy0S6D++4m{9_USc47xHNPoiXt8pUILb0#UST zK_1x|OLhjA7XfTKW}}I;ip|0{iPab-2Z*<$Ph7IowVr*e9?4q1b}i z@eDDmQxSu5=*$f{E0Z=qWuWIpsoCAQsX{jUqbIN0E}}T?z+!prYbVT>h~5j`RN0SlMbx0$)g(21Npt_K@VPH#8pPbJuTaD$9p8=q}cHur|VAt?rUk3UK?Q8OZE@)N|C13`=6{DdaW*A=5mu zza+}$Em+J3DWWPE*IeUg>ctU{L!y6plK%4Rv*+9Lt>S+TbMt^QUlN^sJ5WA#{b5Xq zF;f1$`a_Z#40gFvmuj#}VfJ$XryC41V%Qg?-OU;+J>9*;#DqNa;&D56SAgAafoSAo 
zDKa)zWNIicz_Zr*o@yjnwr8>q#Lb2`N0?Fw49Zv!0t&#!Pt?3OMzS$=q$cAkaZ$Wk}3d~JpMhq@uhd;rD^aX#p zTqbdTIsK|%3%cU4>C<6_l^FqxI5~#h2xB$)>aH_E-)LD%Xe##u2ClM#VQ{8T>JjNZ zX!jPqRne^3yLHe%-Gd`AX8j~DZDFd^Wwj44h`cEFPwjwb8ktp88kguom?`607-T;> zTZdh-t#=@Dxa}`EI??R}2{<6|j`FgKlrnh;NA5VQSMPuwjC>O1Kn^~%zfJ~+3(N3r z^*%D=O=2wnN>R}AG3>%*JQ4SF;L;`4hIs`^nmpvGH9{kz)5~(Y5a-Lj@NNzdQG|vq zip?3h2r6@5nF)O1LqQsf`k-uT6%aKp6%h;XKON*iJ*KyJ=t#8hp%#uID>a$AHmfS2 zqmVc6Sfn~1mvJL2_EKa|#ZI-4FGJ!W|#dhKg5x!h_G=H3_TseFYc;iH8S^|#de zT%o)UsOP}_3F?@4ecq0kb+NSUp`~iwe8cP2R7TxUhMLsi4nVVXJoC7Iqh!i%_fOmL zyd8A?HE_LfXrxH~>88fA>E9YB^prqSkhgqGnY0WJ5NtKe98WduP>mk(&-m-)E|5j%q>bP$%lfv@3be{y zb7Mp(Bun5)5;qiJ(Cqcr$aUYI*nT`~h<|I~aT)m@IpiY}k)b=; zPSo{Ou&z!x5_r1a=;==O04J68HS6wP@kEg=Zd#~Ax67R=D8Hbu`5Wo^&qU|*eAmy0 z%Y@B{*H-+xXn+F>2TGPDj%8W=^Oe>T*BgWE97POaSX1+->*Izs>=H`1kBLw_1%p?an)&Z_f#-j|C~7wbRRzc=)KxObkfK z6wb?PM=Iru_pYqq{^J}H+f0b{2P2xa0Up&pahC#DMQG^R7#+TUvctspGJnSh{HlvqxS~tcerdqi3 z66rK-Y0)5+`rjAI!}@I>`p%d+ixe^=PM10;z`dv7)50tD>cuQ^-@I}FYin1%#z*zO zaoVJUXJ!JQGk^U_CH@!ZcrRSL^;T()yrS2J-oI^?V|%0Y=Y!|3d$j+24mnW(T%<&k z&tngtA}ZU+5}fe?Wv2eyl2x)@4{Gu5jax2@;>C=U#DV;sjx8{B2-(rjO0LMxM^mTi zw0&QlRgGUQjF21?HjX=saVoCzDk{Lm!-ly9;4-|Nqrg&W&G6~419`W$zWz( zb|}IIq)kA*uva<6^2_(;1mg7{#J2vL>2ogZXixe4F#bB*Kw}))&E36nEK)C0)JHjn zp|O0+iUU&uU(aNQw3As3Z~K&))j$`Xr{^WCa%^BcU2$HgG)cXGEh?Rlv?V z&VV(S2#GdZCXPu~&tPL9cFa4mcGPu~tuosf(~z6RZS|G5!hBPuTRx4hQSSOTNc#7ipM6#dw=W8MYj1-}z8C}7q86OPP zK~zinS8ZO;Y(Ps;NhcM3hWo`7f;%}u0U8xxyfwOCZT0>FDDBSpBXyPC9ry=a2uNQN z`sE2sU2zCa54iSNzHp64x~8V38T600k92uoE-@A+)iR9Ki8ih8lUo%P7ZxbYC`)3!R6DhZJrX#?wxwfOq=E zM(3E6diBJ;lJI%4T!#8d4xfkeszNM@-`VM}-Z`P*@naalo78!|POl$36SeVhV04(D zF)WCI-tXh|r7-8c?-22oAN(O`d}JtrZo)M@c+7tA3uW`emXmNNB-Y?SlSx2lI8mO$ zZ6!`sI!b9{^qbT4iGt_n6GT?Ghb9pV%dnMd$y7q_=Lj~X=+_<8_J8Y?8he?~H~Iz3DJmdY{I$~@D`C+ik&aFd z5uZI-YIKoh@$;K_jYCQuXs1^y#Tf!&d(C%*tl(_MqqEZ6gI7d@cNJ;-}Zs{g*6bKq(uog5trI^JwbR}!~NVjAC_sVh*^lZj;0 z>@&L`?L_UjY%K}1)BO#{GZHq#j_JLpt@T#lP*BYJvtxQh+)%LD2qV&dEmd@r-8pZ> 
zA4Hr;m=OM!Pnf@L4r{BF&S~Iyii6f4A|7D4^(W4JL&z7r4DjG_-0PPl%6o)_Fggur$?HG$uBNPvxaY2#wlC5#~S2a<3M8Gccfs6h2%O2QN z$i^)1hrAIhslg7!dNTZBJwcop?-_kB*?gSn(D7;#K3X!2D8c3rMs09CY48lLYP0o{ zyq$?YzJrlni8C!J$@h@R7v&U~dHIs-8vWmTh0>U}R=eWpFQ66TBt{0apV2?`Sa{|G zHQtO*bQd~bt*1A7_AY9O-tjdS zaHZUUpX4o^;6Otp;Ld~g;W+97{kwP)of=tgH0?+dJ$A3mT=%bEY6Y8uD&V;F1u0lp z_RSa-jz%JSWjYRR!`uADRcFOvU*z=p%Un7s(pqylKCx8C zMD2m<^9h4l>l*@EAcim!F&9-ZOeBW2D9f=gYG8}^-a>=zVh~V`fBW!UoE{nTEOorU zF-$DQ^xzKrJlEf~I9}y;-kH@EF-P#BVb*)wJ5am7iiSn%0qwdk%d176tf>@utS_GW zPB=LaTmMHBSI~y*NBghvCSpy6Y-?4C7}O|vJ^r5S(dPlh-iM3)jEW`$hK|8U779_S z*GK%59&?SH%L~F+MT*Dm=*o5z&sVHDUW6_1OdZ|%{9GK|A8Yq{5r<53s++aW`x{bt zoGz*B^!eUYqaG*3XeYg+-rI{xcw8OL?wDU4szNw=PXw_bNyLE6v7BzrA-X?HyDDtG zQt)1@g?)FN@g0&D?^GPy*rCYON0!OVZ>4kf4*lt#Gtw_jke8~bMOmfx7qhA?X0)Is zIK2BRlEJlA4)}Qww6hDAdw&^I@*_t{Z3%6OA69U66;L@!v9qDI|st zP@KVfN241cH;K1sY4CB8#$3VK041OTPExGXVfkgj>PW0HZP@8-Z>7QJ5;jC}Qsn2c zqEU@L2hw2BPlG}f{~87yQXq+rZ722maQ=s+xAt^NPH+RDod2nktk&%`f4bTRC;?=2 zT6G19piG*uv7IEN`hKK-v8)mhP{ zkI?c-rY4ek6&N)y4)*`#O~rdkn%TPItKlg92+&X?)ujDHhe0I@$+ZMK#k>AXDB*Q` zh}an4;HU7&95t0fRhvfTf&`er-sR&Z6 z3Zkrle>B)X*&nD0p+?IEgK1wka_9zqhRYW4a=Rkpu*6txw5e{ec!OrIrz<>VnY*za zSrGnJA;q>-!0yybpG8TaChZ;TYyZ}ItYs23gbi^jlidB`mClwKGn^8|WBO_Rh?6NTEA zmd10c8(!Mqa#Lq&&Ct#I+ zQ!_opk@LyadZ3e=XTy(%%-J$(&A4Tc+)J$4_?8G*6o31j@^k^5Uc66Vz5iPh9*5;L zutmb+`DR}k8hRn6%Y5cHkdqcD{RQmf0Mf4bl=9;=2=!(%xRH>YoSZAcy*EcQO<`g41A3LA&$^PZ#XRBQB z%585Pn3AO6pdUzWvE>9D7;GA~iqgr72S`Y>=3^r6RDE++>{=kn^=9&$$7>aVbTVIQ z%QdQQK0U|4}W^W~>2R^w9F!=#J z_MTQjcSkKRqIn^58IJ!} z>v48FofCE(LV1XQBp;5B$(AA6=`Dvsfn7%*b_>|{-6ROK+$Q9f69NGr2Ew}_%R5@# z=?zA2A(zxxTu%{HdDqKLfvOVkCXuG0kgy=7`oeIVG7KnO{&i*@*&B;^Fj06g98eLA z`;mAY;Xo>c8umwIntmzJTOcTVb0kZH%3brwaE`z98%k1Gv4?Nc zCp%(Veh9(t3TOMF8&{5`jtDdu_IvrHno({Z!w>whvvOHqeNHxk6Y<~=8PY@#{0Zuf z;4n5i=TD4WVVSc{02&Bf{Az)4?buroP4sor5nj+Ztg$%)RI{XeP6taZq*rm+$~HA*B2|_UkS{@WeD{X$N5-rOIH^d!KU7j4;m<~( zAMiGxGp><7r>t{?xjV;t!N){#2*>kLEJ(Q9xiI%iZj2J~yJMnY$1G^O^Xu36#z1f4 
zyQ`Ay(eC#UONxNV>`oLApPLQ8RjJkLKuY~8kxvP)p>2bdFjr)~M@kK|&WR%AU+%Nz z4ee=7QoB277_d!zJt+zL8NdkvrKf{Wz`nSOe8L-Xh&>P?+7~}KNy2Oo)f*7$9Y|{d zn+E02W;#RX%NZHJHj!BG{J1NEhLxeuR}h->$NDY63e)IT45ffVpytlLnI|0^w%d$f!(6vckAkcvYRVd^Ls80!# z#`gF!K!Fzm?0E!koD7W--B8qgd4FiAR>q-d!h{d~ic$Lf6Np1X5XkKxzUqNtBeV5B3<$p+`UZ95ht5wZg`~bzgHVtjD>1* zh&!|Nb2?jDWdz%4EwuDM(pDG$`E$YrylJ>d0TRM)8eYw2I`G0Rb)0*Hc!l~ zn8(grM(z}&0kYA6Bq~<{-4+F`E>_PEa(1!n-x#h_%pUkH;y`40PZ=fyLLZ!uOYXEW zkB);blp0bT3nB9)JE=fd@v~+W`I7W5z}P{ibgd-W>l5nkfz($ec;$CO1wZb{?jgD_|f4-0yF1+H~R^w)$E-emcc32-5h!{e^Kz55KM*Q?tFF_ro5qvAxaKGP7p?VkLzk^xP=n1)(2lM{A@vZUU z4m~}F+i<+>gM5ZC7hSqW6GZBjm5;sEifzQB?-FKMDoOvtU#wKsLRor-=VQDJ!wk)F zkE8`T@lg-%u3HF?>(@a8ig;TJ4Wka#Pz;eF#0)OG!8pqeF2{)M7S7SP2!Fs*rF<)% zy*CneXhQGxlGR-D@W<87`gWIZ-Zs+9QUvea5HZ9rSR|^~u$0~_j}(CGqPgz?!GciE z-v(B{agC*a%oKlEgE~N~)e^2(MRX zAg(x4=W^-nta>U7N|z_=z%r*fV0b-{@6Sr|2_^L?PQj9!LR!4P)f-^wr6n`1dS2$X z(DhTI)#3XBJn|(Dr+Pieegx*^9&Nnzk>u^ysPWh`Y2+t_D`?Pmdc$iqNhHH?RQebx zcnI`C>4ni5p!rYp_6iA%ChY+CPqNR)c@)Nhl}BRnWKM6F6`6>1nPJLh!L&qqiBa>Y z2n6Slm5MWj9O$7nr4c3j`t)^`^HN;J{T_pvLTERg4q_ZHp{k8@{t@PSmgNXU&x&^1 z&rrFEuLuYle?BqbeYKCYrj{khiBpVMIgQqhM>mm^IQalcJbvNGpj1VDq*J$!n=daw z5CtcXCKQ4J`PoA;?>_ZAdtX2Vp7ToHpwRK9q;al zP@5x3Z42Zh9rmS>)g~rzNBPY<^UQJn2HTVL4udkBP7yLEl zXVg;ZiZ>2DbX8a_SMLPu9WfK9<>1qoI}7!YS*6|)w#o>EQ~GG%_v0K2-ebMY_AMT- zzbL-XTt-{uV~XHWd6O$!faY3i7NY3HglonCe7n zQ8%Dx;{cK17+6m0H-wQOj`Mi+K-2%7@H75$n7O~PMjwK?oI_RU4*Eq*Z{A9g6 zrmE6x;;@N0vnw#EttDlSVssEla-X}liuEI@=3Cl;?kQo$5O9mgWpETB@+O!5*6-}k z4>?>=I$%-qzqF28RU1hC^ELbOxeg=Ya?#)crf&`obP0BPHIWmtoAfFZiI@){kJ}R) z8(i{qB(gY?v_RK}LZBIq<6pfG{f0u&{f45ibkQ?XiKZR)GV+5>$>L!V%#r{spH?uy z;|^~+XeJ#`3OD!6fCd~D1&@h6Z^UkKM;!3cxy1{$UGQq%I+|KLr(0N^_3gqjSlK+M1sS*Uazc$rvR8eg1?S$e2YY5~=)Sr!#>gHEUAnCu)<7l>(@~w<+;^H->qeFO?yX@EhL#`kG;E zO1Nv+=z~fc$;9if2f)x0ZOy6hk-?%ts|EWb>OM%a$ z$J*T5UfOAa0*E_(ttB2STfB7Con}1#wuIVsGuk;LX&KXXe{M^CD#M*mb_m1KxXauP z^oIu`QhX7U=Ff1Wg&rn_3nYr0bO{E#L`nRB39cx03?Zu4p`FfAM+BZ>wwr9YGY8^X 
z#^mET3_1CBGE}dCeG08_xWOFJ3m(yHlJ2qjD5opf$pVC1)!kwSZyFCK4T^x(RBi04 zYXw~bivfjyC!H*<)70M^!6VMy0=Dbl=No-4_zsc%=2ld*?ES>xStdKfpH_d68(s)E zcaim-yP%x`ABBlvku!$0yj-cUtZ3cL-Q+BbJUhK&rPRe!1*Yr-Ks`R1uMV&h#3_aZ z$8eC2wzd`hMykw^T-NFo*R_`I#*C3~-q@&ninedSp2|^5A$q5X5pUd&yxi{72Ysg4 zit<~AysMhb0&mOBybf3}ngxFK|9)Vyp)USDCq#9cpP_!qLlf|_&k=onk-8X;I;G+| zvC^nsw#E=I>vWP%W>SDZocJwW2=64RicP0oQPAKL%ufr02*mTjJUA*SO<;%v*1J5i zfGevh>a)&~OY3aep;X}!DU#34pa17M*VO%*dui1B6V{rbS44!@rIoM5{@->tg6bzl zeEQAT{^_Nq|Mype$;T67LKjj%J5?-ad4XNMe5fy0z;U?t-Al?nI6$;2Y2Zyb(QTEa zlupS96%}8x>+y4fLc*s%2Ve^+PDOMoyKM^va$;Qg^sWSx=Q1SK^A=we`->`ZbcO@n ztvnnGxGFZAVhnEaXGTrnNB%OFyL&O5a`*eRT-A^y-?>HO&;RDg{jUc_84)B1S?XFl zQvn=EFvh>dbK2>uWJFU1cB_qH%PcR2yQ~@atTm-7A}~sP0zPRcCB;LMCCu#~ENoDB z7kx0%D!BX??4-Y-;@3GKT`s>!Hrc%Q-($crq1|+bL?@%vrzc6nu{$lKyI5!8nU3`b z^`e;y68?3l1pWU!e#}*%dp-<@6hCojVD7>2a71dh#75#7i*=^{Pzu=GuL-fJehjtJ z3Ix1B9fioeK}5pgrL~qXDoTq(%cPGdITf7e%+*N|78%k^=-6pd2opz~2(rf6pEMlr zH=1`bi-n;@g(ore8!tCoOpH>p|J;#nwJmEn-t!&-)4MQY0@FHLT^PgRuR;?GMG+Pm zja|NsM597X2CiIK&WLO9Ug#teVKC|BvoWfwZ9Gof_1rd>Q?`^r&vXHA9+OdH!`QF7 zdUjTb=KuA86kgtq)Qd2LjnF{EKB^2(VF*+VPMpH7Uk&H&9t$F(|0 zek0fWBo?XbX$MmfV_QlT!CGk&eFRaQxX1I93dIp=R;;!eB4W3;*ZZ=^Bk7C()kaE% zDjHy@RB3i+`SU>Y(VZ?;#DW${rlmu@M4}o&LnfFpvPs$$ag_1)XXMN)+yw zg9toKz@d-_>~S7F{I^FDkK3`}%U|%H4BFuT_vlH2e`GYlvf2+_R8G7JY;=aq+(~uy zrGPyd5AQjANi%t8CY5{d+3O20V7K$hj%CZMHZA`YAr4J4Jz&taDP*@t<)b=nzCeEh z;Jp3jwR=d%(~Zp(?Vq>X;9?QBI~(Zo=<-NxBHLx zO~XVSdS6b<{b0gAQgjYW=i{kbC(bmL6C_b92{X-o!N`RyOI}$#hCv08fyiNnR;R;g zjLY>_6`V6-K6m{l->7@hx*v^}YO6H{0kO(o&8JByMxSP(<>UnLTQE`E3%pp zib@X2V||%99G4GKX2%dZIvBz)yb-Rn<6y3>8UG6x@!Tx<7AZQ=Be6y}016dnx3{Y* zl$==5M;6F*As0Yb==yqprSUsmq%p_dOSq~lP{`SEr|D_h_W(umS$8NvKFTM5Z&>s( zXL=1opO}YJHoqvo_+WL~_wI?L^cek~ z)+>M?m9`^F_ifzK%iYHxrp_hAw885yA1tQTMonj6yw)P(#H$1MuiZttLkcE@iJ!<; z-tFF}RDWE;uq$jsJOad`z5*%Dy@P`y*FX5bUG=SjZYrc_2#wp(SY&w_&=(dAJbpdj zP&V4EB`X)p%h|UAn+qe>|Nbq^0^XJ?!3el}jg|{^rN}o@YOYsX!q+Fc=fH+rWnlg0 zF+gqxU3Du!?&|7Nt<>gdeIPzt6s;Oxto#n%fS&AT_$O=x*7&jS2E}=^(j27a8nDxD 
zzb@{r9Hb4&RDE2LM9+86tn6{hXQf6*S}z&$Bl`??rNy*B8=<#4#4o+}>=oO2J_PHUZ=%(yGiRP%F>? zq_Kkt-QaBODVuZlg7Y=DNs{R{XTI7X9u z10JFOI2F8wHViNi?vCdIPq`v6=+bi*t2bnAA#b_HV(+*#!M}e!&%MK)Ls`zNlE;ml%RIMU~k1wVvtQxp#0{7ZWAlm{7#%%h#m zm=w#{Y5iR;9xKj9y+~VcT9hhB3zt>)_~5HHRf;E_ZN_`Q6iN$4b!w~J;q-n_g}Eh4 z28=vPeSxsF8dW-drZFP|9mMw4UGCTZtIc-IgK!_ByG3xM14Ogg>g?$*S+q4Spdxr-&2q4FG{=-`Mhr;@s=$9NQKJT zs57X}%b`4oBjMWBM+^MVMQ9KB2LPdxjKflQi8k^2YTHPQ=6y!IrJ_^#1GN`I?sex` z#&bbrzcy~Scn&b3n;4d*u8NvSxDZ$Qg3}N8U^W|F6QDCW=46qCIizs=2i*FB?@Gv&oGwDB@#bfTr{qbDLzqPLm zz!G0+bZYrT8+Q{BD3D!n!)jy}*?+4>h_Y_>$5a5@Ziz?`oMF%Ki2M~ZR{OKrc#aSz zpXKcI%zG(d0Fh@YyW$PRC%D*3Zsc;gT=QeyhhI^J=+XdajYXNbaC14$m(!^uld!Gk zDxfdEi@u9Ov3Sd!PJ$llmfvKSCgXIXNfcnfF{xyUg`@bsD`%C-nMZ9JhGTI5?(gh! zJWijVh^4LUKMYI`hbb3yd1kslpNU(qD=w7Ps+Z~V|8j5;jFlB~;Irk_?RKcZ^Xmd( zp=t>@x5&dLyXKqy9lf(19!{{J0nHFqRJrb?f{@}v0A+UKr!O=FbAHcquM92*Fjf14 zPe1rm*an+keOw1pJPa)mSWJ8;c|Lkyc@K&StVj~3*QiSD0=8+#T#4vT0z`O9>xp$q zoXz1ldiryLHZ7tZnS*Nmb*DvcQN9!7x0>tTzJTYq75Hr&F4sK_M1qYBAVW%Me7H*; zDS>TESlG6Zq2zZ)S>j$whOmT#GRZnD?hN^_Kq;OD$p%t20>z!}$g=^j$QB!Q7KznD zw#B$cGdqx_OTJ8df7e3N=WQ7C44QhRA_IJ04;UE_HitGppR(ADn-K4e_^2CfcIzR# zB@c<-kDg{k?-F}s?=HQ}UlY6Je%UjFurgXb=wSFcjfm12Z0HLc{;W5hpg4mUA3I-b z{$no8m2{IR#C_%K?(PmTjlLBN@Jgi>sZhFrl~R;MNP#Ah5>>jLmPn8R8WK`K=m2)) zP1^b%bSO^L>3Ej%=Kj7A*pndjgeS7h<8nfgXHbS{=B{v&p4fEuLlpXv&_JXJJ(A|8 z_zMF4dL*gio}_A_DP%gA5rtUf-cvudW1=K5`{Cnp*teh19mFy? 
z>e>_~SMPig`5Zd`TbOiE1XQMH(1XC~N02;XAaI40Vj5^&DUwK-0NCONV1N&@7YDpd zQ2U0yg`uJ9$M~(P)fr0(Lx}o}jU-T0<`F$J410odqxALbBS2CL2neuy+!*r!u3ww= z7Ns(B1Ss)P=~;=qN`b9qfHFwq`EX{K3sQ;D$}-&Rhq@aH_P)oQsG&Q_CBRR0ir2Fv zw0M@bXVMwWwD6yL-JS{YDH2y`sd}T}&uy|wd_fpQn`YD%Ama74-vFq%HrIQob0fPs zATd!Y)=sqtjP=TjHXEV(I-&Z!W;AEK_eNw)p*qz+qn;b_Pfl z6cpOrrXU)5e%_gX`O32jEFDPtIZW;`C0ig2+riy}#YaQefe zMY>UxgRvtqLr0@v%aC7MfG_X=K0S|1zB}4Vm!Yevt@>ntWVS>^p(D{8CGXHYnupbJ zXMbT#r1FK>IyLyUOBmd&b%q1no4a@D-=4V~DB2B6_mau!O& z5V3o|II}8lfr7H(#BaqpBdfAvvIXD1y}gwH4CR9kY(9N9fVv%ufQ*bIWAV4G(Ot7g z0<^!>Vy=kYa-PPCL*gZt&%+TE%vS;uxph44iww;7&yV+s^CQIns{Q?@EB%zvLr`?Z z!$$bjBqlG|v1aypL7%t42#|ZiaOK+bYN}omhdu8 zUvDC-iS^J*lOKU!;Mn1m?E2TquL5R-$T#Y-EY<4BSuUABb^f=q%_;WlPR+1gxDjhu z_=Wzr{gv9=p2TwhwyI&!M-Du8l1e-C{tObT$U z#1{KIqg8Lgr2@BnbIbv%>if1(AB878t9v7^*?9)65TxzKz!7ZyVEBZ)EZpnnU z6#2L+bSg<-OfL%Euj4TTd2k^Uz@JDUZZuFP%__-kYSLEYM!#{jG!H@~C5eEqsP(ON z_BzTNFNkWhP zo?;0S^=$C98z zka?IFs&6D4Td&+Iswq1Tm5FcaTHQt7VlIFdxB6cJ^Y=x*oCue`UqZ2D%yxu_X4UY! zstU&aHoWi%R+WUL<08ERVp=;isPi6SG2IGF#BUUalx?!$*fSKMMS(9hTPRuhEtGbC z2%q*Ghu%UV2a_PkRVU3lgo&~hDhNlS4X(UgckmmV)r8AfPC3k`=0*d@;D(gNFh7oj&remF(npRm-XgDuf_>HfCLtFZWo+ zoR~;P2CgghV4~u6dY0i0T;)J&}7TZ7SGnQ+`i$f*8`Idnk{a2s{od=C5$m%4`|8kXc7#FE!WTF%w% zX#0KL5WLoU>uDLP*Yz$l_qID*S)F$`Cf!#`620-WJTAA)O{RX%4qENLQb^;0XolQW zV6^Mc`6b}%;IMbYGrQRvT_C|nB7U3K0Z*}X_Qr+CbXvz|v;EoK=w33lEXPaCXltV- z{PLVo*IVex!0-Mh1Spy%NLiwkppLMZ5E-1rr^ zRZB(0*Q*HTLw7{M{l%U{MT8aWl#OK(E_g*Q$YIxbs>b^xAS18&9UVUtE;&|pIPIf| zne7r8pQkj1HamUGl&Zh&0|ggx_o?g%W3~qYuNz(DL~HgG72`@j3cVMc1)g>~-uxnM z8Wo$@9k3To5edW=4xHkpGI_p+PAYVpKWvsMlZH=Do$0SF3N4f=AK#yGsw4LUC>vzc zi=d66d*RX zyjIvj1;)HgQ6dct`g?=K&rFI=}0FaTr=yi=sRy2!h0m;*3am0ZmxH_zx zyUtFIPOBAm(}oT!W@J>27Rk}XN~001onm=h7NJ`2c0Bg=?ea4y9Hjz2RmsI9G`#xl zaSzeM1Ixw(r?7Jo*JuCpy~b)6BiH5UFpd)VCnTs|>}Y4^HWc6O_+LBi=1_$^J{$X) z(9U4MW>z|(#X5884*_S8F-3b0FqR7eeGdu0%J*J+16jyA^!6w%J^d|)&Q^wrmsSrs4Rbvfkng1vh-Z})W&5eW9D_}5@|H;TV% zfawy%uf0AJ9IEYN;(?_IZ)bq3ZsUjW-|%1OBK*dr*191x?f^19 
z0)ZQ4qP9DNnuYNcf1iWUX-x||e+Vb>aAh8oYx;;x+;G~ApjkIW0Ot#_#j6|P{@ZGL zat3iBeH`vtSc=^`>IQGq&%v)gS`O46>tD>6cecAW8hppi!{Jh~m`#}1y^OJMF7S0= z(7uXCr6s>FbD(rzZnevXBb=i{Z*FCD8BbT+H8^h4eb712Mc%K4u#mEyF{vEdE_Ap; zjTzKhT3(ch`p!c}iCWL4{7*lluMP5z)2;_tTa4ioQpjJ{7-ad6btvbLw}`B6gl zmCO>RSciaWo@K>xrUNqD?^7jM zy`N69IgNN!Y%M3fI4VrnbblDv!1$ii+H3=L6I|V`FdUpKbvL|Dd5i|8SAPO{fJK-j zXx>sjvTqSA(&Z}K*;of^`xj}ne3b6D8`cdEm()&<$8z;ziRk>r2oRZn*r$~m?RqfE z%1d$+54-j$`ej3FTDx6=v&9u1k;MgeWP5JUKb8uH3 zP-YdEnu~v#UIiQ;*qGIE*lpxDUg3)J;C&c0(HzBAT~Cdw)eDVbyj~yZV{imFbj=+K zWv4%ksR|UvDO@ezGtpf>)~&|knWH-AyIz^GtTwz5-!9&tQ1EBC=?p}o+Wx1o31lh^ zl=UY_ir9;g^|l)7fH5X(u%3&ch`l{;zM6Jku8{T zNMFxS42Gi)1WBwAJ*T8rT^HYS#?ADqtR3$k_^K9Zf@Y-0;xex+7j1q>;IufM;NWw6 zM2q&CD@TR>H49pA@#JrP0aitsYdu3UnH9;+R(+UYO=nO@cy=22Iey^XDT`8c3Lu!A zPw}QaUJl)0b4(viPk>ebpx0^0v|gyD%liZ*mxJ>hfE}UD@gi|Szs|6D9hE5>&|OOU zwQEEv!&es~5(-Ey<3y)i&xg%7zLr|P2HsEDsBDa-^r_Eo`M-+`fWIn9JYd|N3X%o< z4oY}?SaUc8C04Bx-&<8Bb1?!c+^*FVDzOJQG{_Ui1f-y00 zwb62fpzih11SmcVje=jkt?WK$NX?7#u>ou@PC|oDFCF_uOp#ZH@2B!8xn)w!s_U{JKWI5wA|QeuI+I`px*!CR&ip8Aur16TB5 zdoq*Ho-E~-O6bB~%c{(sF_Xz0>$a>>Y2$_Aa@Fve!FmbvzmEblI8U26KPI11$j34E zBDQE`vIJ80sfgAggM5k+D<9ql7LnkvQpK<`MToeX4l^0b-1oWo@%S}Yjt+; zD8v48LXsP?m$&j;8?>G)Xt;+^KLMb-|k~f08R(XWKQFcKUM2O{++L~YB0^0OlU}R zocU>S9*XL=*^19xa)@<+K-p{jhnt>PV=2Ji{RSM^%C%@!5C%8(p!Rb?XedNrvKq=5 zv+V-x9;yr`s|AwUDT?iVc}9-%IQF>k zO(rrj&b;K!fsY{h8OK-{nBlxjc_GxGl0E#Qh!q%JMqt+BF=80P{+qy|lFOpIHtxI}QlURX{;! 
znM?*xh*BQVaGSBtbp+dd0OrIYWL8)-@Yd{UNJJr)y+}f3lR($6+@che1<>U3Vt08` zF4}1(g+-qZ7wG#|9aiPDU~K}`JSikRRCIGJ zMRuBnw6|JkPK41SEJ)S=c!CmNK}1UnEWYxL5B#DDp>kbK-kprVriD}#7L4YE@Ak;Z^uB-r+>~1zHrpznGW+i7| zw!h$QtC2RXI9xihb@lJb_6Prgz?Szp7%+WNI|b%;5eKU&vcZED_MQbpt`(6>yDyEj z$y`<}H)al4PTK!^X#hs4po0CBDdPr>dvcK-z4!Kez^%!bgC34S=$x1tf%c74>bEOvBaNlLvixzv89%cHTZ zJo5=XnpcMF-T8dK4bOx_NL+>Pn|i~{JNZDQ1)Jq)Cv&*fd0YE8T06_&16pcz&dUD` z%8=aH00StD)BE|!tvc^Up^E>@$#CBB(;z?J?;kV-aRas^Ad6##$-HKYIzoM(UXJ>G zm)U|y$+~+3mQs+sN|)3s85EZ3Wf+WFA(X_g$IClO$-D*#JU-j$Sc;b5YgaQM2w1Hs z0gz!Xc1z)a511}Yjak6|{Zu|%V>VRcRgUdQJAX6Q&f5}j#UumlME&=_l0es?7^rAb zPzL%diHN(zGK|mFMn#LGRCkaV9VEiWsE5uJ32!MO5iaE!&bgUo-XN}zuT4+~c%!4X zp`&FAsTA>_tcV2P?+S@d;PL*fY05W!|LkLJHrvwXP*%ZP5!clkJlYd^tXy5Vz0iy9*m@<0jWRa zN4ea--@2UB81;~*Az{^^SN~?c;R8C)%_L=kO_$RF<~Gkg9R3#nxHV?%MgsEjxgc)V zGR?sA{lyA?K~Wr~fgcG2Tuv^0_Ss5^CSSlBSh0vK6!PMIbv|WM%MAsR`AbKge>jtf z{h-P%=o9Ck7%m+CLG2YPdFVZcxOKbxvid(~cx~_kVQ@{YfskuP+$1?7md+!-z_sR! zADpCu;j4gEV2?~OT*Uc2=IjJjMb~$Kq^XrFJ^=AKYi}b#6+UnlY~`?9 z1r8PjlIS;XkYf=v90o}6Ax3yZL5I`56JE);>`oRzm@*KGUGJ7$WhAceGUfWZ4fkg}13Em|zy65Q zQ!|6{{ZMj`U5x@gN|M#G(VD-(@M7`EBR$f$6W;Kyjf;M**h0JgHK5w26;A4< zI^dlDxCQdMWg>}7wpzmVn-Wh(zYHg;PF}Q4|S96TiSWBqLb~VO$xOD>6*5GBfDD8Nft9uP7i=RTJD($4ROX)~V(*8Rr zN42y!ex>$LTztBBu)i(}S@iS>`AM1WU}C|cf}kP2T3s1V_%;3; zVk+D|GO0L{9@d#}jh)#Knw!q}J8(hGND!pFofw;D4{`<`@i=>JY((j-7rdjnp6F3$ zGa+s#vqm1Keav(IuF#U`JZev{6xpScR@qXg5ozJd`(PTmi%c;-9_Vh7pN+^sEJPHU zrxXo`DiQR(NZB;@lyt9Y3()Mp3rP9^D3!$1WJF@@_WA~=eT87u!=7jdv``bpPrW2} z(9;^^g3cGw%1G93faT8wdFOe%%J_^MeA$MN_SFqn?p4ti`L6k4vi5--p=O8tU?c<7^tYGWzs??ILmU^V zS5*%#=`^+MgPg69TH?zUShYdD)&-P@wqqwybV0R9Xs-8Kuvji@R_HcSsU1SR4C&iZ zc{8Z|z(|}Tb%b8r-q|Z-79}8k#($=0#APe+cK;|UPa2Y9@foCH?_QpPdWkMb6n9eD zB8aBrat382D=HPg_2ovR&y1JZe81%g)}TWs8Eav8h6x zFiA&CLSygi#7HtcToXQ-p|EI7zOBqtI~7rN1SAEP{6-L55;g0PkZJ}O$PihL6i~F; zrnLk?Dum`2O8=sJX}t(>OuZ@4Y1*i)_XoKPTn#fm`70OmZ#-OV*t$DR{hSDVUX1>% zRfOrDQw^Ii0r8(zReofVifdfG7`h1al#>P0^%4=v#nrd`o5(}T-4f1P@BN6!sq(z0 
zW{Aww@R@&_R>?z%tMpoxxrkK1knQ(*ylSK1#3)Cj8!<{%LmH#_UCoy$PD;-Xio#!O zJH{yI3mNs9Qs>@RFb8IygfHBruQrou$qgAXiSxs*vfXUwq>m*v0~oAv2;D~1)vZ~F zd_Uro4$JEx6all!X-)3odZ>trFV4xPLw_5MYs;h=`bzQ8k<>W;dFSxE*n(ha+8^0} zC05=dBZQ~)6_qF5t1B=jZ-M1;O@ z5t?+gHE)KnhK(67eWOP(m&ZX$O|3=6{czn5U%9_d1fuGJosbq|f*rNy(o;WbdkP9) z=Rxvv>p)VOl)+f@5OOE~R40&GiQq8X6M@<>fZ>MDV);%`wM=(tbCP*zBi%sd5`}=! zfR;vSWJx$pkn8JuhLxKbz2n2|XEhW4%+*_CLF&`YN`X7V>ez`{i8B4{jC@A^KrCAE`elDYo{DcP`6f!S$e>K^jI$*&>GF(QSU*d&}!ddfx&GrRs`d{W?z?C zplq>#ilyLwjWGiu3Pk?Xkx+UEGsMyZ>g1f*H9NzS`}wP>vG?W|+pii_68L1QJ%w$S zo(eAGVa}>+(v}`s%wIINvr;Ib`LTsykcRPf2WDZ0X0=2ZzSGQtS@IxzUG|G5*-Y*_ zQXN2QES(YNF{a~-Z^y6_soTLT;4C9E;=p|wgSvS0jN@^!5&y-@mNfcxWcp z2XZ{>h%q$KGv;%*X1!f;GMvK%ZDJn$6)q`))+=K4O@|_+f9(DT@gA^P9cXkwN3*sR zMU12M4f}1@N2tL9U;5+n{2UVk@RNIgN?{(3KwjQS z*JI1vqA130bmt}Fb+B}xX@ay4ZXNAODnxLySmfGfS28V|e6q}b%WR>L z!csgn0R?{s^L8WH4!Ng4?~Ecw)y|6X<(v7fV*|sQ$>fDYxt~i8coWRj2#hpkpEiRj z``3cPW(H5WGKY_%rojlZOYT=4rBuH7nZMIgEWU^(gUu+bykk(| zlk-X7)k}avSg4(2nGNI@K)=as(p zOn?g<_y3*l;F=(7$HLX9&!BCAJF&phC6@uYFl& z!3g|@CQeQOjn{;NjW$1cIOs?gi`gVmQiy1DZLm$*vXcowPtKrRez49Aka#lYSNsB!=iMnyT|nnSibGF}xEc;P;Vx&G0z?^2iT3eT}6? zKtG@oxux3RtjOH3sRVvlg%3+h+kH)}J!bx?f~@~tuhr$(fI6@k(e%b3f z?E(27;&pBi#}0xS&XQG&Pk;85<3~m;gg?gaq)p7KReyAfZmBf%-tafp)Zmiq7aUTL7*$9!>g73zPZ+v0-}bZ~;Lh4LfXdJqh~L zmU=fnO6H$Ly^B8(1ehN1Oglo^%!#zLs??;>{bg}Z1LTFayU_`A5WDj_oPXsiIq2hI zSe^)1JzSB?zVXE)SAa?E0UUIx_Y3?NmY`YhT)IKFdg?up-7TY$6_gNU(A(PCd$U|mcSWSx(s!? 
zc4}Lo_Z5XVok-NLc8g^`G^)k=0(196QnK#($*P}xNT=6hN}nr3ok53I%z@RDBr|6Z z&{;&0?3fR6&^7mxWYc*XXC?W0upm`{g2R3v=614xuhov!#f0V)paI4EAyAe@Z-GIP z=Oy+lgOJz?h!ifs8LQm+UH&ftGT8{vQvl@$@hk`aks2OPXAVIfX9;$=7m!6cRMHC; zhj_!BSuboRyBz@*h^Ie->15(DFA&_8VX|$P^TuzZcs9op@cZXbcmz*} z=2cSw9TMC!^a>0@pn=kmr4-Ct5Ql_;6xIwN9`mpj)RaG0-*ef2=c9BLt-Ix~9M~fb zN?3J2bjOPO6OGPpj#{S()`)xVeqc2iF`_n-!pTdVc06>ymZ<6wV>p)(mA^$(I+*a% zHr2cc!efRlcifb9 zYgb2peAs`DMZSWrQ&s!#m^`gC(YJFw@Dql|@~73ZEd(N~rE3)L$ZIuOri9qKo-F1n zbAPKtMo-CPmSu(ri&6GMJg2!n(VQH9*8B@O{#$uFClCr|;CsNnSXBr831>^Vhwpaz z=s*||5i>BJqzcRZ<=kc7I*ZlV5z8XcOZ)wL+Qj8_%rBG6IaxKBxuQdvN#Z*kL1O;? z%VO3yabzg%N<=V0tjvHeaVi9?LE&vEWe7j2evSdD?b7I3vJS6E^oOLhM<-)5Pi6^3 z$EQ4Hw7@N(^-ul@V(9|?UQaQGMSrS1vT8&f%nXZY5@y;;t(6_E%H?ha17GJElmae^ zn1i&}uGlP`ZOW5(+m-kQTC~9_lHo-JXC#rH^!|8BS)Z6)y-P{Mh4o)gV@C{Jhy(mL z*^NzRqb(*)GW8Re3~3qCh3|dLVL`B2#Hj$s|le~ zbsOtLS*qObj$~|ZRHRiiu|i>e1hMZFhOdKBX()+dNbE>I==IKQ(mdO>e?_8Hgw?Wy zbP~0`F;HI8c0n~f(;w*I7AoeY=aUuske$?XsPE$l0aNQsL)51)!&r@y+odn)cqR=% zk`8-AVv<_1yt4qp=y2a$BqusBq=@$tBfb5V9{jh(BUh<#olzO(Id zCIjD%@*>p^>rusNIYZFoiu8ITeYxh;5bQ^aEm^N0Kday!4LJ>B(Y<@;BnvN#H znl_MJoy}F^A94Qui88_VemAN@e0BDBL#@L&vS->dTD!@PP2&3mK_FplKbANGOtdss z;d}T4QUecOJp~0~(&+dY*05Vas;d-*GYn@kS_846#6> zVsIOdo5o##vEgs7ckTaAPPDWTz@gby;^w*rG--g&bn%p(cN7#z<{HZlkyp)bzNxMX5sR+FuvJ<1RMkz&vH z)5ee=J3_>dZwsyRj6s;7*C&x%LW60+4isb(VQt3{hn77M*b#5gU%p1z-fvNtk|R=} zSx8lzc}_htNu(sS`rVg+l-X7|02To>4nCc+c@#V_LCF|pUB!f{(I)Y--83c%VYVN9 z);o_(4Xn3D&I8gPvmj*0Hs|WiNEPwT}FOzL~DkF%b6gX&Sav$g0N6b-6;k7K!@L3VDmEe`iO5=(uJr1Ju3wo zrHx%VOMD2>Zit&LI)t`Xq?b6Co`@ejCX7a5*2WqgZkdadrkC15vInDh&*7Lw(5< zs>$q#OYF2Y66y^xAq-?)Vmx5gf;2%ZO82kFL;yiS@;FYOtr{{v!|y@dzUg%bd1C`s zRg)UKT*}@Gi`BVHyRZHDR!XQG2XE0Q)#@CQ!v#TK<=fGANcxs!BRS>udhTWikzW|(m|^8&cWjIj5M`j zshHFD=<1Oy3VBxqM99pRT|eZJBXC-l?#9~rWf!i-v>qdbS6=Db)h@SE-xXX|f_jJ` z5rjyC@llVekR7}Q_AO**`1A_dqgBOHb%7&E6H7wmVo9yum51cu5NjdHanPvIQkuH! 
za9kpltuB9;7hL{)Ob8aDxe?s+`y}Q!{X(#b6EckT1|>gwQimhwzkIss^mvSpeITcE9jCF(H{(>+2Ax|LQfgQ(S5KO-SZyW)(G;V* zM688V*#kpHR!nWt_X(;9Kh(T8X~&yH>j(kxqhokLLYv>@p=z-Ue#J(9o6l7Ggq>*! z1y;`!oz0(L#snoK$Ic-=mpa&m+cDlbZ^2uk~>4HDISu*gSWy|fL4HOQ)oUehrv}2VxhaCsgd;|oK zQP-fBuF!q5y|my;(4-6Xgx-mupRs&Gc2ywW-upZSUuT0`FMbZK$LYatbBbqH2 z0<~{!j{a=0S$Ys&|1i9N>HAf9s(Skh>L-1ASm;4uumCv7Vw|O9=_hlA#U(hSp zhf*$rOPjE>8rpo{b_s4Pc=$vBlv$z91o#c&(RRQwN;Zgrjz|_WY(ElN(FX5bp}2FJ zG%%@vVsZ)uDRl|{;FZ}v`oVk({jcJpf=d;hcz22>PD2iRqQ#xL%$NtKJ@F6*89i(F zsEaR?3C!^ygI2#Ri;Qp42j@}Isi~H8<03&~Zbu$m z8EeS>>gxMMxa&V7r|_cnAG7{<#MN3ia!Co>y)Omt$=o^!)YC~dAOH_5QJbS&6zFK} zDy{LVwrgu0PSf;rhfQK3-(lHp)}|s0H_sEUOFWM)W@xS^!MMp6oL6el5r4{`9Z#H~ zCi8j9_%$JrFQJu-9XMOa7cbIcINR@|>I;nvZ>*F>q7I9{G+4b`m(S1CqmV{Qlt#=S z&5mPETXkK+t#;Y5I&|mTPB>n&dL^p(`T!5miZ~U-p)$~MCa67T+hB0=A}|yQBzTb? z>78zi#(b*I&sT6e?e5SDrp#|r(u~>y))hA3%USd{mncYv=Kn;aF z5W5gl^Ex}H*D*#(F6?w3IrEG9gF&7Vhm6oDwCN9(sgy=2dXQuB_!OF-1cJh8ftCxX z3nGb@4}k;%bx6mi5t&Gc@h)#UL_toeVs0@YlgV(3#iDsf1jU2F8iNhSF)a@0m1w6a zLJhlEnLjG#Za@r(_qf*Ee`W`QI%w2dSdxAs9CkE^vgPtKl!2RaH^vvf6#SD9M?uye ze=E19=c#qzWR%J^yQq&|_$4Z(w?X4kX zk~sJWBhE-F05b!UH;ey4ll2@x6fE-|R!3vEW<%($9vK=zv0m!{mGoZt$#b7Ix4E%) zg6{231eofWkng1eF{V;D1wTI~kQLCn^(3D>6MwA54HGeQa=Y;2GYUu~IP)~#@5Vpl z;|0zoZ2q&SW=IaMh~d&rJw%*d`(VNc5N{FyGR~7^RD(MkoWgWE8YGCa8TV_n>$ZRn z2jC1pst1PaA+>LVTL=yQdcPE4q)kN1hJq4SwDpp$h?CCe23cduz>Eo!4fYj-CS(o} zJDkqcg*!6zj>CSNqf#bCjsJ>l4^ytuiJelZDl?`k3iwnK-}0lH<02@(9yMB+U~`*Y z9TVrl;&F|Xf?3S$E_R)&zMTCv*w<=B#`g3T!dR>Hx42nrQTySf>{+>`;0bDEKT_tG z-JPcHqzR(M@7hO~OE$U9w7|y!|Lyk6eb}FXBpV{5T{4zFn{)2bOn%D00zRpj_;Lk% z|Htt9%&V8`^?)n#`goog&tyxB&0)?m82JvFTK{hyRfu2&h;ZiSq2(j4UnHad4<_mf#cAs#iIC&^y4h zczX0u6lj1>r|=F6y#6U(oB}B^?z5E82G=&NIZL} z@IT04PNW2OZai{z{}F>Y?+rufau|d)EyY$Dn$6;@DgMPe!4NkXIiAK4uhFl4qupiN zh1c#oKd@x}_b0h*TB)zM+qdGmP^-06`9BpQ&viub7*uG4sTjXNi$P6Vf@tP1|9kwq zBMcssxrodJ=ka{XQ9&-+_5E*ZM~!BkM07})zAJq&!+ygZv7^qWM5Whb zOPed2CaSv5s59lv)5x#orPte=cDybh%|h;uJRBb*E?Ocl!c5uD`2K&!v&>iV)OaiF 
zKa5PaYk$u%66LY)i$@N*R$|BU44cOj8wGfIKi?EPN2|x}*0TqAgFc1;9|`aicvS|B zZ@*0ROGu&b1D3lRKR{vN2}Rec42$8qm+}Y3<+g3KmYJZwF{Vu_gp=eq@e-U6VJcW~ zztmxdKw-IokkD5dfFDCIR?6fOU&{U+t`ObL3VXro#e-`Jqz^w(;3%5K;t-AbTW0e` z&uxxWgY~oVsI1oSDF|zm$>6}jcYtw_wfNiS6{CdLL#&eHVmrQ07jE@N-$@gNlKr6T zr}5PKv*qlVeBwl$sV^4i1wnC9%s(}bcb@R!LzUgO7fm4H*HLK9O0{NW_~VVx_=dQS z%!X`#)KrLqn#lHvx-DTGIv`FGP&yX>={xzrRXB|4kv99oXMb}=a^&L?Iv=Nlr~|B@ zo=8X#sH**8LILvOLb=}jZm;LaBoq0FW-9;d^@d|82S|t{K%2BR()1GU91XTfAPX9C z!V0PM#CE{E$Rxs4xKAlM*JXw~&jt;k(Wj67s+67EWhIN%jYM2kdV^71-uYJB> ze)ge5iCfOLpF&|U7<9YE4%!Mz+=$$~Z1DQeeRQ@jI||aR)8V(@r|h(~3>>Z3z@u2- z3+5`l!m*^#>TUCzWj6HfI6Z9(NLR&;9|>_(vtE-Ha@nXCJHo^*WcQ(fopOGy%NM5=i6XD^B|;MF7=uE;}tXbe33t=DdmG zV@`_d{V|hYCBc?15(@k5V0V`UGU}l&WKQuB*X?Y*)j63a#Wl^eNGbtB<{sBg(m>|F zo1n>)DK=$RcFSF=>HGfazz?)#1h}RBk-C*DnV(#F>o*QppeXwE3mq z0KMJgnyjY(Czsp>0{(KAmy5lqjRrlsiOEYgIh)DFw|ZgSn%|g({@*n*!nGSMb+c$x zSq^HkFdabs;f{AJ4k9CdCy;6oRA?55dGN&Nf8L#-I?*aER}t=4MH4VfIcdJQTneRB zdM9dBBGeJ_c^-m6x=T}78UezqcMPsu9xa!g}p+LoI$I4$PBBg(NGFUCB zM8n|Jhq3M`)`tE4{NI2K|9YUDj~El`jSi3?r2ukgCPwk>nN|MMOAR3R zWddg3$hCISdXc;%D6wc%*XsR`@5JAS0OZe~(pc)_ZWA50@#}Dc9iLC+(pB!yQbgS5 zb95BH2t*0F@Z^Z2%T;(DkB1A=I*a_?AYj0+Gy(^lfKkrFgutFsUH6wEpnLzgwTxQt z)Q{lxbR~eRNueqwXx=Hg30IBIs%~p?&(!F9JESI)K@z$s0Ci&2s}=ecvxHu!Nz+aU zGmH@a>GgHI)6j0<2@KE!(h3Isi9KWa??+s173kT1F2!`{jbT8Z)isWROd4^FW0r@y zyYWjXqWzR*4ne7zT(E$uw=o5FB7 z%-=2jd& zWi_|Oz^Z1gCg4g40@)e{k^!Jt)E8QzixuOUlmlK)u{MQaVW!Y^z(}O`>WQ58sf@-n zpAo@>14@UepdK#^_&jg0q9)c5JT+y~807W!_4oh&wG&Ww2w{YOn`BPVX|^v09N24k z9v8Z;Sd?!+YKwI$LkKenpTGVZ1lj{ojlUo!2>p6O#-4|&B+>Zbw+WaP@`K$Unk(U- z3~h{K)={~s-3@eMmGD~$0fcJu6>Se5rCfGAC3@bCz7n@`VhEVGPnvQ^ygy13KS6u~ zD&#othY<_7$xm8b&c};QOevF)1W-G;BiidVv&aOER}UySPJM}TK*?Rdo>l@SN0!0# z9Snzg(@TVLW}4yMbKS$bYz;EQJTkc&Z|K3h>`cewLjaKTO_xf8?|_D zKy2C2Ev;#TOTdHdVngPlcqH3zbqI4i?JJ3cZsKS1_pJ)|-2-Mp@a!u0zBowr8h!cJ zWuo6lL5$i9IS-ctrYh94KP3;i6jYotCS_iP(ieIFtaJu8Z^dF4Y4zh`WdO{OI_)X2 za6uBsNSAkNLEjjbS98P-Ct6D62u3uSV-ZL6$26H!O>l{I;t2VfMR7=X^pVvMUB}I| 
zHVS;@=!~jD%t7n0d-Tpn0o84pCGw+?njsSHcCZkXtMCst5MON_%OCi>9A?wzL2r$= zSkDCiLV8$B6*URw0LS`IP&!8j1HQOOW~Bg>t4RPe8@OFiPl13<*P^x7U?mC;h0yRP zjKSFnetQUq%@PT)*Z;YYssyaWRbviY9gW(E-`Bj|E}FgMFvIlVv$8kB z`v_l@0WfPNo^W|K)K>^ie;0zSRjGOjdXG}W@4Nnh{PPeyfA=fpv;QKFJK~=UD1Ktw+dU%1<&Rc#n9Zv9-k%&pmfK)G~ln@xyI*U_3w30!@=IFSt_7`XfScyVTrh1hoF|mlr*SV^hDO*@*=|$pcVD2tK<0*&fK*%mCqX!f1}+}?wcEa<4abz%Z@G%zfV3^29bQo<9{0ulK?)TQ^TMZjYH^O3y**4UrV^=V3Hv6H4?!C3ZJNR}|~I6}L3q7~% ziVs;bb=m9=XNey?;||%1t7(#6oSc%~$)1rUI5aPgpmjwL$0h%vj49ErJQaYIFmf#&ZmT zrS!bn>7C5$ZV&KV$kcw}AcYRpeeWb}VxMXGZo=dD))E2+2^Dj2vC*DPE^D4UqEOGx z% z34Ra(I>TOKh-!<^sc0OurPCh)E$VejX4=heophPnzXn3n81zV*-|QR0XE#YCVYry< zTw@uUs{@gPhiB_09Y)-nYVKhPWr45`5pECScC~hU)kbstcGr91U!aA6Wz9-PgE6P@ zJ=+Oofsb_=@+(6K9|jgZkepBi@Vv2Vel_QafrDrsZ?tpn2#2gEZ^Md=th!rDo;5KP zXnsy_vIh*$y6&i~HT``!(^xyySZi8aXae{h!0*P#r>6tY+NgQPIYrifw!q26)OxFj z#o-ixEWzUDcyWiI1js5zh_m=W%T~8tlhpn2kEiYe;XMpqdpZScbA1{cyM>t;8aZBf$40J2G0WI|rprAGg$|UJ zX2k(k5J&~pLQUWB@E`C7;|RB0h&N_|q;gv4++5#q0E!Cl3Me;_m|}VWQ>4|vc2@_6 z$Dp!=yp7tuhSz91(#n*>dW$gNOD_NdA}a9cUu4X-97vfwt_(AI-5EOl-ZM$WS@54n zSD|UuD#QTab)m5*%}lK5S#r_BD!Y?_ArVMDONhGb{Z$3%o_)&&6><=#?@&Yan(#?R z9MKhEWu!@L#|xjCv_`EDQh+}e3~=Ut6UAmJ27DfLP*K+j!cV^?0z6C5TCD)^3kNeT zmj2ED=m`M3Mx)ne2 z2%uV~MEuY59r~5>R0AHjy9J0{q-M8Y*JP#{X%C7RZfA2^kUCweA+%fT4nuKT{zfb; zgoFeE0V>>s0kK)vy&j}Be*AC`)nI$VqQk(PEDVArhLY84__G@U+zzWY*sZ5Zl}0NG zcArtMP%@^eU7d*@UCS_oNSWo7)iGp+?$S+z4)f_N#0B8Zv)5alxik>8JQzcF`^9R9 zU}FnQEf&Y~eHVff`lX6Q8mML`Y@2LUmd2Tvgg`zZ0Om&#o7J2W;BB`8_7pdMu~doK zauZ4%77yqY`foAu8bVV4{l5w!)rNu8tzb^-<#zc!Nz10uHL9=Ady3(Ljk zk^;Hlyg2L)f+^IQQLva}ihz=!&O7IX9zKe|FNEh}y|woBEL~Qqqmu%OQ5E^jtOSw*X-F}y@RN5X6@`TCFH#a6eX>)E22bMe~3;4eU8trsE^!jvPurUU=H z6XoDorvM=)+(pLOyt&ZkY7-s}sm=vZaZ3O4HmO{HJ1DaarAa2YaZB2X!=KAn-Temb zV`MgTT^Mfpw}QOEW>~#KyOHMpbR{XCag=tSDNNkP2S5>6BX`(u@W^Dcm9t<5+nE4l zbS?yUED;KAt_|2{Jg_TlA%PTtg*}@>s{x%f5{u7^<{Xg^BcH`lyil&f+bKG;4;=8q zNGD>7vOVjRLut&)lxBV0#50_{D2qY=b88WbgK+$%jLRuOm;Ljd>im~#jXwYBT65l@ 
zQrm+Il9B6g4qzoS;hXJPhsm8#OV^2^@yL~dSfK~8_W}#;7=NnP@`v$vueYb~?9xPD13*jY1RN1!n3)>} zxOiysD97(t%wVW6DqU~Sx6a2qa^@y0y!Gx(#9}COU%npAhu-3SXISb63@I$dzA&g1 zY@s`~tvAtJw+K=j|Mg`*A%Hy*_#Sr9j&wSm4dJ(Wz9alybB^ByFgT4iixeUAk20n* zI+C{^@7|3Zt*&ROD7#<8Kxr<7fxsxWSat|ynFNy1Uss;(co3z)=C2-@1?*p?7lF;R zDX_X=pnMuiazvV_Xy`~G;m}6hl3D!t?l}F%ij+mRJ>nqBhuf^dN-IlT70cm>mnXNS z(|xrN>yjc848TqJfdxuu9q-72M;RyEm-qk#+A0HQi^bg)LPVV~3g8W!VnSFL4q#Mi zU%~!!-~R8*Ez*^GGSiW(tTo2JC&zytApdy@7?F)2wF5;EfM&W44CYLf>+}B-yS1BT z_|+?xws7H+SnGZ2gLXvyYrDumiF=V=!JgMyc4oZ%J0_IZ6QRTPTLe^!vtz3Em;e34 zJ&j1v0~@{COiJ5Ac>7dLUk-h{r4ZO*M2o@!K+Js5m#+&P646*=y|^3|M*U%G$bw)$ zgR>k2uzsw9P;zp$ky7xdOEM(_X#s_q>^4q!VPgsru)DY>9}n+PzV8pg;JjCam!D79 zQGRc4u^>o|MQ$dg3v_(Wug-f{!*|l9PD%znvcdJ#srJa!u195kPSafl*tjg zA56jw2w`FKd2)=z=WT+O)&vkOk*JihqCC_oTo|C1?LfRmX`iA=)cFHIGP!p4boOIm zy}7>CFdylfJ7U~Q!5I84C2jY=pD*x~c=CtH_l3cyfe3}iPwWeW*>R$#?3k>@6Yxoo zr$Qly@xDJ52a3MCPxnQ_m@D95%Vs))^bZ;ZkH;0C$!4`XR~YQMG^}WFyb(TqcXG>_ zc2Vh~@c*s~0<`;w&@vM-wMfg%L`R4!g;^IrBqJ-`)fX%#LlF5Ko|K_jd|9;$)$70i zA5~u&RaMljO&s9R-3`(phc0Olr8}jiTafNr@B~b>QAoJM?Fa>O6Mqm9Kv)1TGln+?x~*U6dbFemPibMMI{^$a z1#EWvz&c~DO>~Gmcyu#YU-ecd!CyOxYG4Px$NzacVq#J-RS($REmA0lEH7_exmb-f z5z$Gc4(BRqQ-XkyKvt?%K0o0d%}QOS_jqIXKr{3*sW#BV^W{Le!!#x`{y#PNe}2v1 zh6;;Gf+&(ipgmG>M@_gc-5Bt3;UO3HR8PUr{wdixgTc*a^kE~BQqaYc@(_W@1j1yb z`6e$TuIDW}^8fxRe<~v`ILE0t3ZB3V=h}N32fT(TW1U88{I4e~sbg8fhhIc6R=dUC z@&t(i4sU=y?*q5lFdi2jnb)1rW9d8Jo6t_*=aYrVLf-$on!;$Y1l1iQ@o6~`oa_9e zM$ty12-*MH;V^f}0-qeYh*9apHP+q3HA|sFdNN?CA_X|gIa?#G`8`P}Wp^1@MizW^ zVV+QJ{qLK`kfH+bK1Jcvevf3_>9!VDH{AKWz(nKAxy`!)m-|{;xOTFDeQ1?N^>O@@XPuke7q?_H?cIU%L-jluRbS!e7t~ zh=CG@Lcm(`Jzx&lGIdqj#$F$;(gU0R$uM&sS%= zHqwOvi@*C34BA8+%9RKyf*ALzuDG8w0cqiQ4EFC%wT^N~3_88Vn;rfX0QQsN_IV7V#-c>d43eCvnw5mqpIm1xW;c=%k0gWTW?6*I^M}2b0`<~Cp z_dX!3_-$HIFTnjUhm@~s>NtI#$-F{4QAo*`S*sI=PU+jQ7Gi|&pAN4u#Vr1^U~52? 
zo-+<*1RMrKQq4G=Zj#bzVg&I5sKf)$yh+|yc2Ti@NPdT4_RJ@AiGU4l!UYOhB%x55 zWPZ4gnKqUIFM$$@Oe|UMn~&d9jx#dOy1+(L3K{$yl{)qBt(Mw_RQcyc-LJm;LZ(gR zW#Y&R_SG0BM@i>P0o%#7zWT@|mCqGDAw;UCxe$klJHd4N=AuY_Yj=F% zK3;5$(X7-4yk`b-=tp%`dL*QM3LXJmP}o?mHBl6Eh&P?Z;s!hS^d=Vwp;uvytsv%= z=Z|pNr*)GcapHx0&3~N)V&s7{O>;YMn=CMZ%flVOP#adaDzR-vQU58AfP%|gQEU&w zBuL!+9dQ7$@GeIi0e0C^pkv6KSX;b07yuLGpPrunodBq<6MCkI313b(Py<7ymEyFS zc|I!}nmx=`n8}n~87hR8<6a*Rbpr37pCTY?cYNZmR&u}pdSqt0QgnO7aBw3=ET6Na zi!?ptr6}3%r*bY09rK;2 z^W&zHBqhLjC|k7a2lJ1pCrmhZ#G9kVtx8y?Y0#luMnvi4QRR zOE;5xAk;>&I$YZA-hCtG2 zbsgH#TcHil29wFI^ApZ)!80Wik45gQwPsOa6d}LM73BP2tr>}@L@VfGhh5eS(=bm_ z{}Eijshn8I*%b)$y?@uV@}}jZv(;$74;lZya1v{>l2zxmsWJgioS}f!vZY3!YLl&w z^+F9bfJ7D7kQOnGz}+4^z_xEI`ZOs=E7M|fS3xASphPAb$y;G4mr%@Za&4l^KFdb< zuSy?MpKXs=94s_((0z#a*KM>vWy9r2dA-_O3h*(vpH3u}V|)+QuHrM=|29Ge%xs0N z=SoP-hBHDuZ^@62eS4%@_0!F5vobP&-t3L{gwc4lDV73-!~vXPB3f0InWpwzR-?rJ zxgYPHyJH{LMmQy(CZ0p;k*=9GQ0m2jb0~vt2plgQuum5UAz|V)-Av&sg6~iIEHHnt z>2PQ7jHQa(qzYkqVK8s`QG1C0c-Z~Qx9Io;hrImRA0N2z}t+r3^F zaqBt1Ubq(;81|Wy_f{;Rmwp1U2{Gt~vQ>)5hA*!l-KKQ#L@hSqtMBCelafu-B&mCym!Dk>d#JqxZuOxz71yWo?(aQN8z@w z1rch5%84bDwpDT{|A5ijd!w_O2p6MQgQa$>4h8sD-(wVoOwIuM`UXX4w{4(2N~#!) 
z{7A#1oYk+I0Os712%0PmYGtXgrqNPK3%;`XnvmklFaJPx1uew9*A_6ABm;5}NKpE+ z?>XAQMn0D8I-j+VD&>b!;XFaB?azBm9Df&7`bs|UM?7;OxRwj?FU;^A zjpw(g%TY@Wr(+)7o7yY~Tf251?B4Ba@6kQCiMh%oo?X<1iw@>u1=?bQS@-QG3V6`T zXOwKFHeIg$2q@gp~>qV58+ z^+rYp4yZjE=AyFQ`?;s@y4ikpePU3^YHcHmB6j(4YwVb`V%5;3g;jQDPd&$oBQl%n z+r~m4l3S^JZpE-l-N#GU&@F>MI!*2rF=TT;T#rRms8 zOc+%*Z!N}_HlwcSe8@`)N#s~9)h%W8d3;D>P2%Y6UtQZ8K-+L!5S=mW2@1$SRq{6@Dzy9kEIYBfu@;pB5wO7WIRKz;Fh za&IcRxucBKcB;`WBT_cqR6lmN(B7xw4O~s_ zggb(*CHLiOmAhJovhgkjJhHh8Ikf;6xy{ZrLb+GFmO`?LeYSu6O1ZhCqSJJT+@2g zY4FyO4b*qxX;92$(1ak`|`)> zUh}tM2YQRbP)~AZ&(n!#FE`7J;BRFVDJ(S1zx_q@!BM%bsPn3ft~X!sms(A^Dh+DG z^JznhPi@U%5`N~02dS1f>Ca|!Jw!Ox*$XeJ09DL#Waldx9B>sM$+KX@gatH9l#F z+I(lI{ilvE-{@mW-}6}Wag)_I;nK-+V-^LDXA2ja$?Y&J=mPWHN`3q3dt$YLQf~_5 z-cU7Sw+O`2iM;xqYL~R8lg*HNZm<3~vt4VikE6Gz>6~#0eW2kiE_R{KrSTdNDnUlx z`>38CdYTeX+vjm23%zRR#xHD3E=A1vk;*~u6-LJpxQZ0==Iq)QXTLskU4-m{EOq)q z;<}A3k(7A=p1~>|$AaJv4{w9u;p(>c@?=$#oeF{E)t0bBP@0$(n`;V%yCteu=Hn=Lik1lOl` zaLTvDGJ$t@8}NX)Z;17b?Rw2QIx)w$F+(2-p_{C{jMhtBk(G20?2|~BixquAuX_$Q zgIZ}enw;Nvca!;u!z$0KExv`iGxJsRiB3Z@cVZ&GPZP?*&?O8%ven`SZXhicLgxGngEK9=b#TLJ{EzYS#p>0~u?>l+c?oS9 zua3;|PX+ZvYAF&4ZXxA?w?1eC=41M~v5Sv}?RUslyPeanR+tl^9Ah_4hs@i?SqZ%Q z@0VKtG~65@rW=TOT%@3I_1UpojSr^EVJ4l9@Wm=mcew#Qt%l~eGCwG}zT}u_e%WRc z3yU>WOWR4q%2;=f}huzzD$~pzI#|mXl;R_1II4Z1wMcE#e0uD$G ztvX;xcjA&9>h5>}U$x=n1&@&B7~K4m!IBVtb#j=w$^F!QgK_3{D;eB3w)8pAMy6te z-C2vC(#$Ch3^t?m8)3_(8RVb&QVs%vtZ-b5vb{A6k%@`*!~t3k@7RAeJ1on4T;jXR zs@MF3(f5(F>aU%3c+)t-UJt>V)t@TleSDuF&v_X_Uf=8Aj28mY2vaeKTBDjsi2L20 z`qmq^P4Oq7?~LNl7+m=BiEG*>)eI1V!#P&9s5AhiG9z=0NvcM>DrO57u`5o5uJgZX z_S9`KA}Ey0PL2Qi&aY?lU?vydk2nDcgP3@9b4dC)T^P0d?(UwhphGDpj_QY zI(ug{OM*l#AKyP>WRZ(uM(M-p1Ur_a)GOdXnD4vQ1q5Ya6P1NrT9jOd;<_G*UFbB6 zbD5QeB9vwsq51_Xh!Aigw)mZxJDt#Fz4It|fFH_x-+~F4$Z=d53fYek++uD?`VbLv zTWLr8E3?<=|B4E-(Ry7v*(5t5teW-?y1H)nyqDOhcbO!U)Nes77$EKn5O{GEJ{fv9 zmqsqUx#)sP@*$;ZfRE2IJqR*(eF1{pZ`t3c61aywDWnt zTr;lqURkJm!}R!me%Lf>9etZwa~q*7)gmtL2a{f^L@-)u7nJD{t3@2fd3^!jdZC5q 
z&z8mKzcpRDoHj9@8veDW%kPJW$3N|`F1|}q(X$yB%^L8U&X;2m^qJafo~@3VEguh5 zimkUi=X7~Kz*p)G)N&A6T0!SPt|w+@vP6Y5?0U|L%YOGZ$LY_8A3r&-{u^aKP z`_-B`eF@bP%a8T*KiD(l3T-UFPQA6^l1-ad^~ua?oS&}J3KcV#cyfVlrbJpuWWtE^ z?V9&~=@=cV1xr0kH1gL4S?vN7C*;dUMwPs>ks2eScR5I-nLIkrq1`~aAYp?Jf4Z|aFViGAca(Id%19e+!+p!#E~ym#=FiV& zHTTLt4TdrBQ6>{L;`Y{2XQ2XLw)4rCc~y3A6cWvm$J{5bx@B@tyH(0EgkJ;pLH3?Z z?;1YX>CLF2dO`GC{Bi^r1o$bo4OO3Zq81npo!K;ThjUJvIbic z`c%%>atI0kXEKd)9mAx{{nsz`$+>c2=GM7(WUxnr8$sTpE2lr)9zu4v4wB>!L<5OcjIX%=>;OaeGSnEjgdP6VFGwRJvPF zVpdZ*wwZ6RpI7LJJENUsGz`c?j4AER3jan^zN^Vnye82Xc{_~Ll(Hp1if*CppLbf$ zc1gX|w0%E+tFk;ECg63!cw=RiPtIw}XQwT*RSe~Ox``l%o*oO`<{?}b*ZUtuhS5`l z1zfLSP=Bx1U@<2kU21_LXO$B>Y!JsB_0s7}xF<&a-Jhxj^7&aTWML2xNjorM!IGyx zm*4F8Fu2h#j>X#3j4eU9GQxTUU*wbTXkhpn;3Jfj%9W*qMdbR)Yj+Mku#|R($Olik z0aaXQ4`R$nYM75O)m`tKk!qWwi(qDc0Y?38B!Vug1kJ=M6H~rK#&ClN0j2ZcAm3%;!{PRPn1O(90I(um~k6K*QcdUGR=2rL{IgNHO z$yfSzLciU&=?@0^WiojOLL)(d1mW-Wg}FIF68NhSKN8qo= zJij~^4%=2)L= znH}p1B}WZKNh(vzhNVr?e*{3E<<#BAFrUT6l{B}}pf)e*ofZe^lCZiMs6HhK6BFMO zgg<}R=k`lZOO&fm#0tiQ7tsy#`2I@*hOz}H1g#L$IkO!v1d!yqT%7Wxj)U7+mIGE z5_1_RUt|Y_TEL}8_gGMIaTyy83w}M6dtMP_!XZ}8fe&L24Jca|1`epG-5%K-0`jm~tauf!%g;w;&p9R#=( zYEYf~%M(+heSaFNZxK)E?-;@k%#-?YLQzS`55d8xgRJNT2V8GC)dJbRsTVFN)vL`O zhDg8m=g%(;=^H7+LI5R8b1B?nJgcOe3{qPf_1GOn8B;L|&uB5;u{YV+XrkHePoo^d z^wasVNDxg8&?9r}$QDw!x!v*tJx^Kupin|{(Q>|{+!C@+pV5S-wmi~#eBc_7CxT)# zTSs*YNtw8c?YtB{Q&8m_gOn%JnA7>5 z7`0^_1C9m{K?zs{CgCkafJ$ybqQ=zP9;PpkL2pO0vb}NF5U30^Mly2d5NiE-u91}d z)w9d3Oe%y2*Q3P&{&m`74$PLhH2(nuSi}ZDh!98|H>#_MZHZ-#!F&4WSL@vVYGmG; z7D^7!vNS$NbRsG66c!_#GfVJSl7v6C3bZ^7@Jj0_(`_nS<|PgoI~u88^9}+zDNKpAp>h2MTbaTBDzq~lz9Zti zAO93|s_|o>bHA*&E&5FmRhEqEdF!#o;Nt6;>^QCBb^2&eAc_Zb1?yntxR@D}R|u}J zg?ORjP7$(a0c+_HO-Ah|Ov9}{NKrzTpgqDhHx5u_)z0_J_1M`y_f2z^@U(R zV#p_*)t0T56i!wA3MU>(sDDd;(8P9Fj{LPh178*eBLeRY*%D<6wQ1lC`IM?W+O9S# z+<@!oCwOy7Dt;u%`T*Buw}ZlmNCX!8^Okd*64Q9zo>zpZz+g1f5}*@{ zIR?E)CD5RxNnsSH31KVHxT6vmL@E{81Kae7d0cvZ)^@B2TQ-_S`rxKQDIz-vXD6>6 zZjC)b!wJ}5k&5_Fg!>aq7{Ei6`RC*Nq5~oH6CuvU=tOXG9c$=g@a(LN?u{YY2SN2_ 
z(ss2NfZYO-;1WqvVyb~E3h%866-EUkMJ5D~E*>b?oy~y*H zv?_}n0-aD6|APK;BZ_dGQDw8Bj8_d>-AR0fO2S|iZWj#xmN=ti*gxajY!HN|s$e;T zRnD!{k1_iJ3^5^08o^dIFCKI*q}{f8@=hTjA5vh21y*e&QhhubR*HA9w4RPyvef2jWj${;lsc~s8`CgeNs(lj;`j&6tc?1dDPaLBP z?KU;!GpcTw4d0-RUze(R>K=`lxwr){t{w*D%xGO_brI;@1b>AMNd@N2>V0p@b%j z6q8nv7||fT{)&8h?&ZTn{dy;y(dn42+q9o2UkwC^)FEX9&~hrUkaT}=h!h2W23=fi zo4Ijs{k*(P1f&GZ`^OVQz3cdSJlSb8bGhlU;GzxaASe)miaa^{wSd(n8l%{}>4W2x_zeLX*^^D>^ZP`4Rg*$H3{a zQ0*eX>S4RTZbS{`x!rFSV`kda#S>yQS*^uDn6h_%xDQ9u$x3iJWeog|&}4fX`jVg@ho>Tp=iIm7dJSLxIi*KGH}xWueQpFn;~HfV2s(tzTPRVYUy?qFWQt6iH*ds|uX4amJaaN%eKG|(ZYexw ztD>95MA;zjOT_~9Pyi{#PymJ5iMZ%X;E#~=~&d#_)O)U<5k_%jS(zHDIWCk zq~;B|dXA%0Wy_L;Y7EvW1Q}+-R4CjzKnw@7RIg1@SQz3{7N7aL4NY3>!v`b!GujcW z{w{jAs9O4)3&q?0bxcxVixLtv0gft~v6^N z!MbY77|EX2!9T^*3iO2R+mGj3>C%aWEk53V3c4BAw2e9Be6c(94nJfSe(k?L@RLmc zI{&M_c;0buM97qK93$nB8s2Nlf#8OkD#~2ZGRlA8Q#O>t5yA!Pi)clVN4?4Q_|)cE zibLPSj^&<5ma_1w?Hu~M{9K6Ja>1P-@UJ=v{}pc0OyeQ1hWS{DkbWq1*bVJj58@^m z5OFe<--XjHW}{VRD{XdwFxv6n{jD)R%S~lZkbG!$};SQtt+Wy`027ZnUG_8LiOIJYe1VO&R z=n@0Hkg2jvldy?|w#IBr_u05lB+C?x*6nRgBAfARC5HiOhh=1m%JwXvcXLgzyu?Ku z{d~oZ0(CCs2}A_NF6--%R1(@RL{>IF*kxj>#1-zYsR|k2d0h=L%5KqOki|osaO8dn zT#?B?Krm_cdON(D_vc1!LLcm~&xlYCUE6!Rt#%?I#Ip%Eb`vR}j38G9ujF&R%T|uD z%Mj-rJ-%v|PAyaHXTg25^z?Rc758jFd|AU}02@EbQ z8-XZ2zojIV{!{}g{DX^GLRB&Mmm?Q^GNZfo)ftBgOASJKKcnikRz1UOy;sWq%pZlf z@rfaSJ9}@m^(w^b;Y*L(b<`5k8=0ITKKe~Zl2Ncz>|mVQeK6Q{DSY*FBzHe_MnG-A zC@98hJxc~yd&|vLw~nt^I_PN52i*HJ9v{$*n^Dp6Q|5KJ=#`y#<0kybG{o6okm?)r z+s}ectCR6?nr2$EMHJE+hy(-yrxjz~JrocwBkRm{FaomtKAE`wVll{dpEm5KkVaaL zMNQ7;(qH&^^O>B-o?fF=JNRIhwfH;trVh9CZ`njsyCI{S;xt+5$RC>HS(+GWIoOg9 z6LQId7=J&pdiDzWY+oS2n>xmhGWn+(9O9&`1ymb$GRAzrIoUsS{3}AUicT`2e@{8X zbMaV*pjYYl(Av}D*EAN^KficMx<LmH$`sWnGtnnvE9}uV^1BzvnM}TLI>4cs*o9^H=8L zNgU#&(Wj6P>{JseF3%i^lMF-+NR+|}6x@~&^=-#GUd5QyFaqQNMb)}j3hz*EF561U z{yXb|nA)={25vfhI>uUP6&N(4N*RQS4Xfv z(;B{ybl0oA?Stv+pS4z>dR4YQ2Qx~c0m-!0&V+Phj;d-=g{5fb(1!x#zQreI)UdOR zq5>eEkY5BY{06j!#VS^&Q9a3M5A9#!r*Y|*RS4aGQ0 
zq@ES^J)$Dt3gH!ro_+mWU^QJOG`o#}d33VOQGb=n;Pin5dlrENnRU3iUJBQ>u4Y>r zZ1H(G^e+*{VLYQLWsy?9Drcq8nSOUKG4A&)rXDyNx7EPmTZChywKkBK*8~p6Xw+|_ z1_7{|kku17joUc*B41n-n+y9(}W7!*N`e#YG3(9z1UMnx5=~Zf`PLNX&eF0B8Q!)JW>7B_aRzG-6+lu z`dQK_;EC1Ih_47Zk}y@Wa_8&*73P<-NeoCmCHGe9@$L&odyyTZf+Xyr24jTNgVbnZ zay*(4^wgY#G{c%2%|vE~J-E~J~q!bu+gMnwWFNl-62|uqwG>H2sa+( z-B4MxZA!_&0G@lODIDn7+{4HKW+zW0RB8>1rD=YzG;7?Y} z0#JjH62-lDD(A^m>=9L79z-2L(=Wyp~afBH=jV-zOZ7p7mIFVXQ4Q;&^D zRj9sXW63yjGY6lv|Js0e-g%o-P^+yRSfd=kc6T_>UMjssQZ@##n3ydz7gl;fm4xZ_ zJw?sGvqRi0T-}3j5LDBUZ0CZd8S925rc5KL&XHQV=tAD%1>7KjB7QizS|rZXe?6u- zZsz5^ejFDTPX!_4mNRlpU>WBg+4tf|hsa7la}~!GAH3pgeWcv@<-jUDwKVAvAPU8- zF@lHp|BdVI`Ec;Yt;5|3r7P>&=7&lqiJ68YoMjyFy%xYBc7Mc1?#1ds?G&5XbFm2f z8k|NUY~tT3Q2&_5t!gOtYP;^<7AvgL0DbA zdgadMKF>9U&jd}Rgu~7J^dQGy$eE^O`{ZaK^~eC{yVTO?M2(3`gJsNt==?!%2lkWA zNXNudps;8jCJm=fCdu~9V_9SMTJI-Zl~^nylf@|yvdy;A$cI~C!iJA+h4J{CxH(Fi zsv*bz&vIGoM&D!eu%VJeBm$E49pbu9&?(+v3&qx2(Qpas>r-)zf**M>%>-N!qEoOa z)Mtqj#MoqHV{Jdt9PqPpG>j2mg0j)~C#92UQRZ~)V*M;P;?1Y6@Vc{(z%}p=H0JQn zC6@R!7#0&pj;49WGqzK^Q{8}_{bKYQcWf5|-`gHJ@c=dmd_QBfOs zMSXcIOQ*+>OcduUD&gMfYr_c0ZWi(|>vctl)tdY+kqpBKuFN{~{?ui+O7C)8bzDKN zT~)C3aOiFNiZ8dDtJ^OzM@H?5oMF24OCg60Yc$EZ-${z)ame)`+|1#LgLj@n1jBKC z#Se&&^5W1MD~=1A?7cF9f{X)S&o9HKZOwA6BpD4C?r96^9I%OaAdQ3DTgxE3KW?$A zVIj!94kqU{F5A5LQ;W?Hh{(^YfAlPWl^}<{g=PsP8FcuT=S>k~2pSJ0dvqg@l9c`0 z+!lFc)AoEnk~~hXn7|??vGr5K%~LXSD_Yxk6>eg#-SVOmzDwr z3Q2@Wf2-Q}+TpbEa6~9r&~UlLGBF>dSZ;3?h1s~e@D>Cvy}vN<;<#YknfD4yCHItR zUA44GOhdwR3B;Vw`7|!n>_ktdz$kMNdtk*`b;Hx!PmLjFIQB^)F$6X?pw3J&Pu`=k z-NNWC>joD9`pkZI*u3h{NlB8|K>S6~ALc6O#mH2#S;M}p!{4sMQY@<@ygY?bF&A-5 zbd$=*6*`%`*vG%+8K5&fnp2oPlZY^g_NB`H49O>^$71CUb?waF!sULChS|NuVQzi zVo~Pu+E{BA)Ho6(Hs%ZcX6+Bl*mqPZ2sA7vE34E1#0^QkReUsPC7?F~`}@cDGboX% ztRaC|#Y=oG;^{&Wj_IO-7;D#Q36bd=+OfUDnSBRrfFRt~MV@X*v~V$d($w#pTRkO{ zEe(UL2uz^m78h#?Bb2|%fK^%lzH@Y?LdzyEZJ-C=DS;Z+Sw%PmKP>jqqwf|L8RbQb zs)6c7ea0YaX6>F(?7MDmXd4ve!ND#AOn1y23>r8)1^J%!z}Ejvd{z`ci@IiexHxWu zuy!K&ido*W$bfNi0KLGxWR6`}maS_|*w_~rCaG{qqd;q(gL22@s(HQrZxpb16pPjb 
zWnGFl|4{${LLXHzw7qF*v&J}L7T(>W#mbD0jmzDNZt}T@;LEkn)<^VoW)>0xwUR-O zHmtk1Xi@s^AC1Mki?PH8)n!6#Z%RevgigSg)FTL({~=eC#fVYgfIjY-|GFLx5k}t5yrQp6^X^akEW0C|6Ejq3+o){Xsmq%QjPSIg3*x+`M*B_f<2|7 zqBmZZR<7cU@NY#L`D`q}_<#Nh6*xt_d*!DpH!DD@>HA*_{QunAx-NkR#IQHc-m!jH^37wRcq^aIXPk~Q?10{ zv0F7@3*XLPCuY&&zvx=)+6NA=4N+X%$&63z3aOnbb)QG=wdUU%@XM8Jy)C-*9$#zs zm2O9?IrL;YS_1o*#*opWn2mq?QKnffSG)Dgc?bibK&Qr%3y1*(@f^T$rR_5%{Fh=l zBn)}LPNVQB1BS=x`)52xWh%pft*cT~AR7oz@Qb@BaQcq*y(DA&dkNelNn#)fLxM@Q zC>N)2SZ$W%n$dQ_k@xH0>Ebw-r#UsT zLx1mM$-yUMb^pp{w;+im;@~`9@X{?{NbdfU>%7m2Wzo+jU93n^ScV@pRpP`|0(dKG zdV%R1EarJ#jMfy(RNAnwkGO()xF`2+Uq8bd;lJxr_ovGRG;xK1Gb5*3dJG~OvD`nx zPAN7tO(dX5fkI)(<2(pK8So~N{&J$~7LI4?xP9S~#_9Bi&Lq@qFZ?4wocE?Y-mdAf z2f`{*tQU(=uaATi^Q7Z8Y@!q5Fjk~VAMeiL_4)lmi3f+cKN~3tetn^k4ksEiaVS$yC59?O;Chmci9YP2Z6 z?gY7xxb6w@4o0WV&kdIM=BHZpJa_zXIxjwh&8Hu5TfUdRw8#bG(9{xkjz5_wZ@tGX(qJK&yl50+ zGVXozc9LN~*bL-2ez7etw|txNxwLBAqyLLSQ#L|99M^Vbo6-X7P;-YX(>^{ZM`fso zgzpKk9tykImX*O=}7pEEamNNNha=D|vzY1Xsxqe#mStm-YuLT?pupFjBVt^u*KcyY6?Z!^XC}*y2i%%PAy~fr={uvT zc_2t{1wX0P>FiqGR||>mEWqE=y=3ord7j&1?z5o#QyGMXBHn2X^RA|1x!rK|JjEhY zytjrQUZZrH9+V~QBM)#L>q;x|zSw~_`{l&`QNIqOJ&zCQPzXif{^=kZk!U)cN#N+@ zv~qw*?=+wH8FMvNn?7D{sylvlt+rS&`dVi5XmGPSWt|><)?60k&O{iCGK)Q(A0|~} zOO>siNZ`a5vAe{2HeLQsh+x_sE!Rt?7Nl|EUn*<+@`GUSewaZsk}!_ej^iJqS%CFT zgnfT(3ijRx3j-|Av)Y>i>RoDoOOb%P)5lu}dQy*``aVC6oQ`5OJ=e^MC!v# zFZ%Am1#;OD{vBK&hzKlln7{tx5+i{+eIDNfdc8|WXp*m39<%XVyDr*l0laHKka5(H zHV%`8utsw?Djkb6+nn~pmAr+Sf8&3=0D@soHuM;F4jzs%FDu-yGrvx-ap|G&z(eV7 zAUOazun_QOBR&YG0?8L;tq!$Sh4l^G^!ycKiOb^u^SEG_82V2vOaW?g_#;Nho{@W# zxsq{mtJ8 zyRb&NBAE>xGI@I2H`F4QYeI&ye0pcNbTJl7$rViaB3dHf%ACmRA)CO57~OkOc*!Ug z9?v{eKP2Hw*6w;uqrs}?kBC7mX-6O)K~WGw+?F&TC4=;Mu`M{dt2 z`2U40!C%7FG{AT?h zClXt~{RMiaRVvGNs>w9}PddX++;-V;N!HJwNhg3OXgO%ahYcfdTg)E*ToRW1vst z<#FDt6G^~^2#iJIsth)2nsOyc$!f+kM5^TF<7xlWSg3S*Zs(P40aUYLbynf|ax=9~ zpF=^Jf<+~jeCgsV;(Fx@ZKrpQ7KgAl&tX2dP8hG55EYyx%ggT0%&YM(fqkIV;c;s# z4=3ImN~-O#986P6zKw{_l_2g%-rol}aWmH;0&w3&3iu#fv0;6DP(fO~+0EMXU z2Zpg`wcNhoT-&D?%geq9 
z)3A=Ku}ytZ7e~zWZ0VvE4^)pc1eboHbb32PKxj$EX22g6|M*@>PUjF17Ba}DGFJ|N zsS*u)7HW2w@K8iUd;M1MQO$9+a3!l@D<$Y3WVq!=<2dMPw~K^sSe8|gp%Q_?eK-)n zfKnCf%nfXW{ijJ<7_J^Rfn#*PPO+nCrtKBXV?cJeR4Y)aS1+GxXC$8e#_Egb`DUx` zuW`e6gCys})iEPnVRtdi#*%>unR7i+!aV0SKpU@Ti)JP<^^ za6ub30?2e0)(ZI*(jY4*ZcX0%-m?CIp52U6(L||5qq?#>b>>=jLR*<3-Ei2HJvH18 zvmZz4EYY@ffLYSEQ2^${<=!OxEYee0t?6pUh=-C`3N2*5R||8=CdTR?$Aia8IV83H zFjQcf|62tQnFjoYp}&-vd@tNzyC34HaK4lQ2Kvh-A=fF$^?Oq>gbm)K7{h%yg8;X& z6hZiXi^;zC?+W#D6{XZKM+;uEW}DItjLyLb;%fok$P# zHo-nmcjDwizaObArMheI|Nh1Sf0tiaHf*_x#>i|@{0DI4+*JZYLf%iX_VD@v!{?bo zFVVt?{nb`{09lBcl_PP@)Y?#n1i8l8A_VvccJHeV03I|Q+&KwX-&$bi@+#&M_{Km?4V*}FSq6ito-lu&#`Yt)5JQX1XGot`=-d4jrL&My{O@g zLUrm?+?so={U=KN*ZDCICJN)GBB{b8F`LB?Qd(c**Ez}3B?D-_oL~i81tTKSQ&4$3 z=1z*?KtFk)l$a*J^lc)3*E&8O`Ju$jQqT50e-NTs;?}icTa^?W4^}Xhz+jd7n!KD`~-oK^zR_ekdS`_Yvs#?<}8{$DAKPaA=B!WL2JTqxD(FkuN2*?)#PaK1iN18(#tEPKV)HUO-!D3fasYd7_Q@Z9G zz^S|7FD6+EyaB;VUJ$aH^A{bnrk$STtThh6@W=ox7db0?4hQO95@ z-7y883%HO!jYuSDMl>~+U2eC@f-XN&QB&yle0Z?Ft8=MLRMng5B}@8j>IT2CX0Rtr z{oQNC-8u08b>7=p94eafbz#Y`axA0u!lunS>P<#`Cxu{*%=2o!SNZq#oehNU&JI7K zWJxa_Qnd-ADOfMJg%tq(Cq&{ug{8K79O%B;mo>XH@WQ`QT**@{0YK&Ga`1haTafL-Q;g2H>LzIOp{V$Keyad&93Qt<7@ zxU%HF?2Hw6Nu@^V^iE0|-(%-sVNoW-d~JK)F`~}Yt+$TIy;RA2Ukt~(9q>yFNWnc( zJ(ti~j4l)f)HaGjN887Bzb`HS)^s)ittMV-SzBk}{__X5B3T)a3w+KGM|M?%Q3$uk zi;7-u4#fZ^whmdDQ5Img@EGkXTcSrh&7}FyuyV-t-W*-9woVVj90F=P8&Mx0(l?G+ zvtfX1mirzmk=Eyu*fDw3JpdI_jBO#=_N=vgDEMVlC`rpx_|G^_H}x^gjx{iumiV2H zxrC70hDE1*u4{LNsFzI0O6EWJ&&|Eq%>Oc~jO4&)Rt+q262apiT@0`t%(4*54=S;Ew$5hT@mdcd&*qqyH;jv;PZR{2H_7@&=yf&7en%} z%AfI6Sjt#~iQ9&SK0r7X!fdHQ=ua2R_ozSYRQtjR2u_twVNc{EDH0m?S1#W!UXIZe zc#T(80&M1CX3>v;a(3ZIMF`4WZuPRxsqM8cdg9vXCPICp5=uk)4h>oFF2as=KWNtr zt12WEe08|X-Z=Z>b#+$FjuyKZnD1I?3r_$C7*SL_9y7?G?xVfY7_@+W^kn5XCPW?R z14KYL1i@7j5AM%viyw)bMpFFs8js6fG8V@Tj-)yO-hGHl`(1=~*;hRQNr~sI@5BV? 
zG9ShRLHCabyU9-97bgLH9E(fZ#n1H0|hgF4W5mfzc=*f6Nw4 zf&ba|hfMlRK}>Tr*O|d`I9#L&d)`w2)C9WT1@Y{bx56P{;_sOYq^d~=UrSXN;Xq6_ zuRW&d=F|^$$v1Y#U4??{gFnse*XJt+6h~UI>zHL ztCBjAyQ-hD+fQNz6--(DiJn%z>Hg|Z4OTUx7N^Rgj*rPVoZVTcfFuO^!#LAI6>`6Q z7}@y_^f9=^0^OuckB&VoZtKpFPcLe|B_Lm7fH(lP0#E4%gu7oT*3%+4>8)Wd^CQe# zt=XUe*e&BofL2_Y$J5J9&BWzQ740QDF0?@_rS48;i3DC+H)j(n@p)>t>SOK`akcH4 z>_fPq?WQ_&o5e8-Wre>k2cr>lsIWEVFzrp3=hub(X*X`vDUEZ8#XWy8ehcEtc>k59 zJcHtTL1%I$`1DUFg+PN62TS-mr^f?L;iSjy4SHDLiX4ZjM`9W#rSNQxlW|ybBe9@5fnyHiL^)kBGKg_dpM;q2K2$fH`iv}d7I;rg zB<6*@M56cLP-*~S{FX1JaoXdjVN)K0Mu0@jWl;$K=!QKevvHa8w><}@=ND)r_4f2n zsqM(DIO?m$-@VTX4AD6pv`AC+7E0T(BLsPIy{vTEE7?@Yob!)dlbEmP{7>(K3wrv}YZJUkJ*k)tf zZtOI+_3b>*`+nz~U-{SGd+)W^o@~-%H%ce~7Md0IhXj<9M96tstYg&#ltxv6Q1R zgfJwNd0?PUQwlx#HGS zKU6U)r=$h^j#^3*wKl)*nCvMOk&8nfaKl~yu}ZylRo?(ZA?*O@4#t=fQ$@}y^EZzfps#-3KpJiPJrP35bRB}im_3PB;Hk;u09~Wa zAi`d6qs6TvzStlKYS1x-AdR@FO$(%0@R0`BAaJ~L8k)a*g171P-m~s7x3$DP2taRM}gf$Rk80v1K zrDG}ExGvbGhEr+|ky9c3DNyCK1$*70J`i^Adq6)mb0mRT{trR~?_O%eWj3K{@cuBI z>>r`{^VeKS3Hk8Y9P6@^7#4CePqOdPX3~0DK?&?*l)!hYqWX&>uk$VenjZK)rC_0t z+OvXWYhR@cx&UZbH;MV>;cwc-Sh|KZ z!Q8md91WJaa|;j6zVe?E>>l?ecS;i-zY$`^rGx3)T7Sq16#L-&mTC1krS_H#$EK4h zVo(StD*9G&S}>6k4HEs|NhJjx4xCsAr|$_>v;2<^+SnD9c1v9^QI2GvhhO#c$m%-# zjq1Dx{l}tGlg`?|FeuuGeACavj_0(%*y^}D;SOvNZvW*>`k8JO2@F%->g$YPc|fYE>%Ua{iYRb92gZ&b5mDp_ntd|61g6|}1~^ao_NESCaK$WJI> zIhn@5zlR5g5_OkcYiVHqOdEdG>MS4}6TO4e)lHY%lt2l5IDKM%{Jb|a=66slCN^ml*FnvKck zh1F;Wy3ZKq0xZk7%q%)sls5f27uKa6|7v_ZTCoHFdzBUvP>PV=e*NVMn^xAFc($BZ zbzJpiqFbPIO#|i~J(_GR@wFNPe#x$J>4J9`!Pix=sA=Hv7~-#u+oU2 zOq`H?bST& zQX;-B4hN)JJ^1`Rh;EhHiQjmgVrupL$A!kZi!oc%z0XHGp#3k#zX=1f5yVEfE9q?8 zVnTKFRG9rCke9z2!X-PeQ)x=R-b;8TRelz%WS~kK!OET;s9>NPIM{1)K>Bn>uNa7m z^Y-&%K8@MCd03=oDo9-}&cJx?+WYj#xMfuSdVeCTd_U~(wJJ^un$oAJ(_O5HL{uMw zkWKU}Jg{hQRf2y#Tt;;4E%=R%F}94%tA!nk8s9?6OvWLmo1Us}r?E5@&kOB13rB{Y zkE7%1sRv{uUYyk-czoW|70keD(Q1L3=6iD)z!ngbDDc05!%+0R&#N z$~i)zHk1hnma9HynX4c(q2hyWg8uon)x6Tg3s^sE^2ovctFuId+VFlXr>)cB4V;HJ 
z{ZLj<)q&qc@0p0{=ZtvuosB7iU!@m>d}9elKN80?kAGnnFs%muuf*}cDsf+x#qB*! zGvbggZl^6CWj&wtpp$=ax}aF#qUGVSQBq8$^n760EG^{Ieoo-(BXv04Lkkq%O=k1R zpIQ_s=L&kc31vdqp9Lxyty=*lvMcZM# zdTYtMS^TtsYGE&h_>&)kfRI|D9F=phh)8TI&S^EJyuW(!N;2hn)AE*Zd*(1<&}^?3 zGWdnoYsm`M8wDG3LAZJ*+FX;(gvD%CgF*J=d=9uv3BU!~ao2)d+MVeB^k;l&>?eVe zFWdYFn&(QPFjLwzt3)DUtwjB*ah%N5IHlB$rva#bvY;+#?CE2kHlHd8N=%%wKE#|O z%7YERFId;RfTuM7xOf7@%rSU!m(!l;v>q;wE?B7uJEgU$20!FmF0iC^=~GHc^93;{^T$|IVRT5d( zbxGu^x?#RCq5?Hzf`NiS!-Ov?cXlay)Ke=i_DqMXrodN=(OXq-Q0XC92}myjsk$No z5IkGBmWl_?d1Drh3d+q1t}J;5-hj3HqRN}>A!C!2#Y-|`JlM8Tp&ZqwkD7&HwQ zY!8-hK=TpbWk6ezZO|8gDZ2H;rP(8@)c*vkT&q#4RLVf3kQTJM*j$~HhNqS-R{ycP z-)&+=nz}L~fFSqhG^=-bCWBY@3b)@KE5cm+A40_#PtnuEvtB6@8f=>2g9SRvqL zUjyBh#i3G9*Pz>|@A`#e9wJRvT9R07j;#!YnEi3R&xyqK=;&+BkuT0%U>5;G3MvkP z)tx%xOCHY)PNe{3LKCu{6^mg?%R`w8O?j(oTo|9pB@?$P(zn1xv2{k_e_m!`4-KQ^ z7SPWWK}36lO$2s&WODIBgUw!MDOta_zSt(xU@zXgYtZm=JX!`OjEOG&uaPHP8Rc(8 ziDrQaY(&ROw7OzJ{36VyZI53qi@2>CTwjUVN-+TB*%0nvhwoSNbU+?;8q*kiYO>W; ztg5W|wILkA3ysL>axS{kWWDK$Nn^HhmJsRJDp*k(`!1P8ga&ZJWcKE#q6zVMdvb}W zYjb(oEDkEHZ!aGze!-a?G;m?*(bmTdGc4pcSg2@{K3J}nXZl0H(vP96)Sv4spoBg= z)87WLoB)ULkufL?e~MF}S2pL&H@GTtkr_-ZdKoY;(K7;@Qox5E0P^*Sygx($d-b-syyqFo1&R@=`c@O&u+xVN_r+oy>?Ef=TIc&lFClUYnv z0N)P8n`;lcx?H+RyQO`57?+oq`5w&McOr`SB~o|vIak{dCi;6ikuf%cn#JPi0vFg%C7Zwb z=)zIhnI<*C6L=Iq;=B17;iCw%dI46zbp3GNuRzDs!dFVvipQkANFR|;>RtM!w;3qN z>&(*KTzWlRz8qo+fyfn?*luSLPH*K3{cNKI0fO|94U;M*H(S7_h7M zQou$C;!~B3%_Gw5)TbZe3t*GWY*)V0JFTY2=(*+a7DJ9BGv?)@H?t4}(Lckssb<6WK_ z$32AcnVKUC$3U+g*Km}3b9h1$ptPkpha4d?d;}!cVn8GZ*tcB2BA^H35b_+Fyyu$L zHA&Y783+MJEn(pncvxW8R1e-#Q^K|1o|5{(mZg8}lc{XB9D5Ie*$ha1Iu4N;>6w`D zBD4b!bfouof7uIlEA@|QsaFVY5-C@~enxJw6ljbApv!fL9GUe0Qmfn~ccdyF0_M9`Q)AE7es|@~BPc$?O4mVovRyH@d zisU8q@b^Cnnx&uBG86dTKmyFSVp$FtD~&bM;P6BP!Stc*yg31f-NK=`yfmri(oT^( zF9bipPL~JDlEltEvI(3o4m5$rqFGe~xJYL|EQdDnq3aarp)Fa?6bpgrUR+9xPKWEd z>R-)n6OHhP0-)|bC=K(!yC4X!+e}za7TPmg@Y1vqQ#6HUA_w-3=|6f}2tX=)7s*yY 
zmG{*jJO^gKPb&3Y*=^**w*w}%Ez-ioYHF@aYmR(s^L=}J`!R}4OG&~h*Kch}nbxj?Q~W)Swf;&|$>wDzKDmso!!0evW`fRJPg;Tfl= z{2qV6fYDl7L9pa%N{QRg_vo)O4u=CU=&R-V2R$S!HgXL`b|1#$q^hNkKl-^(%gQ)#_$C`eb)Tw@~i)@hgtiQkg3shT+i5UPwBUL7F}?9TfXAG>=9oK*GYbVtk0~REcx&EdqtLVv8BQnUb15CJN0lyQs3{4XFio-($>eql%Q~Vdoi1%5@HUPj?M6W`m+lkIp zq@TID&lHaT8?q>%{qR5yLAyXZlwb^!_)LW;=SVCi%ieHO_;P)5*+g&l1add44Dmy0 zzD#=7wIlZoeCjHhWR}7syE*x6p~K<}&kia$eGufHenm`H?ZH^yJH@lVFF@Q1?kA*| zApI~N5s^Z1*}qDsH%J8$TCW!e*X}q&z-RM#AlH|6+mNR7KKi`_RnuRg~cs0WU4Q<_C<2Y3Kr;A{vr)zr+K_R zWq1 zg9wU>S@$EVB-v4q7Vo+ z=M9X@<*d1!E=fH|#u6|%9W5jW!`bI;j%fiEO0)n#3l=4$xCKD1e7^i$T3L~ChD9iE z%-Adzj9Zhqf3bZwR&VXKJ{WCKU}8hA*KZyO0phXrHARNs<$Qy%9|RLCQ!*YCNo!%=>UqW9!P)TN&_CVA+t%TTKukq=!_)2X zbXGxmD_n2<=C1H6Lc`|%SM}>~Gojp28k`4rN))?CHNEI8Gna;DH+$gqq%LnmcI_=8 zg!k)<_=H$~;e~3*k^A47uPwoEi71pX*0G*jA@z(*GeOnb#tGoEvwM!g=e;>JDd2`8@~!Sb^cIT7o%d^S zucTBya-@7xUiS>*&Q2u$0%2cm0_oEs9;%>{s|1Ol-KBN?5Z88$$9L(6AH#<0Gp2@j z{e`_$eY%?Mt9Z$v4Fo~I#d2=I2=d$Mv1)5oGK?1Z=fExPdOfmv%c&l#kj)h6gDLLT z#xxpY@)uUH2=uYs-2TyrBLWA9v#(2w0e0wD=^z4RWgr+tA?jcn{afj;M15ekI^m+o zCXlVl06+c)WPRoZMI8=k_&6J_Z+^Wn<8dJSdh6-*zoe1d5*9^jV4@dwQT|#E=j$b_ zfQ}QIA&HTh%;KrJWu!PD0+nmlMOvs*Nps$376P~dwG-uMnf7Aas%XmEsD6L@nGM2E zh0bT?!-4l}tk5~R`r#aIJ)n(~;QCWx2!bVx*<9Km7Rv(>KbE}J=Z~Cz-E}Iqk~Qj* zkb0r|i6WdUHjMNu!I$fakolX>nCylc*O-s=$@451`L3$nPgH>ZxW$gmVUdvOPw*)p z)x-E985YU|c$8wLY1~9AS?`h9OMK%PN*PDz?tczJ@Atg!BV(lT0bW1?0r5#;)YYm4 z1q8X;QHO&zmf)9Eu`C0)`Ate%6OSkm00c^Ejk75qAj2t~EZKQEDP2eckczKp6Y1iq zsPn~ZQ>xFzQ7h41qB4aVjY2l1y;~j($!zJd8%CGmG>Jb1iGB?^-2|$mQYx95vQpiy zgmz66Llvv%9WnnLVxt^irZ@B_=2^cbdSt4juy6#Dq0x3g%8CynR?-Ua!5xoYxQkM~ zV0yQ^aLAP1zWD*mTdtx-ddbKPM*ELv9IY)&h$kE>Mpt>s zy?{rvvc{`GU(#AZ1o7evn0&6#?eq}Q`oB`GBRB|&j{qCH3k0!?8a{OUZxFwcB9KY3jYs3s z0c4suu+AoXS<^!$$72WXyX+kZ**rGrv+X4DAmP)CEpW^kYf~QH3!s~rQ@GbtVo4?RvL}D zf&4`*Z5FSz?O!|DvxbU)k5Tp)>S29k%m52IhvPP_#Z#ymI1kV6MI8Lf_TS)RBe=ct zq0Gg3QQf3I)r8m@ftt9#Jer4jdQ|ied|2=b?Rql><++*4nH6_<_pS14BR6}a7#35_ zXi>PFnHa?K%RQS6k&6C zAIUzB>!$9DpA$`>pjRCx2MsK@GpVxLmy<5MUgN0BrBwetRkBJNRA0 
zQ%OM|q^`P)r&~$1@hY`p`?*LISz_cA8prc9Yf__DhXXREtaIx>LL+MY1fGGQL=tg( zN~QdK32FU;<|bL-_5bgOeRCnN{c9A}Sh$PaO}k~c)yb+$umr{^{}6Q~F2}p+xzwg( z={nn6y`;JBuUKx%S|f5zB_SeXXrB_0e7;dcttl1d-~09xOCyY0(Z=#P=X~MOcTa=^ zr-RJ~s#Bw`XX4#(W^*DFD~{NIAc7Oj zD}J96wZ`9>yz&0#ewW^dK`ksLmzHE1_FK86%C-2f3nXsC(|MvsyHni?rDk~Q)kf{B z*68Eiq31G#&NS9)hAFtY5lHM(kSN`VFcrA_mj~HCiB-~SJNHlbu}|-oMAB&u@W;z6na;R%XfA-`aBCWJi^p5D(wyapAytsqp`zZ`Jy?s^X zteH4~OTD37Dyw72!mXFR-hEQ3X6A(=n->09fI4F_7WRicCuE#JO>o6&k7 zc{KDvmuk5tWE2(g>X2Ifk8)+pk;OaZ6DJa)+4KHN{CyT@IRax?{!V;44Qdh#dU_ZLO7$ZxdTt@pBjF-r4dFu;;^z5j?a zFUyEv7P250tKLU1)QhG55^Kh>!|6r&VKF*NHvKa@D|35+KDr^9I*XiB8hK|L#e|{` zS@a~n)Ez1B(!I{m<#6?F-ghGlUZl>xAI;q8C~`7U9ePKa5Pvi)fc!+mrmy;9{Boo^ z@RH18k%9Q)_0rJZ5yHKZghL^uZz`pS5w|@YWm3|G@t{mFkwf zx+(Jc>+Ai-uzNenrx%0quaCMrCc_-?z{Dw}E^>?c@JZ{!(}#OxhfzDjVDX~Lc0mal zOb1za<_r**c2N#4ha;c!`7+iXb4nDDIM*hV*2OH)@(D!nixHJd>IQ!vx;&2#+9Lse z9Hh!AGb+T}+jFFr!3hDeAG3sV&v}pVfUTbgc4!B$x0c)vm3A`#d{1Dp)?jzLS~P;x zUs;H{;(LRFtcXz<5&Fc`>-h*Xyg-A~Yya630YGdMlS(APNxm+SuC^!&^Xu|=V+!gQ9#vflQ6}xQ zJ*Rccy%L;x=ua9jKbh~tY&q?X+Hgvta8Pk8KIvV^TKJ{?eZ}YPk+b~%LZF+WK)1-) zVv?jTcyADYcOczH=1@-OVyAN}I9*EYCcDQUJbIb!k=zt>uUbou^7Lju-S$`Zn8#^B z08~d@xq1R^TE_h<#uZODeHRaIKCC30+&m8oq@VBrq0jPe%}m=Kj=saT!S7^k=Rd1P zWLyF_?90_QQJ#&imGw3&f6@Bgjt6`i;x1kc$Mn44U+aI+70cDs!eTNjNW*OdrlA}J za`dN19RE~WvNo4f!V*M~Wg_`bxpf9n{E@;9SD3GF=dV7Xr$W4NOdUDL^puDeq-A)u z`|{9xZ)c8dul)lW28*&HMFVN*rz#mr+P!2#!z=p+srL;(Zz`8v{ke;P^MdUULeY%Or9#^F02OOVnY-<;UNH|@8cJ8(u=sY(ajylED zh|>N!ZM?ZU!1uv?Ui`(vJ7hgDX>dC-S>cXV=ew6pmo!?{++%b?YxA5Rc^&r?-pOGd z>T59S9&8Eim;}JXEfAi4s&$!MlplDQzoY3%nbC3j#if=XczaJU@YqO`UY-al9GSN5 z%#5d7tojA(V$3{PyS&YJs{JaBGr7+->@?}M?~V?Qo!zsKi%5n>mE>j8!28F3jmqt8 z^dJp7uKlOXfYI3mELK4?w0QX+O3#ab(f@lNeuGGmz(%S`kfz+z`b2$kX{~jw4dO9g zjid)cc>+0V7hCa{#>z76I)^QuwE~KE)4f`~oUXX##8|u^gB(v6Hpm(|-EXRyi#4T} zQ~LDgEf;E@ZR!&fj4WQ0UIj4sG2T=qWimO;Q&T#}{Jl3f}B%t0$o-CWVl^8l`U4%ooyR6%3xHiBwef2#LI2+H<%pg9NSKA zYC0BojYiv*6CBk$-20(_mrV0-8$|i<6d#M>KWlV?2f zElO>UcCx%+pYI`$#{E%lM#HnO-G0@t$8@8&Ce&s>l4!Jz1i!Sx 
z^Jel)Gr}2IOsG)z2E`kv=~>3}3?yIXOXSP4G9GR*)N5TkexUTWm2C#}Lx{RCV;10*5;$d9<2tgX*yt++vutlm z(l$QRGsrF1tB&gWyqX(yoHr551GcL5SS{mULx$ITsVE77f!(;?FDnca8BHT>Pr4@Q zk$9JJQ$Fde>+zOV?K<}RBg8Q(ZUWKYo);{FrXIRnsWW&-7vArN2-8P%Tg0!tZ04Fx;9kKsy9wqq+!Au#;;NtWn|7Ml9*3hc!}JNmH6L> z{_*TdNIzZx(q;EbA;HabFApnS<56DZf=R%7T;71XJ4bmVcvpK0ipaX%sD|3P*npAN z#7d~>+$$lc6k98z8f|GP6R?O<*{NuH))+9_FH(d}xX7$pTjy>K6mq`bYtybZZJBt3 z!(Lj!6GaiK`Z_LjhCT}=Dc zO~OCX`QEM{=`FkzSS2;PPrusi|Xpf&^2o1a_b?< zxi*p=gvM{-(B>$EtKofli#X54p&8FLPT$=@fC7dIn_8$KGq&b7goTOy3^pN-2$R*B zr01}X99~WdtFum!q-&q!cysw@pCo>HmvFG|DKhI2H0`OBEWRVE!NfY&{S|}8+*9=o zzNGy)W)ti!uu3+n=xD^_)nn*i9RBc$`mE94qtk)KgYvtB=@D5>>q+&B2uqJJ1}a5` z|NZ5_1rSgm$pw>AOfNW)VuP_q9-=b)oWTyqF*Q|fOeRy;KMS?Byy6lI^86a0>o_T6 zvK&yS392IKn&=;3K8e}zX_`2C@Me>+eL9)?Qv~g|Dn`kr#K)7$!k`k|-#|K}w)QkR z8k!SeG?G7kWi;MG#;DGcQ1K`^oN(Zcvo|*1kA;8D47#qe%L^IVynj;TjrTr>bD0;g zI4{H1D~e@E?!|yAEYQtvGENN(PvX4yYTS?O4ax86je#^P5UwHQ3v9yodFU3&>P3|o z|36P;JI0rh`f^Z4lMD$AAO6VlRd)4oN{!3hlauG|cjvsbmcw|1eUqJ0HiK6@(CewZ0+YOm8pkPe2DujlW^2T*OQ`U5|~Z0^0p81(3E zH^`tC7I%_Vq-L=R3(TjxGh|~DGs!8HNNirg z6rx zd68lROqipscRzMa=7;um6&$;3nkEg0QWLsPn^ExBgMo8q(m8Qc``>MMA{gdtS8j)o zGEj#L$J~!+t$8z5_bUrA5GXaz*J#zSsZFx=)Z9{DFEX^OV*$jtdxn;ikL`xPt<$Kg zm}Oo)^4BcKtyI~bn^@C`?fd!8Z*a|4es>#e$?$#h^s&#J?O2j1x#9J6()i&c(`dD> zmzuzNHuhbTH{CV>AC^^$IMRp=MP?ak3}2&d!J4y`2-tK9iTMbkCebJB1it3t_iXO} zaPWokN3s_sRKlHVN`HC1$I?WZo#d1{E8Qj?9jOqJ+melh+?trkT-n7y;lRUh23SMl zegeWTWk=^Efcy4UApe`LHG_9>>lnABt-!_x)&2Ut@8pPHcUx+$^|8o?+0g8*+~1I> z-^W&)m@a(T;2}a+w#qp8Pu#$haV)PlK_;Hcnu;J;4$M?+sc$&^k9lU-4&=5z#Wz)-O4j z`VhTtcnzlX@{m_G`H(Mb9b1-jhDCo03%~SQZFEXNt2Z7?`AbHR_u~beDp0e{GlBbn zh&VBw#jRW#{v{^0E~S&fTx63)Z%S+fw{ntyd!6X2r9!=7HO&gek7<$HE#F@p>u0OD z&^MeC79N(s34C7L44)3EdsIKvpssn{#ryvwWj7!_-IdXedQ3AJ2Wx29?ZgTW8qD@l^6q4#{eNSYxzzT z9^Wb}uKj9TvPxoh-`aDTnpt;})tmwj*6@$VB77>nO|!8<5>9q@w)YGFLOl``^aB zHl_J*i`GWO^yWR^52q>%(CR%Ij~yQEkNMR*oI|L!|IF`me_i|+#?^dSMqInFt8-)y zX8ZDd&MPvzyqbI*c2EIS$j+(XR;9Foh>;mnq 
zIOF#owf(!e#P^u@J9T^MQMyKko#;fztmmwcbySfiVG~ds%T5mUZq|RrPS^ZODDmbmY!&_8_FFx(C7htg#ilp$R{P?c z^^<+3(P-SL2+_@gF+P7k%0OQtAL+xJSNk=U<=L%D2#a$xE{A<713L$fZ@?4lnrDP3 zo1;IMR^jgpE#6xxFG5h5f-BO$xi_@3Lu?lJv_xuU+L7Ti#I;J#9;#B-*3OEGEKe5ScEk?z&^cG%(eveo@1F8)d@ zAZ?~%6i1+TIoK&1G38$YS(uHK<0CuMGSh(3ooF6SpG(AV937MX2GQB2DQe$$M(j?Y zU#Qh!Eg1SUbL|6CyyRH6I`l0trrWAP`I=kq`vBP!ZXZatKHxcu%)76epM_weT7K4Z z>?<1d3zON98kN1~o6)(`_6hdM^z6WZ(4DZkiYrwcE@prBd0J)1(qQsW1;65Mfg{_voSM ziRc2-@W^?u^p|?VF(V4|o%VKJvEV!RryG0oS z)ei>Gk5gKxy7vInpHELO%jsrCPz46%U$YMunO3y@hDYbx>+6kq>zZXfGa_L{-N%_g zxl(N`>Dv!KcwGHj{y;L(z)Ug?+^s4}p!gR_=5#*MbWv-f_~d6h&IGaMcwNB|jlPdo zc=tD$PK>4Z`=+dDS2T9RkviNsavnFk>n4tR`;|jNMq|qSw!VfK;|j;5$i4NZ5w(fi zemKSmg)%;s@k`ZPZ3hzD#{11If-~1)CA1A~1UOINf(At!2$$3Su3e<;xFCFhy2|tL zrGpnN&lMU@v)#0GXh=Wc^qSFOz0_=L>6ZP12>sqG*GeF;6+uM3Zm(V=35BfNNx^If z3o26be*Jt!PjCZ|ow`IE$0v&vA-Fk-3OHXP_}NAdPu-O4R0yrKhW1UvD}L?qMyF7b z0O5q4TKp>g;bq;lzpvVPJ%!`<8tzYaMHn_dAjt*P^+rO8cBuA_&rPWp1#fesY?6zvrIm0s-0r)g zj4NVmVNV0OiWFB!zK4PE_{+A^Fac3ZG~rWet|dYxwT#ueE;vGE+;pb@%L3TlcaTCb z5!4}qbt53+>nl+tCy5w76}l-ljogpnl8wF2e7w6*52loR?hTo+cmNIL01Ixam_Gyo zFVsHQFF+Khv%#l=C5eFXE%5RCa;mpi%kqo*!}`|qE~yuMn^~V%wjPySi)AIu@)Eyuj!=#(XSvJ|Sv$ zawgI)vs)iL*}faTXJCkZH+bH;;(?iLW|0~NECyaa$Q>p`)gBplp#3+={BZHWTHQnn zb$eD;YqIPXC7O?g*exh@b=Mvr^5}Nk;ZKeXHgtt6?kWRhV5Lm*#00+IGTtjo>D<~p zVaWU@;m+#h_mwdh(}sPT-DlUmhUn@1E2V*=FC#V?kH2~%KJjx*rRXp3wd3#Md(UO1 zWKj8GqDu{Z_x77`pA#&^;Xa}Qq0iG1 zYt~vjD2^8@YY$4F%)~9fpq78jVaTp{ZI;62omm#OU2a}iX{X`4>I4SVxm^*-9oAUb zx9anWqzL7&%gx&;33&zBrvz3aMlb3R)nX)4Ew+m!csMf%T`ABZQF2y7FzHN+nIWWb z2?2#Er-p-PS)nwRC5EscbL=#>8NWXw-|t(iUJU8@jWrf1RI(B~!j$-y+w7*vr9|;O zL7UP^{u4QNn&(yZY>4tPR{h)3No$TEIimRB(kOhRK)Cfz_x(5bu%Q4Q0UTbND=ifD z>gwN7RSG4|LB!RiDEJBfB!A6zL6p(+m4h-} zEuqP2jKh1upDT$v-@k?#rRjg;Gfd=HIvgf3NpIKhTC!h2)dVM3GP;D@s%siCsTVyB zdwyUDBf|-u*glEIEJzfOvRUQoqC70iOEJ8-dI_xSnLk>`Qh0`g2}OE?(J?61ii>Z} z-QRo~tn%40amSQ)1jjWyXh)N16A{*ZR2#92Jg=M(^)E+nEsaO=8SLBLY&abkxTr6? 
zS$M29QpD`pT=Ra3s>ye38>;Y$Ad45BhL`9r_qG8xh0aK#2Z;!ZkS*^MuIqGY$NSHM zLFt(Z7b&$JgcEN5r9IFzd8dS5v{J?W;=teXkHuOqsYHxxNzvTG4zqZm9ii6yI;58=x(e%TWLJ{ta6@xI%4M|tv21sR zI0UxWK7u=PoXtmv$g{$y5|zhZ$bGxld4Rs-Ej5levu<@IXl*j{!27sLU>6HBF9q30 z#IhU`DS+>g5n+_PE?JU1fy!dPD8cAxm%-U8<#RU)U*IC%5AWc5>ouhoXB*2=dL-bP z{y3a#XnGy@@p?k2xkiFfdt)?d)6}tE5!cJrZq_)uY}0a#*57v_5WpUDSiH`JH_=M4 zo*+EBeCf$>@hofNaq#nnQN)_;0{%z!s70lO{w$9p5IzR;^+e2YDC-H~IrqMI2XN9) zI6rjrkhL3HIOrn8e5d(L-{N>|+8ec4`54Nj-I}v%3ZX5OmceTqY)m^m>5r8}Tz?4@ zMey00>->99g&fHGlIMC-xVY{}D=3Ag0u#Y)wWM z2B>p^+m>}$*J+Di4gb`_ba96cV>5>}T6b(qzI8Gz4@QwYb(7GINY~*o$8G_!`EE;$` z5X~FbIzFRsaa0G%**@_rHnRBEIm;g>{K@Q4>cMPcFL+#J=#_0cKw-8g>^|>)YlI>eo>&#$2J@X?`r=F1r4@8?5V4_x$=covj^qsb%`>|~zTWHy zYUwFnmbxFliW}(nk1(1ebnNJ|xURYLgn?u@oxb}}z`usjc!q!R%?aB|0Hz^&R{KCVPiW6xNA;L~`WD~wVNCfzhF z2ny#L1J$!&4ceu6o&Lgz%0Grr|I4zEo`b*EdQjf$m4zAK6ttecpahg;bJ2XFhAJX6 zqVeDc@wINV?v_vU6^kCz%q%6}VA^?v3D^-q!v8i03YJ|%gjO4|NvUmI&8>yIh!MZq)K`{Vk!(OA1MPyQ&&)=uZ0@zZfn(I76C*x?>XnsV#L$~pGh8bt04?>G3A(7 zx7Nn-@@tkjVE~qlK>8nD*Ff-h=ydr-QT~T`#%CeShm|zvZhG4i58($mBI*Y%S!*0a zFu`^oGi_p~37mOy_}hh26b|%GqZ6FmH`dq4Or9AZPv^vOw7)`^#Qz_!=fnCEr~6It z@z3HSf)J5WO`EiJeMqyV(h001WhLM5Lso_eNPJG#@)Q#%hSNXw3J4*%yo6KDfO4cp z6D!Q}!ednh|1Z50xCxuofPDTh;Ww!7v!6o2sOV?`|MmJSPpwa7*;0?lfb;5Y-v1+7 z(>g-T9lip2_k>m>RdOF_S zZ~vqE_hEcCR@;uqXoSJ?8>MZPdV9RjHoJA$Z4+&@d1Tc1gPzX?(P;IiV808CN4?o{ zY#WQ;!JBnODi-*N-Ou1PC3Yd)b}u(F#dRydc+{L=ME} zf4L89l35J)W^RW>C~|w-5p-8*+MYFE)jait9-=#Dk-P)AdUw97krd`el&&|@X$pEiHAJw_ONWwsy~PxNZ^qeDjr7~oan*09 z8@?lvPg_Dj;^He9?7z9;glT4b5-DRD5c%5AUg;~qY~SJvy32$BeVlGrf9^K!W2AtgEZX)w7knwWNf+P- zsGzXR4>z`*Sp3Q__G6?ly;tX*?<1>!98Na|qHyxGmV)7MJO>+XH}Lk(HygW|8{ZXJ1};!Iww0oWd^(i+m8_cCV8-w>#%iTC)ixi=3-{NX_Y83 zB;ky8a_jC?bGNUmGw_;u*##5nVJ|t`=wRa1Ste}KJR1L9#-?L$;k$HI%MoR7^^3`9 z?9xUyYiGGu2Z_YDSEN)B^}A=yE}b0P$fivzO*<~vh4hE;;|&3iIc6*CV>*RGh$?2e z-tZt-OHU2V9ElUfbL#{4$+czHv#O3KiEuP3lhHDz5~9u*WOe++&M0gwXa=9KEv=VH z-uH*6yz@B;7n5em4yKuEzM0#Rn8jGtWye^9Ap0ReZ&#p#D7k8J^@h4RCF5Qrw3E;d%ifg040mGd(&i?rmcArbnDV`^(3kJ5Tn#7&CWK 
z@Esc`1gj78W;`7T1fOFR82oqv_6tzE+x*#Mamz6;gSaakPeAs=z~Qz&$a>hX1v-z7 z_MUKZC)86JH)w!J?7g-`foK`@@=m<4{b=ocobl-v&Mss}o%=ka`Roy7Dw`8OGsy>b zlldoPK`#!SWRuRhC}$;PSPWlOsc`{7&5LK4UHi<*>vk3RVh(omV95%u0xyTj01E>< zlB}EM)FtvfQz#8=uiUzQ1}u%mf@Un5sozO{$$tELa9RJH^hQEtNY4lPs9Z;c~WoiDh@uhK2k$Ej_g&B*Yl*Mfa(l3wjp z1F!XVYy6@2-%@Wzac$QNI5SNy7!(@i^{cM4<2|i;i7kk1Z)@`E4ciVBD!KJzI@KEV zFpl-6_lpZ5o6(%PY4G$e=#*MY3zh14{hnhDlI1k8ERlCW!fP~bel%Y_qA|WxPbncD zNV(l5uz||o4H{sdAGrT^`czph+4BKur6X4-fJOI}LaSM=zVY!IBat?9S^XF~OK<@L z;C20axnCzmNTwjb$JUnw!X9}Iv;O&_zQu{zQkeu^&yK(B5)Qt2y8HRYS`?EFUl}77$3PrL*fL*Fo;lBR& zzV*BVDE(aplW4Uq$QoZo415B10Sc2|C;%`eNy+5Col|sXHgm)^oGVvnS_uZPZ@(PC z*-Mb+i38Nfl@=ffR}W}VG+NDa9FG48(phaA6SUr59wFaR2af;c+tGVH?j;)og8J30 zEU!3#DcpN|&MT$YUgHn=GTh|{ZoF8j9Z#dlV*a8p+7B;7f5PL1iu6b< zCXu_7zr;;z&gnmjj_1pg<6N_o0cknGL9;`E)O@9|fZ6uZeM-Me3;nxN-y38_K9mC2 z|3}tahE)}P`@(ccgVNp2CI#v4Mp|IgNQ0DggLH2iBoqXsL%Km4k?vG!kgj)n&b`k$ z@Be<$5Ad+|UTdy7$N1GixS?dW%p@eF1Ua2A3o8Q^PUnh=a9;$+jF~6g-CGY*x{Xve z$!1QJ32mh)YE>=wYLeyNz3z~by?M9TSIxto^8xudJy+i9;8#M4bZdIZHbp6`hD>h| z;?Ev>$#r}>>YJNAs+}YDg%ZPyPbGl1I~72-=*m~ir=Ae7eeQVBwCdSvY~PV>O11{e zX&ILj=`;2Cp(h_}H{W{pE>3Co;G%KqIOMM70Z;FC6u0c2T#kud4Qg$6u;rw*5;dh;-> zgL$~#_#QU6^~hu@WpL)+Thn5C4@wu~5T|52cNkz(ir8in+@3QD+7sgvQ^N3w(TEGJ zM$#7^oxc5MlcM#GiU->KM0j5nrLfb{FvV&o zsgHN9{#Z9EXI=C=gakdU+4PGpds;Y#HA#0LC0Su&M=E{8AwdI^C{5y!@5z-1Gw=g>UFxxHTemDHE~rnlB@M`^8#jL#M~&ft`SPZ&bN)&4hG z=Zo&xTe&D^&)dIt!NX;mMG*xIuA|RJWw!?6kmcYGpHa~f-l1ZXLz3Iw5VR2xyXnJN zvMh83*UolEIC^WHRt%UwKd7w{K zmn`h%WQ2_v<{5b^d^<(2DPw`~#xZBe%ox-%Vm=QSbKtRaRAsFIansMiiXe2uPc+gI z^QRSWT4&=++;+ZeF4FJe=mcSrig6*{d1fTYt0|5b{%QdwFEt4s%qhq6^fh{41%#d4 z_RH(9EL3fC!s|WU#?m&Du>uM#7qlmjL3w8?hwtGtUsQcN+xl5@#QSsSXJc;B4w{sA z%)31ClGJcGgq439mCjWDvl-sSevCb04!cJE%rsb)-~!g|q=R5<4>|#({*O{K#12p| zR+hcaeBeRsL!TuHc9H!;3A4#nm-4|7fDCB;zr_JK%z|Cg$$%BRU%i7>t1 zSo`jfI&34rC<%w8V&m)H|b@+Ue{Dw5EQi;rwGC#r&n6j1R63 z7Jl$ZgWpSI%JUAPrJ9-zSB_w@Uuvx6KYhPn+2rT3j14WMuo_O5Emg_f>`ij>a?)eM zZ+xBaJzb8&PpVJ9j71SjoA?)vTJZe@JE^JdH2XlxobySJO8%==B(H0Mf%oq<@X%hs 
zt7s5$z&OB2F3d&xI@h)d(}KiU5`KDH3ILfhBe+V#6$W&QIS;-%9-OtWHiHM2%4 zlo%h2;eabG7>Nu+~fMfZ}nM>%=BD?{GbW%6AQlQhf-ig9pwy^|Hs zIq^je3%uByR`N{#JbkTs=iqg@v@TkD_d&uB*Sly;nNeeiZ9x6^Ephfs$5|~g(&f49 zv2q`?Sc%_~veYt2jOyf$Z(e=KhGR!Bw0wzfaRO5t%NJj}^)t&u-kJoHu!z5oh<}F@ zV~6nC%AG`>;&;%;gVK?mgvYXa6epU6h{MqGL3HvjpWU>~s^7Wl$mizsqTH6kUN6{(` z9^oGnzK@dp*g0%kREivLU*7qp3$!v?LJ0PA6^ZB;k;60Ud*Sa&V2NUH&Rt0WYhE_D z9-g>p3s@*z1jzR%;_h8-^*Qu+f=ULL2SO#&bZI5Nb1zF;qj^gH^~R_2>!0!iQUP~6 z5=oWtA&_a$kg+u)+!Z(4;_t61u}|ON5mEF}Z<>uMNM8)10X?w7?d75UgVv08#}=s`rhg5-!UGzvPuc6bi0K9+XI`9q{`R=T@>7=I4@bVUVGbAH>)gE^ z9RD`ct|!2?=*T5pPlh!gQn`J;*ZtbzbRMC~J(U31W?AY0G~9y%yRAj7Pfno)Vll42 zg=mQhQARh{_PaFuhc=vCrd=j+47dGDo?562y-nn1=`RTt^n>HF4 zjzM(EO$w{(7IDQMoxMjt=c-r2EzF&2#vNk^M(F7hWum7)co$@jlZj9Z70&p%xIPbW znK)I{BJjkbW4gvEmbRy!$!^D8I%i@IS#8bufc90P6P`+;*XGN}_Z{WU8C^Cu1i{;m zg~k?4G9gKzo_{-@51W+qzH#cjDgi=up1`A6Sot4ra4*q9Bo>l=RAtuLiZ!BC!`aKE#X2Ft~m@5cGoG8fcM!+(_29hPJC!g)^7Ma56PYWBXuI%KFFSErUuOt5%H!hD|6Hx|^sYn1-i-zHT<7=j zE80lr5wv&Nv7cSx!l0l z`{K5j-k;SqajQXgbP5BGGWh_}ae3$C?U#a*F4ORS|7P@`b08+W!iOwYjha>q0c;Bt z^gglC@l@hYs*6KgQewmzg7`mLUU__6+`xBO2lY=4X`b1m0Cz6ij0O5|Fa7~}BsOIt zBNrc}#o(FJ-MAuJ_fG~{rob{8 zkn4INQRk|8`~!0emp7I&L$E|g02PEejHy6@kcHafgZdB!Jeo7OX2 zF%KM*oj_)tik@Qgy`SiTX-8SqOq>Ve`A;^Q$b&aucW3WYYRy3M$}%XHM4Uqt_9C%a z{1}mEOz^;Yp?(|&dqJ3Uo%T-=G9qru4!G)65W7B{`n!%`v|{q=4GHgX9}DmH!{dtT z%(ry6v%asZ+J8F0v&)eVN_~+at|iqW>ZTD9M@+bGxS!AqAE3MYj^ zzBQQ5YzCO9)V*xJj8AWWqGuCYo_@TU{vxku;prR0XFH?w{k1or!`=t-R1f-K-0SlR z@wl>eOnbxoIn%%)`>$T9n+!_GE( zb!z*>-48?~vY}8U&yBjb;Re^SJl!a|3c(1n#UQ~jnNH8CDEB+doQYREr;2&!YeQ)~ zG%^IB3j3<~&DZeqaKTDN^Ge_5`sWw1jQ!D8#fV!{{(QA}!-fSh-I!h*AuT+v_ry2v z_w*Q>l+jsC%5yBovU;!Cw-dkocthZ(^I77--e8+XV!Q#&-5omm>h=BTjj-iStt@D} zpMCMCvpkiy`hIt>+{=G^xYVfd)X_fr@_mMgL0#3&7mHdiQ~^YsIpTU#S?U%%v&a4e z`^6d8TFWVsb4n2oE&)5}?n+qp1zT_Ove(P_J+EQjT;4$P?8uV$S`hZt;Cx8=)laqz zvH`2K8$o{#0L<8=_`GpblqtqJ!+w4BA{YY!p06Fkrbog%gGs`pv^#20EYXdMRi~i> z6h!)cBAfWn%8TTo$)}m6qTA znxHCmBcb6jN=fDnv89K864^4sP<9I7z-d*<7f{?C&7!eWWYB1zLhfnQZ 
zW@AApgNNV!LD2bVK5H%<7i8NC)fLNN-=(N9F3HjO-O{S>`(Zv>5oHl)XDW~NpY$1YIx1ncu{@GN8$eRT5CTozU7Mgs@-MWR;hhe z0C@~Z$^1(Ha7)Xh-J*KbfhO7!(V<_>S;=z%3$v@Pt5U1{VZK#paoZ`@uQ8u4kPI~&D@seTql}AC?obH-`}>FT z7u`w+>uUgI$lIq7BLj4^eA#V;qo;WyM`&xm7W^3jXC(mnsXnhj=x|&4&#OKU{){Gr z*?ZQDBR&KcV&!GA=iy?9=6Dvr@<1$!Mc|t2Acd}SNIC;37CK(7=1MT?!jh3e_i#G| zE?ENYjR*#b4g)vj(mQMG01d&n71=gisI`juiI3;Ogp1d~t@G?%yvEX7(f{H~{x5j| zR#-A#6mbOdAU{uIk!z6TzeC!u`2vj5IN~yD9DU0;^aoty-P&n`{hShL_~`Yjj2UBh zc0%Mz-n4r11l)TYt-Us`wTylM-_71gr`k;N?hc6R-@TSv1nWaP&u5o}paW7@te7$& zr{49|dg`NW>aMvpL>v*b5oT{da{0}gDGeAcW}VvoKMvEx zi@f_aNwe0R;~B<&8ArF$jXKQo&lloodJ3k?<4i~++up(3{KMga1S6)X4A2u7>FJ_HOz-M&3zjpQQDAWG57~nD+nIyT? z?Sn?WZhN}fuQ{;!eZd_2>THC|$eJN{7+-14HnjZp7waz+Vt)DvHw%^%c`I){viR&a z_v1g?|K~d@@m7-BoSz-;pdR(wr#cZw24*Pq{(S7^&fRfmhE2ZzY=l@T`QCeZ2x*wC z8uLbI2s-g7{$jqU@8$J3nO64&tBi&7<##`B4qE$Th?;iac6`XAAZD)@p9F9qi{nyb z!`=07zq4Vc^rTZBlgomeui%844PyuKQFS`fn#v8FFyc&uX)8;V6ja z8+ihbi$SohbIBRVuU}I|o;CU07IGwFe%@WT(VBvmL%739Aj~A2M&=ZJ)?MDK+aH#? z@oa}Ht#fn5wo@NF{D~>=Evd<`AJFBHzX6o8at-6|PcOo=|F{kRu4$>SmLo_*@9|Ej zOpBq3%rWK?`JAnuB1|L#GWK@4xhCh82dk3*9v6NIBwid%J@$tfDW0DKT4$^?*7y~- zrpn&s+u&alc!Gj*3}Abz5?r6X8AKBK9R9x-GFZL=Nf^Y z$%=;IOfM3fBBZAkz&<0@I$upzB1Ec8J3A_4O5s0wpylJ+g1RTYg*wy14}C1b{e(-q zGM?A$7Z0yDB$ud8fM^v)pEA>5kCnnT_)Cx@d0)NhKR!TLc#uRNgk4?y zp~*af9*%+W%8_CxAG6~(%EalCTSP=oTI`}RGh`nHh=ef zgnF;rz&hT0yD#@YR+0bOxSCyXoB4#e(w5mwHeZU7g;6ldoVZVjGCg`2`|GLw+T5>d z9e>L;X=Y{;zKe+JCq*s|wpO74fKJSEiwc}`EH*zbc*&4tB_6G`8dc(8U&3-vTjdid zH>EgQZ(*N*mcLd8oes1pI*AVD?DFvPcc}eufJUa8A#NLx@YyCNEVy3$9<;NX_u3Xc z`mVKf2{x1JW{X=Qb~~H5t=8?7veX@D#^$?~U#h+t0$<|m)E1_Whc^m`lQa2Z57{{a zNvu!&lB%h+7{UK+Lt;yMnKni&d`MB>ZkL=t&8+eBHFI7YCjEVA9%8=^8SnX5u|a66 zyS-TDvp{0Y}`o-1hi(6`g1D# zP2zGIw>IQUj z9VRX>hebsji9?mDlt#&IJy3%Dq_~@nA!(m`c4Rn%NB5k5are*9FIN2@(srF&Qdx{7 zNk!Mdo@1lhThyq~-iZt0dMVWbjGDvgV`Pqyk6I;4!vHz8_ji)6fhe~B`jysC1b=*G zF`Qg-dFQ&9>!lcH*swD-Sibm<^7L@2Gna?`G3wW`AYI~f%#jUg)RLv8T+5|d)#;D2 z7&Neum-(tWsFhg%U=3;nc88(kna(G;g{v_!y9YzxGX38*K_1%n8-HH272EHKU#+ 
zIibV5HJgMZ2r3BYdppDYadg96PA-8JfrSj7mRZ1gRm$nx4WZ8`c~b(F?b%d+$(mS7 z;YX8|)}ivWkzzoCcO+id@cjV!eeh==0feF~7e~tB?0LYNMkz!V#1#HUuA+Cx=V+?3 zj{WJLcV9gQr#hK7Q3s^tG>xOdOdF+wt5$A_^Y@ML-~BPeRMz==g_J8r#)be$`2_KR z3~N==;T(e>Sc8w5Sjo7G9m1!rtd1pU#pFn5dlSw*y@8zlF)xy`)J0gcU;1adp->Gb zJ)ZPlX?^wdxH(T)i$$MewbI>;GF2)c_X>j(;kEW-MZCRCToqkYQ|!gDs{q|``X_sx z^q6fbGJOyZkZATpDLM)c&;KSsME`TpLx985Hz7P<@wL~ra_GY1m@BOsc+}6-L*F-S zuZpx-9l(;J-=g{ynwQU2qV=J|f5gWBNU;`Y$X`BG(V|SRJnL}!o7H^j-irPRsFRt( zrjBRIwEN<c*xdo|7A&v{ecat}(MyFQc0`@g&g1Y?dDe4`%AEz9d{yJg z`j@iQ*l4%~`wep@1gj0Ca=-X(x_JmvSzSZF$%I8%0nUua>&W;n(oTw+)`4a^8NkxC zlN;3;csRp=~h4{!jRjo&SLN=?ftr<#I z6)3#QrPgZ(DO3KHAqm(5r)A**4;Mr%M&GId5vZM+Hshwf>fuh=prGIW%G*P z*06&Y@8H9Wjer9(SF-rE{D9lCXpvf}>YrqwQ%KeJ1MEYs%SI2g)v)L)b7*uImCO&P z^SAD0XWlPJ1m4*q#cMa_buxV(1<9PH&eDp zHPZ#HyN~BA0C3WW$7_7F*$bD3LoH8)1}m@93YMl<461QF9#)D7S{FhvhIZsTjJqhV z?CB{?LMd8yc)S`n!UarS?AR9yxNlBu7n}YJmiO#z%YQc(h-)Vr+(ih!K&kC#^^HIn zA%!U}g)S0DA){&aLwoJ40w0o#6hri%Xo77{h0MQ2+@b5FXmv1Lw}-yO53DpRpROBi zQ=@0D4EG6H#Dt*z`3XQ5qCTh}ZS;^rsDmc=OEOH78yyYluZ<4Xk`Bb!)r?$0v2J*X zo)ry_dqc+s^jklQ51>^m+y1zc1Z94(|YWlxpJBwTlR2R^8(WNG5qS5Dfe}5Cq?D_!e zw$| ze3Vx*f$1;YL?GakD;x9O!?SUj%{PM?kRDYZpD($-%>P$={VQ75make(I|CRI@0j@or z1dGsbuv9(Yi70!b}~{wl@ka&eC~Oj!yP7J`G} zo`}CafG)slcQg}Rk}#XMzL&R~&e@lHp-%N}AG{;=Y9sJbNN_W}&UG_UP%wKD! 
zD4lDjE+?wde17wh)-`W9*3E^*<`8+Gx>2d-I{~}D;8oJuJ(PB;_K9(^GqAIL05sRE z>sI}q%VSJ1u3RzyS7ucDhFxLY$Llu9&;S1U@--{`i8Mv!V!v4M&BvTjp%&#m&*`tG zrmd6;scHl@DY7wy27~+GJodkXs?kQ8JyJP4p(~ny3$&$KQF^%V^&p#Q;~{pxd@m1J zKv9mqNHb`cpezXib6*N7LxZ?Wl3!qw2IGY+glR3$V4{m=o8Ez<4E>MeuVJSA=dceY z${CA_SfxRcV#G;Jp|ZC?yn@q-{-Z0OuDBcbr6>r~>u}e+1g*Y)-0)_93^Ga?;M52w zLEqy#Uk)O?Lm#iVa7Kn(E#M~l_5-w0cca&?mfdm0{llJn?|e_|hh0Md|N7@FiNW2%B=%*3SxIC1|hDq?PW?3q(efj3JE^OQn< zL(5zJ(^o6!R~+~QX{i?8Be3pMm0DS&U~62)R2w>c^mBAV3k7-Te+j5?)7PWKt^Y#o z9^KvrtI!v@ejgo4VT|Q>-~1p$#whghI#j7Tlq;srBIL{gqVcbi3IjdCSJ_{^26i0bKtn5~P?;KK^+<3{b7LvrHA5 zFyYCnN(2~{y(fa2;QL&mqT55&1|tJ*$dn}S?{{;k`;V`1gp~Yd(5k~VZ(nsFsyW(#nw`mM3vZ7c186^gY z%UdQ-F2?KZE?B_dDR~~}Rgn=v--J4xl!>~D;$5rNH=>L(eD9?rFr#|}TeoT)(fNXB ztHCih>-W8Eb!9#=;^U45reB5k<9)OjCOYc`gS89rNS(Z1X!ZKqF6RuvVsxomG8(8; zGf;^YoI(*ojrr)@`7rB+-4yB)bcKczdyYaEhPfUsf&81=;wl02SCx3FFG0mqs{Z45 zwo1#*Yi=pa&Quu;qVW_|3~^@Ljg%~NU&;hZ(YLCVCkry}BvIa&7Rzy}#=#n-@Hk#wX__~xvsO*b2GH-&zQ{U{U7@_NwMg5%_00|kq?3x1fwK0dEtzzS@Yo^ zzJxAVfX8v*djF-mWUn_E3GG}8A1Oi!f{p}}FdU#=6IRPJMbM-U-xPIw>l20p(Q`WO z_JA-FJHJF3PYBDpBD4?^ujqZ+mIfce`)cF(;BZ?)}@>27a8nQT#<&fFQ zcc(BObevfA**0Kr{3`PXN6D0K5tE9o6#sD+F#t}jl_Ml z(tLaR<$Ar)n4bvgIQpCr5Q*_wt6wIK57W>buNiD!N`zclT(8UXh@La*K1DC!OxNjlIj!E0XQ^T>(;-gbzA6mN@-YY zr@u~~tAn1xPU-dtu?f*Hpn3gFj=hw;+fR^ot2ATXICndmoK+<@zRlx-0h~1V)nC#J zI2g?#Y`9wQcDQ2z3qljf5~e7*=X7wV5enG_*OdtUwPFyb-hzi>MSy5ndEPi%3JpBIwrYg=l2p zh{bqbVHZk8*g<(LlduSabYAL|Yk!Kjf^%u!>7|TH#O}5|7wZXv1e-7Ie+H7E%dPW` zNUB9{YQ>$Nx?)#U=SXv7A!xC%A?KDI!jWI+ zlrCcsS-&o`oNHLp!=nAPq8>XjR2OvQHiy6P0#01rM`6M?4p##|l^u=hzbg}RDR5`> zlea=2h&-XGsiE$jVtedFYkh_RA_XpGqKm>9Ce&*j;P(Q6Tlwvo7xh}|iVnRm_E=V6FFNzTQV z!}xMR^^3Uz=1nnUt$GcAw!|v^=&_y;z-_LcsraJD;D;HFw%cOOe+?XG$F0OjM=_B~ zQJ{GFZF_(E;lm_?zDBI;ez^!DezON(CO2^O&~k0~hF4>4kgoBEQLzM<>%Ck{ znD|)w6t&-+5(kCW>_*8xBM=BwafmnP!I0+J8M0s9`1!#lM{$xHlij*)J8kw>p5fH( za#f)F(x(eSvjADQ;C(7`IoapYrMyTY0zyBUYhnp@6$48-QjplzkP0Cf3s++=HJITw zFfX5P^KrM|`vdq28%BFZlY>B?fSS;7sO$xH_sMxj7kk(9UX89UiJ3y2Mum^R5XTS~ 
zTbuh1A{Qb_E_rS)k@B9*ZlTX1rxr2v-|hHF5bylRA?}u#GhRLr{er^GFcvQ=s(NOH zj2pObID}eUgaT?tn?mg?R_2`mw>=z;IHz;%SwoRS9%DI1jD1eU90rdjmnGoHs?gLH zmX6v(6&y|$%TM)^E8NYw?Lzy97}aHiy7$tSckjX!QmG(eH}m2x$>%4$A=&9 zaWLilRtOe*{jxHtQEQ)IJnM_!A~BGhkL@bYv|1F3zBW2vti&jg z8EGY;?&7nsMk^#ix{U=D34{o#=^I3{k8lRaS@8S%kf#PH_Cnz_2f0rnEGB1f>DqUK zFMUd33RCml5F}dXC&(NGe%zryt~-CqY$LdgiGVXqfM6iRb<4T;9P_+C42#e#RLHQp z|C#>D5uU*Wbc43ar-nK+E#tn%tR}KP2B(5<7y0XN8|JHl231ERVnHdSe8D6UO`pvV zdyoFUDGc~fhY7lv=P(PtKO&|I!9JJE!n=O2&7Z+z60YtoaIbMb({nfvB?5MzE?@SV zD|F)Ci{2TU&wUOA5#0tv-V>3Dh<;I}VT_9nr?ZN&{^K=I%f_k158+=D4vm^*{$8a- z>9LbQm;qOXLuxYYIwrR%L8*t$Gg(D#*nB8I zw7(wNgFFlgrJ-4zhDKH?oHTCIl{-9vu0xnrhEbu}<*_a7l}HB;gFs_j@#l3PZ{pSr z*ONS^nyZ!@Bs>X(3DgnAIT$sqCVpMuQwMR86TN^KOQAR;>&&Kt0rX>%m{1%w0ZA#o ziFr??F~z9NeM7Kd)0v3WV`(G=z7Ri!;h4sA=OXS|{@&t~sOy%C|F_fArNh>6*q2^d zV)-}M-f9;LSa^4JGwz1f4KwhWW^(NJP_;zN5&l+a*2QI9{xSjLI4xx9IQXjW9v zGYnF`-+rCAYIjD}eLHPxCp%V?b~r_cqYZ5#^m#;FH-Rq^&=>CgVeN;#?GvpZBA#~( zBdDxeHQswleqJV??5nljHHsBy-4`G2Nj{_Sw20nMKXzz5ScvuX07wJD&u=s{9h_JV z=H{~&wQk#SQ9Iuufv%VsIHuGQ$V+a7wCu$>ELa58jsEd8ZsVq(nYmTFcPH#ox--Rcqf?2w2^qLg6- z;kDw_EM!vCf?BFEgq}4uA&vGKlL$WMlqp*8VWIjWJeR`f1{&w^XfoqCQL>%|ubh%2 z;LW7mw+WAryc~@3oa=Mg(~U#q{%;mQEd&}fV%{y(P_%k!ZMlnx(i<|25hjj9z$u0L z(RBkh;v1&TNUH6SpR|fhM<@wP?#}6|5Zd(L7TpkygvfTsqJ-dWyp$1*N?_f!f%tOH zqk6Pq$Vd9BTn%`pD?PRT-p!6BycvA%>hWnz)N^{enA`=t#mDsSqA@Oc?yJazyGKUr z6{vvD!P{w%9^r4^)>c2Qk-o;ddEV2g_lCbqms zUoioxW4dQ#WtbRi<7CclEQJ}F^oK1kVW>r!yB#&n5et}du)B@rqmJN5vC#P71jY63 zyhsfs#i}YYb>rDLRjTIB26cEj(OpWoc+lMb@TV3u56q$--=JegMkl|go}Iwmz{jxr zjs>)fK*)rfABzEqfJa9Yu%%aX!rS;Gi0Xfp&4Cv-$Gr5|dJJMQL6t5rvWHSUPrLD#wz$%+Bctl*`CKT?cuths4)Ah_~7B_Ve)9aw>B`AGIm- z69yk!Qz(r%FynTC>cDB-sXo;bR(>QyX?$rWMm;>#k4Bh*K)$jG#wUd>y);b`V2z56dbGwH!$KS34eNdlKpo! 
zjimYKOc=fR`N6kMP&X(|j}*#}7x!=U(ONPPcYE6*tBv&bn3i1ol?3_zggnEtB1r7h zw7HKrd(LYzFMeBK?OjhI)n3a_|3Ql*CP>IWnYyo;mlB3A>fQ`OU*XX%B4MDXp#8ja3wPwsK}_ zW^y^@lvISb?mftVPARn`y%spka?s%rFqzN_s+s}=EDRCp5CS0NW>MeB<;~G5F_M`H zSW zcXwKIlzd9J)_cn!jJt3YZ-RV{D<=qgmLx{dhH#cQ^3!XgdMScO9O=*C#oo#{F}$#o zJ+yr8)l7}})>l;=2q?4{G%Bv!f0{Yet{|TJmM4#{58b1K##Ch<_6-=jzPM$5O=uh< zu>xzTqs`0~oVzB{#kN6Z47X$jZEFkK6-zIaiFWL*H6li(dmlLe z>XpwQFCn)BrYqioL;owwMhvM>w{bOYtm0_Baer}H#1{#P@CKi1ljOR>=&YVKc2TRN zJ^1AIwm)o6PH7 z9`ss`4Z9Mbzv$OJf>=6XT(&1vCTPU(jb*DxBxEsrSqs`1;wfbpS4cmvQ=>59RpiW| z-?5Vx%ZPbD%zKC?iKoJn!g?lqm!;8`aRoQj zhrAN`(I$`(m|%2BQ%E9G_Bh%yooYjoziANOjN|aayUUoyPdXGFo-?8{`tj`k@M|*5 zdU&X?8xhRK9lz*L?+`*o=#ela9i@SW2&(0ge$C|X!Jb7JuMB>itj16Mu-~T?wKh)l z0!q1&9m*|OpFkhN!lu(~Ss|3bQ{1UprQ~a(@T-cZpNCyUp$$!;#5*c;bY0LVF}joSV+tbhP2;0_ z5EDL{iCkmIus1^W+b{(g|H{0VIp5@Scyy#VCsm=|`NNb>9qqvoo6xx*B&_b{#Elr~ zs%$6Ljc%o{RiQ|nMMczBgb3FuynF_Efs2aqs-6HN=_YkY+7HCD|` zOLfKZNCnc3L_f5?zR%5?K8QulJbd)`{W9#6x#G$y7ifZ8eun1dL`yX=zWka7tSi_1 za84Pe8qAn0>y{{mRh|_fLk5I0Ct87I9};S-S#i+yi&TLYM;9~Q|FMz>Z%b3ZhOiH# z=I0sw^Cn;*T%+R-82m4fJJ2@7(Af*VBK;3#(?5ybI9&neh!6Wa_5aXyXduyIg4;!V z5b?|9e-vu}C%ScA1UTk%2Boh4pPx|o;DRK^9;Ya5`Tyq+f=g-kj9V1?{)e1JqL%gq zs5a>LZ!BKjfD~o?9~;#a+7S#6lWzzA_pgQ#!SnP36xV=|%Y@xhl4+w->x{mL3}D97&8`+KTE$U-(Uk`P88t|o_u0&o@r@xs=zO@$ zK3Hux8i=Q01K?&7G6tc-w|Bo0E>D46R1tsxmYaRi)4*B@F!T@0KrV=&ja&8?=&_cz zzXQy?5k1}K2sE`+mm93T_?}T0NUpMQioN$Y=Q{u;bpx`yo2#|Jt;O)!1UMu>;JyNc zQv&dEbSqiVnkl?OlBKpp`loaP%y0^D(Mjs-Ff)iEtOtPpPXXwWcV>;8Rd3;Cu1SEE ziSBi|PLk`^z=DrJPo%FT78e+5r1eFY)0odLy*4F8)-B^ zdY*+!=}rCGSWk-69FVbRzlsuA>ymMCQ$8Pp=RwPBJ(>tsxl)-{$$`PtIxutU&we${ zH_v>5Y6=n&c0d|JlZtZt`-`x(4Qea>p5XFJ5*s9!VAM@Qm9zsN;Gw=DBz@YU_Xko4olL)h>p@TcE+>NMn8dp&lCHF9L=y)B$=+ z`u6IW3uebGi;bXAlJ9#YGz{o;{@^Sj_tKxuGUI3|0rZ0NFM-|Ww|a$5ED-{Di|R^E zU%1S$%CvR9n$zc2N1NPkf9A;^WpqgKL1u&*LbdQkPTS+Q3WLP-LfN|H4h?IuLQnPSWX?xY@ ze&d_hF(;tYqn9Nar-mSOA;H>8BxqeQzq`Yxj&9mi>Fn&R&ww|bJ?1i=1G52|6MqIs z%b5XrtQJ746Q!QQm&hQW!E+3LA$ 
zwogVei&h#nbxd*!_rKxFl0YK6CVev`^UFLACEYC#F3e?BqaQ82T1-Lp-I61afLv|R zf@cz5aY@F5u;zCj_hu+!S%R%pA4!A6z4IZuAC*xk{Mm~$@#4-D;^09_Ulriq&g!#K zUHwtxeZBv+vqDk&EM8`Uk_-VBiPz(GzB^X)`sdaMi9TA~;be?gkeKdrkO$WX^;YOZ$^#HF_UEEoJCp54b5jR=+%$>G27O9Cm z0XJ&!Z`?{GBs8nj<6Hw_+gGoL#Y?T)JXf6`%b0)qVN(gLold`9@ zPhq?(A4Sot%Jse;mCKH%lhYoNLaUqiD1<$qli#B!<$@z8io~87vE_GZb_JI#vqyv4 zHZ|_>@z_s2Q^#;VSJz3CR?kXR+l;K;??XO@@94q#hf9StgF+|v*)BUu)(jkB)Ae@R zUmPWPYwYJZvn!&*55WPY?WolTnDm8xv}0t5{i^>R{&&0|P(!A}0s(tNcVL^I;JW2*QQ z>j5#eBgrCO&pp0&J~GaDvFkq{@-R@qIF7X`j7RCRaHEaQXse4dK%_0D2HQBN>%frd z{MEN}5!e*0+gGPrDpyhOYgRB+juShgyD9rs{Dd|a$IiK1WG48U-C*qM*sM> z3HS$>i6Q^ZJmfz~7sT4_;OJelF04PjOlVoFY+7Xn&nfkAscC6174uR_F;b%XYQUH4 zVJ}!CpW=w&p$lfi<3j=ak+a)3W@Ss1*{h&3c=!E$dzbr+Mo5Zn3+1xvSWaJPFds&lX~8a%AtwIJ5x}J zgwVP?+LP9QpakU{3IYZZXFVojX88liX4R9pyZ~Z}#e}DS<0f*Q3?z~g zjG6TXmQ=)ZO(mJor@%nH<8PNMhrFx2YkN0-*^FkY&6x!BwP#8_j(ej5 zS_;RX77#R468Q8u;2eJUkh4TrHkaN&Mk@gME#dGu-_|nLEhSo{F<}mD2_eN~BmTOKO2AL7@~;$~bf~$*3#1r|0I!y~~E1nMoCRn-ng09O3Rb*`r_s z`3?*%D(eV~5rwxmd-1X5UNn9Lirp%p zC{~L4jDm;|nt+QEjB@NyaLGLu#w}$L(}lWu^-sF+!vr`n9Nlj#|G6O~B#>t4*{S*7 z)2!f{Nrol*8JTNZA=-;WSkfWxQubW~DYnLfpS4l5^w-hlxPfaQ)a1> zOhk+Dz=#aTkQE7C9jx6W>9g-~A(Uo&R$^1z-IztuG+#uzF}8lwwBZe&mHuCjA!4M&6G^8!R)J#}=R6LEQ2p@fR1h9CMe(Bu zc_S#$?2FGZ2-zW42UG$b3h>+w(4N@#f*Ztsr#3S*wlXKrV zKF9GsWcK{IKKsV3$Xecf_RS*^0-}k@_1nJ%;4?}u4iffc09UW1&TlKVjqaf5+pk`- z)lLJOtUmZWOpkS*Ie9lo-}Ebx8^Zar@8*9&mCX5MG+tZ>b&A!a5()EJk$SJ)2r94o zxQV9X4w6p8>mj>$ax7Rz9tbRZlJs|-^Q86!5J$@hV!6QGw4FMyj#O0*dp>W-Ip6J7 z?Q-Lqdr-roF1q)z?RUJ~0ADXdt|r)-ms19i^~wP&rTsLr^AYfFX2r4CPk!$5 zI8e5j68Gg`k7=QBmLWL$V1|`k1W3OOdcT_Gg^cYFD9~yC-Ye>_-=}ZiIv&egFL0C{ zKVwbw69g_!8T}rupG;r2(i}^lAHu~jkp?w32Fs@-3(82T2yUF@`4EJ*n)sTVGg|## zr*k8LXrB#Ny7Y~8wU)AWIPg>3^^GNmCMYP7-hbEW(!3Wv_G>}CH_3uGLR;iKq0LM@ zLJ*q@pVHqB2O2lFf92J^M0257Nsx4?2VCq2bI�DkPx@tX;MD&J1DpQmy8@fK`^4 zo8Xhn#;+(y+h?RN!Dn9@sW9=~I679*B}AN?x5vWhd%D4$G{EKZ2`6Mh&GVkFH6ZBj z=eX3&UwLz)X)$E5-~nPN><4Sqq39bJrTp4cG#qg9*t0yA;ybCFg-p2N!6G!x7Zq?N 
zuZ-Q?v%F*3X(bogw!Y|ensU>12zbkPBcFoD-*>jEnX*C_4{uIi+;r^?PQnmmM&OSA z!#Q#RP}M3qX#}ddgIk_iwUdyqnn*Zj66LscLh`!Z4S}< z>A^6F6l-!4-Q)bhif*_$Rx9-1+f4C#wT-^ks4~`QvN$O!K#GOQGf{Fm zL;|-r1kzctkaS;Bndan2SCPhv9GBKZeOD0h7*bO=ByBTPN^WAsY0Bs+rJf-hQfqqi znxbyE)xwP-c=~-h!dvzYzI#XTl7x5(eA69q$$kJ+8{_aF zYv@0_@#_vKKHcflygO{0UM=tDIci|&%KFAMV*gGeV#8Bs9P{BQIZj?fD+45Q-f3jROqHIzy{3dnp;R;PgUrl_D@>s+LG#C9Je-6(CY zF5U(R1v-??`aoIkx`uhy;p(@U|cjJpCqZTJ2LX^9lwFKLvOP8c=+>UG&3`4j^Q3CX%Pr%-A1QqLJZw*KPYL(y=CMR89l@=; zS*Ns(uI@)IE-z%Su<-(VD}%DzH0Gr6_q;s?_6P0W+gHETRHXr&@qbSZc}VOz1{DPa z1)uP4ZHE;Ki-XR%Lh0e%r=GzFiP8;SLxBA_s=cR4x(iV`GRC z-22{;TGlQXs+gxDa?d0m%tB*$5F8`s@j@Mfv%dlh#d08>DQLA#iGhHY$1ku!F4 z2p{LP1ee32omZV?afOhY;_ygxw1qDdfKr=?KuRFXwYpoozIVfBCc1f2K`xbYX`CPXV)7c zXZGWVGG_t?*J-WP3S1&x6ScgBAL}&j)3U{z<5}^(ZR0>k1r38k%5o7MzQQc<=N;G& z+-3v(F+#=k&F=suA23{yyNycoJOpjDd+spWgfgDs;{vNS4YbA}3PzQ;P%d0`bZ{-; zy{!&%X+n)y{7l7@LtWgUiKD8uD=))KQbxKlktuV;Tc|(RBR(RCQfNlG$|VDTUw@76&}SBi(+gTb*wuMGp*JbIbjgI2rHNu8?f$4JGH5;*3m{SL*nkuQxVQh+q#9{!X}|GveNiJB+dP zDG!?d^lv@QJ3eOAseNudU-wiV8h*Ro)Fu=NpkSv zSFY0vB~$JiCp~rI1r4AFDj0U{obiiKOONo zh@qlB;hXL(yhNCHg5ok@Tb+t9{=)6C!l}>kht~a6^eR))wo<*w86gwLpmTlgusL8Nndb2vSnnev0y z0Hq*cqom{%5v%y$6>fw(tXaqC@VM`Z@!do}ZT)<6%Z|mWWy<`sxQ(XA76E_uL|WOe zqKJ=I#C(t1bw*yO^h+HtPy5>cdHbS=;QlTCp8a{#!%ZDWuZQI>a;4Qkq+yYJi4ABW zNT%!mDsfU=T(cyu=D#;5Xus5Qx9mYh243m)1KB^Rff8$=;*qHr6K~6SzsMgeW+S{h zw*O7tqY>XHlxoCeUl}l}eE$j^Ycf)-&TNzLiFlwa;=ktpYps`&ZY2dEM6l725v)#L zc4c1RnDY|MJ8%H*n5M(R$LR!X|&l~*+b7>q}AOBx-Y7J8xubu z$HMZ|lI2|kaD+HNKOB!*;qH(->W_Bz!7z1C)HyL;ONC@k0SP>*NRT<4M_NzUAyl%$ z^(h5y1|$|<%w@~$00sFB8P+{J6_ofn%x&6etx@r?GmScwKsme*dfQYn=vwom_NK&k zKIO9o4qsMX?UW$vG5Oc=K^+b+9tHmgbN1JRHP`+5_WQ5Z$4@U@twwm?e0m9ZV1_Zo#dNJ9oq+aL5z z;GG(}y}H8#dsiTUJ=b?_j`uoJjnYzQjB03}9Z?})FkBynU=KkT3V0(KQxbQ)GJZT%6<`npe)RHfy-!llbwJ=Ek=5Zy-il{nEyL zg7X&uW5MtN%r_`>p>Xlw@mX8T@ACuMNK*c36tm6x=3kr(OBNvH48*C{ zY5!YMF(0_}t$XlHN}NvRUs?ET&kP6Ck4FDxO9qzLH60XDj+Vu_57$*>(A0h|LPIU; zJPLvi|2&lNIae^9okN}5gj!vC4h3#VlH!3{1c-j;k 
ztX%H=vNFDr)I}Iw`Gk9^GZOboUjgZ3HP?RTn=R!TQRg;glspz%r+7K5@oaW2=9AJf zE#r9=0bj6o;^&=ZI_7W|95ZRrget(8&U|sf?*H(5cmDcww$bOgDGiytUWQGxwOZi# zSUL1a_6u}3sp|6Nb=L$Wmbjd>JK^QxTJo;+a;I_H);R&`o~db7crh>?PdB-L()GZf z^_}tcVUI#`J?Sm8XwIgEW4`?i)9?4)o3r2j8$SNy2nE)QIF*5Ubp{C6k-%7iENmbi z&8={kvMH9V5Ug&iHKg;5XO{YhZon+oYGF496xo-L>y@v z-|*JoSzA1wnh~?uS_a14d3)ntIPUKXF~WWBwU5WSh*{>P*v2svtW@x2T>`GCR6w~NM_}(wf zgEo829KxiBgns9v=`)C|qTJ|{ck!zKfXc6QKsMyB+yHb=sn2&B8Nsa>7C~S29;~-e z5}UgGXmkp;m~UYNrla?2lRGS%`chTTFBcA zS4)wl#+&(jOav1O)W-zaC9`h6lNtF@pNro*{IjArMP z0abFBI2o0LxJV}ZX^)4qHyTs%-!8R(|76F{l~7~z^@j$mgHqHyJ^xLX^xKWw1mdrT z|3wbNLT-aMO6<+jzxcY!9CH=1Wi1|7di~K&W2YbF*rrR@#)Sg|;icKvS%0u|eLv%O zbS>cg>$z8G_E;`;u&xw`E}!_7nrm|VKA`CWKqqMd`6*znMxM5K)9GW03zK>QQ8j61 zaQBVQEgJTdH-(j7fN`#P^}gObnJ3-FmLlcX)cBGypEHwGU0UsELwQ7p-EE(DWA7um z{@hQ-cRLkSy4E-ko%TAU?IcEh7y0t;;XSKDB6TN(M~OZ9(&K#wO~GaX4%|}Rg)Aw#G>$X`wjtz^J(dor~`tf|@?94UN$uVF+ zoOoY!tjaAeKwhaf>;3#tLNZ1f{jxBCKqlGXHLhS&5O8jSA$X6;eX%RsFz*aq38~mQ z=`y#CJYMBZG5B~(M)$>&FRvWlANHASuyAJY$^1ztP0qhq*E;-m-ts4dNq*+9^ox$U zSRO;2fHC(j!!|D`bmhH>GfVmQ`+FgRJ13FNUYpy3oEdAp+6I5vwnh$n3)62Vt2~so zt4~|Y{2~#r>UIs~UpznZnw+H!_XEhqu*2KaD{wkU^7GAV)MRw{HKrIXFWU*E*{;zH zanwESS+WA6e2Jp~;U!6rtb<0=DqQzT6vE;6=CA<1vYk#KJg}Oz(ZWlwTziuq(dFVd zCxjx8-h`4<1+0cs;APk%0K7o9{Ng43(N&g}o4Dx{M9p**E7&L!!TycIcM*+rc@9h&;%+-RSviYj+h(AtE-Mgm>8_Byt@e+Ww zvL31?P|%h;d7?tjoq)llfSu4R&wu@lATidz>#4yh2hun|V=%i34A#Z>`6mNJ(>T~_ zWrS`Pur~Y)D(a}N*U6{&(GE5}cy&2M!uN9UcM(+hXL0xOFcsje!1uygd26Oj^hUhV zgE;*aJXxC~DLFMfd&iTj0R-)Jk1*~9>;lvx3e0f|zuCc(Iikc}oLU(^#(DGJe1z3RC1vSAi`Lb~( zWPy(t|CWn}ZsLh~Epr_zEB0Bhb^tKS7NAh$lkM^Qy}UXO%5piGXb5ZNTuaalAw4HW zW%1Zp~m2 zQKT*98a3fP_+i^vZ^ePpTuAwd3=pmU6;1E^R6EDoqj&@3{Wevz-aa9EAa63O@f)gT zY0$@Ix#g$-g&Dv_{CVB4%|Ul@x0k-F8xqHBv=fApK$pIp^?2hlGx>bADn&D0i~R65 zzB)D8iyFEiz*7nN1%lpgHE#ZR2E1bQ5zcCdY8RTC=u-WwN{kX7oO|OPx5rnq-tg%H zp}TN-Cz>iX?Z`+bXCQ07w|;F@klQZHK&qroV_F=b9^xU$W$;qMFbp&3D8Lc-t5i$nwpOXPrsa~S>-p#}@~x(~$34O#Jj zbcrZ`DFOg;N94uCi5h~{^0uPn^37KI4UTUzwv>|{WxD&C 
zD3g$YaK7~VXf}$-Yw^WHYiE|9hRMT1K%7Lu*~n{QkV7wwB zDt#>pC1$X(u+7kV14spei@Y7&I4AmL4`6IN@O&tX?(b-{gsFOtH9$gR*ZPn9j^cEQ zmX9Mw!N}KKpCt!+Pf`3`u47?vgPe9N5t=QUk`iVgq;&8?@1c4Ge&Ev+iA90`5-G_i zIF#-%u%m_0ilS@mMvv|6|ME&%LE?r{$kS`vPu}73#!SiFW%ET5s-zh3flLKCewLIZ z{j37`f5OSGR&1RIlQ{?zyZ6y&=udvts-)oA_J@}ZVZCr$b-POX1bDVWpdY(z8Miv8zeJ_b9-D>RR9P63(y!MV? zNMs-3SyD`cAY5VOY=gfx24QIG@=~|Xo4)V<88uH%U;y`=?oH5x-t^n|)X&lwz^JS* z0EM&GQ|ByVp9p3en{>N_3eHvxrc_S;0w>BkfO$ecKP%fmd3}<$#DZy|j|IYkTo5yeSQWuhB*Lot@$i)J}U{Nqb{&PilMx^$*|vCf9EgT3NQzPU)j11 zI!s*r%J692r`eKiRF;%Ms9b*kxb*Dh@b2JAOPcgKaFmznp;_)#E!%&x^C3$#QUFlh=^=?nT1(T?9 z%p46ZxfNjs4Bt5HRjZM;v4yD~m!F;LdiH%SyH;sm#(|N>0A?c?U9<-z8W)3rf3%4} zc7w^o1cU@9vCj@Ay&{9x$o_mwH@KStYdL<7*qEy~+k3X{htJc1KnzaG9_`#PK~~X; zEEZ9R1e_9e~(|K^A#jFT3}qD3d_AZf#{2(+^?Q7;bh;t?A4+ayaD!6Kyo5gLSNR+?RIXg?dHtPL z>UbXN;DeMqUEb&04m?cq{5`hduABSm13b~d=!Zd6;I3qJ#WzfxDhpinue_SVB1n>? zcm23v!){y=8`hWpelWz#dPO+FXuUIESWEfRsrqzUzA+j7=$z-?@3wP7Qs1$;6*UEM z!HY8)rZFW;0R@s6IjRqbhX;aXEwkY<7?KT}$bwNmkgoaHR$uqEh0pt~$3ukoSJ7kj zTNfSUZy$`!{tFBi20W3r=lnxzX)f!J2RVtip5vHQ{sHMy>fUvZ7#oWqgKJMlX&NQM z!bEVlIYVjw-+|7WDp*ebEAW9nJgogR)-GXy(D+eh`z9*boP=&RGOJ zAHz$A$#S{DtWzKsC%_w5_80JZ!gli&PSlJBVLOh|pj%>vUuv4qzSi&jso6PJ*;IK! 
zo5R!ck^j)|MDm`)(*o}VP{`KITeszRF3iD~Xgn5vZ%Y-b+#CPzLFG!t8wG|{ZS{r> z1WC+TwZFdgOB)$FeMD{W{bcAGZ@H0cWH6+B3jQ?*- z$ZIAN@ZGo@aZ%*htYz{hz4ry$E3hDIJ&NwYZ5=T3NrBZpOS$<2)br1AgZkm;F?*fB zX^dLBJ!=P|dV}Ff?X9?MMhD9vb(1Rj3-$JYUt%KL7lxNVBHws9`Kl+WJmA<4QF_GQ z8{FWg7z<%~s=q0z!8={%{Mb3zqSc&^SLaWXpBwHMYiat1K>?S{^BrylA1+@8e|TC9!v|reY|{C zgbxUejt5N;&V{eJUnSksioKtTD66bwr54DY@EFa5^$9`XA0If=mY@9Qg* zD8_iyMGWt6cPcdFZ}jb3aH}0oW(m6>YA{ zK*87jKHe(Z{u|Zk0_aM>b1`er{|4V=-?YT4@ecI^Hq+9bu;mwU5O}l=fCfTo#L!6s zRmDuq4b;Px6CC(iutLf`P@LMAud?BE!uWFC$O!<44s|{PoFQQUuK>t1(cKJ~dyE#u z9_OvfG1~-HF9O(xIZ>0@yGa}hW&nz3H}#K$KJk|-yO9j!hUYbu5KAWjK&G2AC7|KJ z=r?-^6Y@Eb$)=<{ttwqeM}Jr?Y*7th_t2+5A_%DS_%C^XrKE2J^HyfIfFa)3vuL@$ zUk#S%qgNzLe|nF z3z?Ho6sG_ewFUSs5%&} zQw)}g-+b)5WRGl)6(G=-sTMIw3FAyLB)TA-^TunyqqcUkK!HN}pE9kgK7}wX(!ks4 z53Id~McE9>o(fzx(IT@cpuv z07p7`^~skt-mnMZoYsIt4>`LD5N5OaVs$tE`@*4|Tw%%}a0EQ4G=RPOsmXJr45<4c z$l^*x_3i?`QA1|{iADK7k?%ji4hI>@29@C=(M&gg%JG-$#JNX_*NUr74U*ie83+6q zaW=GxeaCl>8*~|L5cf*8lhOgM&b&+3vve0}Xxioye|443vo$2UdwEh^eR}iDMK?Ex z{^Sh!Ze^a4`TF zBX!8BS36AX+kf*R%ajy0nePq%YnC|suay=Uf);Xlwl?XO(!7uKhn114zMIzX6&XOY zEU0900Lhvyj)~UGjD7N7uoaJ*xc5j&hxXHfvRFm|cH3@#v*1GIV5ERwjTdjfNx3xp z5zk$q(T@wngh)^709cI8=@!*P)qauPnZoz+fu973M~gZ_nTq>DYUv$mWC4KGE5K?k zVcQoQWtCkyW=`pri^{^#6_H&lPIT5|-bVgk#7J@2o^J$e&dPLXev|?PWPo4wf7!6Wk5qFIN*$7nVpsuCz$-%TxM2-kKok%AH(+T> z%yrtqLX8fR$m1;79B>f(#lEY2YE|TJhiFU0+>17am7ElO$x;`>u_PlI(c9}(N977pnASks3eTL_WrBp4M{#VVZ_w%XU+O@Hl zQoG7^0pZ<}qSS}}8v9-;#7z3hG~{EM>`#4(CN8xnu|)P6R5d07##qzekg&L}pEq86 z2k8N3Ey9lD`X2Y#KImN4O08!IFetOwqyzq_5ulVQX*^Crh3T@xG2c6j(lm11F^+-1 z49Feq-6p*SMPAe9)P;Q6e~YvB+8hClvQ%J~5MK>GsdbsF=|4HHz5_K(gr6Mo(*tWR zkMRtGM?GwmWG?YNh`S`W-ouoBHG8d!d3PX!GF6wqF!*nFni2g8sS^aKTa+}Jn@Fs+)PHt8G`o^)QmYy&uV9l zhxg9by(_5=+ETv+@T!OP$C@u^2pCE{QdTkLITCusOoH^7?UX>vJe@&$jMUOb6>HaE z25B?c-vRjt0pA4Q8fx!O>l9kz=~K}FY%+hG`P>yhMZ$-MqmG23=WUIE_Vnga@SiW+ zN;v8rrMMqM9w~Fw+{s=F#itkzByF-s@4%K1K3zQIcXm|+{Vkm1O(ZP5icTPcE{B@O zlgvtGID_O+&BBVde9!=IzjsPK5LMp{+?fO_l`2RV9i9S+G0C;m_CNpQm;Y-Uri@B~ 
zPwt3ph;i;*y+FrFmsr~Lfe$bm`AH*%oVPRFt2n*;Xui`>Y7u4JVQ8b@$#;y)Q7{kk zm#65Be=LT+S)KHsK0Ar1HBOAHT_arvKv)TzRhqBMr}Bu=N5!~&zE<4;Ke?b*z^kPy z@br7DueufDFSyxdC>^{c(E^~2a=iYJPBNKAkI2coB5Ad!Fj{|(WzwZy>tP9z@DY%F zig6X-!VkZ5R**utwGv+<>+i>S{9O#_yoGQLlK;)njZe+HhWY{}PsO=;@wf@&;0@W> zU}+fD^jdc;%P$wrObg0r)1Z&sO1!lSbID%3Xf_J+k;<~YDMxDn8{LGiy5IORZksjz zd3yWpR}>`RNN{`+qY@BE(@e=<&ei}`QSC3aHdc4{BP@guA#Y4y#RKx+jBFHn!lOtU z#BHxvn>-W1C&bVdE?V<-coRwh-}?^*oZoj|w_kwA2g0xm{5>DiujJ~k#R@lnjT%xI+c6^9BJuS2V4Dq*=E=)#Qe9tE1XDF%n|n}(%GELveE6aZf&*d z@sHD4Zq;elk_FVv%iLI}=T79k(!E25vt{e%;^OvWL!+7lUeftSg)&5gf7=6(%kRb9 zuX;UFRko@z3|NBVY5;Pj{5BziBANFvf{ZFiXM}*OY|SxQWG=uK*qQX7J_DNnC|zad zlCI}?+`Y*Htts=Do#0iB7N6dANzahdwN4jU;60LbeV8zblQMf(`v0*297dF0oID2) zOPjcoq-kTRu*miCtpP{1)$m2xy}r)J)y^h`|G9WfTQY_}S63~?zM7gEsp=>{D~WU< zl1xXIs&4RP^TxfJh^G&sukl>cKo`M!jKSM~o~wr0IiFa)w{D+5F-HA}_5Tb2D|0^Q ztjl>3a6~Mday*Q}V>?(WF(A$+-}NCFC^-fybTiSDFS%Yp{C-vsB7b_yYxStEhi5#G zW2djsoIRCsoqqt7h42G?fmX;C?MnuA;tZ*@rd^<-h+f^F;i;0zrpz4{gMX?`RKih1l=Vx0ZV8PTu^qBfsC&82%yJx+r=hJX*%(6@zC!8u_{}v z$mXotfodwT`f#D=0BhX$h=%`deyYjzfQexc@`S8fa}|KKzPOgZbPqgl*Nt@o?$wU# zZB>BRS9Bqv;SR21BSl5ofkFRgHBk7*wHeVBLo_V-*_h#!KOOxG#+-PAvxkvaPhj`~ z5OVku4xfLivu~gPLg^0)jX88)vnzjabl#M@gPjWojF4FH({rN(QvI7f?$i#itlI+! zxi0f3JE?RLeeVA+!a_w-Aqn(T+s=wKnT zW$cq8WVXa_AaoaVo9VB2k9=?C!P0vx8M*aXUORwjMwx=$kof>~N5ZB)0>*3oa1t^+ z&QBTeSi7lR13RULjVXJR2YMLrl$K^vC15KQ?R6$mZMFu$DrICQp%^qZ_^4fhOF&TR z8rnA6BVvWQhxHS(X=k`lr4#Z7%l8hbcVMpLFCNnV%wt1=zkwyp+zq8Tq?s?xSMbf@ z`>Jo`H(a-j1Xef)N~eKpML$|dVbeD)k5f06*!Ei0}L$ItCm=7UHbZ(RgamYU9ix6|MPW{^w zH%gz&hcHe2au@a+7~x&}@(0?YcCH%-9IW>|x#P6nA`7p}IR~_#-9tP)c~wR1m0^>= zw-ZnaF5@KuP=gYdHVg*;OaRr4rnhIGEShZh`!yc4t`z?eYuEtljm#pqO>PD-t((9X zOrld9e-~}3yBrrbFhd#{{+HZ=q$kD&Xsh5hY?(dLfInjAO+8=;Rt=1|fc5R?_`hgo z>;~bXUUq;Qh5h;C=o3S8#^qt*6;5KoRhvb2~E%dsh zaP&&F0Zzv!6iM8Tq^aN(? z+2>W^HK5vGz`E$DXZ+eQS3qXzc?PB7QyHSwBOp?lW5ojfI%#IHi1K5J415H*OH+X! 
zZ9D=qV6Xr;aA|4!N-(1|b2K9h0FvFpBu<PN7JR*-o?b|7wVe`j%L{-b=Uu+u2n1V_ z7#^$+oA2IAuIkD{spBXX0AS}lSJ@w&I0hTiV_qyBk5#@p_o}dNZ<$QQ%N-X7 zpD=Sjw5WNa#dqUIy|dWHdMM-~=(yA?Ze> zPxSbb?}}?3Jz0w|FM6c@&LJetXO|Q%6n8_4#@2+%q{yI~iB03}z|Eh}r53T@KhJWdyyrohN zgfLh#jPDPf-Y?ar>2;YPM^e}?5#OC)=WVLyBxo4?FTf)YT$}lUV0jzmX0_8KPv9K< z#`$3vDIH5O1^3XY5^#2{aM)8fhYDY}0!FLZD&j`e;9@78#gv`jOA_#kZ9w2ulDRvU z9z?Wzo$*Z~$Qv)0>zCcocyi0{lQR^=s?#1hiw!sv2?hqBh1OiHzbSbOY%&LF9hEZ|5Z2K2#>|3i@w z1pb9rk%@^+f`#3;k`O8dr(0+Uht@@o`J_iE#M9-3T^IL9k&4hsw&)uu_QC z%MhIyA~toFuRw9lKeMj$6<}<#^0dN(ftc$e#zLnl2EhRB$%Aw%E$f{He$sm!;BE^s z&Zz2Jt$g1{MBLg`ZC+cX<`($ma_~VV&>dirAil-bYO*?eryNV&&=;KgLSX%@8$`L$ zOX4p4z{kFI=>D>ssJWKzdV=lncv+_W?7>>bXP|+h`w`6* zD?#`;&u&K*4&tsWrrx|0^D*u$^nTZAuOFb!$BY+p&UuP+ip#VND`+1}D#PoR*sW+^&-FK`$Q?$E59Ht`%auUNi_MjN zG}F+q0Y4&x&ql?@ z^X~Rrb6ydH9a4PBw=gV9d}(`CA5X~WCu6;QG?+~yEoFD!yJfOGZEUkFl(CLc`VI>g`UN?>S*6{|=N6bG)@kc`O<6n&P2aFkMbr z3R294^L1$`{>fxVSE_j7D$#h`Cmbs8yX2)L=(vlSoRbZEdkq2`10cw`MI5UGi_-QZ zjFD71?#Snqj1dmmAv~w$96=%HQvGXn752}PiDt;#5^(3gJ6d`z21WjrH-0_;0MWN? 
zas3*^a+5^YPuScJ3G0gR9gkiRfMXSS%HQ8LSIP`W*QDvu%NnQ99J}vK?4NKIQ5yt# ze6jQjBg!TE{Q+*`NqyKA&Cp4^^ECW#7zRP4tMcnO%z|}8voXB5I#F9{Da<5HO|>@y zn;EqrU$1lDM)tVMLapW=i*Z>avP$<^^@V^-buO46=soqV4Kw$WfVsUOEBLM)g!7xE z?Jiq&`D8xpQYDO<>d_{qo=k-ByzrVdR{W7z&bhX%L1K!#>Cw+U`=rEg*=4sgy>pHp8q%aRG)96Z@4emb|a-KjHY8n;0FLVN6e#Gm|{n1_6xpbmb?$+ zjusEIC{jj_&91f%XZaS^=GY#~?ThCs;>}${o0A8y$IA}QTfVA=2Z5Md5s*p`IwZB` zb1AGAUh;&p=YfYJ&~hf4B>nAsMxzaGS0a=_J{u(n0TL4#LyyLlz=bF~R5Z%0(s*F8 zC=_^K>F52H&2%ZI^a6;HS;c-@D@U{pp#n3uBm=7(+k)7Q0^>CCiyl`fsE1=)GB04_ z_U~K=l^$i{HG$6H9nziM&Pz_KYrCRYfu$8KJqkS_MeGG*p_3xVw(#|Rc#Lv|x^M{{ z0)cl}DBYN3b9dh=AJwVYQOFh&y_NFVWlpiGs|JNp4BdMtMiT z-R*TqnZAqNB7xmW(tOB5)f|}`Aa32-bf}U@;a9D<8fo_>|6NoNI5BRN9C-_Rx0`Bl z^A_{q-j(nmZgJzrYa`!_Vfg|7j5p6*a47JSK%j15T2^g$%T_@kD2YC8)+L#}q`F#l zj$PS@8rQ;Afhp~8U!G{}=JRpW<1Y_&8b`@*Glx7l(e5(-y|*Cb-|+atNRb3|vf3L> zmhrYG*(`H3kZWBY_6%9zcXzSeZn4)LtbzbRa@oa1U{c3t_s7I#G)I2&yBpK*d!MPm z6qTpDDOOl_R4|_olIY@+jG{rT?@P~)T|aScuI7J}_quynmb;ktX>p4_{XorNEO<{D zG8DPO!4M4BGGEPiU-aJgJf}w)a@E)Y!C=EZ&N>?~si%*x3XC8c(R}cs>t|1@ZA9z} z^CQ*ZK(lojnwOnHF)6MJ7U_}{KKQ*0bjf=^RldG2uX&x6H5 zo}uzdLtWzcf4d8GV7-He$R~SA#ISqH(+Pw~L&|ht132;CZ@DFN_$2S7=0)-_gX_PF zY?GK$vGBC+Q%Ei2-R|?QSjbW$Sl@Oj1d3z8-#ouS^GVCDJc!l91%IGeqzQKQ5o377< z$=xT}!Z10rm=wqmg5j_Cjz=3_uMDdZmGdyT=Kc|Vi&uH=5@ zcx#g*C2KK6ep!CD%QC}kU;09&8jPwZ51Q(k}Ce@aAzPTu?tbz@AMDX+ny%geA7k(WD38Z zg(Upm}mII}D~Z4&;<-tyL7VD3Eo$rF{~#nqEY z?s$*MqRtqxCO}O#&*pP8rOOG2^?@=@Cvu!yc`-GFU(ORn;T1jqTz;p>EH((Q{(~AR;rh7`BIJ7Wpfm|U zA{2bC>L&-QCQOyTa|9LJM7f0tW8H6#}B1CrK5@_ zxF+dHj)eL-0YBAxN_7Ju)PsoAP3N_^1#S|0^edfdZwEJWw8L(Ef@3iS>Zu?Dgkad{ zw~egXIf=guVy7E<3+xff^Ic$3ysQiF998}5e9Iq&Q!wgI1i?^`URMS*!3I5umZJz2 z1CP2J3Fmyz1t(2(aMe{~nO6Rtc#e%th4WzM&(wostmxDn;49jyWRfu|*%G3~6G@}q z_J9kvbfyH{$3e`#@Zwo?n6wp_E!*X7Yo0QZzn^iREesPH6VE~$n~NUiuq5}5&3v=h zhWmz@bbkeJnfx>8pyfOMzgm!HB$2YW8CUNG&IIsJ)z7`H@!}&?IlVxmCfK(!;Y!I; z{{$j9g3Q30JwZuuCxEH1eTIy65qe;U^o1m#FyPk(z>I;dkOevBa!0n2NznWvKy2&! 
z8t+A2Pp-wq4PJe?mOPQ3sx4Vb&%_>Ns-4X49AxA{R1fL^d2pE%b1R8i;Nz%oQbvhX zQ#}y;yC!?mTZh_H?IJ=lWIm%H!9EvU=Ol#rK(}wnNP<_<|LK=x+xvcW{E0Pt1Gh7^0(g8mPNDV)h)z(VJTR!i?acLz!$@f{azoim!jcoKuSt7f zpBj*f`N$nz`PIN!%T4HX5>6)4pMKI5&=+5nvP+8Zo9Fx&o0-xdZT@eT6Piyoyab$z zl3b-IyAPCBTln9_p^0NEC<#^W`)dm9arv!nO67D;Q?qsqT7v>ghfmNh{ug6NP^|F~ z0bEhX*{j~icV0G&Y|$vRk^+ot^|5|VhXu1cK^sy?uz#$R{zw&zki;Hc`Md5C>VQ&Z z26}pJoUS#ZiK_TWtRFa9yx~DSgPW9yIIQV;S!pA&ukiHErd%h8W3X~cXH$x(v6RJefW{B=PrCj|3U9Mz{{d$CT^9ZY7COk zGI(C@)%n+4HnmNppPX1-=U$GK9xb~piUrciuDq8uJDb)ReNN0r-TUZM?g~XA^G8O> z%)YUErg4YL<;r|{*y3N>0$#@kDL1bdKGR4V6*Q{7M|^HGA0{I|{=; ztAcDV2mT2KWGiD(iB?y&@e#khc@TOsV~q#$z5mAXrQb_l2};V3sj*zFXkr198>T*G zif+F@iQW>Lk~8(Nx{Ijiqtdj!_W+7`zh{1=E34NOi zXDHty9|2!cO)_)|GT=+Wrm59p4{hFC*4Jot)?`o5p`dQeI%)`UKqpk_)8vzJGxo@A z>=0m@Qe9aV{T4oQn(sKpEZoeyp&9To>mf)rERTL>86dfxT5vZtlS3ox*i^_EURc_O zXu@<6?gTl4lvFTaC2!FBf9rn}H$)fmf!bh^Yn#C!;Aa86!Y-2#zJ+2h7rv z_#2j5ggn4|!s=?{z+GAjH1>u+&{_o#)^w$ukN-fc~@(!4)LbMKDPI)j4MmPX|L9e$s{x-u}3uDw3AHO7)MDaoY~9!DqO5lg=_pD zuHG^%s<{2yhGBrAyE_Ca>F!495CM^p?(S}p?x8_Cq*H06Q@T5)yW!c~?-Td`J@!}e znA!8o>ssqvT)70xJ9?d=62=r96>dih(@^`|=W?LE#XgBVo%Qo(gm;yTBpTp^=`jK@ zM5%U(#UKPk7Hrhm^f{f_tGsi z+LVPwQl;I9Ln{d$#e3ggKkhW9fhf=zE)`Bf-5*;Y9da=yHrJLDF$t#tM~|5EP^N2> zQeaW@VbS5D$?$EBs*o#r3>tieH7vwu!IDO{d`%c;JmmXJ+dPo9n=#UF+gD72%c}-d zxsAy|5KV}UAdp&&(?$|ip6wbEN8{eP3eU;$ZHY*#T>U?QOUf0<77{i16PkR>-!b^B z-GPMr3KgD%CLhw3@-3+Px>ul79QY&D&5#HG`(qs=RCVppZs2}o?j>9S9;hDcUu*nX zopI~`%o_klX_xGJbGuATJ@^j+9ybxFlFGo~OaL?;q&u$DaFpa=+@GALWkbm^Bh?VF+|_58-JLqN_01566|E z#Nhf1h>W;8#jS~6CHcS)MGn~-N~brdAFKig%diGVh>3(5zBfZo(q5uFM4_MLCnm1r zmb&-_aK|E@1WgHTyT*b!#)K{+y#Y)#;2=Ro_k9a6AVxl55|#_s6g!DtVG^Mx#Jqw; z_-4A@xdA2kvhldlat=5|mX#6oUc4jpTB@qE51T*LK2X}vr@ zDQ>ZOjV4#G-qz*Xn65~~iZ{8s%^7)R`!WHO<`Q!IBm!hda~~Tk*sj0lYL$Q?{|5MM zgCnyrNCNU=21#7!-3jU`wjYDJoI5UF~;^&f6G3x0_%y_ zu#cR=S%e2r9~*gjtlq9wY8st?-b3h#iY8|Ql?1?u(6314A7@cjt>JuotiX91BY}+_ z@9}2tJZ0mxadZ@nW1c{GQ$2en=z@8gzN%j2wt&M6S}~KheHRaBhRvRpK_`ITkgXB= 
zCSfI+HVPh(T$K!+^p^g;@NJ{^3H8{{s@>mgfF?JixNeMpV=Ay~+U{ux%JR9^_BicC zRHS8F*d@&kU6wLtrS@14l7M_2Y-Y%~mUEZWGInG%=nB}vV~I+i^FDRJLH+*w9tD?+-nx!Xh!t=Z1U%?6Ju@|pF>xM1~J;Csz zQ|=7ACYsi8>kVN+!VUX?gMJ2Qv~f9yRx<6wA{bQF3F+FeOb(|2gqdP(@-1UwWWHLD z#L8+TkpzBd06d6tPB4v}=37+?bb&P4=XY;LJJ3gfFimU>NcbZh z-;v}+qM%MX1{uV>{@|iSU2s1Jc$4JY-J3y;=SM^w6uQQu+|O<^y*UwizY$hMdC&*z znpEE}8|@$~Sbmj8Bs!Z~Mn6k`c)IyAA1eRD2-}~rKm_FXcoudh*=@Wu0c2+~0pTtj zrpsG$AI$E7CNLmMKNoJUms?>^z1s8(w`iTceIYc_dNla!R4oG&`X-SlfPm=Yjr^m* za^d^y7GE=Rkae)fO29XWOmSaQd<4wsj4?OH9|LTL6Q~g~U1Ne_L$tr~Y=z%$x=`oaykN1rl4zl z$5|VGuOBT9a}$sinB{-GSNvh4ump71Hpy40`VL<}KPhDS(o#ToL7)HSSIDi$=Kc%U zknu-T>NEqxbq490ah|pPyAK`(f3g&2&@XRLa`qef$Y$INrBK1 zBnj%kTTH{tDBcx-I)u&u4Wx&D4yToC2QuYt9xWu1e)5*DQko1U-H<8f2FixvdX`PL zvO0Tm5QC8koN3eA5tSO;2N{ZpLkobdy3i9yJb)3gnqf|Oph@Go$^xcJ_#P14ZUDjp z|6@7n0w{o%62$;YbV^|@U=Yx9(D?J?rX|6`A;m7``j`72BJ(K|~FVhz1hix?LxuC(EKsNlX28qv$mHO?_s6$|TPTtG}Y(~Mq*sUmK z@G?CurSr6@hwI)vk|IT6)G8kT1jJp?HKq~~{KXfAr(Uo%(4v2hnzLAJ_(@rX(vV88 zeHH&k-uTZqEJq-vm>A8C#p zv3kJD`WU?_j75HZA_^T?_A(lAI_SUsq(Aswrw3&WAv`t>!2EC%CAHq{z3fN9;P4jm z1iPZ+kNc})Bc$aZCER1 z*qsp(JqwDc>Je}`7{HOFIG=tOfb^FRPC6s9ih(6d3Tyy6y+PY)$@+6Fz074nI$EZ_ zdh=|Q>0`v_4CSN(qbhgMj{GQ0aN$!i^(9hh_PjY}naLlr`}6X6;degW_!(};cz?O< zHzzBI}YWJgDLzX%;Le8S~cQDFI%_) z-I=6l=GqNUX~N*VpN-2&WLur$H{@MaQ)QpVr1m<=Q|8WTwZC4JCF(&8+CoZiw-S_j zc&4klmgQJ!Im#N%N&ZdSK{bbwm2*tWW+tA$%SW=&0zr_eS1Qh2{4y~5Zvs2>1!}v* zv@uxQe*_8LJYJk1a*l1eAL;~9$G!yvnh6>-5^mvO?cArY(lJP0l|`uXKeDvmSr*By zgKB*GZ28xWWCH$Wp2h`N*#Hxts<`U!cHV22c}YMAK$3m)YE(s~V5j32z!hhm`tAWS(l?bIGJm(iWHQa^$i{rTr4OcHWdaqUI^)B zGd?H1o;qEyfRjPU0NVIN^jt`s;-&c*UtZ0W)%YAc3Q5LZVSOvEG)#8+?~L*OrxTJ4 zS5-*m@tn4=y|*xscOLs=&$1CWAPC}pzhA`RFd@}kf0G#|CK2Bs*e8`N|DGsW(qIaw zS`5Z(aZ}#dt-^73NP^w&8RT*BvvH~DN(=bnYTy8U-#h6Fhd?7#QlURcBOc!0FByN? 
zUN+aZ4@21{0k<9T7kJN4GE`!aD`0b!AJM6wwAJ;v*iN9&@?3c@H@CWF^8Pohd6*MC z2oTD;OyD@jiDRHUP(LO0^O&|I0A$))Jl7|jL_@>L5`Iiw^X+rVl@K}7^_amF*=QD> zae{h~QpUMiw%34hz?=Ni_}44|bP%|e@9A=K``nc*-*B_AWU~6Lms*tpmB(Yr?$-6< z2^)X*k8&BiAs8^AmEG84ynF9?^Irca)%q_A!pm=U7L&cSMx*|HKP3Ap!3}n+wt>gG z^KA)JZgq=#jK0=X1L^dR1oyG1DD1g_0^ZKf>)!936ijwPUQDz?$fp*qPX9u~ZRHZm z%4om7EdfFiZ})dn8exF2h#bVs6a22-uS3#tQ?W5$4>9lXueRBzI7A8$-Ttc!Kp`fp z9kG%;3RwaouWJB9ipSkFFD(F0necc!aT$ouv6q{xWScJ#xL*ilydy)Ze0A-`Ic00l zp}MO_JCeVG83^Ny^Kx@gQSjocIg>bTWjM`S=>@><8159*R9qbN$;@BOlsvxPs*0it*!CU|+-FUR?J^)6LwuU!wMlC12fIRkCCTJ=|H!@SbO-&h?1$WB$% z(jdVJM9DoUNcR1}CTP{G3yFSk?~`6i;fT&4px9#;_PXY(-U*s6nSGt4W)Op8fch?1 zqMBFe-F!yk;WdG3WEo$pqyBO{XqHKH(jusSO@>*bU0W!$NxMO_8mYEJc?y&hplFZN zWP%_1tSVEel!cgXl}xEK)eF)TGK7Yz{oj7|9SWvitsfHgaH5OW?@J^FYl#9#eRx2! z6zcu3Y+CEfWjit4$Vtza%ySnry6eTVlo0lmE!l}IdSd($V6>OKZ^0|i&>OM__r_4z zQG2#tj0*A-?^Q1PrPwS6Dd+P&MpFpN^tRsDY~FtUE9X-^$pXYRM=#8z0|sKT$ncY{ z5EPS{a_#fu-w6e2wR5_wKf^M7l%{o&#_nD8`y*I|m?8*RVk208&0#@xX@O&g@1JMS zX6zPR5-ub>0w6?UQ{PK*=I0msOaZqfbh3&hBjkHTzxUP0effgvJU%RWvQe?AlSLl@ zp#JuRJZl_aWEuo+03-?b_3=wlFjSq|qNbxjG^+onN^SXy_I1-vSvE{E{U24hEE!IQ z;&Pz?#L1->)(Kd8RTneiiG#D0DZqd)|7fv9K4}u}^0p zhVk{V#rc`6{8mT6Q>X>Q$6s~0{rVl%k|H0TPhOspv}5p9z>b`t+qk2p<{=E2{`i40OY<)~6{ER`!0aF7 z{kdttF8Knz0ghjd{KQ?XCfP=5Y6nXKfvFl#2vHL&)gttAKwh=FDWAen*PWBeejf$# z(ZoKUa?019?`b!w{vQB^C?90*%BVZ^>Kw3YzfUTi2ACaaTR)OhLUlzUEgCIa*28R) z9L%VNKz;ojsC4l;45bHi@~|WrLON7a9P#bE4HN5ms~kbbcWRUhIJF8}@(M?ApJ)@8po zM+~nro@`v@yBL|RSv+&Na~jJQn3X~?B|Q;5?-v^VovHslg->ZPnTeG?ON%l3XMx=` z;e_?`y4PK=wx}C`{e$Gx`nA{kGBSXuF!>%&X=wcd9!Kw0wFZ+}DE{87b|LALzO9%3 zX6nNl)>9<}1VWqxSe~Ip0AleuJZsn*%!ZO7?A5R}MLHZY5xwd34ap@C*fD4DQVTDx$tvV?UJ(9I_4|K+K1X;E5a2(YIe{LT!9ZHpadszMocSG@RY*yrS!(2FhpQ|K8okHd8cYqNf;!}3r3a*&{ zmOA*eShpi!AEPaog?@WFl)_r{6U*~^gX(M}_uuaif=f;mqEZ3`4+HrRTW(cc4Jara3WCyqP=t#Q+PslEGGROcLq(`UH0Fcht>Zx(MC=H zw5R$FM)P=U6Q`@3ZS5NuGk)&f#V(0@uE)jBd!X8VWbrf#10F92j-bjFzy2}4%5LKY 
zezd==n8hm`KTC3|&|it0Z~i}rQx1d=g6j+(dXurl=y5@wsv<-^Q8kyt~2Ij5(|g>w2u>=HxZUg`QjhE1JRj(z8&+RESJ zL`H4q^o%Qx!0J3i8dcE{-e6dU|lkxC|H+rDHw%$yZAny2LNZh2Cl?9A*xqt z* zL5m;(8j=mrk(0f@dGI^%wLc0K(+2@D_%ZZ)%ZXrmla@+sZw-*vDy{gv_@oNDRR9Y2 z9-$)Nr+cJfGXZQQ`WHv^ zGL-}JE{L2SyEh24_!j>5L;^Ugxpu#H%xia`SsJMR4EQG)0J~MPQ8d2{GQm6R^X9pl zSJv0K*dxH0-wDFsZrY4a0QQFuz~ynPX6PN~-;Rydh(8Vf&2y_>H$x)3UzQqA{7LNz zxuJy6^q}lN*Rr4gldbT-6O|}|kP@#B|5I@TDBgS`hYXQ<@&VkG!qS`#pQNHeA-gg?sv1?mD6Vo7v%fCN;!$wGG zwwz{sjhAr#H#M*|GXRjGEnRiI3jXVV18ji@nBUS2J6X2@*>=jSh1R|o$j#sq!EQ$H zL?vWXx&zX0*dpnPYp5hcp{KoP-?0O(kAg5n*iJ?PxM$)kXltOXMiee~iV9eNUd;=| zdC`ub0o=InO0)`~RLaZn`ipf@J&3uok;Ul!{XM0Rb|xMqxXMMQ9Lve zFvzVP-3^`#5=%cH*u-SKU347vP$C1bHz8d4l?tJ8+Q zU;zw5h<``eQ1Vb-w_f7N=f;8CSv?d%3&4K zJj~{nfyk>`E{i3tH12X5xZ}g^ExCT2>sB3=*NG6|ENL*|Rh#cL)<$P$1e|9iT}7=! zP4*`WCTdLN6R5s$Gf_E=KtTZTIToZ{4t2&VmSacqljqsar;9yLGqso zBnJZmPHfVa`r|SJ0utH4NDQ@h0L;;J;U|(e*KvBw0%6RR@c;Rz{a|o1%ZmbW-LDUH z3Q$ZnCNli)z;Mi5J494MN}Gkc0dS- zm@e2hx;Wy}aS{PEca-f$nv3^8OwV`H?M$s=%}#p{?Bx1ff7WKkT#2>|v~g{Hr-FP? zdYkIhF`M3&z`(iFpB=4iQisb#`ol(a+-KFdUHcY5msU$rlI`AE?rJ3Gop0j>df)wx z^QB+j1zelCZA`Xej{6Qh-|w}!-L)QMl1)8b+nFnEqv;$arA^XkE#Z(ax-Ygn`~`7 zE?RFCf0%q;1UAEro4dENBtG%Ncvm+YF`{uj6n>1^m5Y=Q8I=4hbo15$#&5Z(m9)8u@&)sH-1^yK>sjMm3t()5^Aj~_~5*dfR2hPocs zgx&h_ezHavQ_z;AiI`=f9?lB>cz8r^7dH8|T7qY`Q{yU)GgmhG zj>vcHVPpXqh0AfW?W`$T(WZF*fmo$49I4HezN7x?=V5!Jv#j8tivof#l#*dFi6Hp% zp`X02NXRbCyLVzvU&hhF&BYhqnD!gq`wC_ix|jRyAbB?@U}lLv-%3XF?-1SyduGUE zy#)>qvElrLomGiK&-&p?RW+V0T%ZdAKVzQmE6Z6qNVh~saQed%{c?p3YqBAOm!rj< z_4?&_W|Twt&x7Q_I9YOr&weX(@}f@)2j^hOwu~xH_d(*mCIEZk?5t54srR9*K3zdpLU9?)jvb@KqXlsMQDE7JJD@uD5!%HlLz0#*>a{ zd0^TmS*Rk|dj4LW2erX=nXxa4k7u!2?xlIBaM5J+v8T8EP@FytC6Kxf+x)HD(F&P` z3+xcR6*^SFslrgi_HaoL87!xV+R$#Tn>Bso^zX~tK0cBi>B-eWfs|v!xNd#HsTF0pIFT8V_M;A5uUJisP&_A3p1*rr! 
z|BaF0U~YkgQIJne_f*;nrx-#`$xlVVG!hed{n_5NnFc-zQ}w`gg$YGehYoKrDee)S z4tT4FY~H^Ma*fD0xZ$YG;Nqq7waoa{W*N;g4SC3{W^CD(pNzLH?}kb$(~LF(v=e{c z6z83%qREr>ks^gJu(sQ#+$;T1l9!ulx242TRGQb&-q zA`Wyu;w)Na=Bl^Sz@g>Byvy6+hXxJk?Y5c--JS0w{X)M$g#R_BdwJ*d;DeEh)*)Eg z7CXWmf^8U}W8HU|5F z(%$|X+m$OU%W^U0VaYwx=R4upk+iOgcETRjbhD0^=RZ-grQeaVYZ?u5+E!JSyto%s z9~Sf(NlF=Tl0-C_?)Fk`9n{u-`Hdl>e^bD$eII@y$jr==Hs!{14PWyP@j>|UYIb#g z$6szGtQ^4`oh|QaDm5Eg#F(YCGK>dd$Z1AN>E~VbHCu7oVPq@+l2@wC5|@n}3RfSJ zwkV=?nL8h8gl-vLy%sSsj%9p`%FNFCQ|l>&Atu&nWZ`PmGgiM;qrx}>YRC3vV>r%g z)=tRW^j>Bd^(L8QdzzO{Ah2Pb z<1p^w>HBR~gliTC&+K1JD@N3%nCOnR4b!Ls_v#M1qr@2Jtlc|_^7$cBr>3OQT2mjp zt?~$Lxe^~dH*vEpbA(?6Iz}d)pxtXJD-^2 z9p-=TFgJ)GXJEg>AhK`dYJVh771;Dq$GDzI`7-&%cfPHgO+0!1GvATo{QBC+9E(FF z!qx*f1+n0P*v&jzLTk*44fWOl*Vdtlz5jmi}4-wZll zA8%pGUD;lBd!Ztz+kacure(I`px~sVzogI`?LILTm;Q1)65+c`-MJSPu0C=>y!ZI7 zn+j_2@T~G&r#?6k#<|KK=}@(bbpk$tjIEBsu8-OT-1p3-u7<{VPUCh4N4Nwju98F} zV6J}{D*I)+thC5>p3Rz?KyKk;X1jvrPu>Yk7Gr7NYSAcEqILl7kg|Pg{mD{~Da9Zw;F5Im)q#^Ab z?aZp0x!!m15o2-Zy6^_J(kQX=a++JCkvcFZhsopHi+A?6|Auc0tSWy8dtiqqdyrG) z(Nna{E`6OvIS~>)?D+(Wrkb0}OWn1h2>dyJ2F#tz(|7lArAm(~f1SRIp<=n4{X=bT zw`09Ki-_3{?|cf0S~8~JKnqR`9C>_0z?1ZIRN`i2FhtvR*Djhoi#xSd#1h)<`rM4j z+q@)r{L?KYJVa48!PA8gFFuykwP{_mlNX#2K&D@r7;@6ekWz1HIQ*ea_d1{?ZxYYO zc+xClE7$9UFJCERRPbhxM($Q5RZ!bk?n#x>Zjq|2%y;`~vSz$~&1=_|&d8L5IdY#m3oSXliQOONY+MY1BPD zyvN4L$?24!rt@+!D!tn}lI!DYExWnf@>}OU)5x(UcUr337N!n1&4^A_Vqpwf*O8!< z5yCaCg3@W6dZ5oxbn6~!rMJ=7m)i-|GyzXcL1q2I02+G2vRo5p7zwuJHp>7Ac=R3} zEmH6Xo51{UZjt%AX(Y*qu?q4co3b7CfTekhjYtn>X6@So;*CRXa!!j-2-9RQDn3qT z_vcTl!ZSP;$i(Ow?aoQqH8`zhct$Q)EQ>e&v_vOVV7o|RrrFP67_+f>MmxKMLyxKy z{els_x{O}M)Dct3H`}dE&gLX8U;Et6aTcnT(dlm8^>Z2gJA$9S2q}34Wt??RR*z_V znX+inbs?|7M#j;*AHwbh=rnO8hiKAp#C+Plw#&`5!dDw6{A;l^H4j#7Dm(G7CTCU& zSd%U3U*7#QEv$OgT&tK&n+s7MCx#CeBHGQ2auLhuYP#P#S4H}onIsNdII3+-<~3r2 zvd0&5GEY$+vQ+Pi^@VRwUQ6K*HB7G*rg!YW#(#218_%?>2UdM<;RB?@-*O$;<2^Fm zW)RJMcxS?WlzTnQRu5zNN38;Sygyx4@A#+i!;HXds;%WjvC!SO310VqWFrh97xo?e zrq0@vk6gA(xz+c93}RBX5FOJ{rbK3Af11-cD<1TYt@D!U-ZO>&2 
z=JFT!^?wx582v6OwtgE`S_?lR%IcIWF`&o0{-=RYP5u+r0Ee z{-rmb6qc5+2aY4Nkchz>c-9-m93h+3dU8VoX#=ajO6-IZiiCCd30zr$`8Vdb{9s7L%CYFu4Yac0V`0lpLK z@}T5W_gwFZ0Npe5dy%WyRS&xKfDs-jg>c{qNnysw@IMR} z>%*TL7e$okwzzIKOcRW#O`NZ)`t%A_Txb4gwV1CzJ?{9U$P!O!N1esi_x&tanZSE! z4YByGP3cz61LDIQPYGVzW&C!@`l{^#C+u;rua!O!QZxy#MCII8@92pZ?$PdcV`i&! zO`ADtI?sQetYrR(@Xi@wJHvBh6k1K8+9%6i7+dil4$##lIF~)LWed{#Jd_`D{;}H% zQ|yxY)r#)$Rl?(MewltqgA?z76$>QZk6;zyCI3$5{F8B;P}_uBF^xYvxpH1^7vpxY zG(%BlQhb;qZ!!OrH1yN<3f9yqvd*lv?zU(Q&3?Jlf~zDP}X@1u-3n&h{h3zI?uR%Tz{ zRq0biIJO-JjcoRqG9NQBa463-I+&U zK0j0K)DoxGf??hq_+)vACGsa{R*6zo{f#oCN0p^X{>Y-+fT4krJmG=H!+2?=S-S1* z0}Ko5jymS>+FWY(6ANXPfwa{@MJom0VL+Iein<8RakBbsFaAPw;Z^B{>>mErCX~q( zWQqB;uVbtCTU&@XrdSjh2L`&(^TL2GDAJdp-gvxX@{$GO5rx+?U@p5}-VYMI;eb!S z4A875ijG5PyC=zDnyhKgr3V{x!f;c{84BpV(Q9$cgf_UVg^eDNBkJ&!(-5u0UF{7_ zVj%N7>T;Vo?`m_TINT8K=&@O-iPp!)b7*6q<*JGh6k{BPcuMT>mP`8CC>;qafVf*z_rY`TOEGlSR3ME`NCqoIc*NWTp3=KbXo)njKZPeL~wowfFtX;Lhx@){ZTN3 zwJ!MVWFitGPhOfQ5xs!jIN+&@V-!&#lGlC{=)VCBY8pvozok1P0EuAvif^`_knHLN z+C}Do&OeytEdb(56!G!=PBf`cMnZZ7RI*E@qQ^1?d2Lo&_~lqVp6*=$2_?Sr>GKaD z?@9%TtO&aF`~`eyn77){@x)EM>p#{XoVS|bsa1d0z~JKthyTi6^%Fv>gKEtG1|5#e z)+Xqi$*fxR6cW(Svy+|1%LES0o6DdOcNTx-z0K2%R?VPXgGntYQ+E1GYVZRop#Y$w zU?x48M$(}bmzi$YM0#>(9;Aw6d2qxBAB_e`!3AUagE|J5Nw~bbH1Ihd!8}9|O%*Ge zw8vn>r+iNZUp7n{PtFWK3Q-T4ye2!-!C{_tF>@kLk3v4-uJyT0#~sgK_jQzfhv+1{ z($@R1ap6e!oiy|i^s=?dsxG| ztqxY~7=OOcU}x>^4kt_dV~P~V;wi&tADcI&WxCT!31%?+8qQCq70n7BRY)A-#n zV3ug1CwQlil{=Y*k>5PBLb!Vr<)YOA8ZjQnx%KagBYjq_c$>2+^mVz;S|iM-l3I24 zxqF*KWQ4fC0U;;{m0VcW7emWGgr8fXv-h0WVsk>9=X9g@`ah!)qioZgkH!g9vs$D~ z0T`R0Bdp3qj0bC7eS{6%&p`SCU z{s2D#v1L2udctk)XVfa|d|sa(x%M{*`BO1Vf)L*X6As2)FUXOQDF*L=r^t#lMJ%paPr=(t zgPG>E2~x$E{q)tK4XkF<{egbXu{yrVKXX@=e%#vMr=plhaDawRJ_+Hcafog~*+-E- zwpxwmzxW>9_PDe2`L#-OHWL+I}Qu3=@`y=+u(P)4Et)1jHJEYI$FPfqkvQtXQ2e*&t5JO{k6vbc$Yh}!U~O4>)D@@&&; z-vY0XQ$16rTb*{bB?vV5r^&or$^<@=dwuq2AAYvs>yz_*w~4xdW`oiz0zVw!*pzjb zcKy$Zenju{$K}!yVA!_K#gY5doC^PufCImS`RcFn1&z=k4_-2}n*y%Wy-z9iO`3su 
zY$Tn5JKjg4X^bn+QfISO*pCRG{ap;!!Q;a+j=TI0H(7)S^^F)&WM99F7dy$6+N}`p zju?HB*pWVz;vI?_~)%%3kvi z4w~K1HmYMfW}S}~xC0wD0ju(i)5_BOSV~y&Zq-r^hQPI)Jumbnz^}kG7)OJ6w9@+d z4dTG{yYzGb@hKae7mh=(R;p0~a?S*lbO|vr7={PmC<$H67t_2wzLpI5pi}1Hie!m_ zYGtdudLxkYNk4(x@hE?+(d-^(N&=V&Z__}ucbY>1I-8LOJYYmmaLS(#M}5>eIURmI z^L()MZeUKXhRICF=zB0k1Dx;thsOM?Cpt01j_S8er>I53n7jSrC^4BpO;91NCH3~+ zLa*~DANw1m&JTmy z=}@A7x?&Cf9X*2S{y{SgMOMlO4p+<`I@yAESdZw$axFBYy-aTZ&P~bPdU+xl#HgIP zZ1aTeGupeq*}7NWoNb`s7p8G+Py}--4`iNnMBd4pY_!;Ia#)qKZn`XaD1n@mL!&bl z;A9nZj234<9FlI5z!4%Usjm`6_8-Ay)Uy&zfDc+B{M;^1CI zxo4|OQa7I|GI#L1^*Qv?fgB&FHS9&k>-zX1=PrN;iF4+5YXHZ?gH^YQva7en<6`R) z__-|r_^n2*siKkVJmSS6S_J@Uts`0zecO-3CfE_SoGD)6cCs?V2*@^+r6Y7mb@f}^ z#pRQ=))0RZfvSQU-uBTMIY5|>8fz6U1)!Uml(V*ZJ*#4rq1Mt#vh+go5Df+bwb!TW^X z-bxzDP}KO)K}u9Wm8m%B-GkNLzAY{wpUk{_LE3@f}m;T-r$A@$zq3V2k~7a$P@}1&SUi2;>P!wnJ!ORC$azgbi70KCx%k`yc<-l1AQ!=4csUg zqH6P%zSzabQTZLx68r6Lq=~AvM+h(NVJ*Lw;GJrw<#q{+PqamTW|z5u#-U#%7_aXgEtRYkJR41$3|fcQiS(bsWK2x6ItGf{aj=GzEr=x?Y2rx3k0ArKMr?MvMo0| zg#n@z<}?a(&qT~0;AHh8rEq?NQE{jI@h}5D%~nsB{jtd+<^ADQPBq@=R?{Ur((cMW zIT)6?-?J3+5D-x?&MDZ`h;28JGT>fpe1lRM;5z4{VfyFv1f=3Gpo8?|T+j1+0v6|z zcgM2j$st0;aKQpp#mwtzU_)ET17qEi)qL;(k`1qrjK{yLSAaRfLj{AJZ6w zLbo6j6{2$#CIb>-7CffEDYkKfc#HHN@z z1f6djCWCU=kLHPazb!cpqOpt+o*7P&0`l599Se3o`moKemy?2--)O*w5_P5BA!d`A zR~pzN{8R^k!&}&x&w$cd>XfKP7Lpu(%eEGDz8TPf5SZ~gRsZJ`5ZeV?iOihNWAmdY zf*>Lrp$=mZ18yM<5Y)?MC-s67jLi`$bekmrP2+<TX&c7 z6Axrre?KE9T2!VbPJ_&-eVDN@;fLQNZFti>U`tS92N*|E@6uWYFyRnDa*>4W z`ABO-3Ug~#LGHYi89|}oh4jt7mKWG3 zJb7KJ{^tYcMHJZ_sLZa7`1&;h&)d57y}e^!Fi{$g88{dGOpD2bnX8WVHBQW)UAceS zL-zA+O~*d@EPQ-QwbDm~osZ2wlsHmJJlIJG`b8vb| zEYN<#e|opO{ua6W)bFX@isD0LeH?Z^j`4@-R|DYRBPKHogBx_lfqne!f4r#amHV6a zYfO5N>g+?H3D9lzb5C2rpd$^X%N_^pE{y3ftS&DRkus)@w$|;tB)T#FQ&EIuP*b9y zQ(`M(jLOM+``@hA2ctkQGn*8k$_9HZ;{!fv1B#7Sc`W@Fn<+NiN@ zqp_{VXlz@JZJUj4+qnDxzW08)<9y4=8Do!R@4eP~=5Nl4J2Ro|A#dYr#@2j%n>0PU zfOoW)dVBZ9_h_@D4vYqJfn6}zWAptrn$+i!0CPAY6PTxoS}0ODuAHl!HcdB|)*UaE 
zvSK7oAT&%L{n4?_>^JPsACjXMZs)@PT7}PEp%Oe4^T|!$<}>Dk%WHx~Am9@26NV8- zDeAB4#@mr4aO{o5=W+8#vG|8nl$Bb3*}k;d{8v_aNl1qL@B# zIx4Z34_=xSN|**~c zri69H#Q&;cp8&xX3mDRc$2Q$ue#oA=4(!yJzH6^854nKy{O0c1 zcMYnhr;Ai8yMlw=^_*3?r&p06*GvAKLMx7G#^BM}3b?)!5$XuJst(N> zQoUk#PafB67H7-#pj+hPOVXgGWR+qlbeNopjXv5Z(V?HD*|zUTGlgV#=blX9A022L+U zU5AY}2fFo>FSpQY^|{3&SZWuM^Agy~aYgf)Oo?`>(EYIvgILV>t$eysrEYFQ$+J`@N&S>v+tjydrS>^$;5o7_Yw3hTul0H)Ny(`#oB1|YUJmt`Q6G?ZldF_ zns)7SPhY-r`?DO-i@$Qx=l?+MYO+9uOh1xjRT^|Ewb+f<;5fV!?&bnUmCBYIIan%G zI+2QIjHJJ?@fOQY6iim2d%79=0XT}I*N=a(H{74ue`x^T_>{rhn6xrDQHW`FB9GZ8 zQX5{7I?@2&E!jv zz+QWYMCJnOsE4vYmOA2DuVre09tZj9x{&?W_AcdKnaE3mH{lB8_-I_t=J7`7x2Oyz*&UM7a@jO?y^IQ-<)7tm9(b)>(;Yg} zX>5+dIk+7Lr%247Wy7>_X(9|T@f1=*iK9t5%Mr=G)5E{{HYT%KD5ldIk!cp;boaqz z@{_CA#TfNE8W$1HN3B0xqH#(pg)3yPZ+^CC8!Ba5Vj5phCX_XF@4z)V>Bex^9CjOw zrn1*HzTc%d`qRo+232Pc!b-yj zU|!)Y#d4yrPp>6MkXC~`15u(is?!)w_}yPaYbViXAtFGw?|tTIt4(&rK+w0OHM-0T zoBO2ykX)7mprd)}$|u8Oz%VTdnC5^`w%GJKKQd8!d|;I`TWb#S!%+EgMX>~%uppoM zV`ef$TeMhDGv$ZCi5YtdKY+g$xGnF-s=Bp90dR3(mz>k!Q9*VaG1k@9vOSU7a4oMX z*?b4~sNq*1$J`M+64N%!gQeWzZE7<=A6>ZEfix(Qx5R(d?+Za;%YT7>mC-EzwoEjh z5mXqI-CZT3=ERy)B!yyG(yP7I`aem=!M&@WB!F_QDm48 z1Tkx}$kZNg)gZ#EkoF`)zMg>CIQxpr>^L$KPeJYGDeGQZ^^LCB^@Eyeo_4aVLBBbO zh~r7JZZiA}R6l0tlX>MlF*v}XjkezA8Uv^nz%-eFC2=}*W2<}LiqrffUL}Ayjzz=J zP8rAWgi5{<`W^5B)w||*$w`J;WR#MWb3V;pE18vk4^=fd$>+5lwLamsMzqlo_-B%V zi!OclKj$iRB;x5+V4*oa3zkNEV{;BSR!$+AV=a)~FZlCG-G9*PeVFANU{Ni$?G>7Y}7@}DQ50{i1Xp$YB zG>dt+h4e2cXd5g1hzeOxo~#n&js5}x>s%&KIv5U1`-70p90jPRhy(^tqChV-NlqA` zH1kRT6Y=DCi@Bofi4UoV`z}Ant(7P(wxHUX1WH-44K#JXSAL@pG#b6Xusy;oC2+Kd zbxRt^wVX7G)V3Yk^HhR0vaf=RyRq$oV9t)o6$Mo`>7En9zMtDhH^QfV*RBD9XF7~t zPj*Y$fV)z^~tVD|Uto=7>*(rVJa+jaLl`_dx%D-p@n@S~}`k?Mo~L8x7@ zLES#n@?bt3Yi3vx_sxO>zzxkCi6}%!_mhOA%*ua%fG0N) z3VN$TY?1Ez`h22NEKl=!KMP84t*tHBnfniX!xkLhit@Zo!rnm5P@f@cw}+n?RKv&6 zQ79jDuJ6uEL0xG~Wi=}ToZU$qHM?s|wMI}oZh(1lHtIi1FjlQ5;9FsQ1q#=50p%g2 z(bfm;e7)rn?4C#<+M#yN*B+Mc%F}YA6NbGCKw$MXxSY=@d+fev;`h+&oubCcW(U7`G=@UZ7nyq9YtC66O+YYUr_K2Z6O;Pm 
zdsAiZ)7eu6^${1?(km&ZRo!oznPw9264OGKzdwRi+|9@1zq#1OA#d`&355~eIrgxi zp)}dtLJR+UV$!W^y7=<;EnE8<2IEI^`PU(+*N_KmbzB7qZhYSA3F zYgB^97E3iG>~GWBMTh zx0^eh{mGBI2HOhaJZmY{7(7%_KZv@|t6!~N_dEkp=n*VtKcp`|+`m(D1@IkV8Aar8 zEa4l|Y3u|+CUP@c{t!^%_L5T0?%Z35Mzl~|Hczr$YZXj0mnzwo8x*P!QVndA1eWV;tq^igjVSw%*685?QG-l6R!d_FXQZ?w26+NFJ zb!6Gobh11qFcf6NHT!yLw}G|;gE^ki=b=DshDomKh!@^W&zk^b2si8mH5!&;(59QJZsJSse;MVp33awmlLx>Ow9ZpKEc% z?*h_A#4U#G1v>>E@<9up!oPYZlV0b0@#hSVjxBF#f#g z6rAxmG%ye!mTc zC7Pq0%2MjnxL*Y{VZi32P1K+$k8 zF6s2+GKgDb921j5g$#UL?+;KizWRYErS3g!&JhbF&|E(e+NvO80gGg+ktF`DgN%l z^${wb6-(KIHNlP5nm`w1@nG};L-=#IeSZ0$5+AQAlaGdU! zel}80%28@WfMk{I6Y5+dQ`tm=5-p6NBVL}b7FCq1J;cLrM9H^70^umi-3hyGZ?CRt z4P@Ku@=NX3jo%Lp&s!g{4NUB(e%?LJqP991!ytI9YwJ!8*DXgRq2phdCoxh7Oc#o- zmZ0+z^=1?jtnTAkJg-E}*Y#C9U9Jh^&Qzzv8kcjv;MN5#z1OY{0S^FFnfH)fHo6Z? z85(aeQQd*fpqKAlkQ&`n@AKo!#Hx$KX+dni|7~)WVy~Ov2=8HqPFB0wy5Dp-s3Z$qrh#5IxDMigy=prS;PHN{B8Hp11ENkT3MzpHTh)Xu>ueSi zDUZ7&RPiX>#8%f!TdGRez;F;~S>UC3sg_E5us%QlGy!AH*X3ci0>by|=NHOA#Uyxn zy(AsB`gQps z3LM!wQa&2Y)uq`onEnO&LDK&&KP|PAOEvi|u~BWs%klkHRpz6$$?n?}RP3z2ZVBuo zO#2XPFnt!KUT0`F{1@DNyvpVJh12!*Mk^v`B7Ja1;e9ub9PqJx(1m;Ezl{nMO>}dj zd~xR};{6fNg9(n4)(pq3hxONZfY_THp1Vw}YrW%sG7_-xALvdmSF6^HiKn>d ze5BHDW&+x0H7EJKiS%KSo>4T}py*-ZDJ97dxKC(~m5ToWJQ`s*3TTd@+p-AY0lk(e zu+cSLsfZKuZj%|L7la59sB}!T;p7#t93q1x1t>OB?(j;52z}4xT9v8yFkT4P_9_Sx z$Nr#X=YW$9Dy3zJi!1H@T*HQq%OdT~G&8_dWU`| znC$s%E(`;>Nlt>X5ClY*x^6xQ{HMdL{**6>t-wPHhy!B`v!F8n`arU8|IBA4WDsj( zbE%KG{anG!6#?4M*_~N#gEcwv7*Tf@IQt0yv)lNkSzNqHTby{DVEapG6K}{G*oFCX zjwDbv5#C>{r=ui5U8@6i7Q(D%ll57BlfYTy8T5*sDd4RMvtPFz>5ANGya`F>-P1`U z5m4P%R9Hu4RfNc9ao{zoTEMqiCm5x1c9BlDe8ZX20J=1uEr9PvUHw5bJVWIBVIuQj zuE!nK=f3*m%IGoVvItIiOgP{ zxdrHG#bF^s?C?4kL{# z(H>yQZZr0SdGjyyjaHd*HB=89G6cn&GZIrXlnnm9jy^aHkzCHG*OVXf?`33YKPWBN z^FkMX=;;^w)nm$#zril(_nqFS)rp!dwyc?L`lwfC4C*wUk~^n=II(q%=#Ba>JJAhL z+r>H*mLVUrT|a;D$8gQEttjP5d8dtEI=#$OQ=#b*iC{rep-?EgZElAKi6i=PDIav2 z!xJKr%f-jqZgx=mVNO;Z0S`t6q4#7P{*QR7PV<88A&AOmvYR{4{Sv~=3jvtBj$h-n 
z^fh1I=74gw4<=tm>MOn9c$^G!#C=MDhx+(xyKl$1vYrOu1&mm8s;Onu*zR@)5Jc%c zIGed+1LHCm88Rf+R~pEd20+g6K{4Ns?l0|3PS1!aWV%1quEAnm3FL+&|Lk}RjbXOq zTE{RgLwC499H$1bOXR3nyBL#Kd^HkD<}hI~awu7b*})Zp{&~6l<5=+h*eWeRZoJS&Hg~-8XQ>+Pn}<5^5CZyDf7eI} zxt#oKpqIsA{}BY;&WHkeS(ExL;A5dgJ%l0(pZKr_*`JhtQ>pt;FB@vmYlqc2>B;i7 zTC(*Cw|&8Ui}9eF<#Gu}&p)_>OjuZ0Fwi{09v7J~Z=mb1k3TbzN*%w+UdC?OCsCBZ zrUv)oK!tD90+~><$hgGj@qQ>JK5{tX@(Hfo(|-*JN^4ApbRog#WZaU4vTr<14>8K?@`DQb4TtY)aNqzSbI=+Va$uDR z0Y(CU$mKN&83W&XkhXAJ;nEkD8pFL<30*=<&3AtPj0 zU4FM{L*d#pB|fru{qo9=Tw%6?5B@EM!Ohscdo z$NJ5$JDF<26P#%JkeC>E-3Yw_H%d#(Hysh8x=h**Gz6Y9zgVsyDJ1lFk5M)(JHWL> z>vQ{0zPdP^L>R5t=%vOM7ye!0f1vNx87!T8j`bJZi7jd{$?m0eHfbLu;ue+hKifoK zlRDLervCh9vl+#blz)bxjAj)lPBaaiX7GT9iT zk5`1{0!5&}RSM{VB1~RAWp07F(cZQT*q1J??gpfI^~4G6dJ2C7&tC#__&VK==j9r9 zoPuWB8(e2Dsx0c?9lT&H<{nzfpV7o@E@ZFpFKF&wGep96JOEl?@`*$*-Sl3CeLgaC zukDUNLS33&2WOXK+O#6XQyHFMzA47AUbtCg&|vAE!sb*p@RpUIc3k- z0~@=D%wzd3&5-0RxF>ELD#Z+9p?1?~_o=og%=!tEx-aLVe33;G+-UtHQ}ACts*3AvXpn`#O+Ps@Rvy1nJ;ERR$qe4+cYjgV z_2iScQE(TTTQr|7k`b8x(8K#z7+x4YGyN-JfD`I!N-CD~5xF-Y4|bbA2jQwRAO~I# z1D1vuitpF?3w`3cX}Ri-4cap?DI=Qj?$g$`kuY?>dx}4JCl6^?D6vp5?aQ0vX9H*b zYY=$U&;1##O9%b49NjzV!MigvHsJvNJeT&*t82xuNMHj;RUI-i{pOQPlXd;D|SIT;uT^5g(}O3!&u(Cm?$ z-6R&ZB6+V|f-9FF7k*f_VL=Dpd#1Gt4@LD8b*uff8pZdNSVIx+ENzekIV$ZB0LhjBke<7viyx3{SF1RijC-490V^=(L@R@T?&)l2P{} zdx+0AXcReMb-{)8X|`iMV9af954MAYnDzJT4te4UD@-8OAmyN!Cg~AJ zU4{08af_*9o)*K%BFCZRe&*t$yl~`{Km`@Bu~AODB{68qTh&L6e-6KXy}S}VgEenN zvv=|!!zD{QJmF_`0YC74vH;e;?l<#!Kf)W3+hCjTrnf%Q>ExqS1Bc2Dxyz7@v##Cx zptf`!81SW18e=oZl>cw8#eF?%RMJN_B3iS((FT9$7>AMPwDE#X^>DZ~mt8UX?2Mus+Q@?94=+WN$CdwA z4i`E)^utmY`Q)GHpT__1a|I8B0iV*s0*hT$|A&UFq@{w#r9hHxs8`{6VR5~?il%Gh z|B2B-{7B{rH8rks&M|B-|No~K5vHA?#u^BJ^~($11Uj~R5%Wbvz5{ioKY>J#0y>Sq z(<2F3u$!^d4C~OZ`1X_+Vej#|yqLpseyM}VccnKZPMhNE9uxk+slA4;2-0jbi>VvY zP=TZ*aOpN#)fqHGL+_(zK!F<(53 z)8BH&hd{uL)9E5Tje1Sa5WmE92XIA-8C<`p40?kUeSmRdJ&*;noXzja@r6iWZP!Co 
z6uh6^eMsSOV!lOI=~q2RacI90G2K3mu4HT{PyI~{DjM1fFgM%hfkmv#L+R<3Q%GL^&^Bs@{Aq^X9%ar^uA^2R#LlmGzG}ZH75#c)gjl?rs>?OStP#{sREqxBu zzgLZ4iLelf%q>~Q#DLYLw7C^R+Cb8%MIOzRs3tlf3dM;8EJGi<8ojIy%zF--^R(I3 z5x+;Jv%CggukaNyW^$9LciMz~3A{p!M)`Jr&CxuEqK5d!j6N8DXQ7yuAiDR<+#|c4 zrUu}@h7LmLblX#aXkK(nid2p$f$jI3CCf z&Oxd!!%LFAcKt0v_5%7hqFd?$r<_uIJ>(uY6;Kce9X1PIzY3s7m7FAgIR@=v&bn>e zdNScaM%q2ci&I(TL0(|J5^t(!5zAyS$8;K$SP6lH+>x$*!Cvu_VBgVI7Hv@Pk%r10>g*=A$)Oy*uxES$BtF*X)7lrSiB_2B=>f(na)En_vCF)jHsc! zO5Vp&R%o}&jm#CdghFA}!=wx~+eK{|~F-5oCjaXfvDDZTbsPqgc{OELZDC2xc& z=EU*c26q`4t`N>Ho9R6L8L)1g#IJGk3czfB8i>ovM-tt0SwFBrYdg0>3N$Eyy!M4> zhZr03If(RfY`A3i-zVrZoXi%~tHNftRQ7m#s)bb5?(*SJq*fYK;nmIx9i9EUH*Nwx z=_lNS=zaW?|6)(!pX(p&-f&uosk0s-0q+;(AWTPh$eOjkCK24r2TKZ>9I1#f(JOYt zHwrGO!>IlkqO>Fo1Ar4lMCFze@syS*vJL;)3oxRhrFI@}DX`hiQZ>EShUAkSZt2I+ zh`4)2kI5HXLN$$rCINO^!kmx3A%&`nRM5AYoG!n|^=4x%5+spT!uQgl?l|Q}w{0ev znF#%WqvAF;^LaZo(BLXWM9T&MskN*Y$j2dr+u$`E%TOnLD{_u7l?M)573Uj`o_b-~ zkeYa7ybl|7(_D>taw#$OC-V`V##|5Ur`YI*9zF3p)3phAzIy7A&oS$qkqZa^-O%}r z8*h#NFcYhfu|i`K{@>WVKZ&%tj9nREbc3B%cTAvP03k%>kdJnv7$fq=U| z*ciAF`Q3kksM`M&|2;fTTm1dWOsxkXAnhkWexl{>0r5qP0)WxMe=wQ=NV?a_1Ad?c zuuri2LjU2T07g2OD;in?B;Um^0_o&D&LXwrz8-^t(R)dX2`gtQ`)X$-n_*V44}*G*A2j0EobYJ?I;RGtvUs9nAnPLxjd2ZxBbkJOU1U|(7I8NiBs5g3>Lh93boz z?TyR*9;VhSm)Su)GqTB~=k3nUNjSveFI4peOCLe$`YI04(UDa7=N z2iTzcRenzh3#2brTX-|-BcXkBAa~DpIpZew9^Bh+gWYzIe{D*swce=Ja$7F=$?nf9 z(cPVEL^(HuZdO1*;A^v^ib!gK_!+pkf1O}9pC9;5otR?Pj8U87iqPvLZTE%k`dBJ+ zl?|PcPOB~sh2Q%ipm4s}5ije=TNs$$-mtodDU!>M7qRbGnmVVzMrCt+-?;vK?P4sU zfXU$R(ZqA?U^?{+5tmJZ7$sXMj%o3JyTfrl;*Pzdv(e%H1SRlZP0CDeu*waReZR!0 zGOHMJJtVYMb309e7WT%oYC&Nv7rL-k{bBR{5=XU8bayG-GjOZi-O(7+0LlyeMc0L} z4c!<_(rc)lHkPu@D-`UxBXe`X_wzLw{$ONeS4X*YRDE*6RbB^Ai@j>6 z?eXT`s?CVUkZyTba4-NsS>{YQLI*h+9Yg9D)it9#8_Yl92qT1>++_M($vAv{^`pIx zjwE&KNlL%7TnWN@pR|QN%ZIR)iZQ7jM!mbR`INT{cMRN?AJXU^u$1p+prvy>U+=A0 z5N7O+bP;h*T&CM~UVGjiL=hp7I*cPHZr&bFRqktm#X0Snko2bM0)(NY8GSf=rSIp* zV*9Y&2fM|r*bR{KHsiry*dMm&otRB|=~H1*D|wjX=;L~^7I(7*6#p%I5ZcI_Xbjgj 
z9=2U}CfvcIl6uJ#y#NRgnIKkGg^3Em(qH@ip@7(X{HNYBmDB6#T+w|VpvL5H5zZJR z(5Dd-%zp|bSr$ot0B^aMT0cWs&LrV%wQAjSh1bf|^Rt?(Ll|A&d_Z)6HJON?iP@zd zwgYDVJ`*SZ>Iuu8%(PU$087GvZR)k?>kmENCRJ#t?L!H=-#%#At)pDp2g_`-K} z8=Jo5pIscaNymWElqkLU=^94Vr}=R3)|!RHdSe9iVtjI!az4ut=P zoX3)==nmyBG#Q7y+n&^K8B>`zVkTJ0GbwNWnj|m_7SQ_S_#ky3B(qwXa#vrNRYzA= zx-09Nttz!~K3zjRg^_uKk?{Osr z4yO_)?Dp4279Hv8pZQz7#yuikL65j5NOn5=$?T5Mf*i^ z-++gBkF!9H@))N2KmqOFkfKV*ixqgZ@rrv5eN!~^K-Q{UpcZ)lN z&B2c-edRf~^5}NKi1(c$`iSw8PA=swZLxei^rPNdeU;JmA^Sb#@%$w8HJ)1(_w*j& zU}SqJfqf*<70;$#jq!%PR7+`*GOO4L5I3H!Bz5}j|hdx(@ zD9)|JGIymtZ+VNY_29rx#cG->7;P38Jhsh0Rz zEVIp(xc4@#*MgUs98*l{(8B?)EN#B}1xD$R`glEWK?RA9HuKPvrn>7@x9S?bVoP_y zNGlrekG&bBLn4}(6~i-eyepq_wuS{q%rSiV>%R7Zx^T0>j86Au>#noqzqp}Nr=4o@ zy<1k`q4p>UgzY>JUsOu;nL*D$drcVSs>5W0FH;0AmwgTf+!QVR5@2acV@tx+`R?4) z>9E}!tV)k~1CE$uJQ82;e5yK{#$Jvw5KdB0odcM1(5hf=nRC+l-EaEg38$Ov^b}U5 z1|L6lZ%0LefW?B&NE}wF6F@5v?^CGK=CfRw_WO94veLJb` zFKJ=WQCa3XIG^Eaum9N`#4K?Nl~1dshInG8?lv=T!$tY#= z3*gXEQpI$qWEsLf@J7!Uowet(b+}A;YISSyy`JnS}Z+u&y-2tQk zsVX=gwNtW;glZ*=}FHKt`6@3=dx3#C)~qsg@8 z<~-BY{rKGcK%_-~K(#jBv%#Ez(HaWwVsV7IqWV?*Bf3qcb`fOtUN))uYxf*I?*r3i z$XreqK`E~lV)|{}S&8oa7kI=NI(DzM&qH-ysGZ-L4J7L8A&WoW=ih%l@;kB=;D&TkS8iPV}2U|&EMvc zu|=st?CNq2uHEXFtFu^G)Dz1ni9#IKU*lRv_G0S;3NeH8b%hY^m@Jl=)$Oq zfy>P%FLjavKm9DITN+qBK-7q5lew7qpyGl-r=N#$WDsYfjlf~0s7(_4rU1kZ3}Db| z{56K`iC`ylx>(~rS!pOwX%e;v2MlqtjVQy41y@bn65J^c-cwW@roDTbsWG*tzlra* zR+tjT;>bdczo+Z_n39{Fa~`~=yfXP~48d}qG+j!&|*9|3nLZ z_10(*T!MD&Wpu~icOb3W*fVjWP2UG10mG95dzdX-R%c5&>NkOPvH_t=rHDmYAH)*C z3Yh0ti@PPkKW}zloG*zKxTSBdfFQs5yq^Ky2kH_DVfQBHp7*6kus zzxVI{$H(o1n$Y%l06`Mh4@xLO{VEFJDmZd2$A;xM6;AdfOCjVsN#1l@Jw$J|h`j9W zy9|E%{}-~vX8}g?#Z-~FpiW{Jl)%b;d>|;Za-GZAXERXn6ZL|df&#^~cgSFlp_4Hs zwl0Q7u3Ia-Md^j_n*fTbL1PIlx>|~RxAo0VqSc>l*?{@xNIs^}zT0vt*9X0M1_Uw5 z$|?My!l8Di_?_f{*=SBaTb*Vn75(pXhYSqo(hqee3Q(p=53mssD8LlMWBi7YlxQ58 z=+|o!fZFCm-_@wir$IXs?;7Lxs*hR+Qz;Dcu{rEGjfBF@Tj>6Pow_vB^+Dpx?Y`EcRjrB3jYtEvmho#XnPq#cnR7ZYOvdP 
z^PSc-7_adn*71KI%0#5=F4AYmIRjnoo+cPm`{f8_efuDPj+$PY>R%C1ap8p#l2@wt z1mQzI@nG^`HPRpnBS@W&Uvug*omP{bfgGu_Fi#RKtVN(L@Xeo*15a2+UOg$Q6+(oi zGs{g_MP*E)PzFT*0#mC}t2W!|5m1<*LXRn|N{u54m+}LRb)IyGRrWKYHfdrwn@+vo z4{1{BfD{AX?ZB*Zw}de0#eOv@bBcC(WnrQ<^P5?K5eD@LcF z;D950W6|oWWHLi0J2!B4&l}KJVxXN(3V@ZM;K~pc@(+P^+8z87fc!g^kk3s>GM>Da zqF5$1UQO~tE;@ly&Q_Mwilu^bVcM#d#jL1q)kLR+Z{;^{Z_@A4zP zl7wSD!iFRg2e;{>A;Q<{KH+g_1zBR|1iT^<(7u7_D!8mh?g48k)_sfLrIfM$rT*m?2AYdT|S!1-1>QF;N{cBiz9lf*}IXl5` zlnGo6v6L{vwDuEMx&A^RK{RoI-zdPTSFW;>Q9JBtIA3XmlG{oM>*J@$#IDlD^oP6G z+Zkaf0hZ9e>>o;e>_sgru7g0015_81IEtpwpd@F|4;0mQA?#{xY#YgX@{j#J$j`n} z*7V&Gl8XpZFTZ6-QPmQaG|r~Yzg^G*HFHCVxqu@;zRqRc;{L4^0u&_)Qca5rV&k*I zl*Y`@-KB^)KuEx5hwIZpWoe#)Q8BPklWgcDJzVKvr+q;0--eJ&7Y-6 zS`?uuGVWcE_c4k7qvbWL1vf^!LrHxLOt?nvk}8^6Fr`Hw2OLt^!aFwF^9(LHq;_G6 zNF9PMOCb7zfJaJT{bdI7p)TO^w;bzO`$&q~`JsL`n)sCkba>Kmw;7{ceVo z>pg+;Geli*5*}o~nCIR_)1t0MqRaF9W8IM_$&W%Kuh6cfUo%jL0gi8j_YC+~4bjqE zss)EM;piL7FH`m)bs~yBlph!dJ2qKfM*0u|>O;{4HUa@CkQlX$AuEYM^Tf;UEnYBo zKgBPJ!3EL8Dcv+4mX!%YBZLRR%3wy} zICxV=*dWf2;#SiN*d*N(=~m~8-`SzAceN-CIB+(Ink~CJSV%}@E6MXNmxj9a3#R&> z<=SJT6O~X@3AdNj!xxT!84lzCc}NIS=rTX_(@ZFYKByIY3wZ{G;2aBDq*bS{u9Zq{ z9zS*pU>7=4dxKwP;&t1-CVmDpR}^RS`%=H6Zd5PrZjF6D>!Am=$t z?4I!L1J^HK>GMDCyvO1bR1xido4qB9j(a1npZExuQeVH#uL z`6!8yV-4>%@;U4jEz#e=&m)MyVrR)TnFi;g>8t3MQP+Noc=kWM=B}Pb#^W4F`$iN< zE`&GN@X0i@^)|5!`2-GQW_Ax9@_jMFx1Zo!juD9jxS>8vIdOVhovLPB+huC`_3#33 zVU-yn=fU5_M)6R>Jnj-T?;LZf^!js(Vg0g5-qmTQ#+6eT;to#6bkS0E(XeK^Nl2_+ zw}Ka|4?=HS&H6s}OmCR;25Mppph_(p<2A6W-1KHo%ZkB@Pj#0IOg=J5d}q_wmu!n> z(21-omJolp>H1~Rn_rFDfz1B3acma61mQqeDgiP9wJQ>L> zB`ykQvRX3uwKv#p3NRP;7Hp1y$Kuf2AZWxfd1RR$U)UazIS@(E{`ih zClCbu9?EY2C~l&w7=d&PQxjEa5vfF%j8nSEcq^2tjypIadH z>d6KRf+MAH9?VANSitj!1#;hQQ+x}JO4N*q{LoNvb2tY{R68v1DA$!Di_4{H$kZ|&e})RqzK6sVx>-2NhqQm=GL<$c8=b1ne1_sHEAi(*y9RgLMmT}z4iz=~7cyYd<83*3B0HbPSEM^571_94QN{8b`E zE6xS7^@4EX@*PO{;eXWF&HM%G&Rsq)OJJt(bHaW0KlX!`6!k`50jyI{h*vr@A9kNk z^>;99gz2hwHJX!xnZ8@l4NEt-9flY3G>jB?c6(oed}Np6?q_ 
z-P$nGsCO9k;i&JCPJXDIdUf6L945ohKDevJhcptlkUoOb(oGVMOk8QOY4>B{(P|ynDJ5ZeZhhB>VRXJy11n|^!SnEm}v9Sb9eW|Cjw>9f$ z`dtGroC|dO>~LKk2XE~O0?oJqSk&;m@}K}50WSNpP+z2aG8Dr?U>9FaushPj!?WgY ze_^NSP)+J0S-=WG8dF7q(5a#Bvq%R!ZE|Ab(F`IMN*raJ(=Yz&isZtcv)zDAkV|){1s)JK^)pKnFL~0C70EZsTpthX z&%fS1f-M*(8zV?A=C&2jE2`=@J#x97UtpcMY&I99KB1rJ+Yo7Zauz?TksCZB?~E%= zM@xj+JoM$5?E)mD^2)!vj_$z-gD>1d(?h@}d>$KxddWkhgPtz#j0MSMG7LYyu1oB8 z0K&RThjqCco$QnUbTsOFYs548297wGZi#kd5N3#mtiZf`gSg=Ny-KFqD*tJ6iH^T) zr-Ktr#||9a3tZ3F-i$bTv!+IRck4HqEa7|?^h=&Mcj6p~M)jybrfTNJF<6sD8c-jZ z)i}^OdSSz$1>&`-#E_N7};#myRw`*qRqt=EtU3-cdSjifT+q0fl1Mp$C+ z8}g0CjBNtG0q}uI%>^yhH=2PMEo1M5tUNG%@OM@jvj=T9n z872TUBB-GEA&f0PO>vc~S&A8@F;}-@WxV{y&xdV8eD@*_0Y65JK22ruHb1qk=u>Yh zLq0@hglgxu-|~a~rdR7O5`jf8W^Qz(*CGK_<^olb4N>=7P|!R$eY)WXkbq5z^JTc? zO^83R8h*Mr(p0}08^sg|Qy^18zWKU(Ss^2z-M0rq8scA8-S{DiK9CIe0cw=a?t|5H zbNL$oM{V>L;RZe9dY*<`6!QjtSD{S3I8_<^yH1E9rhq6d=npw8Gm1+M?kW&Jb^Tj` zkExm?A1$%y2JoaGFxO$zCkwOkmOs7T1Wo@4?@F1j)?&*{`#S>igAj5PU~cXkmqJy| zQ+fZeNHTGAY~>+Zs?7I5T{QxxO8EZ_P%<~#;+Sf!=8F)bOr6-?^hH9R$W8CtYWd4P zX=JbE35?2Tm3g-?E>ASeB5nAiF$c?RxvC(h!9V0`h=y4Iq#BLr&l-_?-k0;lBD)+- zPYQ1ZJ@^~&56q9Nhc-Qc>*^zSgW*Dd-z!19G4k?Pol!FUVbvyXAH#o*UaR#xP^=!a zH=I=w4EEt>D{I**z1#ydO6F+;d%%*Kn}Gig6a=GRO-bzMn)}omPRyEn z@!0|hmD4V7$H3n0ws@m$PECVz_5MH7xs;NK({4;iC8n+0v7tpnSGE)6x{QTC3m-fLsnGQ$LkLz$*-n>e?9rYs=NK@sEU~}kBTvSRB=Jq)pqz{oFbp21_W8i zP1j{4?mkf%o1De-8Q3%f~Qz+4@lHVi32lH zOmIj|EVCdj(B>bmUpq^fljA12asC|e&rzl(2VUU(4BE{#=JqwtVoaW=(~a!U4B2EF zD3KQjnQd(KkD;_9(I>DnJkvS)5D-Ty#3gGxR=a`Fb?B@;L}0jRPv($+a+U)IL$3S9 z&3_dWmeg!F6EXg@7XIf@99itHFQt?gT+4b!^##3Q_&3XRdHRQsY}TULIC6ifYh-|- z9jW;^pi||vCbinByCX-sz!u?u6(W!=lUA`3qQ_f6?ZJI}A~E1;^*r!nkImwnG z>5dusYtQza3UOF5d(Yv<9m`~7yVBuO8wCcWvI5Cp%TH`bcpQRN52|^Ew4W> z?(ha*)@nR4=^9{!c*$LwEczawv8q3T0@}@!`QhB8kIB z)3aD$nN0}9OKM&TK~|BYD+8HRdmU7x_=Rd}T1qMc6aO5!%nE9+bNRHoPBN+EO*j?0 zGQ25hHpjZSiuZ}j?=S;2+}fVQxl!lWsRTbUu0gi$Jtfk&1i!SgMfeG9ku$H?=NzX= zx6S_BnoL`^qN&O=LbmtA?W8iIp2TFXles<}hJ%B<3yQA1-pxQ_sf`b~KegKzxSZT? 
zK*}Wgqk!~czcL$^kav8^#rI1ZCiIYmwXsdHz=Ea}cR&_+6jVL-z`MhjH!VGpQV(L< zz)P%ZG6)52G-IPot~B@xAY?r?I| zC}D45UGY>3q@~~>5Cm@$jfzxk6UwEy((oZzfQaxukv60dj8Gay*rA6CbMwj_bJcB- zdliI(!9INXrTjlD`2SPbnTJE!wqbnM#!}NHM23{CBN{_yrZDz3OH@dfB&UBqG_eRMv#=R`2rt_stx~^Vc)?%slr!_Z-i4o#*egL<@>U z;KQx1qRE?yXZ7VmE~6oeB$?zq1kHN(+xmS!4Ona4n4~?g{fY3j*~*jq_jS0YRK9S} zs|(h+P8@wx#l7OYr1R^3Te>J`29i6?@^Y=uYF~F^bmZgBaBfo*lyzNL zYIf%vD8*r$c`|qI)G+x%NNvi!6z&FP*KheDCS}!1Dp*0iz%Nh5*NKqhC2UV4hV(4m zk3f*~u({*who2RGI*(pHeC7wdkz^ok7-9R&WqCx~?Os3U4bl>b^V03?ld#)*W7az) zvA629`)iAvxi62>;FM0)aHx>eG3adYqTUl3MXF^2>6j z6E2>!J@t}x>BFlnIdRLUqTC>BKBMWGCNIsKC@UIoHXHyFp4~7&-)Tv>6^0(#OcFn&S*}Q*Zk>5_j8rTJJ;tGb8`-5My>LlijN;= z4c$<_MjykYAXfW8*|o@9#oYSqy-Q@*0OA9A%?su-90t~D#aW=oA+Ev#-^z0nY3xA}ZS)~i(#=5pOI`Ou(qus-8YXks8JA5d*} z_+$`2C6hxG&coRK=D6F_fLpUs<`oaz0LZKVB)O&97$cv4D@+x=EfoZ9JrsgnB* zHj=Do`^zS(9GVAOXoa3~_v;0cC)RzgQM?GB`!|n1g!Ak_aBq3f8=L2571()GMklU5 zVs@Tp6f9<3DU0pS#H-sBg*LKIFkBnuXG=fqA)ronSXQ|wOSymHLU@09|N4Tr({*$P zW0Y8-St`jY^J#oQi8+SN_3HxotBC2iT&=g_Y!XNGg~sA6*d#|wz7GH)#CwOh z4vs!jz!c=qV8RWN9?I5B?~yB&uar~u=`|Ds5|ftA^ccgXK;Go@&`XLiEa{EBP<%dM z3-mmbL!CN(D^@^8rM1q*;JNq`7IN4W7}G31z%5vd9@8($KLtgcW;XJ%V>_FLwfh6> z*k4_Qzpl)U6^ax=sN@oE$V&MMbdY@TdJUM)g~k~V&&aZjLd!YVVuB)Sk`%>|!)drQNsW?04RiK!X8cncIBbCd3UMpFKv`alXBqGoq zJ0ZHUi+UjVk;rS4z+EfIw50|S)jFCV$2C4>2APUdrgZ4!GKTZCM=AsbD7lFr)dQ<)kO3bHb5Ifhhm{;qny%H|!lxJ#g7*+w~Q zg<)UDC{_6EtK%qbReSs=hW!n%dW--aYixq21A-M-Ccjb6M5_qMsT7MHkbo*G39ICn{eLml`tKh?hf(!J|QfBiP zEa}?6#ub$GfI#;o0N6gz8Ab@Dk;vHxs-^oV*UggW1 z)6*@Co>E8Su#ua(4MFb`Z^uhpSt8x{kMGm-d%L{_SU|wL`gz4pFA~5-gTi!X;;OZ%D9@YA&27_f8p?Z z2G?msh~}mwQl}f_W{NrY<)f<>%9%ljx3@Q4by5`-=%;T{w>PGa{1|6Xu(7~enx?W0 zVohIvW(T*y04pqIRDJs^tY-kE?FGx&z14d5P;1Z4Rn!cfLRC}`Ro9u)obVX*GdzVbc3)&JpP`L4N}2d0scs_!j2D?2oIO^XY zK?WsdRi#{^b0IyWNtY;#B&;2&O}UMxVL8oJd~bRYA7)Wl3GG z>TZtBJo8RMbJE&zzrdNaV9^fa@(eCVprAH)W7XXsz7V;)D%~LFkIdVLj3TMN4l7A= zL26&n&Szv;2^ZJD3=$3#u0`p2+BSwOgR*W82v6r&6;caN<6myy%dY$egbND|=&S0M zJNHWsft+WSG!TI7$s&Ge;ImMQL0zq{W8@+?fRKsWwLMAU5DwQoQ13t26{b)$-#IDw 
zYaU^3fjh2?+vg95W)P`6ZatFiQq!0nE(?pwtC)zwE(PnUAF#ePceWn&;71ZHv!<7?cuB#mLHpa$_n(fFS~RI{UKk4 zDO>*X;fM^al&N+fo;nHB+Lu@M^=Ze^a;VeU&%AO{?3a#?qia zI8Y-~Pu*y#uSxi11%{G#1Il;kkMfW6^t&68d!5CLcMRM4^x5L?$hZDr5e$I9{ys1z zVc!#ojH`D12=EP{e;=5E5M(4=?cD#KN|A$cO7{LI4%~bHdn%U2u!u-Gj{IjpFtmvf i<<+@poY{f@KcJNg>-d2Wlf6&~cxbEZtG!UM4*D